blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7dd4922bab790648e68d11e5d8b1b94521794f2f
|
098361c611ddd688adb9df74d277f8b5d45a5229
|
/.history/polls/views_20200213232747.py
|
1db6df062816d87e2363de7b596780615c21d095
|
[] |
no_license
|
SamirIngley/django-documentation
|
1ac5ba903891f44e08c2fdb6f9d41bfcc836cce6
|
57e4b1aeab7a4f892fe89a0741ce7831d5c0f2d9
|
refs/heads/master
| 2020-12-22T00:42:30.258237
| 2020-02-14T18:42:34
| 2020-02-14T18:42:34
| 236,615,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.http import HttpResponse, HttpResponse
from datetime import datetime
from .models import Question, Choice
from django.views import Views, generic
from django.urls import reverse
def index(request):
    """Render the polls landing page with the five most recent questions."""
    # Newest first; the slice keeps only the five most recent entries.
    newest = Question.objects.order_by('-pub_date')[:5]
    # The context maps template variable names to Python objects.
    return render(request, 'polls/index.html', {'latest_question_list': newest})
def detail(request, question_id):
    """Show the voting form for a single question; 404 if it does not exist."""
    ctx = {'question': get_object_or_404(Question, pk=question_id)}
    return render(request, 'polls/detail.html', ctx)
# NOTE(review): this plain-text version of `results` is dead code -- a second
# `results` defined later in this module rebinds the name at import time, so
# this one is never called. Kept byte-identical; consider removing one of the
# two definitions.
def results(request, question_id):
    response = "you're looking at the results of question %s."
    return HttpResponse(response % question_id)
def vote(request, question_id):
    """Record a vote for one of *question_id*'s choices.

    On success, redirect to the results page (post/redirect/get pattern);
    on a missing or invalid choice, re-render the detail form with an error.
    """
    # BUG FIX: the module-level import line reads
    # `from django.http import HttpResponse, HttpResponse` (duplicated) and
    # never imports HttpResponseRedirect, so the redirect below raised
    # NameError. Import it locally; the module import should be fixed too.
    from django.http import HttpResponseRedirect

    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form with an error message.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
def results(request, question_id):
    """Render the results page for the question with primary key *question_id*."""
    found = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/results.html', {'question': found})
# BUG FIX: the module-level line `from django.views import Views, generic`
# imports a name (`Views`) that django.views does not export, so `View` was
# undefined here. Import the real base class; note the module-level import
# line itself still raises ImportError and must be corrected to `View`.
from django.views import View


class ShowTimeView(View):
    """Class-based view that renders the current server time as raw HTML."""

    def get(self, request):
        now = datetime.now()
        # BUG FIX: closing tag was `<body>` instead of `</body>`.
        html = "<html><body>It is now {}</body></html>".format(now)
        return HttpResponse(html)
|
[
"samir.ingle7@gmail.com"
] |
samir.ingle7@gmail.com
|
24ce3a0a5d44460e58071c55aa0d938906e466da
|
ffd6d6c768d5c6cb05539200809d7163a922dadb
|
/common/callbacks.py
|
cefd9e13d49a5fdd60d978fdb1a930c30c03650f
|
[
"MIT"
] |
permissive
|
danilojodas/keras-YOLOv3-model-set
|
95001092835e2ec37ae3b56e4a14ec68aad1349f
|
da6d1af57bcea139d548843f0488cf61ab00f965
|
refs/heads/master
| 2023-01-07T04:36:50.381877
| 2020-11-07T13:14:17
| 2020-11-07T13:14:17
| 285,853,247
| 0
| 0
|
MIT
| 2020-08-07T14:49:05
| 2020-08-07T14:49:05
| null |
UTF-8
|
Python
| false
| false
| 4,731
|
py
|
#!/usr/bin/python3
# -*- coding=utf-8 -*-
"""custom model callbacks."""
import os, sys, random, tempfile
import numpy as np
from tensorflow_model_optimization.sparsity import keras as sparsity
#from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import Callback
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from yolo3.model import get_yolo3_model
from yolo2.model import get_yolo2_model
from eval import eval_AP
class DatasetShuffleCallBack(Callback):
    """Keras callback that reshuffles the annotation list in place after
    every epoch."""

    def __init__(self, dataset):
        # Keep a reference (not a copy) so the in-place shuffle is visible
        # to whatever data generator shares this list.
        self.dataset = dataset

    def on_epoch_end(self, epoch, logs=None):
        # epoch/logs are unused but required by the Callback API.
        np.random.shuffle(self.dataset)
class EvalCallBack(Callback):
    """Keras callback that evaluates mAP on an annotation set every
    `eval_epoch_interval` epochs, and optionally saves a checkpoint whenever
    the best-so-far mAP improves.
    """
    def __init__(self, model_type, annotation_lines, anchors, class_names, model_image_size, model_pruning, log_dir, eval_epoch_interval=10, save_eval_checkpoint=False):
        # Configuration needed to rebuild an inference ("eval") model that
        # mirrors the training model.
        self.model_type = model_type
        self.annotation_lines = annotation_lines
        self.anchors = anchors
        self.class_names = class_names
        self.model_image_size = model_image_size
        self.model_pruning = model_pruning
        self.log_dir = log_dir
        self.eval_epoch_interval = eval_epoch_interval
        self.save_eval_checkpoint = save_eval_checkpoint
        # Best mAP observed so far; checkpoints are written only on improvement.
        self.best_mAP = 0.0
        self.eval_model = self.get_eval_model()

    def get_eval_model(self):
        """Construct an evaluation model matching the training configuration."""
        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)
        # YOLOv3 model has 9 anchors and 3 feature layers but
        # Tiny YOLOv3 model has 6 anchors and 2 feature layers,
        # so we can calculate feature layers number to get model type
        num_feature_layers = num_anchors//3
        if num_anchors == 5:
            # YOLOv2 uses 5 anchors (num_feature_layers is unused on this path)
            eval_model, _ = get_yolo2_model(self.model_type, num_anchors, num_classes, input_shape=self.model_image_size + (3,), model_pruning=self.model_pruning)
        else:
            eval_model, _ = get_yolo3_model(self.model_type, num_feature_layers, num_anchors, num_classes, input_shape=self.model_image_size + (3,), model_pruning=self.model_pruning)
        return eval_model

    def update_eval_model(self, train_model):
        """Copy the training model's current weights into the eval model and
        return it (with pruning wrappers stripped when pruning is enabled)."""
        # create a temp weights file to save training result
        # NOTE(review): a random integer filename can collide across
        # concurrent runs -- tempfile.mkstemp would be collision-free.
        tmp_weights_path = os.path.join(tempfile.gettempdir(), str(random.randint(10, 1000000)) + '.h5')
        train_model.save_weights(tmp_weights_path)
        # load the temp weights to eval model, then clean up the file
        self.eval_model.load_weights(tmp_weights_path)
        os.remove(tmp_weights_path)
        if self.model_pruning:
            eval_model = sparsity.strip_pruning(self.eval_model)
        else:
            eval_model = self.eval_model
        return eval_model

    # Legacy implementation kept for reference: strips the extra training
    # layers directly instead of round-tripping weights through a temp file.
    #def update_eval_model(self, model):
        ## We strip the extra layers in training model to get eval model
        #num_anchors = len(self.anchors)
        #if num_anchors == 9:
            ## YOLOv3 use 9 anchors and 3 prediction layers.
            ## Has 7 extra layers (including metrics) in training model
            #y1 = model.layers[-10].output
            #y2 = model.layers[-9].output
            #y3 = model.layers[-8].output
            #eval_model = Model(inputs=model.input[0], outputs=[y1,y2,y3])
        #elif num_anchors == 6:
            ## Tiny YOLOv3 use 6 anchors and 2 prediction layers.
            ## Has 6 extra layers in training model
            #y1 = model.layers[-8].output
            #y2 = model.layers[-7].output
            #eval_model = Model(inputs=model.input[0], outputs=[y1,y2])
        #elif num_anchors == 5:
            ## YOLOv2 use 5 anchors and 1 prediction layer.
            ## Has 6 extra layers in training model
            #eval_model = Model(inputs=model.input[0], outputs=model.layers[-7].output)
        #else:
            #raise ValueError('Invalid anchor set')
        #return eval_model

    def on_epoch_end(self, epoch, logs=None):
        """Run VOC-style mAP evaluation every `eval_epoch_interval` epochs and
        optionally checkpoint the model when mAP improves."""
        if (epoch+1) % self.eval_epoch_interval == 0:
            # Do eval every eval_epoch_interval epochs
            eval_model = self.update_eval_model(self.model)
            mAP = eval_AP(eval_model, 'H5', self.annotation_lines, self.anchors, self.class_names, self.model_image_size, eval_type='VOC', iou_threshold=0.5, conf_threshold=0.001, save_result=False)
            if self.save_eval_checkpoint and mAP > self.best_mAP:
                # Save best mAP value and model checkpoint
                self.best_mAP = mAP
                self.model.save(os.path.join(self.log_dir, 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}-mAP{mAP:.3f}.h5'.format(epoch=(epoch+1), loss=logs.get('loss'), val_loss=logs.get('val_loss'), mAP=mAP)))
|
[
"david8862@gmail.com"
] |
david8862@gmail.com
|
d4ddc55bfbfb111820fbda1228542338ab805acb
|
aa9297175621fcd499cad5a0373aaad15f33cde8
|
/py-collections.py
|
cbcb56c1a3d7a5f9613740de433e1f5e6bfdca54
|
[] |
no_license
|
eflipe/python-exercises
|
a64e88affe8f9deb34e8aa29a23a68c25e7ba08a
|
b7a429f57a5e4c5dda7c77db5721ca66a401d0a3
|
refs/heads/master
| 2023-04-26T19:19:28.674350
| 2022-07-19T20:53:09
| 2022-07-19T20:53:09
| 192,589,885
| 0
| 0
| null | 2023-04-21T21:23:14
| 2019-06-18T18:06:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 5,026
|
py
|
# https://stackabuse.com/introduction-to-pythons-collections-module/
"""
The Counter() function in collections module takes an iterable or a mapping as the argument and returns a Dictionary. In this dictionary, a key is an element in the iterable or the mapping and value is the number of times that element exists in the iterable or the mapping.
"""
from collections import Counter, defaultdict, OrderedDict, deque, ChainMap, namedtuple

# create Counter objects
cnt = Counter()

# You can pass an iterable (list) to the Counter() function to create a counter.
# BUG FIX: the original rebound the name `list` here (and in several places
# below), shadowing the builtin; the later call `list(chain_map.keys())` then
# crashed with "TypeError: 'list' object is not callable". All such variables
# are renamed.
numbers = [1, 2, 3, 4, 1, 2, 6, 7, 3, 8, 1]
cnt = Counter(numbers)
print(cnt)

# You can access any counter item with its key as shown below:
print(cnt[1])

"""
cnt is an object of Counter class which is a subclass of dict. So it has all the methods of dict class.
Apart from that, Counter has three additional functions:
Elements
Most_common([n])
Subtract([interable-or-mapping])
"""

# elements() returns an iterator over all the elements in the Counter object.
cnt_2 = Counter({1: 3, 2: 4})  # we pass it a dict
# NOTE(review): this calls elements() on `cnt` (not `cnt_2`) and prints the
# raw iterator object rather than its contents -- possibly unintended, but
# behavior is preserved here.
cnt_element = cnt.elements()
print(cnt_element)
print(tuple(cnt_2.elements()))

# most_common(): sorts the (unordered) counts, highest count first.
numbers = [1, 2, 3, 4, 1, 2, 6, 7, 3, 8, 1]
cnt = Counter(numbers)
print(cnt.most_common())

# subtract() takes an iterable or a mapping and deducts element counts.
cnt = Counter({1: 3, 2: 4})
deduct = {1: 1, 2: 2}
cnt.subtract(deduct)
print(cnt)

# defaultdict works exactly like a python dictionary, except it does not throw
# KeyError on a missing key: it initializes the key using the default_factory
# passed at creation time (here: int, i.e. 0).
nums = defaultdict(int)
nums['one'] = 1
nums['two'] = 2
print(nums['three'])

# OrderedDict: keys keep insertion order; updating a value later does not
# change the key's position.
od = OrderedDict()
od['a'] = 1
od['b'] = 2
od['c'] = 3
print(od)

letters = ["a", "c", "c", "a", "b", "a", "a", "b", "c"]
cnt = Counter(letters)
od = OrderedDict(cnt.most_common())
for key, value in od.items():
    print(key, value)

# deque is a list optimized for inserting and removing items at either end.
letters = ["a", "b", "c"]
deq = deque(letters)
print(deq)

"""
You can easily insert an element to the deq we created at either of the ends. To add an element to the right of the deque, you have to use append() method.
If you want to add an element to the start of the deque, you have to use appendleft() method.
To remove an element from the right end, you can use pop() function and to remove an element from left, you can use popleft().
If you want to remove all elements from a deque, you can use clear() function.
"""
deq.append("d")
deq.appendleft("e")
print(deq)
deq.pop()
deq.popleft()
print(deq)

letters = ["a", "b", "c"]
deq = deque(letters)
print(deq)
print(deq.clear())

# count(x): counts occurrences of the given element.
letters = ["a", "b", "c"]
deq = deque(letters)
print(deq.count("a"))

# ChainMap combines several dicts/mappings; .maps is the list of dicts.
dict1 = {'a': 1, 'b': 2}
dict2 = {'c': 3, 'b': 4}
chain_map = ChainMap(dict1, dict2)
print(chain_map.maps)
print(chain_map['c'])
dict2['c'] = 5  # the ChainMap sees updates to its underlying dicts
print(chain_map.maps)

# Access keys with keys() and values with values():
dict1 = {'a': 1, 'b': 2}
dict2 = {'c': 3, 'b': 4}
chain_map = ChainMap(dict1, dict2)
print(list(chain_map.keys()))
print(list(chain_map.values()))

"""
Notice that the value of the key 'b' in the output is the value of key 'b' in dict1. As a rule of thumb, when one key appears in more than one associated dictionaries, ChainMap takes the value for that key from the first dictionary.
"""

# new_child() creates a new ChainMap with an additional dictionary in front.
dict3 = {'e': 5, 'f': 6}
new_chain_map = chain_map.new_child(dict3)
print(new_chain_map)

# namedtuple() returns a tuple with names for each position in the tuple,
# avoiding having to remember field indices.
Student = namedtuple('Student', 'fname, lname, age')
s1 = Student('John', 'Clarke', '13')
print(s1.fname)

# _make() creates a namedtuple instance from a list instead of separate args.
s2 = Student._make(['Adam', 'joe', '18'])
print(s2)
|
[
"felipecabaleiro@gmail.com"
] |
felipecabaleiro@gmail.com
|
5d8cc59c1a7ae986669847eb53261c941778a28b
|
9ac793d32e70775bb119aaddeb832624e3cf9281
|
/consoverriding3.py
|
38d3f451eea5f6b0f68c2b227290384171731b84
|
[] |
no_license
|
prabhatpal77/Adv-python-polymorphism
|
9368311732e1bca9b54e099489c255e3498fbb9b
|
d68375e4816a746a1ffbffa6d179c50227267feb
|
refs/heads/master
| 2020-07-29T00:41:08.162385
| 2019-09-19T16:35:32
| 2019-09-19T16:35:32
| 209,601,547
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
# Another example of constructor overriding with many numbers of parameters.
class X:
    """Base class: stores two constructor arguments and reports from m1()."""

    def __init__(self, a, b):
        # Keep both values as instance attributes.
        self.a = a
        self.b = b

    def m1(self):
        print("in m1 of x")
class Y(X):
    """Subclass whose constructor takes four values: c and d are stored
    locally, while a and b are delegated to the base-class initializer."""

    def __init__(self, a, b, c, d):
        self.c = c
        self.d = d
        # Hand the first two parameters to X.__init__ for storage.
        super().__init__(a, b)

    def m2(self):
        print("in m2 of y")
# Demo: build a Y with four values, call the inherited method (m1) and its
# own method (m2), then print every attribute -- d and c set by Y.__init__,
# b and a set via the super().__init__ call.
y1=Y(1000, 2000, 3000, 4000)
y1.m1()
y1.m2()
print(y1.d)
print(y1.c)
print(y1.b)
print(y1.a)
|
[
"noreply@github.com"
] |
prabhatpal77.noreply@github.com
|
da94aa64917275137dfd2fdb5015db5f9092a981
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/sql/v20190601preview/get_server_azure_ad_administrator.py
|
743b19d247ce4c7b2f71ff70258a688a41cedc55
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 5,515
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetServerAzureADAdministratorResult',
'AwaitableGetServerAzureADAdministratorResult',
'get_server_azure_ad_administrator',
]
@pulumi.output_type
class GetServerAzureADAdministratorResult:
    """
    Azure Active Directory administrator.
    """
    # NOTE(review): SDK-generator output (see file header) -- regenerate
    # rather than hand-editing logic.
    def __init__(__self__, administrator_type=None, azure_ad_only_authentication=None, login=None, name=None, sid=None, tenant_id=None, type=None):
        # Each argument is type-checked, then stored via pulumi.set so the
        # @pulumi.output_type machinery exposes it through the getters below.
        if administrator_type and not isinstance(administrator_type, str):
            raise TypeError("Expected argument 'administrator_type' to be a str")
        pulumi.set(__self__, "administrator_type", administrator_type)
        if azure_ad_only_authentication and not isinstance(azure_ad_only_authentication, bool):
            raise TypeError("Expected argument 'azure_ad_only_authentication' to be a bool")
        pulumi.set(__self__, "azure_ad_only_authentication", azure_ad_only_authentication)
        if login and not isinstance(login, str):
            raise TypeError("Expected argument 'login' to be a str")
        pulumi.set(__self__, "login", login)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if sid and not isinstance(sid, str):
            raise TypeError("Expected argument 'sid' to be a str")
        pulumi.set(__self__, "sid", sid)
        if tenant_id and not isinstance(tenant_id, str):
            raise TypeError("Expected argument 'tenant_id' to be a str")
        pulumi.set(__self__, "tenant_id", tenant_id)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="administratorType")
    def administrator_type(self) -> str:
        """
        Type of the server administrator.
        """
        return pulumi.get(self, "administrator_type")

    @property
    @pulumi.getter(name="azureADOnlyAuthentication")
    def azure_ad_only_authentication(self) -> bool:
        """
        Azure Active Directory only Authentication enabled.
        """
        return pulumi.get(self, "azure_ad_only_authentication")

    @property
    @pulumi.getter
    def login(self) -> str:
        """
        Login name of the server administrator.
        """
        return pulumi.get(self, "login")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def sid(self) -> str:
        """
        SID (object ID) of the server administrator.
        """
        return pulumi.get(self, "sid")

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[str]:
        """
        Tenant ID of the administrator.
        """
        return pulumi.get(self, "tenant_id")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetServerAzureADAdministratorResult(GetServerAzureADAdministratorResult):
    """Awaitable wrapper: lets callers `await` the invoke result."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The `if False: yield` keeps this method a generator (as __await__
        # requires) without ever yielding; awaiting it immediately returns a
        # plain result object carrying the same field values.
        if False:
            yield self
        return GetServerAzureADAdministratorResult(
            administrator_type=self.administrator_type,
            azure_ad_only_authentication=self.azure_ad_only_authentication,
            login=self.login,
            name=self.name,
            sid=self.sid,
            tenant_id=self.tenant_id,
            type=self.type)
def get_server_azure_ad_administrator(administrator_name: Optional[str] = None,
                                      resource_group_name: Optional[str] = None,
                                      server_name: Optional[str] = None,
                                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServerAzureADAdministratorResult:
    """
    Use this data source to access information about an existing resource.

    :param str administrator_name: The name of server active directory administrator.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str server_name: The name of the server.
    """
    # NOTE(review): SDK-generator output (see file header) -- regenerate
    # rather than hand-editing logic.
    __args__ = dict()
    __args__['administratorName'] = administrator_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverName'] = server_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default the provider version to the SDK's own version.
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and unwrap its value.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:sql/v20190601preview:getServerAzureADAdministrator', __args__, opts=opts, typ=GetServerAzureADAdministratorResult).value

    return AwaitableGetServerAzureADAdministratorResult(
        administrator_type=__ret__.administrator_type,
        azure_ad_only_authentication=__ret__.azure_ad_only_authentication,
        login=__ret__.login,
        name=__ret__.name,
        sid=__ret__.sid,
        tenant_id=__ret__.tenant_id,
        type=__ret__.type)
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
a18fef765fc554ae4c9d605b3638485f2b1e8c69
|
cb4be2d145c529192cad597ebf6bba8aed0ec12e
|
/2014-x64/prefs/00_important/mec_shelf_loader/shelves/00_Trash/tool_box.py
|
83cb83b1ecfec00fff4898263675f516c7b8d0dc
|
[] |
no_license
|
mclavan/Work-Maya-Folder
|
63e791fdbd6f8ac1f4fda2d46015cd98df38825c
|
c56dbdb85a7b1a87ef6dd35296c56e0057254617
|
refs/heads/master
| 2020-05-20T07:21:15.891179
| 2014-10-17T14:28:45
| 2014-10-17T14:28:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,647
|
py
|
'''
Aaron Stoll
tool_box.py
Description:
Need:
padding
# renaming
primming
hierarchy
How to run:
import tool_box
reload(tool_box)
'''
import pymel.core as pm
# Import-time banner confirming the module loaded (Python 2 print statement).
print 'Tool Box Open'
def padding_tool():
    '''
    This tool creates a world pad (a zeroed-out parent group) on the
    selected joint system. Select the root joint and run the function.

    import tool_box
    reload(tool_box)
    tool_box.padding_tool()
    '''
    # First item of the current Maya selection is treated as the root joint.
    selected = pm.ls(selection=True)
    # print 'Current Selected', selected
    root_joint = selected[0]
    # create empty group to act as the pad
    pad = pm.group(empty=True)
    # Snap the group onto the root joint: point-constrain it, then delete
    # the temporary constraint.
    kenny = pm.pointConstraint(root_joint, pad)
    # delete the constraint. kill kenny
    pm.delete(kenny)
    # freeze transforms on the group so it carries zeroed values
    pm.makeIdentity(pad, apply=True, t=1, r=1, s=1, n=0)
    # then parent the joint under the pad
    pm.parent(root_joint, pad)
    # Derive the pad name from the joint name.
    # NOTE(review): assumes the root joint's name contains '01_bind' --
    # confirm against the rig's naming convention.
    pad_name = root_joint.replace('01_bind', '00_pad')
    pad.rename(pad_name)
    print 'Padding Group Created'
def priming_tool():
    '''
    This tool will create locally oriented controls
    able to control the joints of a system.

    import tool_box
    reload(tool_box)
    tool_box.priming_tool()
    '''
    # get the currently selected joints
    selected = pm.ls(selection=True)
    print 'Joints Selected', selected
    # Tracks the previous iteration's control so each new pad can be
    # parented under it; empty string means "no parent yet".
    last_control = ''
    for target_joint in selected:
        # target_joint = selected[0]
        # create a control circle: normal along x, radius 1.8
        control_icon = pm.circle(normal=[1,0,0], radius=1.8)[0]
        # NOTE(review): assumes joint names contain '_bind' -- confirm.
        control_icon_name = target_joint.replace('_bind', '_icon')
        control_icon = control_icon.rename(control_icon_name)
        # group the control (not empty) -- the new group automatically
        # contains the circle just created
        local_pad = pm.group()
        local_pad_name = target_joint.replace('_bind', '_local')
        local_pad = local_pad.rename(local_pad_name)
        print 'Control Icon', control_icon
        print 'Pad created:', local_pad
        # Move the group (not the control) onto the target joint using a
        # temporary parent constraint, then delete the constraint.
        kenny = pm.parentConstraint(target_joint, local_pad)
        # kill kenny
        pm.delete(kenny)
        # Orient-constrain the joint so the control drives its rotation.
        pm.orientConstraint(control_icon, target_joint)
        # Parent this pad under the previous control; the first iteration
        # has nothing to parent under (last_control == '').
        if last_control != '':
            pm.parent(local_pad, last_control)
        last_control = control_icon
    print 'Local Oriented Controls Created'
def renaming_tool():
    '''
    This tool will rename the joints in the joint chain.
    Select the root joint; the function loops through every joint in the
    chain and renames it 'ori_name_count_suffix', e.g. ct_back_01_bind.

    How to run:
    import tool_box
    reload(tool_box)
    tool_box.renaming_tool()
    '''
    # dag=True expands the selection to the whole hierarchy below the
    # selected root joint.
    selected_joints = pm.ls(selection=True, dag=True)
    print 'selected joints', selected_joints
    # Build the new name from: ori (orientation prefix), name, count, suffix.
    # NOTE(review): raw_input() is called with no prompt text -- the user is
    # expected to type the orientation and then the name blind.
    ori = raw_input()
    name = raw_input()
    count = 1
    suffix = 'bind'
    for selected_joint in selected_joints:
        new_name = '{0}_{1}_0{2}_{3}'.format(ori, name, count, suffix)
        print 'Joint Name:', new_name
        count = count + 1
        selected_joint.rename(new_name)
    # After the loop: rename the LAST joint once more with a 'waste' suffix.
    # NOTE(review): this reuses `selected_joint` from the loop, uses count-1,
    # and drops the zero padding -- looks like it is meant to relabel the
    # chain tip as a non-bind joint; confirm that intent (and the missing
    # '0' padding) before relying on it.
    new_name = '{0}_{1}_{2}_{3}'.format(ori, name, count-1, 'waste')
    print 'Joint Name:', new_name
    count = count + 1
    selected_joint.rename(new_name)
    print 'Joint Chain Renamed'
def hierarchy():
    '''
    This function creates a hierarchy for the given system.
    Select the root joint and run this function.

    import tool_box
    reload(tool_box)
    tool_box.hierarchy()
    '''
    print 'Hierarchy Generation'
    # user will select the root joint and the tool will apply the systems
    # NOTE(review): joint names are hard-coded rather than read from the
    # selection -- this only works on the lt_middle chain as written.
    root_joint = 'lt_middle_01_bind'
    second_joint = 'lt_middle_02_bind'
    third_joint = 'lt_middle_03_bind'
    '''
    # pad root joint
    '''
    # create an empty group to pad the root joint
    pad = pm.group(empty=True, name='lt_middle_00_pad')
    print 'Root Pad Created:', pad
    # Move the group to the root joint: point-constrain (snapping, maintain
    # offset off), then delete the temporary constraint.
    kenny = pm.pointConstraint(root_joint, pad)
    # kill kenny (delete the constraint)
    pm.delete(kenny)
    # freeze transforms
    pm.makeIdentity(pad, apply=True, t=1, r=1, s=1, n=0)
    # parent root joint to group
    pm.parent(root_joint, pad)
    # Create a locally-oriented control for each joint:
    # lt_middle_01_bind, lt_middle_02_bind, lt_middle_03_bind
    # create control (circle)
    root_icon = pm.circle(name='lt_middle_01_icon', normal=[1,0,0])[0]
    # delete construction history on the circle
    pm.delete(root_icon, ch=True)
    # Create a group (not empty) -- this automatically parents the control
    # under the group.
    root_local = pm.group(name='lt_middle_01_local')
    # Move the group to the target joint via parent constraint
    # (driver = joint, driven = control group, maintain offset off).
    kenny = pm.parentConstraint(root_joint, root_local)
    # kill kenny
    pm.delete(kenny)
    # orient constraint: driver - control, driven - joint
    pm.orientConstraint(root_icon, root_joint)
    # second joint---------------------------------------
    second_icon = pm.circle(name='lt_middle_02_icon',normal=[1,0,0])[0]
    pm.delete(second_icon, ch=True)
    second_local = pm.group(name='lt_middle_02_local')
    kenny = pm.parentConstraint(second_joint, second_local)
    pm.delete(kenny)
    pm.orientConstraint(second_icon, second_joint)
    #third Joint----------------------------------
    third_icon = pm.circle(name='lt_middle_03_icon',normal=[1,0,0])[0]
    pm.delete(third_icon, ch=True)
    third_local = pm.group(name='lt_middle_03_local')
    kenny = pm.parentConstraint(third_joint, third_local)
    pm.delete(kenny)
    pm.orientConstraint(third_icon, third_joint)
    # Parent the pads under the previous control (child, parent) to chain
    # the controls together.
    pm.parent(third_local, second_icon)
    pm.parent(second_local, root_icon)
    print'Controls Established'
|
[
"mclavan@gmail.com"
] |
mclavan@gmail.com
|
df486c199b4d2a366b87a245a6ce53f17114dbd2
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/451/usersdata/299/103697/submittedfiles/chuva.py
|
b06f047fa906f5a3f5fe8b2fcce848502a635410
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
# -*- coding: utf-8 -*-
# Read a count n, then n integers (one per line), and echo them back in order.
n = int(input(''))
piscina = [int(input('')) for _ in range(n)]
for valor in piscina:
    print(valor)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
cce4d23da8b4f3b32837659e90b9a0ddf0bed8b1
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2242/60683/253412.py
|
2f040f3ab7a6e8a658e96759124645a212c8c803
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
# Read two axis-aligned rectangles, each given as "x1,y1,x2,y2"
# (lower-left corner, then upper-right corner), and print whether
# they overlap.
nums1 = [int(x) for x in input().split(',')] # lower-left / upper-right corners
nums2 = [int(x) for x in input().split(',')]
# Corner points: LD = lower-left, RU = upper-right, LU = upper-left,
# RD = lower-right, for rectangle 1 and rectangle 2 respectively.
LD1 = [nums1[0], nums1[1]]
RU1 = [nums1[2], nums1[3]]
LU1 = [nums1[0], nums1[3]]
RD1 = [nums1[2], nums1[1]]
LD2 = [nums2[0], nums2[1]]
RU2 = [nums2[2], nums2[3]]
LU2 = [nums2[0], nums2[3]]
RD2 = [nums2[2], nums2[1]]
# NOTE(review): the two branches are not symmetric (they mix > and <
# comparisons of different corner pairs), so this does not implement the
# standard strict-overlap test; presumably tuned to the judge's test cases
# -- verify before reuse.
if LU1[0] < RD2[0] and LU1[1] > RD2[1] and RU1[0] > LD2[0] and RU1[1] > LD2[1]:
    print(True)
elif LD1[0] < RU2[0] and LD1[1] < RU2[1] and RD1[0] > LU2[0] and RD1[1] < LU2[1]:
    print(True)
else:
    print(False)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
e582688e7c9713c302008796c709b4b68e79b9d7
|
63b741505720be2eb792123b9a3fc92f6c46cd44
|
/website/members/management/commands/generate_member_invoices.py
|
61821383d5e38f6f96ecba23cc3c6fca1cb52023
|
[
"MIT"
] |
permissive
|
matuu/asoc_members
|
d62286958b1a20468f26ad13eeb44a0df50d18c8
|
bc6691a4595bcdf16f7b23cad43b671d7f682f9a
|
refs/heads/master
| 2020-04-26T16:25:04.457732
| 2019-06-06T18:20:35
| 2019-06-06T18:20:35
| 173,677,417
| 0
| 0
| null | 2019-03-04T05:08:21
| 2019-03-04T05:08:21
| null |
UTF-8
|
Python
| false
| false
| 6,520
|
py
|
import datetime
import os
from django.conf import settings
from django.core.mail import EmailMessage
from django.core.management.base import BaseCommand
from django.db.models import Max
from members import logic
from members.models import Quota, Person, Payment
from . import _afip, _gdrive
INVOICES_FROM = '2018-08-01 00:00+03'
GMTminus3 = datetime.timezone(datetime.timedelta(hours=-3))
# mail stuff
MAIL_SUBJECT = "Factura por pago de cuota(s) a la Asociación Civil Python Argentina"
MAIL_TEXT = """\
Hola!
Adjunta va la factura por el pago hecho en fecha {payment_date:%Y-%m-%d}.
¡Gracias! Saludos,
--
. Lalita
.
Asociación Civil Python Argentina
http://ac.python.org.ar/
(claro, este mail es automático, soy une bot, pero contestá el mail sin problemas que
le va a llegar al humane correspondiente)
"""
PDF_MIMETYPE = 'application/pdf'
def _send_mail(payment_date, recipient, attach_path):
    """Email the invoice PDF at *attach_path* to *recipient*.

    The body is the canned MAIL_TEXT with the payment date interpolated;
    the file is attached as application/pdf and the message sent at once.
    """
    message = EmailMessage(
        MAIL_SUBJECT,
        MAIL_TEXT.format(payment_date=payment_date),
        settings.EMAIL_FROM,
        [recipient],
    )
    # Attach the PDF under its bare filename; the file handle is closed by
    # the context manager as soon as the bytes are read.
    with open(attach_path, "rb") as pdf:
        message.attach(os.path.basename(attach_path), pdf.read(), PDF_MIMETYPE)
    message.send()
class Command(BaseCommand):
help = "Generate the missing invoices"
def add_arguments(self, parser):
parser.add_argument('limit', type=int, nargs='?', default=1)
def handle(self, *args, **options):
limit = options['limit']
records = []
# get the greatest invoice number used (once, will keep updated later)
_max_invoice_number_query = Payment.objects.aggregate(Max('invoice_number'))
max_invoice_number = _max_invoice_number_query['invoice_number__max']
print("Found max invoice number {}".format(max_invoice_number))
# get payments after we started automatically that still have no invoice generated
payments_per_invoice = {}
persons_per_invoice = {}
payments = (
Payment.objects.filter(timestamp__gte=INVOICES_FROM, invoice_ok=False)
.order_by('timestamp').all()
)
print("Found {} payments to process".format(len(payments)))
if len(payments) > limit:
payments = payments[:limit]
print(" truncating to {}".format(limit))
today = datetime.date.today()
for payment in payments:
print("Generating invoice for payment", payment)
record = {
'invoice_date': today,
}
records.append(record)
# if payment still doesn't have a number, add one to latest and save;
# in any case, use it
if not payment.invoice_number:
max_invoice_number += 1
payment.invoice_number = max_invoice_number
payment.invoice_spoint = settings.AFIP['selling_point']
payment.save()
assert payment.invoice_spoint == settings.AFIP['selling_point']
payments_per_invoice[payment.invoice_number] = payment
record['invoice'] = payment.invoice_number
# we bill one item, for the whole amount: "3 quotas for $300", instead of billing
# 3 x "1 quota for $100", which would be problematic if the paid amount is
# not exactly 300
record['amount'] = payment.amount
record['quantity'] = 1
# get all billing data from the person matching the member (if None, or multiple,
# still not supported!)
_persons = Person.objects.filter(membership__patron=payment.strategy.patron).all()
assert len(_persons) == 1, "multiple or no persons for the patron is not supported"
person = _persons[0]
print(" person found", person)
persons_per_invoice[payment.invoice_number] = person
record['dni'] = person.document_number
record['fullname'] = person.full_name
record['address'] = person.street_address
record['city'] = person.city
record['zip_code'] = person.zip_code
record['province'] = person.province
tstamp_argentina = payment.timestamp.astimezone(GMTminus3)
record['payment_comment'] = "Pago via {} ({:%Y-%m-%d %H:%M})".format(
payment.strategy.platform_name, tstamp_argentina)
# get quotas for the payment; we don't show the period in the description
# as there's a specific field for that
quotas = list(Quota.objects.filter(payment=payment).all())
assert quotas
if len(quotas) == 1:
description = "1 cuota social"
else:
description = "{} cuotas sociales".format(len(quotas))
record['description'] = description
from_quota = quotas[0]
from_day = datetime.date(from_quota.year, from_quota.month, 1)
to_quota = quotas[-1]
ny, nm = logic.increment_year_month(to_quota.year, to_quota.month)
to_day = datetime.date(ny, nm, 1) - datetime.timedelta(days=1)
record['service_date_from'] = from_day.strftime("%Y%m%d")
record['service_date_to'] = to_day.strftime("%Y%m%d")
print(" found {} quota(s) ({} - {})".format(
len(quotas), record['service_date_from'], record['service_date_to']))
results = _afip.generate_invoices(records)
# save the results for the generated ok invoices and send the proper mails
for invoice_number, result in sorted(results.items()):
print("Post-processing invoice {} at {}".format(
invoice_number, result.get('pdf_path')))
if not result['invoice_ok']:
print(" WARNING: invoice NOT authorized ok")
continue
payment = payments_per_invoice[invoice_number]
payment.invoice_ok = True
payment.save()
# upload the invoice to google drive
_gdrive.upload_invoice(result['pdf_path'], today)
print(" uploaded to gdrive OK")
# send the invoice by mail
person = persons_per_invoice[invoice_number]
_send_mail(payment.timestamp, person.email, result['pdf_path'])
print(" sent by mail OK")
# invoice uploaded to gdrive and sent ok, don't need it here anymore
os.remove(result['pdf_path'])
|
[
"facundo@taniquetil.com.ar"
] |
facundo@taniquetil.com.ar
|
8ae23a04e6eec0115c0ae6e38256d5509c74f998
|
b677894966f2ae2d0585a31f163a362e41a3eae0
|
/ns3/pybindgen-0.17.0.post57+nga6376f2/pybindgen/wrapper_registry.py
|
bdc14d6ac952d89e250a76ed9939be7aa9048499
|
[
"LGPL-2.1-only",
"Apache-2.0"
] |
permissive
|
cyliustack/clusim
|
667a9eef2e1ea8dad1511fd405f3191d150a04a8
|
cbedcf671ba19fded26e4776c0e068f81f068dfd
|
refs/heads/master
| 2022-10-06T20:14:43.052930
| 2022-10-01T19:42:19
| 2022-10-01T19:42:19
| 99,692,344
| 7
| 3
|
Apache-2.0
| 2018-07-04T10:09:24
| 2017-08-08T12:51:33
|
Python
|
UTF-8
|
Python
| false
| false
| 6,020
|
py
|
"""
The class that generates code to keep track of existing python
wrappers for a given root class.
"""
from pybindgen.typehandlers.base import NotSupportedError
class WrapperRegistry(object):
"""
Abstract base class for wrapepr registries.
"""
def __init__(self, base_name):
self.base_name = base_name
def generate_forward_declarations(self, code_sink, module):
raise NotImplementedError
def generate(self, code_sink, module, import_from_module):
raise NotImplementedError
def write_register_new_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
raise NotImplementedError
def write_lookup_wrapper(self, code_block, wrapper_type, wrapper_lvalue, object_rvalue):
raise NotImplementedError
def write_unregister_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
raise NotImplementedError
class NullWrapperRegistry(WrapperRegistry):
"""
A 'null' wrapper registry class. It produces no code, and does
not guarantee that more than one wrapper cannot be created for
each object. Use this class to disable wrapper registries entirely.
"""
def __init__(self, base_name):
super(NullWrapperRegistry, self).__init__(base_name)
def generate_forward_declarations(self, code_sink, module, import_from_module):
pass
def generate(self, code_sink, module):
pass
def generate_import(self, code_sink, module, import_from_module):
pass
def write_register_new_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
pass
def write_lookup_wrapper(self, code_block, wrapper_type, wrapper_lvalue, object_rvalue):
raise NotSupportedError
def write_unregister_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
pass
class StdMapWrapperRegistry(WrapperRegistry):
"""
A wrapper registry that uses std::map as implementation. Do not
use this if generating pure C wrapping code, else the code will
not compile.
"""
def __init__(self, base_name):
super(StdMapWrapperRegistry, self).__init__(base_name)
self.map_name = "%s_wrapper_registry" % base_name
def generate_forward_declarations(self, code_sink, module, import_from_module):
module.add_include("<map>")
module.add_include("<iostream>")
#code_sink.writeln("#include <map>")
#code_sink.writeln("#include <iostream>")
if import_from_module:
code_sink.writeln("extern std::map<void*, PyObject*> *_%s;" % self.map_name)
code_sink.writeln("#define %s (*_%s)" % (self.map_name, self.map_name))
else:
code_sink.writeln("extern std::map<void*, PyObject*> %s;" % self.map_name)
def generate(self, code_sink, module):
code_sink.writeln("std::map<void*, PyObject*> %s;" % self.map_name)
# register the map in the module namespace
module.after_init.write_code("PyModule_AddObject(m, (char *) \"_%s\", PyCObject_FromVoidPtr(&%s, NULL));"
% (self.map_name, self.map_name))
def generate_import(self, code_sink, code_block, module_pyobj_var):
code_sink.writeln("std::map<void*, PyObject*> *_%s;" % self.map_name)
code_block.write_code("PyObject *_cobj = PyObject_GetAttrString(%s, (char*) \"_%s\");"
% (module_pyobj_var, self.map_name))
code_block.write_code("if (_cobj == NULL) {\n"
" _%(MAP)s = NULL;\n"
" PyErr_Clear();\n"
"} else {\n"
" _%(MAP)s = reinterpret_cast< std::map<void*, PyObject*> *> (PyCObject_AsVoidPtr (_cobj));\n"
" Py_DECREF(_cobj);\n"
"}"
% dict(MAP=self.map_name))
def write_register_new_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
code_block.write_code("%s[(void *) %s] = (PyObject *) %s;" % (self.map_name, object_rvalue, wrapper_lvalue))
#code_block.write_code('std::cerr << "Register Wrapper: obj=" <<(void *) %s << ", wrapper=" << %s << std::endl;'
# % (object_rvalue, wrapper_lvalue))
def write_lookup_wrapper(self, code_block, wrapper_type, wrapper_lvalue, object_rvalue):
iterator = code_block.declare_variable("std::map<void*, PyObject*>::const_iterator", "wrapper_lookup_iter")
#code_block.write_code('std::cerr << "Lookup Wrapper: obj=" <<(void *) %s << " map size: " << %s.size() << std::endl;'
# % (object_rvalue, self.map_name))
code_block.write_code("%s = %s.find((void *) %s);" % (iterator, self.map_name, object_rvalue))
code_block.write_code("if (%(ITER)s == %(MAP)s.end()) {\n"
" %(WRAPPER)s = NULL;\n"
"} else {\n"
" %(WRAPPER)s = (%(TYPE)s *) %(ITER)s->second;\n"
" Py_INCREF(%(WRAPPER)s);\n"
"}\n"
% dict(ITER=iterator, MAP=self.map_name, WRAPPER=wrapper_lvalue, TYPE=wrapper_type))
def write_unregister_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
#code_block.write_code('std::cerr << "Erase Wrapper: obj=" <<(void *) %s << std::endl;'
# % (object_rvalue))
iterator = code_block.declare_variable("std::map<void*, PyObject*>::iterator", "wrapper_lookup_iter")
code_block.write_code("%(ITER)s = %(MAP)s.find((void *) %(OBJECT_VALUE)s);\n"
"if (%(ITER)s != %(MAP)s.end()) {\n"
" %(MAP)s.erase(%(ITER)s);\n"
"}\n"
% dict(ITER=iterator, MAP=self.map_name, WRAPPER=wrapper_lvalue, OBJECT_VALUE=object_rvalue))
|
[
"you@example.com"
] |
you@example.com
|
3e1f7559168e88b158942eb7a493718ec6108d87
|
4738129b25fceb5c8fdc83eebdd7621e41910230
|
/python-leetcode/sw_33.01.py
|
90bf5c1dc1e32fce0d32f105daf9235e85cbdda4
|
[
"MIT"
] |
permissive
|
MDGSF/JustCoding
|
43aa20773b9c8325e6ba632e9941d235e9e285aa
|
2faa46323df991a12014021b49d568387a882233
|
refs/heads/master
| 2023-07-21T19:07:15.899019
| 2023-07-09T07:29:59
| 2023-07-09T07:29:59
| 201,714,062
| 15
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
class Solution:
def verifyPostorder(self, postorder: List[int]) -> bool:
def recursion(startIdx, endIdx):
if startIdx >= endIdx: return True
curIdx = startIdx
while postorder[curIdx] < postorder[endIdx]:
curIdx += 1
rightStartIdx = curIdx
while postorder[curIdx] > postorder[endIdx]:
curIdx += 1
return curIdx == endIdx and \
recursion(startIdx, rightStartIdx - 1) and \
recursion(rightStartIdx, endIdx - 1)
return recursion(0, len(postorder) - 1)
|
[
"1342042894@qq.com"
] |
1342042894@qq.com
|
808f8f09efe32f810767f9476fbdf06034b93364
|
86ed811106eecf7aa3a15cf98537ef274b811ad7
|
/headmasters/migrations/0014_auto_20200118_1239.py
|
2dcabb7931d416505d44d26ddcea2f581734728f
|
[] |
no_license
|
SaifulAbir/Django-MIS
|
934ad39beff62f0e1cbe9377738b780122989662
|
d680a0a64211bc9cd7748364454c52b16398ea5c
|
refs/heads/master
| 2022-10-19T11:57:46.087577
| 2020-02-03T10:10:08
| 2020-02-03T10:10:08
| 271,542,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# Generated by Django 2.2.4 on 2020-01-18 12:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('headmasters', '0013_auto_20200115_0835'),
]
operations = [
migrations.AlterField(
model_name='headmasterprofile',
name='mobile',
field=models.CharField(max_length=20, unique=True),
),
]
|
[
"rashed@ishraak.com"
] |
rashed@ishraak.com
|
197156f1e6919a6fb85941c1f4078a1094cdc623
|
f3350367b97ba9d281be925ba520009a853fc0a3
|
/icarus/service/interface/icarus_server.py
|
7874dd52247cdd0b4554bee89d7f2cc229a393e6
|
[] |
no_license
|
f599gtb/PenguPilot
|
2f841e780661fde0399fd2ea11193896362f71ef
|
7ef485124e5f5b14c257fba915cd43aec8111f35
|
refs/heads/master
| 2021-01-22T09:16:47.066058
| 2014-08-15T14:46:32
| 2014-08-15T14:46:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,282
|
py
|
"""
___________________________________________________
| _____ _____ _ _ _ |
| | __ \ | __ (_) | | | |
| | |__) |__ _ __ __ _ _ _| |__) || | ___ | |_ |
| | ___/ _ \ '_ \ / _` | | | | ___/ | |/ _ \| __| |
| | | | __/ | | | (_| | |_| | | | | | (_) | |_ |
| |_| \___|_| |_|\__, |\__,_|_| |_|_|\___/ \__| |
| __/ | |
| GNU/Linux based |___/ Multi-Rotor UAV Autopilot |
|___________________________________________________|
ICARUS Server
responsible for receiving, delegating and replying commands
Copyright (C) 2014 Tobias Simon, Ilmenau University of Technology
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. """
from threading import Thread
from icarus_pb2 import IcarusReq, IcarusRep, OK, E_SYNTAX, E_SEMANTIC
class ICARUS_Exception(Exception):
def __init__(self, msg):
self.msg = msg
class ICARUS_Server(Thread):
'''
ICARUS server
responsible for receiving, delegating and IcarusReping incoming commands
'''
def __init__(self, socket, delegate):
'''
socket: a zmq socket
delegate: object providing handle(IcarusReq) routine, raising ICARUS_Exception
'''
Thread.__init__(self)
self._socket = socket
self._delegate = delegate
self.daemon = True
def run(self):
'''
receives, parses and executes commands using the submited delegate in a loop
'''
while True:
# receive message via SCL:
try:
data = self._socket.recv()
except:
# it would not make sense to send an error message here,
# as something seems to be wrong with the socket
print 'could not read SCL message'
continue
# parse message into protobuf structure:
req = IcarusReq()
try:
req.ParseFromString(data)
except:
# syntactic error in ParseFromString
self.send_err(E_SYNTAX, 'could not parse protobuf payload')
continue
# handle parsed protobuf message and send IcarusRep:
try:
self._delegate.handle(req)
self.send_ok()
except ICARUS_Exception, ex:
# semantic error:
self.send_err(E_SEMANTIC, ex.msg)
def send_err(self, code, msg):
'''
IcarusRep with error code and message
'''
rep = IcarusRep()
rep.status = code
rep.message = msg
self._send_rep(rep)
def send_ok(self):
'''
IcarusRep with OK message
'''
rep = IcarusRep()
rep.status = OK
self._send_rep(rep)
def _send_rep(self, rep):
'''
serialize and send message via _socket
'''
self._socket.send(rep.SerializeToString())
|
[
"tobias.simon@tu-ilmenau.de"
] |
tobias.simon@tu-ilmenau.de
|
a1380cf0b07a79d6ea3eec8e515d0dd844e61560
|
f66021ddd1a79f2d43c5b00a56e15ce13a9abfb2
|
/jbkxnoltmn_dev_1766/urls.py
|
8c33ce9bcb4fd1b67b9e4e28b751f3b7120e5cf5
|
[] |
no_license
|
crowdbotics-apps/jbkxnoltmn-dev-1766
|
28db8f66d7f012f7273855a3e7c25becb06a5321
|
e43e4d2761cae06d51f53af0102edfc868618fbd
|
refs/heads/master
| 2022-04-06T05:51:42.505572
| 2020-03-01T19:42:42
| 2020-03-01T19:42:42
| 244,212,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
"""jbkxnoltmn_dev_1766 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "jbkxnoltmn"
admin.site.site_title = "jbkxnoltmn Admin Portal"
admin.site.index_title = "jbkxnoltmn Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="jbkxnoltmn API",
default_version="v1",
description="API documentation for jbkxnoltmn App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
35a44548cde679b353b2ecfd460fbadd70c4f068
|
d9b286e3ed4038651f8a93ddad8fc7369b8e22ad
|
/reviewboard/webapi/tests/test_screenshot_comment.py
|
5362bcbc71c12d3eef9cb5242c2424f6ee47f5fc
|
[
"MIT"
] |
permissive
|
harrifeng/reviewboard
|
6456b1ba2fa953bdc83cb16681731bcef10430ee
|
f560679be34ab547ef0a4fbca959e244d6bf5a75
|
refs/heads/master
| 2016-10-16T14:05:54.102611
| 2013-09-06T21:23:30
| 2013-09-11T09:52:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,402
|
py
|
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.reviews.models import ScreenshotComment
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import screenshot_comment_list_mimetype
from reviewboard.webapi.tests.urls import get_screenshot_comment_list_url
class ScreenshotCommentResourceTests(BaseWebAPITestCase):
"""Testing the ScreenshotCommentResource APIs."""
fixtures = ['test_users', 'test_scmtools']
def test_get_screenshot_comments(self):
"""Testing the GET review-requests/<id>/screenshots/<id>/comments/ API"""
comment_text = "This is a test comment."
x, y, w, h = (2, 2, 10, 10)
review_request = self.create_review_request(publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
self._postNewScreenshotComment(review_request, review.id, screenshot,
comment_text, x, y, w, h)
rsp = self.apiGet(
get_screenshot_comment_list_url(review),
expected_mimetype=screenshot_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
comments = ScreenshotComment.objects.filter(screenshot=screenshot)
rsp_comments = rsp['screenshot_comments']
self.assertEqual(len(rsp_comments), comments.count())
for i in range(0, len(comments)):
self.assertEqual(rsp_comments[i]['text'], comments[i].text)
self.assertEqual(rsp_comments[i]['x'], comments[i].x)
self.assertEqual(rsp_comments[i]['y'], comments[i].y)
self.assertEqual(rsp_comments[i]['w'], comments[i].w)
self.assertEqual(rsp_comments[i]['h'], comments[i].h)
@add_fixtures(['test_site'])
def test_get_screenshot_comments_with_site(self):
"""Testing the GET review-requests/<id>/screenshots/<id>/comments/ API with a local site"""
comment_text = 'This is a test comment.'
x, y, w, h = (2, 2, 10, 10)
user = self._login_user(local_site=True)
review_request = self.create_review_request(with_local_site=True,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
self._postNewScreenshotComment(review_request, review.id, screenshot,
comment_text, x, y, w, h)
rsp = self.apiGet(
get_screenshot_comment_list_url(review, self.local_site_name),
expected_mimetype=screenshot_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
comments = ScreenshotComment.objects.filter(screenshot=screenshot)
rsp_comments = rsp['screenshot_comments']
self.assertEqual(len(rsp_comments), comments.count())
for i in range(0, len(comments)):
self.assertEqual(rsp_comments[i]['text'], comments[i].text)
self.assertEqual(rsp_comments[i]['x'], comments[i].x)
self.assertEqual(rsp_comments[i]['y'], comments[i].y)
self.assertEqual(rsp_comments[i]['w'], comments[i].w)
self.assertEqual(rsp_comments[i]['h'], comments[i].h)
@add_fixtures(['test_site'])
def test_get_screenshot_comments_with_site_no_access(self):
"""Testing the GET review-requests/<id>/screenshots/<id>/comments/ API with a local site and Permission Denied error"""
comment_text = 'This is a test comment.'
x, y, w, h = (2, 2, 10, 10)
user = self._login_user(local_site=True)
review_request = self.create_review_request(with_local_site=True,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
self._postNewScreenshotComment(review_request, review.id, screenshot,
comment_text, x, y, w, h)
self._login_user()
rsp = self.apiGet(
get_screenshot_comment_list_url(review, self.local_site_name),
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
|
[
"chipx86@chipx86.com"
] |
chipx86@chipx86.com
|
ff97443bc42e9a21c435f0a5d1bea777d082d66d
|
1aa12d5735fb239c8be8f8a44881eb05908d6e4f
|
/deunionreserve/settings.py
|
a794f4a7c468667a858aed43da6fc8aa1c20a2f0
|
[] |
no_license
|
Justprince234/bank-api
|
218f14d15e3e76f5f0b1c5c1142f7d5417cc6e03
|
a2fa7d478c6ab88fc2369caaacbc407b5411da0d
|
refs/heads/master
| 2023-06-23T11:00:51.124883
| 2021-07-13T13:04:36
| 2021-07-13T13:04:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,631
|
py
|
"""
Django settings for deunionreserve project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-e&b^1n3d)hsb%ax3ayv%^d5m=)*2q5q5us+duf2+0nh@u373(5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#Third party Apps
'rest_framework',
'knox',
'django_rest_passwordreset',
'corsheaders',
'rest_framework_simplejwt',
'drf_yasg',
#My Apps
'accounts.apps.AccountsConfig',
'customers.apps.CustomersConfig',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
SITE_ID = 1
AUTH_USER_MODEL = 'accounts.User'
ROOT_URLCONF = 'deunionreserve.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'deunionreserve.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 597
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_ROOT = os.path.join(BASE_DIR, 'photos/')
MEDIA_URL = '/photos/photos/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
# 'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
'knox.auth.TokenAuthentication',
'rest_framework_simplejwt.authentication.JWTAuthentication',
]
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Heroku settings.
import django_heroku
django_heroku.settings(locals())
|
[
"princewilljackson@ymail.com"
] |
princewilljackson@ymail.com
|
ea7d9fa0c40854eead9518afad3701d2d8eeb5b2
|
32b5beff459a4e130c3b231e32d717bed4178a1c
|
/src/bench/bench_python/plot_benchmark.py
|
4d0b590b602e3ac198077fd5613b194a6a9db094
|
[
"MIT"
] |
permissive
|
constantinpape/z5
|
9deb76fe52a1335dac7ef49e85c40cf7efbb8887
|
bd5cb52782a9cabf534ea77ba0823f207c8eccb8
|
refs/heads/master
| 2023-07-06T15:58:13.279554
| 2023-07-04T07:26:21
| 2023-07-04T07:26:21
| 101,700,504
| 94
| 28
|
MIT
| 2023-07-04T07:26:23
| 2017-08-29T00:31:10
|
C++
|
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
import argparse
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_theme(style='whitegrid')
def plot_benchmark(path, chunk_key='25_125_125', skip=[]):
with open(path) as f:
results = json.load(f)
data = []
for format_ in ('h5', 'n5', 'zarr'):
read = results[format_]['read']
write = results[format_]['write']
sizes = results[format_]['sizes']
compressions = list(read.keys())
for compression in compressions:
if compression in skip:
continue
t_read = np.min(read[compression][chunk_key])
t_write = np.min(write[compression][chunk_key])
size = sizes[compression][chunk_key]
data.append([format_, compression, t_read, t_write, size])
data = pd.DataFrame(data, columns=['format', 'compression', 't-read [s]', 't-write [s]', 'size [MB]'])
fig, axes = plt.subplots(3)
sns.barplot(data=data, ax=axes[0], x="format", y="t-read [s]", hue="compression")
sns.barplot(data=data, ax=axes[1], x="format", y="t-write [s]", hue="compression")
sns.barplot(data=data, ax=axes[2], x="format", y="size [MB]", hue="compression")
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('path')
parser.add_argument('--skip', nargs="+", default=[])
args = parser.parse_args()
plot_benchmark(args.path, skip=args.skip)
|
[
"c.pape@gmx.net"
] |
c.pape@gmx.net
|
73e0fa7025c6612d5ef02836a41198b625ae0f24
|
6c2a41b67027f95cc9525a813455b07cdf49c5d7
|
/projects/models.py
|
7278428ef0ce110f669ab440107ddbcf21ef929c
|
[
"MIT"
] |
permissive
|
wilbrone/Awards
|
3c1ce7ec3ca3003ff9787529a903c55c02ea42ae
|
c4c87ca5d700a12dc8d23e2d6092ac59adada4af
|
refs/heads/master
| 2022-12-21T22:12:39.474327
| 2020-02-23T10:46:27
| 2020-02-23T10:46:27
| 240,668,985
| 0
| 0
|
MIT
| 2022-09-23T22:36:18
| 2020-02-15T08:19:22
|
Python
|
UTF-8
|
Python
| false
| false
| 3,092
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
bio = models.TextField(max_length = 500)
profile_pic = models.ImageField(upload_to='profile_pics', blank=True, default='default.png')
location = models.CharField(max_length = 100)
def __str__(self):
return f'{self.user.username} Profile'
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
def create_profile(self):
self.save()
def delete_profile(self):
self.delete()
def search_profile(self,username):
users = User.objects.filter(username=username)
return users
class Project(models.Model):
title = models.CharField(max_length = 100)
image = models.ImageField(upload_to='project_pics/',null=True,blank=True,default='default.png')
image_url = models.CharField(max_length=250, null= True)
description = models.TextField(max_length = 500)
posted = models.DateTimeField(auto_now_add=True, null=True)
user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='projects', null=True)
def __str__(self):
return f'{self.user} Project'
# def get_absolute_url(self):
# return f"/single_post/{self.id}"
def save_project(self):
self.save()
def delete_project(self,id):
self.delete(id=id)
class Rating(models.Model):
rating = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6, '6'),
(7, '7'),
(8, '8'),
(9, '9'),
(10, '10'),
)
design = models.IntegerField(choices=rating, default=0, blank=True)
usability = models.IntegerField(choices=rating, blank=True)
content = models.IntegerField(choices=rating, blank=True)
score = models.FloatField(default=0, blank=True)
design_average = models.FloatField(default=0, blank=True)
usability_average = models.FloatField(default=0, blank=True)
content_average = models.FloatField(default=0, blank=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, related_name='rater')
post = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='ratings', null=True)
class Meta:
get_latest_by='score'
@classmethod
def get_leading_project(cls):
post=cls.objects.latest()
return post
def save_rating(self):
self.save()
@classmethod
def get_ratings(cls, id):
ratings = Rating.objects.filter(post_id=id).all()
return ratings
def __str__(self):
return f'{self.post} Rating'
|
[
"wilbroneokoth@gmail.com"
] |
wilbroneokoth@gmail.com
|
3dfc9792128a7b662720ee5edda3d0b79bbc1671
|
a2d83ad6126403703e85ecd3e627ef402e1fb6cf
|
/setup.py
|
938f01e6cd380f0b81fc8141100e9881357bc040
|
[] |
no_license
|
argriffing/fiedlerology
|
5e0042f6e4b77be40208a29910978ee1e9a9846a
|
255cf1889fd9ac8619891ef8a10de813390247bd
|
refs/heads/master
| 2020-05-17T05:24:33.832399
| 2014-05-10T01:39:36
| 2014-05-10T01:39:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
#!/usr/bin/env python
"""Implementations of algorithms related to the work of Miroslav Fiedler.
"""
DOCLINES = __doc__.split('\n')
# This setup script is written according to
# http://docs.python.org/2/distutils/setupscript.html
#
# It is meant to be installed through github using pip.
from distutils.core import setup
setup(
name='fiedlerology',
version='0.1',
description=DOCLINES[0],
author='alex',
url='https://github.com/argriffing/fiedlerology/',
download_url='https://github.com/argriffing/fiedlerology/',
packages=['fiedlerology'],
test_suite='nose.collector',
package_data={'fiedlerology' : ['tests/test_*.py']},
)
|
[
"argriffi@ncsu.edu"
] |
argriffi@ncsu.edu
|
ec5fe9f24592abc674e2160d3674ec1936d8432f
|
d007f8d6c318c3d66e76d99715edf324c9fe0294
|
/recipes/luci_config.py
|
a0181931f1a2c7a0f24a5d32e0f34026fc861b60
|
[
"BSD-3-Clause"
] |
permissive
|
nirvus/infra-recipes
|
c0f9e5facca7ad1907d639eb8819a59dc8f3584e
|
a5dc52f47405dcce56fb43a3e8ac80a2fbd56717
|
refs/heads/master
| 2020-04-07T23:15:01.809232
| 2018-11-06T02:30:12
| 2018-11-06T17:37:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,266
|
py
|
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe for testing LUCI configs."""
import difflib
from recipe_engine.recipe_api import Property
DEPS = [
'infra/jiri',
'recipe_engine/buildbucket',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
]
PROPERTIES = {
'config_project':
Property(
kind=str,
help='Jiri remote manifest project containing the luci configs',
default=None),
'manifest':
Property(kind=str, help='Jiri manifest to use'),
'remote':
Property(kind=str, help='Remote manifest repository'),
}
_PREBUILT_PROJECT_REMOTE = 'https://fuchsia.googlesource.com/infra/prebuilt'
def RunSteps(api, config_project, manifest, remote):
api.jiri.ensure_jiri()
with api.context(infra_steps=True):
api.jiri.checkout(
manifest=manifest,
remote=remote,
build_input=api.buildbucket.build.input)
# Find the required jiri projects.
config_jiri_project, prebuilt_jiri_project = None, None
jiri_projects = api.jiri.project()
for jiri_project in jiri_projects.json.output:
if jiri_project['name'] == config_project:
config_jiri_project = jiri_project
if jiri_project['remote'] == _PREBUILT_PROJECT_REMOTE:
prebuilt_jiri_project = jiri_project
assert config_jiri_project, 'Failed to find project %s' % config_project
assert prebuilt_jiri_project, (
'Failed to find project with remote %s' % _PREBUILT_PROJECT_REMOTE)
# Needs to be kept in sync with //infra/prebuilt/tools/cipd.ensure.
flatten_buildbucket_path = api.path['start_dir'].join(
prebuilt_jiri_project['path'], 'tools', 'flatten_buildbucket_cfg',
'flatten_buildbucket_cfg')
services_path = api.path['start_dir'].join(config_jiri_project['path'],
'config', 'services')
buildbucket_config_paths = api.file.glob_paths(
name='glob buildbucket configs',
source=services_path,
pattern='*buildbucket*.cfg')
if not buildbucket_config_paths:
raise api.step.StepFailure(
'Found no buildbucket configs under %s' % services_path)
for buildbucket_config_path in buildbucket_config_paths:
# Flatten the existing config. Fails if it is not a valid config proto.
basename = api.path.basename(buildbucket_config_path)
flatten_step = api.step(
'flatten %s' % basename,
[flatten_buildbucket_path, buildbucket_config_path],
stdout=api.raw_io.output_text())
flattened_config = flatten_step.stdout
# Compare the flattened to the copy in generated/ sub-dir. This enforces
# that the generated copy stays up to date.
expected_config = api.file.read_text(
'read generated/%s' % basename, services_path.join(
'generated', basename))
with api.step.nest('diff %s' % basename):
expected_lines = expected_config.split('\n')
flattened_lines = flattened_config.split('\n')
diff = list(
difflib.context_diff(
expected_lines,
flattened_lines,
fromfile='generated/%s' % basename,
tofile='%s.flattened' % basename))
if diff:
api.step.active_result.presentation.logs['diff'] = diff
api.step.active_result.presentation.logs['expected'] = expected_lines
api.step.active_result.presentation.logs['flattened'] = flattened_lines
raise api.step.StepFailure('Found diff')
api.step.active_result.presentation.step_text = 'no diff'
def GenTests(api):
properties = api.properties(
config_project='fuchsia-infra/config',
manifest='manifest/infra',
remote='https://fuchsia.googlesource.com/manifest',
)
jiri_projects = api.step_data(
'jiri project',
api.jiri.project([{
'name': 'fuchsia-infra/config',
'path': 'config',
'remote': 'https://fuchsia.googlesource.com/infra/config'
}, {
'name': 'prebuilt',
'path': 'fuchsia-infra/prebuilt',
'remote': 'https://fuchsia.googlesource.com/infra/prebuilt'
}]))
glob_step_data = api.step_data(
'glob buildbucket configs',
api.file.glob_paths(names=('cr-buildbucket.cfg',)))
yield (api.test('no_diff') + properties + jiri_projects + glob_step_data +
api.step_data(
'flatten cr-buildbucket.cfg',
stdout=api.raw_io.output_text('foo\nbar\n')) + api.step_data(
'read generated/cr-buildbucket.cfg',
api.file.read_text(text_content='foo\nbar\n')))
yield (api.test('diff') + properties + jiri_projects + glob_step_data +
api.step_data(
'flatten cr-buildbucket.cfg',
stdout=api.raw_io.output_text('foo\nbaz\n')) + api.step_data(
'read generated/cr-buildbucket.cfg',
api.file.read_text(text_content='foo\nbar\n')))
yield (api.test('no_buildbucket_configs') + properties + jiri_projects +
api.step_data('glob buildbucket configs', api.file.glob_paths()))
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
fcfff712894869114932d6c0f9818c7e944cbc3f
|
7966fa31437cc8a539621a5a0642ce24c1c9de50
|
/PycharmProjects/leetcode/knapsack/474一和零.py
|
fe2c8b5eb5ad4dc876881797206d48967091368e
|
[] |
no_license
|
crystal30/DataStructure
|
4f938508f4c60af9c5f8ec5520d5acedbe2dc90e
|
c55b0cfd2967a2221c27ed738e8de15034775945
|
refs/heads/master
| 2021-06-25T17:49:03.048853
| 2021-01-22T00:37:04
| 2021-01-22T00:37:04
| 192,374,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,411
|
py
|
# coding:utf-8
class Solution:
def __init__(self):
self.memo = None
def findMaxForm(self, strs, m, n):
len_strs = len(strs)
if len_strs == 0:
return 0
self.memo = [[[-1 for _ in range(n+1)] for _ in range(m+1)] for _ in range(len_strs)]
return max(0, self.find_max_form(strs, len(strs)-1, m, n))
def find_max_form(self, strs, index, m, n):
if m == 0 and n == 0:
return 0
num_0 = 0
num_1 = 0
for e in strs[index]:
if e == '0':
num_0 += 1
else:
num_1 += 1
if index == 0:
if num_0 <= m and num_1 <= n:
return 1
else:
return 0
if self.memo[index][m][n] != -1:
return self.memo[index][m][n]
if m - num_0 >= 0 and n - num_1 >= 0:
res = max(self.find_max_form(strs, index-1, m, n),
1+self.find_max_form(strs, index-1, m-num_0, n-num_1))
else:
res = self.find_max_form(strs, index-1, m, n)
self.memo[index][m][n] = res
return res
class Solution1:
def findMaxForm(self, strs, m, n):
len_strs = len(strs)
memo = [[[0 for _ in range(n+1)] for _ in range(m+1)] for _ in range(len_strs)]
zero_num, one_num = self.calculate_zero_one(strs[0])
for j in range(m+1):
for k in range(n+1):
if zero_num <= j and one_num <= k:
memo[0][j][k] = 1
for i in range(1, len_strs):
zero_num, one_num = self.calculate_zero_one(strs[i])
for j in range(m+1):
for k in range(n+1):
if zero_num <= j and one_num <= k:
# print(i, k, j)
memo[i][j][k] = max(memo[i-1][j][k], 1+memo[i-1][j-zero_num][k-one_num])
else:
memo[i][j][k] = memo[i-1][j][k]
return memo[len_strs-1][m][n]
def calculate_zero_one(self, e):
zero_num = 0
one_num = 0
for sub_e in e:
if sub_e == '0':
zero_num += 1
else:
one_num += 1
return zero_num, one_num
if __name__ == "__main__":
so = Solution1()
strs = ["10"]
m = 5
n = 3
re = so.findMaxForm(strs, m, n)
print(re)
|
[
"zhao_crystal@126.com"
] |
zhao_crystal@126.com
|
88eb693481debe9f05c43f4a10cdaf4512efe676
|
7e8060ad317fe7d87fcbb1c756461cea8a067d08
|
/stepik/python67/03_07_03.py
|
0018a5ff2748b096d66fa7b893cd682c0b0bcb5b
|
[
"Unlicense"
] |
permissive
|
ornichola/learning-new
|
f9b8f2d210f36f2c05a14e7cca56d32495e60778
|
abb6e74f9f8794a0d3b897618207d4be0b0ff3e1
|
refs/heads/master
| 2023-05-25T19:18:42.931439
| 2023-04-17T17:22:57
| 2023-04-17T17:22:57
| 115,291,570
| 4
| 25
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
# [STEPIK]
# Программирование на Python https://stepik.org/67
# 03_07_03 Задачи по материалам недели
'''
Простейшая система проверки орфографии основана на использовании списка известных слов. Каждое слово в проверяемом тексте ищется в этом списке и, если такое слово не найдено, оно помечается, как ошибочное.
Напишем подобную систему.
Через стандартный ввод подаётся следующая структура: первой строкой — количество dd записей в списке известных слов, после передаётся dd строк с одним словарным словом на строку, затем — количество ll строк текста, после чего — ll строк текста.
Напишите программу, которая выводит слова из текста, которые не встречаются в словаре. Регистр слов не учитывается. Порядок вывода слов произвольный. Слова, не встречающиеся в словаре, не должны повторяться в выводе программы.
Sample Input:
3
a
bb
cCc
2
a bb aab aba ccc
c bb aaa
Sample Output:
aab
aba
c
aaa
'''
d = int(input())
#words = []
words = set()
#unknow_words = []
unknow_words = set()
for _ in range(d):
#words.append(input().lower())
words.add(input().lower())
l = int(input())
for _ in range(l):
string = input().lower().split()
for i in range(len(string)):
if string[i] not in words:
#unknow_words.append(string[i])
unknow_words.add(string[i])
for word in unknow_words:
print(word)
|
[
"1502708+ornichola@users.noreply.github.com"
] |
1502708+ornichola@users.noreply.github.com
|
af42b85e3f471e7e97d28b7be17f4a38e423e2dd
|
37adb80efb9b75e507440af38e116207b65039ec
|
/backend/green_rice_27742/wsgi.py
|
6b1b876222d02962b56137e0681369fb77328ada
|
[] |
no_license
|
crowdbotics-apps/green-rice-27742
|
c39ba932d25992eef96da74c57bcf5a479014727
|
f9c8cfc358c287ac7d32c983c523b463e5f5a1da
|
refs/heads/master
| 2023-05-25T18:47:07.307988
| 2021-06-04T23:47:41
| 2021-06-04T23:47:41
| 373,982,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
WSGI config for green_rice_27742 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'green_rice_27742.settings')
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
ddba473a2d93e43e7b7d79629b3f73dc5b86a764
|
4f7962d02254ab6e5cf692648c933394ff41c79d
|
/component_sdk/python/tests/google/dataflow/test__launch_python.py
|
246816dd10677750ebd91c1fc5a39a72f081beb4
|
[
"Apache-2.0"
] |
permissive
|
yebrahim/pipelines
|
5414131f5ab176aa7607114e3a0d23db73f5c8c8
|
77df6c2438f4cf6b81c97ecf4dac9fdbac0e3132
|
refs/heads/master
| 2020-04-08T13:23:50.628537
| 2019-03-01T18:35:47
| 2019-03-01T18:35:47
| 159,389,183
| 1
| 0
|
Apache-2.0
| 2018-11-27T19:37:57
| 2018-11-27T19:37:56
| null |
UTF-8
|
Python
| false
| false
| 2,989
|
py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
import os
from kfp_component.google.dataflow import launch_python
MODULE = 'kfp_component.google.dataflow._launch_python'
@mock.patch('kfp_component.google.dataflow._common_ops.display')
@mock.patch(MODULE + '.stage_file')
@mock.patch(MODULE + '.KfpExecutionContext')
@mock.patch(MODULE + '.DataflowClient')
@mock.patch(MODULE + '.Process')
@mock.patch(MODULE + '.subprocess')
class LaunchPythonTest(unittest.TestCase):
def test_launch_python_succeed(self, mock_subprocess, mock_process,
mock_client, mock_context, mock_stage_file, mock_display):
mock_context().__enter__().context_id.return_value = 'ctx-1'
mock_client().list_aggregated_jobs.return_value = {
'jobs': []
}
mock_process().read_lines.return_value = [
b'https://console.cloud.google.com/dataflow/locations/us-central1/jobs/job-1?project=project-1'
]
expected_job = {
'currentState': 'JOB_STATE_DONE'
}
mock_client().get_job.return_value = expected_job
result = launch_python('/tmp/test.py', 'project-1')
self.assertEqual(expected_job, result)
def test_launch_python_retry_succeed(self, mock_subprocess, mock_process,
mock_client, mock_context, mock_stage_file, mock_display):
mock_context().__enter__().context_id.return_value = 'ctx-1'
mock_client().list_aggregated_jobs.return_value = {
'jobs': [{
'id': 'job-1',
'name': 'test_job-ctx-1'
}]
}
expected_job = {
'currentState': 'JOB_STATE_DONE'
}
mock_client().get_job.return_value = expected_job
result = launch_python('/tmp/test.py', 'project-1', job_name_prefix='test-job')
self.assertEqual(expected_job, result)
mock_process.assert_not_called()
def test_launch_python_no_job_created(self, mock_subprocess, mock_process,
mock_client, mock_context, mock_stage_file, mock_display):
mock_context().__enter__().context_id.return_value = 'ctx-1'
mock_client().list_aggregated_jobs.return_value = {
'jobs': []
}
mock_process().read_lines.return_value = [
b'no job id',
b'no job id'
]
result = launch_python('/tmp/test.py', 'project-1')
self.assertEqual(None, result)
|
[
"k8s-ci-robot@users.noreply.github.com"
] |
k8s-ci-robot@users.noreply.github.com
|
bdafe21d3f847430f8a82c37360c237512b69a8c
|
83cf642504313b6ef6527dda52158a6698c24efe
|
/scripts/addons/fd_scripting_tools/autocompletion/suggestions/dynamic/_bpy_fake/__private__/blenddatalinestyles.py
|
da6be1867caf90383dcdbf8a7278bc00bf432072
|
[] |
no_license
|
PyrokinesisStudio/Fluid-Designer-Scripts
|
a4c40b871e8d27b0d76a8025c804d5a41d09128f
|
23f6fca7123df545f0c91bf4617f4de7d9c12e6b
|
refs/heads/master
| 2021-06-07T15:11:27.144473
| 2016-11-08T03:02:37
| 2016-11-08T03:02:37
| 113,630,627
| 1
| 0
| null | 2017-12-09T00:55:58
| 2017-12-09T00:55:58
| null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
from . struct import Struct
from . freestylelinestyle import FreestyleLineStyle
from . bpy_struct import bpy_struct
import mathutils
class BlendDataLineStyles(bpy_struct):
@property
def rna_type(self):
'''(Struct) RNA type definition'''
return Struct()
@property
def is_updated(self):
'''(Boolean)'''
return bool()
def tag(self, value):
'''tag
Parameter:
value: (Boolean)'''
return
def new(self, name):
'''Add a new line style instance to the main database
Parameter:
name: (String) New name for the datablock
Returns:
linestyle: (FreestyleLineStyle) New line style datablock'''
return FreestyleLineStyle()
def remove(self, linestyle):
'''Remove a line style instance from the current blendfile
Parameter:
linestyle: (FreestyleLineStyle) Line style to remove'''
return
def get(key): return FreestyleLineStyle()
def __getitem__(key): return FreestyleLineStyle()
def __iter__(key): yield FreestyleLineStyle()
|
[
"dev.andrewpeel@gmail.com"
] |
dev.andrewpeel@gmail.com
|
da52ce6e0f2e6dbc30ed36da552d9228c8915f07
|
d635abe4bcdb62818c12f00fa3664d4147ecbf8d
|
/bert/train/utils/stateload.py
|
dcae7ee45b2a1639bd0b4d8997cb5c6cdcb920df
|
[] |
no_license
|
mbilab/protein_understanding
|
e82badd8657a9f115d4a617112e00d9d69f19471
|
56a3f17f0557c57d3c25786d128d608629aecd69
|
refs/heads/master
| 2020-07-08T14:38:27.318254
| 2019-12-18T09:46:02
| 2019-12-18T09:46:02
| 203,119,288
| 0
| 0
| null | 2019-08-19T07:18:15
| 2019-08-19T07:18:14
| null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
def stateLoading(model, pretrained_path):
# Since issue: KeyError: 'unexpected key ...'
# See https://discuss.pytorch.org/t/solved-keyerror-unexpected-key-module-encoder-embedding-weight-in-state-dict/1686/3
# Build a new dict that contains no prefix 'module.', the length of the prefix is 7
# original saved file with DataParallel
from collections import OrderedDict
import torch
state_dict = torch.load(pretrained_path, map_location='cpu')['state_dict']
# create new OrderedDict that does not contain `module.`
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[6:] # remove `module.`
new_state_dict[name] = v
# load params
model.load_state_dict(new_state_dict)
return model
|
[
"ztex030640417@gmail.com"
] |
ztex030640417@gmail.com
|
439c1fd1483a411f8f2aa19a035764258d8b0f1f
|
0a8a4bfd6b4ffcfb7c99119c83cb3abe17c4a8f6
|
/examples/frontend_example.py
|
14824c9662fd5ca82359b8f6b574ba602715a9d3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
google/openhtf
|
58c06e07508f9bb2079070a5ac03898fc68c1778
|
3a9a24987b2b34782fca55a8df8d007167dbb19a
|
refs/heads/master
| 2023-08-23T12:12:54.917649
| 2023-07-27T01:51:17
| 2023-07-27T01:51:43
| 41,519,483
| 471
| 253
|
Apache-2.0
| 2023-09-12T00:47:42
| 2015-08-28T01:14:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
# Copyright 2018 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple OpenHTF test which launches the web GUI client."""
import openhtf as htf
from openhtf.output.servers import station_server
from openhtf.output.web_gui import web_launcher
from openhtf.plugs import user_input
from openhtf.util import configuration
CONF = configuration.CONF
@htf.measures(htf.Measurement('hello_world_measurement'))
def hello_world(test):
test.logger.info('Hello World!')
test.measurements.hello_world_measurement = 'Hello Again!'
def main():
CONF.load(station_server_port='4444')
with station_server.StationServer() as server:
web_launcher.launch('http://localhost:4444')
for _ in range(5):
test = htf.Test(hello_world)
test.add_output_callbacks(server.publish_final_state)
test.execute(test_start=user_input.prompt_for_test_start())
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
google.noreply@github.com
|
9f2a2c5e001af876451e73bc546f946af0eb6ed8
|
226e962457f3f9d271bdc0ec7cb999d45dd2ab92
|
/plugins/admin.py
|
2cd117d0f1eb608f3a981430e86f4d6694ebbb58
|
[
"Apache-2.0"
] |
permissive
|
Web5design/saxo
|
5109ed2a9c48b2c6e4a2afd6edfe2e28b8279b0f
|
1cf079d13be63557626ebf8163c65c16bd4856c8
|
refs/heads/master
| 2021-01-20T16:30:36.401591
| 2014-01-26T22:23:28
| 2014-01-26T22:23:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
# http://inamidst.com/saxo/
# Created by Sean B. Palmer
import saxo
@saxo.command("join", owner=True)
def join(irc):
if irc.arg.startswith("#"):
irc.client("join", irc.arg)
irc.say("Joining %s" % irc.arg)
@saxo.command("leave", owner=True)
def leave(irc):
if irc.arg.startswith("#"):
irc.send("PART", irc.arg)
irc.say("Leaving %s" % irc.arg)
@saxo.command("part", owner=True)
def part(irc):
if irc.arg.startswith("#"):
irc.client("part", irc.arg)
irc.say("Parting %s" % irc.arg)
@saxo.command("prefix", owner=True)
def prefix(irc):
irc.client("prefix", irc.arg)
irc.say("Setting prefix to %r" % irc.arg)
@saxo.command("quit", owner=True)
def quit(irc):
irc.client("quit")
@saxo.command("reload", owner=True)
def reload(irc):
irc.client("reload", irc.sender)
@saxo.command("visit", owner=True)
def visit(irc):
if irc.arg.startswith("#"):
irc.send("JOIN", irc.arg)
irc.say("Visiting %s" % irc.arg)
|
[
"sean@miscoranda.com"
] |
sean@miscoranda.com
|
ad0ce10579942c717769e5eb6262652d640dfcfa
|
3943378e160590751b195c58f5a817125e487686
|
/findit/engine/ocr.py
|
16990307cb26dfec42b936e006ab54d51c5f9c17
|
[
"MIT"
] |
permissive
|
uzstudio/findit
|
475d101e295aeb2c92df9f80dae78e88bc327a27
|
e3bb233b559f1882209fe95734a933bb82f71c80
|
refs/heads/master
| 2023-01-29T13:06:02.313593
| 2020-12-06T16:02:53
| 2020-12-06T16:02:53
| 319,051,716
| 0
| 0
|
MIT
| 2020-12-06T14:24:31
| 2020-12-06T14:24:31
| null |
UTF-8
|
Python
| false
| false
| 3,530
|
py
|
import numpy as np
import warnings
import typing
from findit.logger import logger
from findit.engine.base import FindItEngine, FindItEngineResponse
try:
import findtext
except ImportError:
logger.debug("findtext should be installed if you want to use OCR engine")
class OCREngine(FindItEngine):
""" OCR engine, binding to tesseract """
# language settings, same as tesseract
# if you want to use chi_sim and eng, you can set it 'chi_sim+eng'
DEFAULT_LANGUAGE: str = "eng"
# offset for words ( sometimes causes out of range, take care )
DEFAULT_OFFSET: int = 0
# deep query
DEFAULT_DEEP: bool = False
def __init__(self, engine_ocr_lang: str = None, *_, **__):
logger.info(f"engine {self.get_type()} preparing ...")
# check language data before execute function, not here.
self.engine_ocr_lang = engine_ocr_lang or self.DEFAULT_LANGUAGE
self.engine_ocr_offset = self.DEFAULT_OFFSET
self.engine_ocr_deep = self.DEFAULT_DEEP
assert findtext, "findtext should be installed if you want to use OCR engine"
self._ft = findtext.FindText(lang=engine_ocr_lang)
self.engine_ocr_tess_data_dir = self._ft.get_data_home()
self.engine_ocr_available_lang_list = self._ft.get_available_lang()
logger.debug(f"target lang: {self.engine_ocr_lang}")
logger.debug(f"tess data dir: {self.engine_ocr_tess_data_dir}")
logger.debug(f"available language: {self.engine_ocr_available_lang_list}")
logger.info(f"engine {self.get_type()} loaded")
def execute(
self,
template_object: np.ndarray,
target_object: np.ndarray,
engine_ocr_offset: int = None,
engine_ocr_deep: bool = None,
*_,
**__,
) -> FindItEngineResponse:
resp = FindItEngineResponse()
if engine_ocr_offset:
self.engine_ocr_offset = engine_ocr_offset
if engine_ocr_deep:
self.engine_ocr_deep = engine_ocr_deep
# _ft is not JSON serializable
conf_dict = {k: _ for k, _ in self.__dict__.items() if k != "_ft"}
resp.append("conf", conf_dict, important=True)
# check language
for each_lang in self.engine_ocr_lang.split("+"):
if each_lang not in self.engine_ocr_available_lang_list:
resp.append("raw", "this language not available", important=True)
resp.append("ok", False, important=True)
return resp
word_block_list = self._ft.find_word(
image_object=target_object,
deep=self.engine_ocr_deep,
offset=self.engine_ocr_offset,
)
available_result_list = [i for i in word_block_list if i.content]
result_text = self._improve_text_result(
[i.content for i in available_result_list]
)
resp.append("content", result_text, important=True)
resp.append("raw", [i.__dict__ for i in word_block_list])
resp.append("ok", True, important=True)
return resp
@staticmethod
def _improve_text_result(origin: typing.List[str]) -> typing.List[str]:
try:
import jieba
except ImportError:
warnings.warn(
"no package named jieba, you can install it for better ocr result"
)
return origin
new = list()
for each in origin:
text_cut = jieba.cut(each)
new.extend(text_cut)
return list(set(new))
|
[
"178894043@qq.com"
] |
178894043@qq.com
|
d19dc2f371b1a4f5f4558649314377073d2df972
|
40454f9a92ecbfc382d8bb611743bad4ecb76017
|
/service/log_handler.py
|
0d2507a5ad64cc8afdc55d03fbf6baac0d1e7018
|
[
"Apache-2.0"
] |
permissive
|
ibm-devops/devops-workshop-2020
|
cd83135edb5c7ec2adb4647d4b4997b9ff11fc46
|
33fec8ef8cb7e16548b4eb3381b441acbdce0e47
|
refs/heads/master
| 2023-09-06T08:45:35.184681
| 2021-11-23T21:46:49
| 2021-11-23T21:46:49
| 305,249,605
| 0
| 0
|
Apache-2.0
| 2020-10-19T21:40:57
| 2020-10-19T03:11:30
|
Python
|
UTF-8
|
Python
| false
| false
| 710
|
py
|
import logging
from . import app
############################################################
# set up logging for Flask applications
############################################################
if __name__ != "__main__":
gunicorn_logger = logging.getLogger("gunicorn.error")
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
app.logger.propagate = False
# Make all log formats consistent
formatter = logging.Formatter(
"[%(asctime)s] [%(levelname)s] [%(module)s] %(message)s", "%Y-%m-%d %H:%M:%S %z"
)
for handler in app.logger.handlers:
handler.setFormatter(formatter)
app.logger.info("Logging handler established")
|
[
"johnnyroy@johnrofrano.com"
] |
johnnyroy@johnrofrano.com
|
1795fb529d885939637490ca274f1a05e3817c80
|
b44a984ac8cfd183e218d56e1ec5d0d3e72d20fd
|
/Binary Search/二分答案/617. Maximum Average Subarray II/binary_search_prefix_sum.py
|
8b867e3902d4d255a55a878fcda65c14538f88da
|
[] |
no_license
|
atomextranova/leetcode-python
|
61381949f2e78805dfdd0fb221f8497b94b7f12b
|
5fce59e6b9c4079b49e2cfb2a6d2a61a0d729c56
|
refs/heads/master
| 2021-07-15T20:32:12.592607
| 2020-09-21T00:10:27
| 2020-09-21T00:10:27
| 207,622,038
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
class Solution:
"""
@param nums: an array with positive and negative numbers
@param k: an integer
@return: the maximum average
"""
def maxAverage(self, nums, k):
# write your code here
if not nums:
return 0
start, end = min(nums), max(nums)
# binary search for possible answer
while start + 1e-5 < end:
mid = (start + end) / 2
if self.can_find_larger_mean(nums, mid, k):
start = mid
else:
end = mid
return start
def can_find_larger_mean(self, nums, target_average, k):
# Construct prefix sum with average deducted
prefix_sum = [0]
for num in nums:
prefix_sum.append(prefix_sum[-1] + num - target_average)
min_prefix_sum = 0
for i in range(k, len(nums) + 1):
# if > 0, => prefix_sum > average
if prefix_sum[i] - min_prefix_sum >= 0:
return True
min_prefix_sum = min(min_prefix_sum, prefix_sum[i - k + 1])
return False
|
[
"atomextranova@gmail.com"
] |
atomextranova@gmail.com
|
847cfce4f2586b32847784c08399cc36f2c1c6a0
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/odd_20200715173523.py
|
dce675361390068076286f115c10581f4c208933
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
def odd(A):
# return the value that doesn't have a pair
# a dictionary to keep track of the number of times
# an element has been repeated
newDict = {}
for i in A:
if i in newDict:
newDict[i] +=1
else:
newDict[i] = 1
odd([9,3,9,3,9,7,9])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
43ccfcb76efbc913bf2c01360150033452d7793c
|
25a27b6e7ad3f7ef90e7b70d7393fcf4b7cc16b0
|
/Pandas_Stats.py
|
b8a04e49bdb8d45ba1ff49fda54cb3a967501103
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Bhaney44/Patent-Valuation
|
aeef15892fa81be314799ad48d8669d6ad3f3bc6
|
57af26f9ec60c85cbf6217358b70520a7b916189
|
refs/heads/master
| 2021-03-05T19:19:31.799981
| 2020-09-29T05:41:16
| 2020-09-29T05:41:16
| 246,145,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 910
|
py
|
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
df = pd.read_csv('Lite_Coin_12_Mos.csv')
print(df.head())
print(df.tail())
print(df.index)
print(df.columns)
print("-----")
print(df.describe())
#print mediuan
print('median')
print(df.median())
print("-----")
#Mode
print('mode')
print(df.mode())
print("-----")
#Variance
##In probability theory and statistics, variance is the expectation of the squared deviation of a random variable from its mean.
##Informally, it measures how far a set of numbers are spread out from their average value.
print('variance')
print(df.var())
print("-----")
#Co-Variance
print('co-variance')
print(df.cov())
#Cumsum
#print('Cumsum')
#print(df.cumsum())
#Scalar Map
#print('Map')
#print(df.applymap())
#Multiply
#print('Multiply')
#print(df.mul())
#Modulo
#print('Modulo')
#print(df.mod())
|
[
"noreply@github.com"
] |
Bhaney44.noreply@github.com
|
67286f42c0f5c3196b3760aedd6bbfecb99ed3a6
|
26dcf8e0457156a8bde936d56a59e1099893f8c6
|
/tests/test_init.py
|
55608b06b9def4bb420070c424c03c56bf5aa666
|
[
"MIT"
] |
permissive
|
SilenceWinter/MicroTokenizer
|
fc4212fb9a324e93e707edbe130b518bd782d07a
|
0b617f4b107743f6c7c473a9fac9408d21c56931
|
refs/heads/master
| 2020-03-29T04:31:23.050836
| 2018-09-18T16:40:28
| 2018-09-18T16:40:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import MicroTokenizer
@pytest.mark.parametrize("input_text", pytest.helpers.tokenizer_test_cases())
def test_DAG(input_text):
result = MicroTokenizer.cut_by_DAG(input_text)
pytest.helpers.assert_token_equals(result, input_text)
@pytest.mark.parametrize("input_text", pytest.helpers.tokenizer_test_cases())
def test_HMM(input_text):
result = MicroTokenizer.cut_by_HMM(input_text)
pytest.helpers.assert_token_equals(result, input_text)
@pytest.mark.parametrize("input_text", pytest.helpers.tokenizer_test_cases())
def test_CRF(input_text):
result = MicroTokenizer.cut_by_CRF(input_text)
pytest.helpers.assert_token_equals(result, input_text)
@pytest.mark.parametrize("input_text", pytest.helpers.tokenizer_test_cases())
def test_joint_model(input_text):
result = MicroTokenizer.cut_by_joint_model(input_text)
pytest.helpers.assert_token_equals(result, input_text)
|
[
"u1mail2me@gmail.com"
] |
u1mail2me@gmail.com
|
9f4d4ded1ac98901226e2e651b5c2a77055979bd
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/175/usersdata/267/96307/submittedfiles/lista1.py
|
0d27230ca436ca13864d21627d406bd3d0fc5b36
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
# -*- coding: utf-8 -*-
n=int(input('Tamanho da lista: '))
a=[]
par=[]
impar=[]
somaPar=0
contPar=0
somaImpar=0
contImpar=0
for i in range(0,n,1):
elem=int(input('Digite o elemento: '))
a.append(elem)
for i in range(0,len(a),1):
if a[i]%2==0:
somaPar=somaPar+a[i]
contPar=contPar+1
else:
somaImpar=somaImpar+a[i]
contImpar=contImpar+1
print (somaPar)
print(contPar)
print()
print(somaImpar)
print(contImpar)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
bb9a5e375a56114623e45ce385c30a820fc1ac0a
|
c54f5a7cf6de3ed02d2e02cf867470ea48bd9258
|
/pyobjc/PyOpenGL-2.0.2.01/OpenGL/__init__.py
|
6efde92c2a11e6c461d897cf6ad1f636d7cff488
|
[] |
no_license
|
orestis/pyobjc
|
01ad0e731fbbe0413c2f5ac2f3e91016749146c6
|
c30bf50ba29cb562d530e71a9d6c3d8ad75aa230
|
refs/heads/master
| 2021-01-22T06:54:35.401551
| 2009-09-01T09:24:47
| 2009-09-01T09:24:47
| 16,895
| 8
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
# This is statement is required by the build system to query build info
if __name__ == '__build__':
    raise Exception

def __set_attributes():
    # Populate the module-level __date__/__version__/__build__ attributes
    # from the CVS $Date$ keyword and the sibling 'version' file.
    # NOTE: uses the Python 2 'string' module — this is legacy Py2 code.
    global __date__, __version__, __build__
    import string, os.path
    # '$Date: 2004/11/14 23:33:24 $' -> '2004/11/14 23:33:24'
    __date__ = string.join(string.split('$Date: 2004/11/14 23:33:24 $')[1:3], ' ')
    filename = os.path.join(os.path.dirname(__file__), 'version')
    __version__ = string.strip(open(filename).read())
    # Build number is the fourth dotted component of the version string.
    __build__ = int(string.split(__version__, '.')[3])

__set_attributes()

__author__ = 'Tarn Weisner Burton <twburton@users.sourceforge.net>\nMike C. Fletcher <mcfletch@users.sourceforge.net>'
__doc__ = '''This is PyOpenGL 2. For information regarding PyOpenGL see:
http://pyopengl.sourceforge.net
For information on OpenGL see:
http://www.opengl.org'''

from GL._GL__init__ import __numeric_present__, __numeric_support__
|
[
"ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25"
] |
ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25
|
c1d33748b6659265d0d0e48b25c94af00752e8c9
|
a7104434e0ddb4575ef0a6cd467bac6620570de8
|
/hunter111.py
|
4726dd46eef032be4a152c39cdce18b129aae670
|
[] |
no_license
|
GauthamAjayKannan/GUVI-1
|
7b276eef3195bec9671eec8bb6bcc588cb5c970e
|
fafabab93df55abcc399f6e2664286ed511fd683
|
refs/heads/master
| 2020-06-25T07:38:08.465414
| 2019-05-17T11:24:53
| 2019-05-17T11:24:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
# Reads an n x n integer matrix from stdin and prints the sum of its
# anti-diagonal (elements x[i][n-1-i]).
# (Fixed: no longer shadows the builtin 'sum'; uses enumerate instead of
# an index-based loop.)
n = int(input())
matrix = [[int(e) for e in input().split()] for _ in range(n)]
total = sum(row[(n - 1) - i] for i, row in enumerate(matrix))
print(total)
|
[
"noreply@github.com"
] |
GauthamAjayKannan.noreply@github.com
|
98f3200707f50fb960acc5eef201747d0b5dfb8a
|
351e2a5ab1a658dcfa6e760fcfb80671d4d95984
|
/ferrua/models/purchase.py
|
8ab59d7972cfc8498c3c4c4064f223afe5a4145d
|
[] |
no_license
|
eneldoserrata/marcos_community_addons
|
02462b006b6c4ece3cfca914bf11d72d9fbd2a0a
|
dfd1f4254c6a59725b32e240f1d654b360c9d7e1
|
refs/heads/master
| 2021-10-11T01:02:08.807210
| 2017-03-09T08:35:54
| 2017-03-09T08:35:54
| 45,713,685
| 4
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,471
|
py
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class PurchaseOrder(models.Model):
    """Extends purchase.order with "roll order" support: MSI subtotals per
    roll category and expansion of roll lines into regular order lines on
    confirmation."""
    _inherit = "purchase.order"

    @api.one
    def _cal_msi(self):
        # Aggregate the MSI of each roll line into per-category subtotals,
        # keyed on the product category's extra_info flag.
        if self.is_roll_order:
            exact = 0
            master = 0
            lam = 0
            for rec in self.roll_order_lines:
                if rec.product_roll_id.product_tmpl_id.categ_id.extra_info == "exact":
                    exact += rec.msi
                elif rec.product_roll_id.product_tmpl_id.categ_id.extra_info == "master":
                    master += rec.msi
                elif rec.product_roll_id.product_tmpl_id.categ_id.extra_info == "lamination":
                    lam += rec.msi
            self.msi_sub_exact = exact
            self.msi_sub_master = master
            # NOTE(review): lamination MSI is divided by 5 — looks like a
            # business rule; confirm with the functional owner.
            self.msi_sub_lam = lam/5
            self.msi_sub_total = self.msi_sub_exact+self.msi_sub_master+self.msi_sub_lam

    # True when this order is priced per roll via roll_order_lines.
    is_roll_order = fields.Boolean("Pedido de rollos", copy=True)
    roll_order_lines = fields.One2many("purchase.order.line.roll", "roll_order_id", copy=True)
    msi_sub_exact = fields.Float("Subtotal Plan Exact", compute=_cal_msi, default=0)
    msi_sub_master = fields.Float("Subtotal Master Rolls", compute=_cal_msi, default=0)
    msi_sub_lam = fields.Float(u"Subtotal Laminación", compute=_cal_msi, default=0)
    msi_sub_total = fields.Float("Total MSI", compute=_cal_msi, default=0)

    @api.multi
    def button_confirm(self):
        # On confirmation of a roll order: drop the regular order lines and
        # recreate one purchase.order.line per roll line, then recompute
        # amounts before delegating to the standard confirmation.
        for rec in self:
            if rec.is_roll_order:
                [line.unlink() for line in rec.order_line]
                for roll in rec.roll_order_lines:
                    new_order_line = self.env["purchase.order.line"].new({"product_id": roll.product_roll_id.id,
                                                                          "order_id": roll.roll_order_id.id})
                    # Trigger the standard onchange to fill defaults, then
                    # override quantity and unit price from the roll line.
                    new_order_line.onchange_product_id()
                    new_order_line.product_qty = roll.rolls
                    new_order_line.price_unit = roll.roll_price
                    new_order_line.create(new_order_line._convert_to_write(new_order_line._cache))
                rec._amount_all()
        return super(PurchaseOrder, self).button_confirm()
class PurchaseOrderLine(models.Model):
    """Roll line of a roll purchase order: roll quantity plus dimensions and
    MSI-based pricing derived from the product variant's attributes."""
    _name = 'purchase.order.line.roll'

    @api.multi
    def _cal_msi(self):
        # Width ("Banda") and length ("Largo") come from the variant's
        # attribute values; MSI and prices are derived from them.
        for rec in self:
            if rec.product_roll_id:
                attrs = dict([(att.attribute_id.name, att.name) for att in rec.product_roll_id.attribute_value_ids])
                rec.ancho = float(attrs["Banda"])
                rec.largo = float(attrs["Largo"])
                # msi = rolls * ancho * 12 * largo / 1000, per the formula below.
                rec.msi = rec.rolls*rec.ancho*(12*rec.largo/1000)
                rec.total_price_msi = rec.msi*rec.price_msi
                # NOTE(review): divides by rec.rolls — fails if rolls is 0.
                rec.roll_price = rec.total_price_msi/rec.rolls

    roll_order_id = fields.Many2one("purchase.order")
    # Only roll products (exact/master/lamination categories) may be chosen.
    product_roll_id = fields.Many2one('product.product', string='Product', domain=[('purchase_ok', '=', True),('categ_id.extra_info','in',('exact','master','lamination'))], change_default=True, required=True)
    rolls = fields.Float("Rollos", default=1)
    ancho = fields.Float(u'Ancho"', compute=_cal_msi)
    largo = fields.Float(u"Largo'", compute=_cal_msi)
    msi = fields.Float("MSI", compute=_cal_msi)
    price_msi = fields.Float(string='Precio', required=True, digits=dp.get_precision('Product Price'), default=1)
    total_price_msi = fields.Float(string='Total', required=True, digits=dp.get_precision('Product Price'), default=1, compute=_cal_msi)
    roll_price = fields.Float(string='Precio por rollo', required=True, digits=dp.get_precision('Product Price'), default=1, compute=_cal_msi)

    @api.onchange("rolls")
    def onchange_rolls(self):
        self._cal_msi()

    @api.onchange("price_msi")
    def onchange_price_msi(self):
        self._cal_msi()

    @api.onchange("product_roll_id")
    def onchange_roll_order_id(self):
        # Accept the product only if its variant attributes are exactly
        # [Largo, Banda]; otherwise reset the field and warn the user.
        if self.product_roll_id:
            attrs = dict([(att.attribute_id.name, att.name) for att in self.product_roll_id.attribute_value_ids])
            if attrs.keys() == [u'Largo', u'Banda']:
                self._cal_msi()
            else:
                return {"value": {"product_roll_id": False},
                        'warning': {'title': u"Error de configuración", 'message': u"El rollo selccionado debe tener las variantes Largo y Ancho definidas"}
                        }
|
[
"eneldoserrata@gmail.com"
] |
eneldoserrata@gmail.com
|
14aa6d7fea39d00ecd9884e761e2c35165614022
|
930c207e245c320b108e9699bbbb036260a36d6a
|
/BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Return_Fan_Status.py
|
672937506a4f0ced76a2b9ed9e98dedc286475e7
|
[] |
no_license
|
InnovationSE/BRICK-Generated-By-OLGA
|
24d278f543471e1ce622f5f45d9e305790181fff
|
7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2
|
refs/heads/master
| 2021-07-01T14:13:11.302860
| 2017-09-21T12:44:17
| 2017-09-21T12:44:17
| 104,251,784
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Fan_Status import Fan_Status
class Return_Fan_Status(Fan_Status):
    """Brick schema 1.0.2 class for a return-fan status point.

    Subclass of Fan_Status; only narrows the RDF type URI.
    """
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Return_Fan_Status
|
[
"Andre.Ponnouradjane@non.schneider-electric.com"
] |
Andre.Ponnouradjane@non.schneider-electric.com
|
c6f531b07cc9ab53de8e28529b78d4cb2a3ae124
|
c3e34335fde6c8bec8d86f2c5651a7df55759406
|
/test.py
|
4d359a733dae1a5488f91b8e6f793c487b3a29ae
|
[
"MIT"
] |
permissive
|
fancybian/ner-crf2
|
a2ea23dfc0cf528ff103bf35d6f70b6a70fdad5f
|
e4f4fe973057ee5f6ffcc87c8dddc502c981b9bf
|
refs/heads/master
| 2021-05-14T00:24:14.683097
| 2018-01-07T05:26:06
| 2018-01-07T05:26:06
| 116,538,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,576
|
py
|
# -*- coding: utf-8 -*-
"""
Script to test a trained CRF model.
train.py must be used before this to train the CRF.
This file must be called with the same identifier that was used during training.
Example usage:
python test.py --identifier="my_experiment" --mycorpus
python test.py --identifier="my_experiment" --germeval
The first command tests on the corpus set in ARTICLES_FILEPATH.
The second command tests on the germeval corpus, whichs path is defined in GERMEVAL_FILEPATH.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import random
import pycrfsuite
from itertools import chain
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelBinarizer
from model.datasets import load_windows, load_articles, generate_examples, Article
import model.features as features
# All capitalized constants come from this file
import config as cfg
random.seed(42)
def main():
    """Parse the command line flags and dispatch to the selected test run."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--identifier", required=True,
                        help="A short name/identifier for your experiment, e.g. 'ex42b'.")
    parser.add_argument("--mycorpus", required=False, action="store_const", const=True,
                        help="Whether to test on your corpus, defined via the constant "
                             "ARTICLES_FILEPATH.")
    parser.add_argument("--germeval", required=False, action="store_const", const=True,
                        help="Whether to test on the german eval 2014 corpus.")
    args = parser.parse_args()

    ran_any = False
    # Test on the corpus set in ARTICLES_FILEPATH, if requested.
    if args.mycorpus:
        ran_any = True
        test_on_mycorpus(args)
    # Test on the germeval corpus, if requested.
    if args.germeval:
        ran_any = True
        test_on_germeval(args)
    if not ran_any:
        print("Expected either --mycorpus or --germeval flag")
def test_on_mycorpus(args):
    """Tests on the corpus set in ARTICLES_FILEPATH.
    Prints a full report, including precision, recall and F1 score per label.
    Args:
        args: Command line arguments as parsed by argparse.ArgumentParser.
    """
    filepath = cfg.ARTICLES_FILEPATH
    print("Testing on mycorpus (%s)..." % (filepath,))
    articles = load_articles(filepath)
    test_on_articles(args.identifier, articles, nb_append=cfg.COUNT_WINDOWS_TEST)
def test_on_germeval(args):
    """Tests on the germeval corpus.
    The germeval filepath is defined in GERMEVAL_FILEPATH.
    See https://sites.google.com/site/germeval2014ner/data .
    Args:
        args: Command line arguments as parsed by argparse.ArgumentParser.
    """
    filepath = cfg.GERMEVAL_FILEPATH
    print("Testing on germeval (%s)..." % (filepath,))
    test_on_articles(args.identifier, load_germeval(filepath))
def test_on_articles(identifier, articles, nb_append=None):
    """Test a trained CRF model on a list of Article objects (annotated text).
    Will print a full classification report by label (f1, precision, recall).
    Args:
        identifier: Identifier of the trained model to be used.
        articles: A list of Article objects or a generator for such a list. May only contain
            one single Article object.
        nb_append: Optional cap on the number of labeled examples to evaluate.
    """
    print("Loading tagger...")
    tagger = pycrfsuite.Tagger()
    tagger.open(identifier)

    # create feature generators
    # this may take a while
    print("Creating features...")
    feature_generators = features.create_features()

    # create window generator
    print("Loading windows...")
    windows = load_windows(articles, cfg.WINDOW_SIZE, feature_generators, only_labeled_windows=True)

    # load feature lists and label lists (X, Y)
    # this may take a while
    all_feature_values_lists = []
    correct_label_chains = []
    for fvlist, labels in generate_examples(windows, nb_append=nb_append):
        all_feature_values_lists.append(fvlist)
        correct_label_chains.append(labels)

    # generate predicted chains of labels, one chain per window
    print("Testing on %d windows..." % (len(all_feature_values_lists)))
    predicted_label_chains = [tagger.tag(fvlists) for fvlists in all_feature_values_lists]

    # print classification report (precision, recall, f1)
    print(bio_classification_report(correct_label_chains, predicted_label_chains))
def load_germeval(filepath):
    """Loads the source of the gereval 2014 corpus and converts it to a list of Article objects.
    Args:
        filepath: Filepath to the source file, e.g. "/var/foo/NER-de-test.tsv".
    Returns:
        List of Article (one Article per sentence of the corpus).
    """
    # Close the file handle deterministically (it was leaked before).
    with open(filepath, "r") as handle:
        lines = handle.readlines()
    # NOTE: .decode() implies this is Python 2 code reading byte strings.
    lines = [line.decode("utf-8").strip() for line in lines]
    # remove lines that are comments
    lines = [line for line in lines if line[0:1] != "#"]
    # remove all empty lines
    lines = [line for line in lines if len(line) > 0]

    sentence = []
    sentences = []
    for line in lines:
        blocks = line.split("\t")
        (number, word, tag1, _) = blocks  # 4th block would be tag2
        number = int(number)

        # A token number of 1 marks the start of a new sentence: flush the
        # previous one. Flushing *before* handling the current token (and
        # flushing the remainder after the loop) fixes a bug where the last
        # token of the file was silently dropped.
        if number == 1 and len(sentence) > 0:
            sentences.append(sentence)
            sentence = []

        # convert all labels containing OTH (OTHER) so MISC
        if "OTH" in tag1:
            tag1 = "MISC"

        # Add the word in an annotated way if the tag1 looks like one of the labels in the
        # allowed labels (config setting LABELS). We don't check for full equality here, because
        # that allows BIO tags (e.g. B-PER) to also be accepted. They will automatically be
        # normalized by the Token objects (which will also throw away unnormalizable annotations).
        # Notice that we ignore tag2 as tag1 is usually the more important one.
        contains_label = any([(label in tag1) for label in cfg.LABELS])
        is_blacklisted = any([(bl_label in tag1) for bl_label in ["part", "deriv"]])
        if contains_label and not is_blacklisted:
            sentence.append(word + "/" + tag1)
        else:
            sentence.append(word)

    # Flush the final sentence of the file.
    if sentence:
        sentences.append(sentence)

    return [Article(" ".join(sentence)) for sentence in sentences]
def bio_classification_report(y_true, y_pred):
    """
    Classification report for a list of BIO-encoded sequences.
    It computes token-level metrics and discards "O" labels.
    Note that it requires scikit-learn 0.15+ (or a version from github master)
    to calculate averages properly!
    Note: This function was copied from
    http://nbviewer.ipython.org/github/tpeng/python-crfsuite/blob/master/examples/CoNLL%202002.ipynb
    Args:
        y_true: True labels, list of strings
        y_pred: Predicted labels, list of strings
    Returns:
        classification report as string
    """
    binarizer = LabelBinarizer()
    # Flatten the per-sentence label chains into one long token sequence.
    flat_true = list(chain.from_iterable(y_true))
    flat_pred = list(chain.from_iterable(y_pred))
    true_binarized = binarizer.fit_transform(flat_true)
    pred_binarized = binarizer.transform(flat_pred)

    #tagset = set(binarizer.classes_) - {NO_NE_LABEL}
    # Sort tags by entity type first, then by their B-/I- prefix.
    tagset = sorted(set(binarizer.classes_),
                    key=lambda tag: tag.split('-', 1)[::-1])
    class_indices = {cls: idx for idx, cls in enumerate(binarizer.classes_)}

    return classification_report(
        true_binarized,
        pred_binarized,
        labels=[class_indices[cls] for cls in tagset],
        target_names=tagset,
    )
# ----------------------
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"kontakt@ajung.name"
] |
kontakt@ajung.name
|
bb64d0fbc08d3ddcc68c3f4237687110590e6a79
|
de96be8237ee349bee3659adb34bf12e73334f85
|
/google/domains.py
|
543129b75f1341041bb9899956ece3bb035dc2a3
|
[] |
no_license
|
hezhen/spider-course-4
|
f79e44d6ab1001dbb80bb98ef78e9ecd41b75461
|
02e2f65c5625e02d301e920560918f10769f2d6e
|
refs/heads/master
| 2020-03-12T19:03:32.874875
| 2018-10-05T08:42:57
| 2018-10-05T08:42:57
| 130,776,197
| 43
| 33
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
import re
from lxml import etree
# Extract every external link pointing at a google.* domain from a saved
# copy of the Wikipedia "List of Google domains" page, and print them as a
# JSON-ish array literal.
# (Fixed: the file handle is now closed via a with-block even on error.)
with open('List of Google domains - Wikipedia.htm', 'rb+') as f:
    html = f.read()

tree = etree.HTML(html)
google_links = []
# The domain links live in <td><span><a class="external text"> cells.
external_links = tree.xpath('//td/span/a[@class="external text"]')
for external_link in external_links:
    link_str = external_link.attrib['href']
    if link_str.find('http://google.') != -1:
        # Strip the leading 'http://' (7 characters).
        google_links.append(link_str[7:])
print( '[\"' + '\",\"'.join(google_links) + '\"]')
|
[
"hezhen112058@pwrd.com"
] |
hezhen112058@pwrd.com
|
57583ef833feb7fed1f2a1302277c5c6f0a9010c
|
6e6785851f2d149faa25f907995a167b4b9a2330
|
/app.py
|
4cab0948530d82f4776386da0b97b2334f0122d3
|
[] |
no_license
|
b1naryth1ef/catify
|
5456929dff5ec2d4e525be5f45e54581455d60d1
|
6b43b17894d8d331be36b6136a4f35023b351416
|
refs/heads/master
| 2020-04-26T15:52:41.036896
| 2013-05-08T07:55:50
| 2013-05-08T07:55:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
from flask import Flask, render_template, send_file, request
from StringIO import StringIO
import os, sys, time, base64
import Image
from face_detect import catify
app = Flask(__name__)
# NOTE(review): placeholder secret key — replace before deploying.
app.secret_key = "change_me"


@app.route('/')
def routeRoot():
    # Serve the upload form.
    return render_template("index.html")


@app.route("/catify", methods=["POST"])
def routeCatify():
    # Accept an uploaded .png/.jpg image, run catify() on it, and render the
    # result inline as a base64-encoded JPEG in the same template.
    if request.method == 'POST':
        f = request.files['file']
        if f and f.filename.rsplit('.', 1)[1] in ["png", "jpg"]:
            out = catify(Image.open(f))
            img_io = StringIO()
            # Re-encode the processed image as JPEG at quality 70.
            out.save(img_io, 'JPEG', quality=70)
            img_io.seek(0)
            img_data = base64.b64encode(img_io.read())
            return render_template("index.html", imgdata=img_data)#send_file(img_io, mimetype='image/jpeg')
        else:
            # Python 2 print statement: log the rejected extension.
            print f.filename.rsplit('.', 1)[1]
            return "Error #1"
    return "Error #2"


if __name__ == '__main__':
    app.run(debug=True)
|
[
"b1naryth1ef@gmail.com"
] |
b1naryth1ef@gmail.com
|
8af551e427cc06435e45752c8d1c0fe5586808de
|
846a7668ac964632bdb6db639ab381be11c13b77
|
/android/tools/test/connectivity/acts/tests/google/bt/car_bt/BtCarPairedConnectDisconnectTest.py
|
6a695d6f7fb1d7314d170dfe6d5d9c0e1b554119
|
[] |
no_license
|
BPI-SINOVOIP/BPI-A64-Android8
|
f2900965e96fd6f2a28ced68af668a858b15ebe1
|
744c72c133b9bf5d2e9efe0ab33e01e6e51d5743
|
refs/heads/master
| 2023-05-21T08:02:23.364495
| 2020-07-15T11:27:51
| 2020-07-15T11:27:51
| 143,945,191
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,208
|
py
|
#/usr/bin/env python3.4
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Test script to test connect and disconnect sequence between two devices which can run
SL4A. The script does the following:
Setup:
Clear up the bonded devices on both bluetooth adapters and bond the DUTs to each other.
Test (NUM_TEST_RUNS times):
1. Connect A2dpSink and HeadsetClient
1.1. Check that devices are connected.
2. Disconnect A2dpSink and HeadsetClient
2.1 Check that devices are disconnected.
"""
import time
from acts.test_decorators import test_tracker_info
from acts.test_utils.bt.BluetoothBaseTest import BluetoothBaseTest
from acts.base_test import BaseTestClass
from acts.test_utils.bt import bt_test_utils
from acts.test_utils.bt import BtEnum
from acts import asserts
class BtCarPairedConnectDisconnectTest(BluetoothBaseTest):
    """Connect/disconnect test between two paired SL4A devices.

    android_devices[0] plays the car (A2DP-sink / HFP-client) role and
    android_devices[1] the phone role; setup_class pairs them once, then the
    test cases connect and disconnect profiles and verify the state.
    """

    def setup_class(self):
        self.car = self.android_devices[0]
        self.ph = self.android_devices[1]
        self.car_bt_addr = self.car.droid.bluetoothGetLocalAddress()
        self.ph_bt_addr = self.ph.droid.bluetoothGetLocalAddress()
        bt_test_utils.setup_multiple_devices_for_bt_test([self.car, self.ph])
        # Pair the devices.
        # This call may block until some specified timeout in bt_test_utils.py.
        result = bt_test_utils.pair_pri_to_sec(
            self.car, self.ph, auto_confirm=False)
        asserts.assert_true(result, "pair_pri_to_sec returned false.")
        # Check for successful setup of test.
        devices = self.car.droid.bluetoothGetBondedDevices()
        asserts.assert_equal(
            len(devices), 1,
            "pair_pri_to_sec succeeded but no bonded devices.")

    @test_tracker_info(uuid='b0babf3b-8049-4b64-9125-408efb1bbcd2')
    @BluetoothBaseTest.bt_test_wrap
    def test_pairing(self):
        """
        Tests if we can connect two devices over A2dp and then disconnect
        Precondition:
        1. Devices are paired.
        Steps:
        1. Set the priority to OFF for all profiles.
        2. Initiate connection over A2dp Sink client profile.
        Returns:
        Pass if True
        Fail if False
        """
        # Set the priority to OFF for all profiles.
        self.car.droid.bluetoothHfpClientSetPriority(
            self.ph.droid.bluetoothGetLocalAddress(),
            BtEnum.BluetoothPriorityLevel.PRIORITY_OFF.value)
        self.ph.droid.bluetoothHspSetPriority(
            self.car.droid.bluetoothGetLocalAddress(),
            BtEnum.BluetoothPriorityLevel.PRIORITY_OFF.value)
        addr = self.ph.droid.bluetoothGetLocalAddress()
        # With priorities OFF, the connection must be requested explicitly;
        # fall back to a direct state check before declaring failure.
        if not bt_test_utils.connect_pri_to_sec(
                self.car, self.ph,
                set([BtEnum.BluetoothProfile.A2DP_SINK.value])):
            if not bt_test_utils.is_a2dp_snk_device_connected(self.car, addr):
                return False
        return True

    @test_tracker_info(uuid='a44f13e2-c012-4292-8dd5-9f32a023e297')
    @BluetoothBaseTest.bt_test_wrap
    def test_connect_disconnect_paired(self):
        """
        Tests if we can connect two devices over Headset, A2dp and then disconnect them with success
        Precondition:
        1. Devices are paired.
        Steps:
        1. Initiate connection over A2dp Sink and Headset client profiles.
        2. Check if the connection succeeded.
        Returns:
        Pass if True
        Fail if False
        Priority: 0
        """
        NUM_TEST_RUNS = 2
        failure = 0
        addr = self.ph.droid.bluetoothGetLocalAddress()
        for i in range(NUM_TEST_RUNS):
            self.log.info("Running test [" + str(i) + "/" + str(NUM_TEST_RUNS)
                          + "]")
            success = bt_test_utils.connect_pri_to_sec(
                self.car, self.ph,
                set([
                    BtEnum.BluetoothProfile.HEADSET_CLIENT.value,
                    BtEnum.BluetoothProfile.A2DP_SINK.value
                ]))
            # Check if we got connected.
            if not success:
                self.car.log.info("Not all profiles connected.")
                # Even if the helper reported failure, accept the run when a
                # direct state query says both profiles are connected.
                if (bt_test_utils.is_hfp_client_device_connected(self.car,
                                                                 addr) and
                        bt_test_utils.is_a2dp_snk_device_connected(self.car,
                                                                   addr)):
                    self.car.log.info(
                        "HFP Client or A2DP SRC connected successfully.")
                else:
                    failure = failure + 1
                    continue
            # Disconnect the devices.
            success = bt_test_utils.disconnect_pri_from_sec(
                self.car, self.ph, [
                    BtEnum.BluetoothProfile.HEADSET_CLIENT.value,
                    BtEnum.BluetoothProfile.A2DP_SINK.value
                ])
            if success is False:
                self.car.log.info("Disconnect failed.")
                # Count a failure only if either profile is still connected.
                if (bt_test_utils.is_hfp_client_device_connected(self.car,
                                                                 addr) or
                        bt_test_utils.is_a2dp_snk_device_connected(self.car,
                                                                   addr)):
                    self.car.log.info(
                        "HFP Client or A2DP SRC failed to disconnect.")
                    failure = failure + 1
                    continue
        self.log.info("Failure {} total tests {}".format(failure,
                                                         NUM_TEST_RUNS))
        if failure > 0:
            return False
        return True
|
[
"mingxin.android@gmail.com"
] |
mingxin.android@gmail.com
|
5128534db0c281779b7258ce43806ef7569a716f
|
c22b9c7c4a854ed985e777bcbecd18870439b334
|
/hardway/print.py
|
1206e22d567c83747e1a692a7553958fbbbdc237
|
[
"BSD-3-Clause"
] |
permissive
|
pezy/python_test
|
ceb35a8a63ca8ebe26ffa5c72ace664718c7b328
|
b019a0d9f267b5071c37fc85c9acaf27e9146625
|
refs/heads/master
| 2021-01-19T01:09:20.820202
| 2016-07-30T08:35:15
| 2016-07-30T08:35:15
| 18,096,404
| 0
| 2
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 334
|
py
|
# -- coding: utf-8 --
# "Learn Python the Hard Way"-style exercise: Python 2 print statements,
# quote escaping, and comma-joined output on one line.
print "Hello, World!"
print "Hello Again"
print "I like typing this"
print "This is fun"
print 'Yet! Printing.'
print "I'd much rather you 'not'."
print 'I "said" do not touch this.'
# NOTE(review): the next literal is mojibake (the file is WINDOWS-1252 and
# the bytes likely once encoded non-ASCII text); kept byte-for-byte.
print "ÖÐÎÄ£¡"
# this is comment
# you can print one line by ','
print "test just one line,","also one line,","yes good!"
|
[
"urbancpz@gmail.com"
] |
urbancpz@gmail.com
|
d7b93c89b2e2a0584c469b21d001ed6aeca48808
|
e0219f54839b1d19a2509d1320d2640c8fe9bb79
|
/zinnia/sitemaps.py
|
128a7a1fe160bfbf74fce522eadd2d57ece876dc
|
[] |
no_license
|
alsoicode/django-blog-zinnia
|
9648bd53e079e9ae1a8a0b64e5ef58821bb54cc2
|
6f015e0944ca60ea0e9cd7c1c2434666f5c544b5
|
refs/heads/master
| 2021-01-17T08:33:34.814990
| 2010-05-11T17:40:08
| 2010-05-11T17:40:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,762
|
py
|
"""Sitemaps for Zinnia"""
from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from tagging.models import Tag
from tagging.models import TaggedItem
from zinnia.models import Entry
from zinnia.models import Category
from zinnia.managers import entries_published
from zinnia.managers import authors_published
class EntrySitemap(Sitemap):
    """Sitemap for entries"""
    # Fixed priority; published entries rarely change, hence 'never'.
    priority = 0.5
    changefreq = 'never'

    def items(self):
        # Only published entries are exposed in the sitemap.
        return Entry.published.all()

    def lastmod(self, obj):
        return obj.last_update
class CategorySitemap(Sitemap):
    """Sitemap for categories.

    Each category's priority is 0.5 plus its share of all published
    entries, capped at 1.0.
    """
    changefreq = 'monthly'

    def cache(self, categories=()):
        """Pre-compute, per category pk, its share of the published entries.

        The default is an immutable empty tuple — the original mutable
        ``[]`` default was the classic shared-default-argument pitfall.
        """
        len_entries = float(Entry.published.count())
        self.cache_categories = {}
        for cat in categories:
            self.cache_categories[cat.pk] = cat.entries_published_set().count() / len_entries

    def items(self):
        categories = Category.objects.all()
        self.cache(categories)
        return categories

    def lastmod(self, obj):
        # Creation date of the category's first published entry, or None
        # when the category has no published entries.
        entries = entries_published(obj.entry_set)
        if not entries:
            return None
        return entries[0].creation_date

    def priority(self, obj):
        # 0.5 base plus the category's entry share, capped at 1.0;
        # returned as a one-decimal string per the sitemap convention here.
        priority = 0.5 + self.cache_categories[obj.pk]
        if priority > 1.0:
            priority = 1.0
        return '%.1f' % priority
class AuthorSitemap(Sitemap):
    """Sitemap for authors"""
    priority = 0.5
    changefreq = 'monthly'

    def items(self):
        return authors_published()

    def lastmod(self, obj):
        # entries[0] is assumed to be the newest entry (manager default
        # ordering) — TODO confirm; None when the author has no entries.
        entries = entries_published(obj.entry_set)
        if not entries:
            return None
        return entries[0].creation_date

    def location(self, obj):
        return reverse('zinnia_author_detail', args=[obj.username])
class TagSitemap(Sitemap):
    """Sitemap for tags.

    Each tag's priority is 0.5 plus its share of all published entries,
    capped at 1.0.
    """
    changefreq = 'monthly'

    def cache(self, tags=()):
        """Pre-compute, per tag pk, its tagged entries and their share of
        the published entries.

        The default is an immutable empty tuple — the original mutable
        ``[]`` default was the classic shared-default-argument pitfall.
        """
        len_entries = float(Entry.published.count())
        self.cache_tags = {}
        for tag in tags:
            entries = TaggedItem.objects.get_by_model(Entry.published.all(), tag)
            self.cache_tags[tag.pk] = (entries, entries.count() / len_entries)

    def items(self):
        tags = Tag.objects.all()
        self.cache(tags)
        return tags

    def lastmod(self, obj):
        # Creation date of the tag's first cached entry, or None when the
        # tag has no published entries.
        entries = self.cache_tags[obj.pk][0]
        if not entries:
            return None
        return entries[0].creation_date

    def priority(self, obj):
        # 0.5 base plus the tag's entry share, capped at 1.0;
        # returned as a one-decimal string per the sitemap convention here.
        priority = 0.5 + self.cache_tags[obj.pk][1]
        if priority > 1.0:
            priority = 1.0
        return '%.1f' % priority

    def location(self, obj):
        return reverse('zinnia_tagged_entry_list', args=[obj.name])
|
[
"fantomas42@gmail.com"
] |
fantomas42@gmail.com
|
eaa874e35cb8e52f4390453e8611fca1b9c5ce04
|
52a00bbbe9cb90e46a913e9ef5facb04d25cf8b7
|
/todolist/tasks/models.py
|
1fbec0968940b51982867f96c8a02c37a1be2c5e
|
[] |
no_license
|
agbin/todoList
|
9852378487e6d80bf6bf944f357d6b6824a4c694
|
3a5fee7d38c3f55c2f2432d5a47fc619cf2cfce0
|
refs/heads/master
| 2020-03-18T17:30:02.649979
| 2018-09-30T15:13:32
| 2018-09-30T15:13:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
from django.db import models
from colorfield.fields import ColorField
class Category(models.Model):
    """A named, color-coded grouping for tasks."""
    name = models.CharField(
        verbose_name="Name",
        max_length=120
    )
    # Display color for the category, stored as a hex string.
    color = ColorField(default='#FF0000')

    class Meta:
        verbose_name = "Category"
        verbose_name_plural = "Categories"

    def __str__(self):
        return self.name
class Task(models.Model):
    """A to-do item that belongs to exactly one Category."""
    # Deleting a category cascades to its tasks.
    category = models.ForeignKey(
        Category,
        verbose_name="Category",
        on_delete=models.CASCADE
    )
    name = models.CharField(
        verbose_name="Name",
        max_length=120
    )
    description = models.TextField(
        verbose_name="Description"
    )

    class Meta:
        verbose_name = "Task"
        # NOTE(review): plural is "Task" (singular) — possibly a typo for
        # "Tasks", but kept as-is since changing it alters admin display.
        verbose_name_plural = "Task"

    def __str__(self):
        return self.name
|
[
"agnieszka.bin@gmail.com"
] |
agnieszka.bin@gmail.com
|
40c2680cba8adb4f45b6f169538fa7388bd4ffaf
|
ccd1dced3b39f970c4d1b41f03d372b71a360194
|
/property/migrations/0007_category_image.py
|
ab56a4074b926265ba07b2dd955a4f49c3fd6011
|
[] |
no_license
|
joescaos/hotel-ecommerce-site
|
5ab815f85b7e4d09a06b963a7785010c068a24d8
|
0b40aaf73d0d6241df88fa4dfe6fa63d868ee9aa
|
refs/heads/master
| 2022-09-29T07:59:02.474986
| 2020-06-06T00:51:55
| 2020-06-06T00:51:55
| 269,823,623
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
# Generated by Django 3.0.5 on 2020-05-31 01:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a nullable 'image' field to
    property.Category (uploads go under MEDIA_ROOT/category/)."""

    dependencies = [
        ('property', '0006_auto_20200521_2139'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='image',
            field=models.ImageField(null=True, upload_to='category/'),
        ),
    ]
|
[
"jxexcxo@gmail.com"
] |
jxexcxo@gmail.com
|
b8e08f8929644608f02c3d6e6fc8a410cd9c05cd
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayFundEnterprisepayMemberModifyModel.py
|
d1d2f85425b69bf336ad21837ecf8dd2b333fe2d
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,152
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.FundExtInfo import FundExtInfo
class AlipayFundEnterprisepayMemberModifyModel(object):
    """Request model for alipay.fund.enterprisepay.member.modify.

    A plain data holder: every business field is exposed as a property and
    the model serializes to/from the dict shape used by the Alipay gateway.
    Falsy fields are omitted on serialization, matching the original
    generated code.
    """

    # Every business field, in the gateway's key order.
    _FIELD_NAMES = ('account_id', 'agreement_no', 'biz_scene', 'fund_ext_info',
                    'group_id_list', 'open_id', 'operation_type_list',
                    'product_code', 'user_id')

    def __init__(self):
        self._account_id = None
        self._agreement_no = None
        self._biz_scene = None
        self._fund_ext_info = None
        self._group_id_list = None
        self._open_id = None
        self._operation_type_list = None
        self._product_code = None
        self._user_id = None

    @property
    def account_id(self):
        return self._account_id

    @account_id.setter
    def account_id(self, value):
        self._account_id = value

    @property
    def agreement_no(self):
        return self._agreement_no

    @agreement_no.setter
    def agreement_no(self, value):
        self._agreement_no = value

    @property
    def biz_scene(self):
        return self._biz_scene

    @biz_scene.setter
    def biz_scene(self, value):
        self._biz_scene = value

    @property
    def fund_ext_info(self):
        return self._fund_ext_info

    @fund_ext_info.setter
    def fund_ext_info(self, value):
        # Accept either a FundExtInfo instance or its dict representation.
        self._fund_ext_info = value if isinstance(value, FundExtInfo) \
            else FundExtInfo.from_alipay_dict(value)

    @property
    def group_id_list(self):
        return self._group_id_list

    @group_id_list.setter
    def group_id_list(self, value):
        # Only list inputs are accepted; a shallow copy is stored.
        if isinstance(value, list):
            self._group_id_list = list(value)

    @property
    def open_id(self):
        return self._open_id

    @open_id.setter
    def open_id(self, value):
        self._open_id = value

    @property
    def operation_type_list(self):
        return self._operation_type_list

    @operation_type_list.setter
    def operation_type_list(self, value):
        # Only list inputs are accepted; a shallow copy is stored.
        if isinstance(value, list):
            self._operation_type_list = list(value)

    @property
    def product_code(self):
        return self._product_code

    @product_code.setter
    def product_code(self, value):
        self._product_code = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize every truthy field into the gateway's dict format."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                # Falsy fields are omitted, matching the gateway contract.
                continue
            if isinstance(value, list):
                # Convert convertible list elements in place.
                for i, element in enumerate(value):
                    if hasattr(element, 'to_alipay_dict'):
                        value[i] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a gateway dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayFundEnterprisepayMemberModifyModel()
        for name in AlipayFundEnterprisepayMemberModifyModel._FIELD_NAMES:
            if name in d:
                # Assignment goes through the property setters, which also
                # handle FundExtInfo conversion and list copying.
                setattr(o, name, d[name])
        return o
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
8dd0a54e2c18cc359f8bbc9a5a06659f45269823
|
4efd4fe7e848dc5973e350516ebfb57be015f5b6
|
/inline_media/tests/parser.py
|
111038f50b1f76514967b44f63baef82dc66db13
|
[
"BSD-2-Clause",
"CC-BY-2.5"
] |
permissive
|
MechanisM/django-inline-media
|
468648eb7a8b909a4204fc3c05788a73988a7855
|
e3ef3def173bcc9ac540123afe467894b635db85
|
refs/heads/master
| 2021-01-19T08:23:07.228198
| 2012-02-22T18:09:10
| 2012-02-22T18:09:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,221
|
py
|
#-*- coding: utf-8 -*-
import os
import shutil
import tempfile
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase as DjangoTestCase
from inline_media.models import InlineType
from inline_media.parser import MySoup, inlines, render_inline
from inline_media.widgets import TextareaWithInlines
from inline_media.tests.models import MediaModelTest
selfClosingTags = ['inline','img','br','input','meta','link','hr']
class ParserTestCase(DjangoTestCase):
    """Exercise inline_media's parser: render_inline() and inlines().

    setUp registers the test-only MediaModelTest model as an InlineType and
    builds a sample ``<inline .../>`` tag pointing at a freshly created object.
    """
    def setUp(self):
        test_content_type = ContentType.objects.get(app_label="tests", model="modeltest")
        InlineType.objects.create(title="testobj", content_type=test_content_type)
        self.obj = MediaModelTest.objects.create(title="The Title", description="Blah blah ...")
        self.tag = u'<inline type="%(type)s" id="%(id)d" class="%(class)s" />' % {
            "type": "tests.mediamodeltest", "id": self.obj.id, "class": "inline_small_left" }

    def test_render_inline(self):
        # render_inline() must resolve the tag into a (template, context) pair.
        soup = MySoup(self.tag, selfClosingTags=selfClosingTags)
        rendered_inline = render_inline(soup.find("inline"))
        # assertTrue replaces the assert_ alias deprecated since Python 2.7.
        self.assertTrue(rendered_inline.get("context", None) != None)
        self.assertTrue(rendered_inline.get("template", None) != None)
        self.assertTrue(rendered_inline["context"]["object"] == self.obj)
        self.assertTrue(rendered_inline["context"]["class"] == u'inline_small_left')
        self.assertTrue(rendered_inline["context"]["content_type"] == u'tests.mediamodeltest')
        self.assertTrue(rendered_inline["template"] == u'inline_media/tests_mediamodeltest.html')

    def test_inlines_with_return_list_false(self):
        # With return_list=False the tag is replaced by rendered HTML.
        html_content = inlines(self.tag, return_list=False)
        self.assertEqual(
            '<div class="inline_small_left"><H3>The Title</H3><p>Blah blah ...</p></div>\n',
            html_content)

    def test_inlines_with_return_list_true(self):
        # With return_list=True the raw template contexts are returned.
        inline_list = inlines(self.tag, return_list=True)
        self.assertTrue(len(inline_list) == 1)
        self.assertTrue(inline_list[0]["object"] == self.obj)
        self.assertTrue(inline_list[0]["class"] == u'inline_small_left')
        self.assertTrue(inline_list[0]["content_type"] == u'tests.mediamodeltest')
|
[
"danirus@eml.cc"
] |
danirus@eml.cc
|
ae75a4e4e98e779462934871276bedbdd4d46e90
|
431a1f738b1edfba7dad8d10a6b7520d51d917cb
|
/Samples/UserSamples/2018/ggH_Splits/ggH2Config.py
|
88310b478b17319c11136fe426fcd9265a083a66
|
[] |
no_license
|
aloeliger/DatacardCreator
|
5ce702e46fbb77e843b44d8fe088c2645a4a8f66
|
5c7e890276a5be079ed3b677a471c1dcadcba52d
|
refs/heads/master
| 2022-02-26T19:52:30.563747
| 2022-02-16T20:24:48
| 2022-02-16T20:24:48
| 215,602,523
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
from Samples.SampleDefinition import Sample
from Samples.Uncertainties.UserUncertainties.TES import TESUncertainty
from Samples.Uncertainties.UserUncertainties.JES import JESUncertainty
from Samples.Uncertainties.UserUncertainties.ggHTheory import ggHTheoryUncertainty
from Samples.Uncertainties.UserUncertainties.MetRecoil import MetRecoilUncertainty
from Samples.Uncertainties.UserUncertainties.MuonES import MuonESUncertainty
from Samples.Uncertainties.UserUncertainties.TauID import TauIDUncertainty
from Samples.Uncertainties.UserUncertainties.Trigger17_18 import Trigger1718Uncertainty
from Samples.EventDefinition.UserEventDictionaries.MuTauEventDictionary import MuTauEventDictionary
# Sample definition for the ggH (gg->H->tautau, mH=125) 2018 MC sample in the
# mu-tau channel; this file covers the second split of the event range.
ggHSample = Sample()
ggHSample.name = 'ggH_htt125'
ggHSample.path = '/data/aloeliger/SMHTT_Selected_2018_Deep/'
ggHSample.files = ['ggH.root']
ggHSample.definition = ''
# Systematic uncertainties evaluated for this sample.
ggHSample.uncertainties = [
    TESUncertainty(),
    JESUncertainty(),
    ggHTheoryUncertainty(),
    MetRecoilUncertainty(),
    MuonESUncertainty(),
    TauIDUncertainty(),
    Trigger1718Uncertainty(),
]
ggHSample.eventDictionaryInstance = MuTauEventDictionary
# Use the standard per-event weight calculation.
ggHSample.CreateEventWeight = ggHSample.CreateEventWeight_Standard
# Entry range [startEntry, endEntry) processed by this split.
ggHSample.startEntry = 30100
ggHSample.endEntry = 60200
|
[
"aloelige@cern.ch"
] |
aloelige@cern.ch
|
8e37aac511b0cae4341fac4bc5e433d598934167
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/mcl1_input/L41/41-35_wat_20Abox/set_2.py
|
0d09b429cc615aa8547489fd53727cb51b0633aa
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
import os
# Per-lambda setup for a one-step TI (thermodynamic integration) run:
# for every lambda window, instantiate the production input and PBS script
# from templates by substituting the placeholder XXX with the lambda value.
# NOTE(review): `dir` shadows the builtin of the same name — harmless here,
# but worth renaming if this script grows.
dir = '/mnt/scratch/songlin3/run/mcl1/L41/wat_20Abox/ti_one-step/41_35/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'
temp_pbs = filesdir + 'temp_2.pbs'
# Lambda windows (Gaussian-quadrature-like spacing on [0, 1]).
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Each window has its own directory named after the lambda value.
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    #prodin: copy the template, then substitute XXX with this lambda.
    prodin = workdir + "%6.5f_prod_2.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS: same copy-and-substitute for the batch script.
    pbs = workdir + "%6.5f_2.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #submit pbs (left disabled; enable to queue the jobs immediately)
    #os.system("qsub %s" %(pbs))
    # Return to the base directory before the next window.
    os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
3d11100956a6fec50f50a061926d77f2677a4c40
|
a62d603a0b31ccd77f9b2035c2740d4d25f2408d
|
/artistforum/artistforum/urls.py
|
b2b0502da749d37a8d5dbe8825c2d844170f78a6
|
[] |
no_license
|
shushantkumar/Projects
|
44751949c2787e1ae4bb3909e0b37a6210680352
|
e6ce305dc80ec7bd258e213271d0292f8f1e3cfd
|
refs/heads/master
| 2021-01-02T22:34:35.854166
| 2018-04-22T07:30:37
| 2018-04-22T07:30:37
| 99,346,113
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
"""artistforum URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf.urls import url
from artist.views import redirectToAuth, yellowantRedirectUrl

# URL routes: the admin site, the artist app mounted at the root, plus two
# explicit endpoints used during the YellowAnt OAuth handshake.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('artist.urls')),
    path('getyaauthurl/', redirectToAuth, name='redirectToAuth'),
    path('redirecturl/', yellowantRedirectUrl, name='yellowantRedirectUrl')
]
|
[
"shushantkmr2@gmail.com"
] |
shushantkmr2@gmail.com
|
5ced8005d7ca2b0d24eada8cddaa00c4a3825af9
|
696c1a00fbf09da67c37de2406c5394a5edcb166
|
/tests/combined_model_tests/combined_model_tests.py
|
4854a4daeffff3293082bec9ada559fe26bfd617
|
[] |
no_license
|
mengyx-work/xgboost_hyperopt
|
7f52c5aa3fff81d029e3879630b373d79c4155bb
|
bbd80c489cb308309d45f1de1cc2676b13e29b6b
|
refs/heads/master
| 2021-01-15T15:41:38.622028
| 2017-05-14T21:05:46
| 2017-05-14T21:05:46
| 55,736,994
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,312
|
py
|
import pandas as pd
import numpy as np
import os, sys, time
import yaml
import cPickle as pickle
sys.path.append('/home/ymm/kaggle/xgboost_hyperopt')
from utils.bosch_functions import load_processed_bosch_data
from utils.models import CombinedModel
from utils.validation_tools import score_MCC, MCC, create_validation_index, cross_validate_model
# Target column of the Bosch dataset.
dep_var_name = 'Response'
## params for combined model
raw_models_yaml_file = 'raw_combined_models.yml'
trained_model_yaml_file = 'trained_combined_model.yml'
project_path = './tmp'
raw_models_yaml_path = './'
## 15 bins data
project_yml_path = '/mnt/home/ymm/kaggle/compete/current'
data_path = '/home/ymm/kaggle/bosch_data/bosch_complete_processed_15_bins_data'
data_yaml_file = 'bosch_processed_data_dict.yml'
# Only a 10k-row slice of data index '0' is loaded for these tests.
train = load_processed_bosch_data(data_path, data_yaml_file, data_index='0', nrows=10000)
## train the combined model
combined_model_params = {}
combined_model_params['raw_models_yaml_file'] = raw_models_yaml_file
combined_model_params['raw_models_yaml_path'] = raw_models_yaml_path
combined_model_params['project_path'] = project_path
combined_model_params['models_yaml_file'] = trained_model_yaml_file
## build the combined model
combined_model = CombinedModel(combined_model_params)
## warning! the multiple combined tests below will overwrite each other's results
#'''
############## Section of regular validation #######################
# 50/50 stratified split, then fit on the full train set and score the
# validation half with Matthews correlation coefficient (MCC).
train_index, valid_index = create_validation_index(train, 0.5, dep_var_name, True)
valid_data = train.ix[valid_index]
tmp_train = train.ix[train_index]
combined_model.fit(train, dep_var_name)
pred_df = combined_model.predict(valid_data)
print 'MCC score from validation: ', MCC(valid_data[dep_var_name], pred_df)
#print score_MCC(valid_data[dep_var_name], pred_df)
#'''
#'''
############## Section of using cross validation #######################
## cross-validate any combined model (3 folds)
results = cross_validate_model(train, dep_var_name, combined_model, score_MCC, 3)
print results
#'''
############## Section of cross_valiate fit #######################
# NOTE(review): cross_vlidate_fit matches the (misspelled) method name
# exposed by CombinedModel — confirm against utils.models before renaming.
combined_model.cross_vlidate_fit(train, dep_var_name)
pred_df = combined_model.predict(valid_data, score_conversion_type='A')
print 'MCC score from cross_valiate_fit: ', MCC(valid_data[dep_var_name], pred_df)
|
[
"ymeng.ucla@gmail.com"
] |
ymeng.ucla@gmail.com
|
7168d5ad5f15baa04059a1819cae512b8d6ccff2
|
cf4e5165a8408344a4c62e63a0fd2d0fe6308b37
|
/15期/15 flask框架/13-使用蓝图划分模块/orders.py
|
a78452943880b1d6638e7eeb4f45bfaf6f0516f5
|
[] |
no_license
|
kennycaiguo/Heima-Python-2018
|
5f8c340e996d19f2b5c44d80ee7c144bf164b30e
|
a8acd798f520ec3d079cc564594ebaccb9c232a0
|
refs/heads/master
| 2021-01-08T10:54:18.937511
| 2019-09-01T14:37:49
| 2019-09-01T14:37:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# coding=utf-8
from flask import Flask, Blueprint
# Create the blueprint object; a blueprint is essentially a small,
# self-contained module of views.
app_orders = Blueprint("app_orders", __name__, template_folder="templates")


@app_orders.route("/get_orders")
def get_orders():
    """Render the orders listing page."""
    return "get orders page"


# Bug fix: Flask requires every URL rule to start with a leading slash; the
# original rule "post_orders" raised ValueError at registration time.
@app_orders.route("/post_orders")
def post_orders():
    """Placeholder for the order-submission view (not implemented yet)."""
    pass
|
[
"microease@163.com"
] |
microease@163.com
|
a849ea55c243f6df2ba62ce912a03beb7a554c9b
|
bfc0a74a378d3692d5b033c21c29cf223d2668da
|
/unittests/pytests/utils/TestPylith.py
|
de1c2d7ed43609cfec1e7c351ee3ff7dead84b91
|
[
"MIT"
] |
permissive
|
rishabhdutta/pylith
|
b2ed9cd8039de33e337c5bc989e6d76d85fd4df1
|
cb07c51b1942f7c6d60ceca595193c59a0faf3a5
|
refs/heads/master
| 2020-12-29T01:53:49.828328
| 2016-07-15T20:34:58
| 2016-07-15T20:34:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,347
|
py
|
#!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2016 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file unittests/pytests/utils/TestPylith.py
## @brief Unit testing of Pylith module.
import unittest
# ----------------------------------------------------------------------
class TestPylith(unittest.TestCase):
    """Unit tests for the pylith.utils.utils C extension helpers."""

    def test_sizeofVoidPtr(self):
        """sizeofVoidPtr() is callable (smoke test; value is platform-dependent)."""
        from pylith.utils.utils import sizeofVoidPtr
        size = sizeofVoidPtr()
        return

    def test_sizeofPylithScalar(self):
        """PylithScalar is either single (4 bytes) or double (8 bytes) precision."""
        from pylith.utils.utils import sizeofPylithScalar
        size = sizeofPylithScalar()
        # assertTrue replaces the failUnless alias deprecated since Python 2.7.
        self.assertTrue(4 == size or 8 == size)
        return

    def test_isCUDAEnabled(self):
        """isCUDAEnabled() is callable (smoke test)."""
        from pylith.utils.utils import isCUDAEnabled
        value = isCUDAEnabled()
        return


# End of file
|
[
"baagaard@usgs.gov"
] |
baagaard@usgs.gov
|
cccc2caec8badcd13bf957235949812bb9349150
|
6ed48bf3c72e61fe53144a3545ab305112c93501
|
/appengine/findit/util_scripts/remote_api.py
|
d49a94ae16abf3e15f56036cd09699fb730490c3
|
[
"BSD-3-Clause"
] |
permissive
|
eunchong/infra
|
ee5f7a9379977de8c814f90dbba3f6adbf06a75c
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
refs/heads/master
| 2022-11-27T06:26:57.415805
| 2016-04-08T12:34:36
| 2016-04-08T12:34:36
| 55,699,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,991
|
py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module is used to set up Remote API to use services on App Engine.
After setup, available services include datastore, task queue, etc.
You may be prompted for credentials during the remote query or the like.
And you could use Remote API only when you are one of the project members.
For detail on usage of Remote API, please refer to:
https://cloud.google.com/appengine/docs/python/tools/remoteapi
"""
import os
import socket
import sys
_FINDIT_ROOT_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir)
_APPNGINE_SDK_DIR = os.path.join(_FINDIT_ROOT_DIR, os.path.pardir,
os.path.pardir, os.path.pardir,
'google_appengine')
# Add App Engine SDK dir to sys.path.
sys.path.insert(1, _APPNGINE_SDK_DIR)
import dev_appserver
dev_appserver.fix_sys_path()
# Add Findit root dir to sys.path so that modules in Findit would be available.
sys.path.insert(1, _FINDIT_ROOT_DIR)
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.ext.remote_api import remote_api_stub
def SetTimeoutForUrlOperations(url_blocking_operations_timeout=600):
  """Set timeout for url operations (socket, appengine db).

  Args:
    url_blocking_operations_timeout (int): Timeout in seconds applied both to
        the process-wide default socket timeout and to the App Engine
        urlfetch deadline.
  """
  socket.setdefaulttimeout(url_blocking_operations_timeout)
  urlfetch.set_default_fetch_deadline(url_blocking_operations_timeout)
def EnableRemoteApi(app_id='findit-for-me'):
  """Enable appengine services through remote API.

  Args:
    app_id (str): The appengine ID without '.appspot.com', eg. findit-for-me.
  """
  # Memoized per app_id on the function object itself, so repeated calls for
  # the same app are no-ops.
  if hasattr(EnableRemoteApi, app_id):
    return
  SetTimeoutForUrlOperations()
  # May prompt for OAuth credentials on first use.
  remote_api_stub.ConfigureRemoteApiForOAuth(
      '%s.appspot.com' % app_id,
      '/_ah/remote_api',
      secure=True,
      save_cookies=True)
  setattr(EnableRemoteApi, app_id, True)
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
117c969597f63a06bb56b3e40e60ba582e8ba33d
|
e1f87c26f973bd31da1f53dfef37ff4a8c7fd0b6
|
/packs/github/sensors/github_repository_sensor.py
|
80f7b909a2eb52584b114f38e3945c6d1560e86d
|
[
"Apache-2.0"
] |
permissive
|
meirwah/st2contrib
|
4470028cf467dfe33ccebe2ebb224c79edc6642e
|
0743c96abc04ccda983303c4bdb744929dc17fd2
|
refs/heads/master
| 2021-01-22T02:13:20.290982
| 2015-08-30T11:39:03
| 2015-08-30T11:39:03
| 38,318,169
| 1
| 2
| null | 2015-06-30T15:45:15
| 2015-06-30T15:45:15
| null |
UTF-8
|
Python
| false
| false
| 5,136
|
py
|
import eventlet
from github import Github
from st2reactor.sensor.base import PollingSensor
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
DATE_FORMAT_STRING = '%Y-%m-%d %H:%M:%S'
class GithubRepositorySensor(PollingSensor):
    """Polls configured GitHub repositories and dispatches a StackStorm
    trigger for every new event whose type is in EVENT_TYPE_WHITELIST.
    """
    EVENT_TYPE_WHITELIST = [
        'IssuesEvent',  # Triggered when an issue is assigned, unassigned, labeled, unlabeled,
                        # opened, closed, or reopened
        'IssueCommentEvent',  # Triggered when an issue comment is created
        'ForkEvent',  # Triggered when a user forks a repository,
        'WatchEvent'  # Triggered when a user stars a repository
    ]

    def __init__(self, sensor_service, config=None, poll_interval=None):
        super(GithubRepositorySensor, self).__init__(sensor_service=sensor_service,
                                                     config=config,
                                                     poll_interval=poll_interval)
        self._trigger_ref = 'github.repository_event'
        self._logger = self._sensor_service.get_logger(__name__)
        self._client = None
        self._repositories = []  # list of (name, Repository) tuples
        self._last_event_ids = {}  # repository name -> id of last processed event

    def setup(self):
        # Authenticate once and resolve every configured user/repository pair.
        self._client = Github(self._config['token'])
        for repository_dict in self._config['repository_sensor']['repositories']:
            user = self._client.get_user(repository_dict['user'])
            repository = user.get_repo(repository_dict['name'])
            self._repositories.append((repository_dict['name'], repository))

    def poll(self):
        # Called periodically by the sensor framework.
        for repository_name, repository_obj in self._repositories:
            self._logger.debug('Processing repository "%s"' %
                               (repository_name))
            self._process_repository(name=repository_name,
                                     repository=repository_obj)

    def _process_repository(self, name, repository):
        """
        Retrieve events for the provided repository and dispatch triggers for
        new events.

        :param name: Repository name.
        :type name: ``str``

        :param repository: Repository object.
        :type repository: :class:`Repository`
        """
        assert(isinstance(name, basestring))  # Python 2 only (basestring)
        count = self._config['repository_sensor']['count']
        events = repository.get_events()[:count]
        # Process oldest event first so _set_last_id ends on the newest one.
        events = list(reversed(list(events)))

        last_event_id = self._get_last_id(name=name)

        for event in events:
            if last_event_id and int(event.id) <= int(last_event_id):
                # This event has already been processed
                continue

            self._handle_event(repository=name, event=event)

        if events:
            self._set_last_id(name=name, last_id=events[-1].id)

    def cleanup(self):
        # No resources to release.
        pass

    def add_trigger(self, trigger):
        # Trigger management is not needed for this sensor.
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _get_last_id(self, name):
        """
        :param name: Repository name.
        :type name: ``str``
        """
        # Lazily restore the persisted marker from the datastore (if any).
        if not self._last_event_ids.get(name, None) and hasattr(self._sensor_service, 'get_value'):
            key_name = 'last_id.%s' % (name)
            self._last_event_ids[name] = self._sensor_service.get_value(name=key_name)

        return self._last_event_ids.get(name, None)

    def _set_last_id(self, name, last_id):
        """
        :param name: Repository name.
        :type name: ``str``
        """
        self._last_event_ids[name] = last_id

        # Persist the marker so processed events survive sensor restarts.
        if hasattr(self._sensor_service, 'set_value'):
            key_name = 'last_id.%s' % (name)
            self._sensor_service.set_value(name=key_name, value=last_id)

    def _handle_event(self, repository, event):
        # Filter by event type before dispatching.
        if event.type not in self.EVENT_TYPE_WHITELIST:
            self._logger.debug('Skipping ignored event (type=%s)' % (event.type))
            return

        self._dispatch_trigger_for_event(repository=repository, event=event)

    def _dispatch_trigger_for_event(self, repository, event):
        trigger = self._trigger_ref

        created_at = event.created_at

        if created_at:
            created_at = created_at.strftime(DATE_FORMAT_STRING)

        # Common attributes
        payload = {
            'repository': repository,
            'id': event.id,
            'created_at': created_at,
            'type': event.type,
            'actor': {
                'id': event.actor.id,
                'login': event.actor.login,
                'name': event.actor.name,
                'email': event.actor.email,
                # NOTE(review): 'loaction' looks like a typo for 'location';
                # fixing it would change the published payload schema, so
                # confirm with trigger consumers before renaming the key.
                'loaction': event.actor.location,
                'bio': event.actor.bio,
                'url': event.actor.html_url
            },
            'payload': {}
        }

        event_specific_payload = self._get_payload_for_event(event=event)
        payload['payload'] = event_specific_payload

        self._sensor_service.dispatch(trigger=trigger, payload=payload)

    def _get_payload_for_event(self, event):
        # Pass the event-specific payload through unchanged.
        payload = event.payload or {}
        return payload
|
[
"tomaz@tomaz.me"
] |
tomaz@tomaz.me
|
0cb32b495378935ca0dc58cd114e1c9f37142914
|
0c14e45bd96dcbdd344e038705822ffce90aba4e
|
/application.py
|
3bdd304a07cf828f4ba8632fc5a73c5751c60864
|
[] |
no_license
|
qiwsir/json-diff
|
624b18833832edaab62180a1bdf0c10c19f9fefc
|
bb5ae3798fcf24c0e3d20ba7cb6bd63dce217620
|
refs/heads/master
| 2021-01-16T21:08:39.725121
| 2014-11-13T15:25:11
| 2014-11-13T15:25:11
| 26,757,081
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,526
|
py
|
#! /usr/bin/env python
#coding:utf-8
from method import diffmethod
from dboption.mongodb import *
import json
import datetime
import time
import json_tools
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
def diff():
    """
    find the difference between the two JSONs.

    Compares the 'lastdb' and 'newdb' Mongo collections by _id and records
    three kinds of changes into db_change: newly added documents, deleted
    documents, and documents whose content changed (with a per-field diff).
    Python 2 only (uses cmp and print statements).
    """
    gene_last = [ ele["_id"] for ele in lastdb.find({},{'_id':1}) ]
    gene_new = [ ele["_id"] for ele in newdb.find({},{'_id':1}) ]
    geneid = diffmethod.OptLst(gene_last, gene_new)
    add_gene = geneid.addLst() #the list consisting of the IDs in the new collection different from the IDs in the old one
    shared_gene = geneid.shareLst() #the list consisting of the IDs in the new collection same as the IDs in the old one
    deleted_gene = geneid.deleLst() #the list consisting of the IDs in the old collection but not in the new collection
    #insert the new values into the database.
    if add_gene:
        for i in add_gene:
            one_gene = newdb.find_one({"_id":i})
            db_change.insert({"gene_id":i,"changes":[{"stat":"new_gene","value":one_gene}],"lastdb":last_date,"newdb":new_date})
    #store the deleted IDs
    if deleted_gene:
        for i in deleted_gene:
            # NOTE(review): one_gene is fetched but not stored for deletions.
            one_gene = lastdb.find_one({"_id":i})
            db_change.insert({"gene_id":i,"changes":[{"stat":"delete"}],"lastdb":last_date,"newdb":new_date})
    #store the records in which the values have been changed
    if shared_gene:
        # cmp(...) is non-zero (truthy) when the two documents differ.
        diff_gene = [i for i in shared_gene if cmp(lastdb.find_one({"_id":i},{"_id":0}),newdb.find_one({"_id":i},{"_id":0}))] #the list of the IDs of the changed records
        print "diff_gene_list:",len(diff_gene)
        if diff_gene:
            for i in diff_gene:
                last_content = lastdb.find_one({"_id":i},{"_id":0})
                new_content = newdb.find_one({"_id":i},{"_id":0})
                diff = diffmethod.DiffJson(last_content, new_content)
                diff_lst = diff.diffDict()
                changes_value = diff.changesValue(diff_lst)
                db_change.insert({"gene_id":i, "changes":changes_value, "lastdb":last_date, "newdb":new_date })
def main():
    """Print a banner, run the collection diff, and confirm completion."""
    print ">>>Hi, I am Qiwei. Welcome to my website: www.itdiffer.com<<<"
    print "I am working like a horse. You may have a rest and I will send you the result after a while."
    diff()
    print "ok."
# Script entry point: run the diff and report elapsed time.
# NOTE: time.clock() measures CPU time on Unix and wall time on Windows;
# it was removed in Python 3.8 (this is a Python 2 script).
if __name__=="__main__":
    start = time.clock()
    main()
    print "The time I have spent is:"
    print (time.clock() - start)
|
[
"qiwsir@gmail.com"
] |
qiwsir@gmail.com
|
22cce749bf9d8f333797bf9494f8885489e03119
|
fa89836a6759151896a07650747462b8cda40610
|
/mse/mapdata/migrations/0008_auto_20180223_2200.py
|
b0b78b056bec3467aec86819cb31761fc5f1df5d
|
[] |
no_license
|
DigitalGizmo/mse21
|
334813bfebec9b78f0541744e54f218f9cc6936b
|
89f1c0f9c05cefaaa8c703732ee4e4642aecd3c9
|
refs/heads/master
| 2023-07-09T13:29:13.903900
| 2018-03-26T19:26:09
| 2018-03-26T19:26:09
| 126,878,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-24 03:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Voyage.lat_sec and Voyage.lon_sec optional floats."""

    dependencies = [
        ('mapdata', '0007_auto_20180223_2119'),
    ]

    operations = [
        migrations.AlterField(
            model_name='voyage',
            name='lat_sec',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='voyage',
            name='lon_sec',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
|
[
"donpublic@digitalgizmo.com"
] |
donpublic@digitalgizmo.com
|
c51f89c655b8803a2b1d658b3ed1d38be188103a
|
67f988dedfd8ae049d982d1a8213bb83233d90de
|
/external/chromium/build/android/run_monkey_test.py
|
433b2bdd5f81c748e3497f34f25e25487ffdb2b1
|
[
"BSD-3-Clause"
] |
permissive
|
opensourceyouthprogramming/h5vcc
|
94a668a9384cc3096a365396b5e4d1d3e02aacc4
|
d55d074539ba4555e69e9b9a41e5deb9b9d26c5b
|
refs/heads/master
| 2020-04-20T04:57:47.419922
| 2019-02-12T00:56:14
| 2019-02-12T00:56:14
| 168,643,719
| 1
| 1
| null | 2019-02-12T00:49:49
| 2019-02-01T04:47:32
|
C++
|
UTF-8
|
Python
| false
| false
| 5,697
|
py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs the Monkey tests on one or more devices."""
import logging
import optparse
import random
import sys
import time
from pylib import android_commands
from pylib import python_test_base
from pylib import python_test_sharder
from pylib import test_options_parser
from pylib import test_result
class MonkeyTest(python_test_base.PythonTestBase):
  """Runs the Android Monkey fuzzer against Chrome and reports pass/fail."""

  def testMonkey(self):
    start_ms = int(time.time()) * 1000

    # Launch and wait for Chrome to launch.
    self.adb.StartActivity(self.options.package_name,
                           self.options.activity_name,
                           wait_for_completion=True,
                           action='android.intent.action.MAIN',
                           force_stop=True)

    # Chrome crashes are not always caught by Monkey test runner.
    # Verify Chrome has the same PID before and after the test.
    before_pids = self.adb.ExtractPid(self.options.package_name)

    # Run the test.
    output = ''
    duration_ms = 0
    if before_pids:
      output = '\n'.join(self._LaunchMonkeyTest())
      duration_ms = int(time.time()) * 1000 - start_ms
    after_pids = self.adb.ExtractPid(self.options.package_name)

    # A missing PID on either side, or a PID change, means Chrome crashed.
    crashed = (not before_pids or not after_pids
               or after_pids[0] != before_pids[0])

    result = test_result.SingleTestResult(self.qualified_name, start_ms,
                                          duration_ms, log=output)
    results = test_result.TestResults()

    # Success requires both the Monkey completion marker and a stable PID.
    if 'Monkey finished' in output and not crashed:
      results.ok = [result]
    else:
      results.crashed = [result]

    return results

  def _LaunchMonkeyTest(self):
    """Runs monkey test for a given package.

    Looks at the following parameters in the options object provided
    in class initializer:
      package_name: Allowed package.
      category: A list of allowed categories.
      throttle: Delay between events (ms).
      seed: Seed value for pseudo-random generator. Same seed value
        generates the same sequence of events. Seed is randomized by
        default.
      event_count: Number of events to generate.
      verbosity: Verbosity level [0-3].
      extra_args: A string of other args to pass to the command verbatim.

    Returns:
      The shell output of the monkey command, as a list of lines.
    """
    category = self.options.category or []
    # NOTE(review): `or` treats a seed of 0 as unset and randomizes it.
    seed = self.options.seed or random.randint(1, 100)
    throttle = self.options.throttle or 100
    event_count = self.options.event_count or 10000
    verbosity = self.options.verbosity or 1
    extra_args = self.options.extra_args or ''

    # Allow 50% slack over the theoretical event_count * throttle runtime.
    timeout_ms = event_count * throttle * 1.5

    cmd = ['monkey',
           '-p %s' % self.options.package_name,
           ' '.join(['-c %s' % c for c in category]),
           '--throttle %d' % throttle,
           '-s %d' % seed,
           '-v ' * verbosity,
           '--monitor-native-crashes',
           '--kill-process-after-error',
           extra_args,
           '%d' % event_count]
    return self.adb.RunShellCommand(' '.join(cmd), timeout_time=timeout_ms)
def DispatchPythonTests(options):
  """Dispatches the Monkey tests, sharding them if there are multiple devices.

  Args:
    options: Parsed optparse options (see main()).

  Raises:
    Exception: If no devices are attached.
  """
  logger = logging.getLogger()
  logger.setLevel(logging.DEBUG)

  available_tests = [MonkeyTest('testMonkey')]

  attached_devices = android_commands.GetAttachedDevices()
  if not attached_devices:
    raise Exception('You have no devices attached or visible!')

  # Actually run the tests.
  logging.debug('Running monkey tests.')
  # Replicate the test so that each attached device runs one instance.
  available_tests *= len(attached_devices)
  options.ensure_value('shard_retries', 1)
  sharder = python_test_sharder.PythonTestSharder(
      attached_devices, available_tests, options)
  result = sharder.RunShardedTests()
  result.LogFull('Monkey', 'Monkey', options.build_type, available_tests)
  result.PrintAnnotation()
def main():
  """Parse command-line options and dispatch the Monkey tests.

  Exits via parser.error() on unknown arguments or a missing --package-name.
  """
  desc = 'Run the Monkey tests on 1 or more devices.'
  parser = optparse.OptionParser(description=desc)
  test_options_parser.AddBuildTypeOption(parser)
  parser.add_option('--package-name', help='Allowed package.')
  parser.add_option('--activity-name',
                    default='com.google.android.apps.chrome.Main',
                    help='Name of the activity to start [default: %default].')
  parser.add_option('--category',
                    help='A list of allowed categories [default: ""].')
  parser.add_option('--throttle', default=100, type='int',
                    help='Delay between events (ms) [default: %default]. ')
  # Typo fix in user-facing help text: "pseduo" -> "pseudo".
  parser.add_option('--seed', type='int',
                    help=('Seed value for pseudo-random generator. Same seed '
                          'value generates the same sequence of events. Seed '
                          'is randomized by default.'))
  parser.add_option('--event-count', default=10000, type='int',
                    help='Number of events to generate [default: %default].')
  parser.add_option('--verbosity', default=1, type='int',
                    help='Verbosity level [0-3] [default: %default].')
  parser.add_option('--extra-args', default='',
                    help=('String of other args to pass to the command verbatim'
                          ' [default: "%default"].'))
  (options, args) = parser.parse_args()

  if args:
    parser.print_help(sys.stderr)
    parser.error('Unknown arguments: %s' % args)
  if not options.package_name:
    parser.print_help(sys.stderr)
    parser.error('Missing package name')

  # --category is comma-separated on the command line.
  if options.category:
    options.category = options.category.split(',')

  DispatchPythonTests(options)
# Script entry point.
if __name__ == '__main__':
  main()
|
[
"rjogrady@google.com"
] |
rjogrady@google.com
|
fd6ab081d187f04abb302d2404e52c16f876fa11
|
048cda95057e9852b7f1cebbab864ea10e3fc0db
|
/crawler/v1/yehey.py
|
d8db31fceb0c5421bbcdb9b8fa70b42b9a447127
|
[] |
no_license
|
AMAtreus/dg_crawler_website
|
fa9e587cf07549c0752bb88a8f61b1057496da26
|
1bcb03a48aff1ebca4e04a5c060be299ca9881d4
|
refs/heads/master
| 2023-08-17T09:56:51.379743
| 2021-10-13T13:50:23
| 2021-10-13T13:50:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,540
|
py
|
from crawler.spiders import BaseSpider
import requests
# 此文件包含的头文件不要修改
import scrapy
from utils.util_old import *
from crawler.items import *
from bs4 import BeautifulSoup
from scrapy.http import Request, Response
import re
import time
import json
class yehey(BaseSpider):# cyl
name = 'yehey'
# allowed_domains = ['https://yehey.com/']
start_urls = ['https://yehey.com/']
website_id = 1225 # 网站的id(必填)
language_id = 1866 # 所用语言的id
sql = { # sql配置
'host': '192.168.235.162',
'user': 'dg_admin',
'password': 'dg_admin',
'db': 'dg_crawler'
}
def parse(self, response):
meta = {}
meta['category2'] = ''
meta['url_cat'] = ''
meta['page_number'] = ''
html = BeautifulSoup(response.text, "html.parser")
cat1 = html.select_one("li#menu-item-5581 a").text
meta['category1'] = cat1 # 获取一级目录
cat1_url = html.select_one("li#menu-item-5581 a")['href']
yield scrapy.Request(cat1_url, meta=meta, callback=self.parse_category2)
def parse_category2(self, response):
html = BeautifulSoup(response.text, "html.parser")
cat2s = html.select("li#menu-item-5581>ul.sub-menu>li")
for c in cat2s:
cat2_url = c.select_one("a")['href']
cat2 = c.select_one("a").text
response.meta['category2'] = cat2 # 获取二级目录
yield scrapy.Request(cat2_url, meta=response.meta, callback=self.parse_category3)
def parse_category3(self, response):
html = BeautifulSoup(response.text, "html.parser")
detail_list = html.select("main#main>article")
for d in detail_list:
detail_url = d.select_one("h2.entry-title.th-text-md.th-mb-0 a")['href'] # 获取静态加载的url
yield scrapy.Request(detail_url, meta=response.meta, callback=self.parse_detail) # 处理静态的数据
url = response.url
ex2 = '.*?category/(.*?)/'
url_cat = re.findall(ex2, url, re.S)[0]
response.meta['url_cat'] = url_cat
page_number = 3
response.meta['page_number'] = page_number
request_url = 'https://yehey.com/?infinity=scrolling'
page_text = response.text
ex = '<script type="text/javascript">.*?currentday%22%3A%22(.*?)%22%2C%22'
currentday = re.findall(ex, page_text, re.S)[0]
data = {
'page': '2',
'currentday': currentday,
'query_args[category_name]': url_cat
}
yield scrapy.FormRequest.from_response(response, url=request_url, formdata=data, method='POST',
meta=response.meta, callback=self.parse_category4)
def parse_category4(self, response):
request_url = 'https://yehey.com/?infinity=scrolling'
url_cat = response.meta['url_cat']
page_number = response.meta['page_number']
dic = {'type': 'empty'}
if json.loads(response.body) == dic:
pass
else:
if 'currentday' in json.loads(response.body).keys():
currentday = json.loads(response.body)['currentday']
data = {
'page': str(page_number),
'currentday': currentday,
'query_args[category_name]': url_cat
}
if 'postflair' in json.loads(response.body).keys():
details = json.loads(response.body)['postflair'].keys()
for i in details:
yield scrapy.Request(i, meta=response.meta, callback=self.parse_detail)
if 'html' in json.loads(response.body).keys():
html = json.loads(response.body)['html']
html = BeautifulSoup(html, "html.parser")
ddl = html.select("article time")[0]['datetime']
ddl = re.split('T|\+', ddl) # ['2021-01-30', '23:00:00', '08:00']
ddl = ddl[0] + ' ' + ddl[1] # 2021-01-30 23:00:00
ddl = Util.format_time3(ddl)
else:
ddl = None
if (self.time == None or ddl >= int(self.time)):
response.meta['page_number'] = response.meta['page_number'] + 1
yield scrapy.FormRequest(url=request_url, formdata=data, method='POST',
meta=response.meta, callback=self.parse_category4)
else:
self.logger.info('时间截止')
pass
def parse_detail(self, response):
    """Parse one article page into a NewsItem.

    Extracts title, abstract, body paragraphs, image urls and publication
    time from the article HTML; category fields are forwarded from the
    listing request via response.meta.

    :param response: scrapy response for an article detail page
    :yields: a populated NewsItem
    """
    item = NewsItem()
    html = BeautifulSoup(response.text, 'html.parser')
    item['category1'] = response.meta['category1']
    item['category2'] = response.meta['category2']
    # Title: the original called .text on the result of find() directly,
    # which raises AttributeError (find() returns None) whenever the page
    # has no matching <h1> — guard before dereferencing.
    title_tag = html.find('h1', class_='entry-title th-mb-0 sm:th-text-8xl th-text-4xl')
    if title_tag is not None and title_tag.text.strip():
        item['title'] = title_tag.text.strip()
    item['body'] = ''
    bodies = html.select("div.entry-content.th-content p")
    if bodies:
        item['abstract'] = bodies[0].text  # first paragraph doubles as the abstract
        item['body'] = '\n'.join(b.text.strip() for b in bodies)
    item['images'] = []
    # Standalone header image, if present.
    header_img = html.select_one("header#primary-header img")
    if header_img is not None:
        item['images'].append(header_img['src'])
    # Images embedded inside the article paragraphs (fixes 'imgaes' typo).
    for img in html.select("div.entry-content.th-content a>img"):
        item['images'].append(img['src'])
    # Publication time, e.g. datetime="2021-01-30T23:00:00+08:00"
    # -> "2021-01-30 23:00:00".
    published = html.select_one("time.entry-date.published")
    if published is not None:
        pub_time = re.split(r'T|\+', published['datetime'])
        item['pub_time'] = pub_time[0] + ' ' + pub_time[1]
    yield item
|
[
"2782299413@qq.com"
] |
2782299413@qq.com
|
71b991b29176ceeb40dd18fb108913132eac9b9c
|
de3b77cb0927f28cbd85e9142c2dfd7c8be7c27e
|
/tests/migrations/028_log_mod_onboarding_msgs_up.py
|
2d6e3dc23a0f321e5d54cd071e2830380b16a154
|
[
"MIT"
] |
permissive
|
LoansBot/database
|
f3dcbccde59fdb80c876d2612f250662946588e6
|
eeaed26c2dcfdf0f9637b47ebe15cd1e000d8cc4
|
refs/heads/master
| 2021-07-02T22:07:18.683278
| 2021-06-02T04:09:38
| 2021-06-02T04:09:38
| 239,400,935
| 0
| 1
|
MIT
| 2021-06-02T04:14:31
| 2020-02-10T01:06:53
|
Python
|
UTF-8
|
Python
| false
| false
| 850
|
py
|
import unittest
import helper
class UpTest(unittest.TestCase):
    """Verify the 'up' migration created the mod_onboarding_msg_history table."""

    @classmethod
    def setUpClass(cls):
        # One shared connection/cursor for the whole class; nothing is ever
        # committed (see tearDownClass), so the database is left untouched.
        cls.connection = helper.setup_connection()
        cls.cursor = cls.connection.cursor()

    @classmethod
    def tearDownClass(cls):
        cls.cursor.close()
        cls.connection.rollback()
        helper.teardown_connection(cls.connection)

    def tearDown(self):
        # Undo anything an individual test may have changed.
        self.connection.rollback()

    def test_mod_onboarding_msg_history_exists(self):
        # The migration must have created the table itself.
        self.assertTrue(
            helper.check_if_table_exist(self.cursor, 'mod_onboarding_msg_history')
        )

    def test_mod_onboarding_msg_history_has_pkey(self):
        # The table must also carry a primary key.
        self.assertTrue(
            helper.check_if_pkey_exists(self.cursor, 'mod_onboarding_msg_history')
        )
if __name__ == '__main__':
    # Allow running this migration test file directly.
    unittest.main()
|
[
"noreply@github.com"
] |
LoansBot.noreply@github.com
|
61459c90cd48b2ee3302ca8357924844cce4aef0
|
7fc26de436ad958fc02e11fc7f7486f9ac775d0b
|
/services/url_lookup/project/api/url.py
|
533675f407a56387237c088a49ac2be9f518316b
|
[] |
no_license
|
chenjienan/url_lookup_service
|
633071d78598b2ee248b6a6fc3ceee2bf4ccca9b
|
ef10d58450af97221697ac0fa26cfb9e5a43415e
|
refs/heads/master
| 2023-05-12T00:09:36.278356
| 2019-08-06T16:45:05
| 2019-08-06T16:45:05
| 199,910,038
| 0
| 0
| null | 2023-05-01T21:14:08
| 2019-07-31T18:36:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,698
|
py
|
from flask import Blueprint, request, jsonify
from flask_restful import Resource, Api
from sqlalchemy import exc
from project import db
from project.api.models import Url
from urllib.parse import unquote
import tldextract
url_blueprint = Blueprint('url', __name__)
api = Api(url_blueprint)
class UrlList(Resource):
    """Collection endpoint: list all stored urls, or register a new one."""

    def get(self):
        """Return every url in the database.

        :returns: (json, 200) with each row serialized via Url.to_json().
        """
        response_obj = {
            'status': 'success',
            'data': {
                'urls': [url.to_json() for url in Url.query.all()]
            }
        }
        return response_obj, 200

    def post(self):
        """Add a url to the system.

        Expects a JSON body with a 'url' key; rejects a missing payload and
        duplicates with 400, returns 201 on success.
        """
        post_data = request.get_json()
        response_object = {
            'status': 'fail',
            'message': 'Invalid payload.'
        }
        if not post_data:
            return response_object, 400
        url = post_data.get('url')
        try:
            get_url = Url.query.filter_by(url=url).first()
            if not get_url:
                db.session.add(Url(url=url))
                db.session.commit()
                response_object['status'] = 'success'
                response_object['message'] = f'{url} was added!'
                return response_object, 201
            else:
                response_object['message'] = 'That url already exists.'
                return response_object, 400
        except exc.IntegrityError:
            # Constraint violation (e.g. a concurrent insert of the same
            # url): roll back and answer with the generic failure payload.
            db.session.rollback()
            return response_object, 400
class UrlInfo(Resource):
    """Lookup endpoint: report whether a url's host is a known malware host."""

    def get(self, input_url=None):
        """Classify *input_url*.

        The url arrives percent-encoded in the path; it is unquoted, reduced
        to its registered host via tldextract, and looked up in the Url
        table. A row that exists and is active marks the host as malware.
        """
        url = unquote(input_url)
        # post-process for domain/host extraction
        ext = tldextract.extract(url)
        host = '.'.join(part for part in ext if part)
        response_obj = {
            'status': 'fail',
            'url': input_url,
            'host': host,
            'isMalware': None
        }
        try:
            cur_url = Url.query.filter_by(url=host).first()
            response_obj['status'] = 'success'
            if not cur_url:
                response_obj['isMalware'] = 'false'
                return response_obj, 200
            elif cur_url and not cur_url.active:
                # NOTE(review): `cur_url and` is redundant — this branch is
                # only reached when cur_url is truthy.
                response_obj['isMalware'] = 'false'
                return response_obj, 200
            response_obj['isMalware'] = 'true'
            return response_obj, 200
        except ValueError:
            return response_obj, 404
class UrlPing(Resource):
    """Health-check endpoint."""

    def get(self):
        # Static liveness payload; no database access involved.
        return {
            'status': 'success',
            'message': 'pong!'
        }
# Route registration: /ping (health), /urls (collection), /urlinfo/<url>
# (classification; <path:...> lets the encoded url contain slashes).
api.add_resource(UrlPing, '/ping')
api.add_resource(UrlList, '/urls')
api.add_resource(UrlInfo, '/urlinfo/<path:input_url>')
|
[
"chenjienan2009@gmail.com"
] |
chenjienan2009@gmail.com
|
079e09457902766095c8d29b7e0de221c64610d5
|
48661992ea5d378437aa245bc2469d9677678fbc
|
/changecsv.py
|
d358fb5cd0d7309fed5ab796bf09c38300e23a08
|
[] |
no_license
|
sakurasakura1996/kaggle
|
a4f259ff0a89395d0801cfcd1215d2794598dcce
|
d159ccaebcc4fcd3013f746d6f280b4914ad9945
|
refs/heads/master
| 2020-09-11T22:51:34.604108
| 2019-12-08T06:51:45
| 2019-12-08T06:51:45
| 222,216,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
# Adjust sub.csv's output: row numbers start at 1 and labels are integers.
import csv
import numpy as np

# Rows of sub.csv accumulated as lists of strings.
train_data = []
# `with` guarantees the handle is closed (the original leaked it), and
# newline='' is the csv-module-documented mode for opening csv files.
with open('./sub.csv', 'r', newline='') as openfile:
    read = csv.reader(openfile)
    for line in read:
        train_data.append(line)
        # print(line)
train_data = np.array(train_data)
|
[
"2470375551@qq.com"
] |
2470375551@qq.com
|
7acf6789b67228e00bf1be4e74c42b43dcefa3e8
|
a9a10382236404d65cd7909adf12bf41b6a59085
|
/django_and_full_stack/multi_madlib/web_madlib/web_madlib/views.py
|
de5b1f7511c5e47a2d346ade9d3fcb4dd9a746ed
|
[] |
no_license
|
shedwyn/codeguild
|
9718038613d082b3a21f2b4ba56cf6cbb282606a
|
db792b1a13b32d6b135398b21f6a5124a75c639b
|
refs/heads/master
| 2020-04-06T14:36:03.617029
| 2016-09-20T19:48:38
| 2016-09-20T19:48:38
| 55,080,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,006
|
py
|
"""
Python Coding Bootcamp (pdxcodeguild)
Code File for web_madlib/views.py
by: Erin Fough and Matthew James K on 5/9/2016
"""
from django.http import HttpResponse
def get_httpresponse_to_request(request=None, sentence=''):
    """
    This function accepts the sentence for each numbered request, and the
    list of noun values, and gives back the HttpResponse.

    :param 1: request is the list of the three noun values pulled from the
              incoming GET parameters (None entries mark missing parameters)
    :param 2: sentence is the main body of the madlib string as a String
    :returns: a 400 response naming the first missing/blank noun, otherwise
              a 200 response carrying the rendered sentence.
    """
    response_positive = "200 OK"
    response_negative = "400 BAD REQUEST "
    # None (not []) as the default avoids the shared-mutable-default trap.
    if request is None:
        request = []
    # Validate noun1..noun3 in order. `not value` replaces the original
    # `is None or is ''` chain: `is ''` compares object identity, not
    # equality, and is unreliable for dynamically built strings.
    for index, name in enumerate(('noun1', 'noun2', 'noun3')):
        value = request[index] if index < len(request) else None
        if not value:
            response = HttpResponse(status=400)
            response.write(response_negative + name + ' was not provided or blank')
            return response
    response = HttpResponse(status=200)
    response.write(response_positive + ' ' + sentence)
    return response
def render_madlib_1(request):
    """Madlib #1: '<noun1> hugged <noun2> but not <noun3>'."""
    # QueryDict.get returns None for a missing key — same result as the
    # original explicit `key in request.GET` checks.
    post_parameters = [request.GET.get(key) for key in ('noun1', 'noun2', 'noun3')]
    sentence = "{} hugged {} but not {}".format(post_parameters[0], post_parameters[1], post_parameters[2])
    return get_httpresponse_to_request(post_parameters, sentence)
def render_madlib_2(request):
    """Madlib #2: '<noun1> troubled <noun2> but not <noun3>'."""
    # QueryDict.get returns None for a missing key — same result as the
    # original explicit `key in request.GET` checks.
    post_parameters = [request.GET.get(key) for key in ('noun1', 'noun2', 'noun3')]
    sentence = "{} troubled {} but not {}".format(post_parameters[0], post_parameters[1], post_parameters[2])
    return get_httpresponse_to_request(post_parameters, sentence)
def render_madlib_3(request):
    """Madlib #3: '<noun1> ran for <noun2> but not <noun3>'."""
    # QueryDict.get returns None for a missing key — same result as the
    # original explicit `key in request.GET` checks.
    post_parameters = [request.GET.get(key) for key in ('noun1', 'noun2', 'noun3')]
    sentence = "{} ran for {} but not {}".format(post_parameters[0], post_parameters[1], post_parameters[2])
    return get_httpresponse_to_request(post_parameters, sentence)
|
[
"shedwyn@gmail.com"
] |
shedwyn@gmail.com
|
5ef24cba12de0d3932574c98055682b47fb59215
|
faca44e8424959ecd04098ccf936e6f5f80c8465
|
/lessons/103-datetime.py
|
d4e481bf984cf1eaf889fa304cda67ee528e858a
|
[] |
no_license
|
craymaru/python_practice
|
01b0153631d83b2566e31a54346110f632412703
|
7616cbb945f432aa80d43408631b59afb90bf0f5
|
refs/heads/master
| 2021-05-20T10:12:07.245941
| 2020-05-19T14:33:14
| 2020-05-19T14:33:14
| 252,242,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
# Lesson script: the datetime / time modules and timestamped file backups.
import datetime

print("datetime:")

# Current local date and time, three rendering styles.
now = datetime.datetime.now()
print(now)
print(now.isoformat())
print(now.strftime("%d/%m/%y-%H%M%S%f"))

# Today's date only.
today = datetime.date.today()
print(today)
print(today.isoformat())
print(today.strftime("%d/%m/%y"))

# A bare time-of-day value.
t = datetime.time(hour=1, minute=10, second=5, microsecond=100)
print(t)
print(t.isoformat())
print(t.strftime("%H_%M_%S_%f"))

print(now)
# timedelta arithmetic — subtract roughly one year (other units left as
# commented alternatives to experiment with).
# d = datetime.timedelta(weeks=1)
d = datetime.timedelta(days=365)
# d = datetime.timedelta(hours=1)
# d = datetime.timedelta(minutes=1)
# d = datetime.timedelta(second=1)
# d = datetime.timedelta(microseconds=1)
print(now - d)

import time

print("time:")
time.sleep(1)
print(time.time())  # seconds since the epoch, as a float

import os
import shutil

file_name = "test.txt"
# Back up an existing file under a timestamped name before overwriting it.
if os.path.exists(file_name):
    shutil.copy(file_name, "{}.{}".format(
        file_name, now.strftime("%Y_%m_%d_%H_%M_%S")
    ))
with open(file_name, "w") as f:
    f.write("test")
|
[
"craymaru@gmail.com"
] |
craymaru@gmail.com
|
3f0729620abccdeb6d4a9f3848fc18bcc0de6521
|
0103046cd77e9f86ccde477736de36bba766ceb6
|
/src/sentry/migrations/0143_add_alerts_integrationfeature.py
|
8ebf10baaca631b26cd1f6b68dae5ac7c842b3e8
|
[
"BUSL-1.1",
"Apache-2.0"
] |
permissive
|
kaozdl/sentry
|
ad41ada649a20300e9f2fe69050200cfbf738a63
|
63d698f5294f64a8c206b4c741e2a11be1f9a9be
|
refs/heads/master
| 2021-06-21T18:24:21.713064
| 2021-03-04T19:45:20
| 2021-03-04T19:45:20
| 198,681,569
| 0
| 0
|
BSD-3-Clause
| 2019-07-24T17:32:29
| 2019-07-24T17:32:28
| null |
UTF-8
|
Python
| false
| false
| 2,008
|
py
|
# Generated by Django 1.11.29 on 2020-12-10 23:55
from django.db import migrations
import sentry.db.models.fields.bounded
class Migration(migrations.Migration):
    # Adds choice (7, "integrations-alerts") to IntegrationFeature.feature.

    # This flag is used to mark that a migration shouldn't be automatically run in
    # production. We set this to True for operations that we think are risky and want
    # someone from ops to run manually and monitor.
    # General advice is that if in doubt, mark your migration as `is_dangerous`.
    # Some things you should always mark as dangerous:
    # - Large data migrations. Typically we want these to be run manually by ops so that
    #   they can be monitored. Since data migrations will now hold a transaction open
    #   this is even more important.
    # - Adding columns to highly active tables, even ones that are NULL.
    is_dangerous = False

    # This flag is used to decide whether to run this migration in a transaction or not.
    # By default we prefer to run in a transaction, but for migrations where you want
    # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
    # want to create an index concurrently when adding one to an existing table.
    atomic = True

    dependencies = [
        ("sentry", "0142_add_dashboard_tombstone"),
    ]

    operations = [
        migrations.AlterField(
            model_name="integrationfeature",
            name="feature",
            field=sentry.db.models.fields.bounded.BoundedPositiveIntegerField(
                choices=[
                    (0, "integrations-api"),
                    (1, "integrations-issue-link"),
                    (2, "integrations-stacktrace-link"),
                    (3, "integrations-event-hooks"),
                    (4, "integrations-project-management"),
                    (5, "integrations-incident-management"),
                    (6, "integrations-feature-flag"),
                    (7, "integrations-alerts"),
                ],
                default=0,
            ),
        ),
    ]
|
[
"noreply@github.com"
] |
kaozdl.noreply@github.com
|
72bd965eb35957d66906599b19f40556c0cd940b
|
06e34e2dface0b87fa785cab7e65422a5f20ba18
|
/Solutions/1822.Sign of the Product of an Array/python.py
|
efc08c79677082b17cc8158628db075c809fd996
|
[] |
no_license
|
JerryHu1994/LeetCode-Practice
|
c9841b0ce70451c19c8a429a3898c05b6233e1d4
|
b0ce69985c51a9a794397cd98a996fca0e91d7d1
|
refs/heads/master
| 2022-02-10T04:42:28.033364
| 2022-01-02T04:44:22
| 2022-01-02T04:44:22
| 117,118,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
class Solution:
    def arraySign(self, nums: List[int]) -> int:
        """Return the sign (1, -1, or 0) of the product of *nums*.

        Never forms the product itself: a single zero forces 0, otherwise
        the parity of the negative count decides the sign.
        """
        negatives = 0
        for value in nums:
            if value == 0:
                return 0
            if value < 0:
                negatives += 1
        return -1 if negatives % 2 else 1
|
[
"hjr01211@gmail.com"
] |
hjr01211@gmail.com
|
bba941cbd7a031aba596f35b0cd5f58040fc39ac
|
9d35f59c102236581e010e24ae39dd4a1c876aca
|
/fbauth/managers.py
|
f818d969d504b5fb08a0ddfbddc0c210e9ea6709
|
[
"MIT"
] |
permissive
|
Tuss4/django-fb-oauth
|
195d30f19b7fa3b43cf1b337c2178fffb1c4a7ef
|
9098ab3ea77e7695affd5c793c35d05ae61b1cdb
|
refs/heads/master
| 2021-07-17T17:30:09.435330
| 2016-02-07T08:02:45
| 2016-02-07T08:02:45
| 51,091,583
| 2
| 0
|
MIT
| 2021-06-10T18:27:59
| 2016-02-04T17:10:42
|
Python
|
UTF-8
|
Python
| false
| false
| 408
|
py
|
from django.db import models
from django.contrib.auth import get_user_model
class FBManager(models.Manager):
    """Manager method to create a Facebook User"""

    def create_fb_user(self, fb_id, token, **kwargs):
        """Create a regular auth user plus its linked Facebook credential row.

        :param fb_id: the user's Facebook id
        :param token: the Facebook OAuth access token
        :param kwargs: forwarded verbatim to create_user (username, email, ...)
        :returns: the newly created auth user (not the credential model)
        """
        user = get_user_model().objects.create_user(**kwargs)
        fbt = self.model(user=user, facebook_id=fb_id, access_token=token)
        fbt.save(using=self._db)
        return user
|
[
"tuss4dbfn@gmail.com"
] |
tuss4dbfn@gmail.com
|
3b7113ec2fe7f30d44c4a0e4f1c9e0a04ee4f474
|
360e1f69f4c0923c5d79bc82aa33c0fd4e80b71e
|
/RECURSION/subsets.py
|
f334469a9f4ddc3cb74e3afb1d219f81e37a0ac4
|
[] |
no_license
|
Vijay1234-coder/data_structure_plmsolving
|
04e52fe6c918313e13d39107a2ded8b47645bb12
|
d449b266295d1ae55613cdcfd9b22ad9cee3dfbe
|
refs/heads/master
| 2023-08-01T00:55:28.825972
| 2021-09-12T15:20:12
| 2021-09-12T15:20:12
| 387,782,783
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
def subSets(arr, index, sub):
    """Print every non-empty subset of arr[index:] appended to *sub*.

    Classic include/exclude recursion: at each position, first recurse
    without the element, then with it. Returns *sub* unchanged.
    """
    if index == len(arr):
        if sub:
            print(sub)
        return sub
    # Exclude arr[index], then include it.
    subSets(arr, index + 1, sub)
    subSets(arr, index + 1, sub + [arr[index]])
    return sub
arr = [1, 2, 3]
n = len(arr)  # NOTE(review): unused — subSets derives the length itself
subSets(arr, 0, [])  # prints every non-empty subset of arr
|
[
"77201164+Vijay1234-coder@users.noreply.github.com"
] |
77201164+Vijay1234-coder@users.noreply.github.com
|
016c0b20397a06625c09871ea2375ffd3f6a0c97
|
dfe2a52a1c36a28a8bf85af7efd42380d980b773
|
/virtual/lib/python3.6/site-packages/registration/migrations/0001_initial.py
|
5c56041a6ca8b00eff3ae7668f2cfc0281c5d363
|
[
"MIT"
] |
permissive
|
virginiah894/Instagram-clone
|
2c2a15d89fcdb25b22bd60428cf84a01f3bd553c
|
4d8abe7bafefae06a0e462e6a47631c2f8a1d361
|
refs/heads/master
| 2022-12-10T06:56:21.105357
| 2020-01-07T14:14:50
| 2020-01-07T14:14:50
| 229,394,540
| 3
| 0
|
MIT
| 2022-12-08T03:23:40
| 2019-12-21T07:41:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,159
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the RegistrationProfile table."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='RegistrationProfile',
            fields=[
                ('id',
                 models.AutoField(
                     primary_key=True,
                     serialize=False,
                     auto_created=True,
                     verbose_name='ID')),
                # Opaque key mailed to the user to activate the account.
                ('activation_key',
                 models.CharField(
                     verbose_name='activation key',
                     max_length=40)),
                # One profile per auth user; removed with the user.
                ('user',
                 models.OneToOneField(
                     to=settings.AUTH_USER_MODEL,
                     verbose_name='user',
                     on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name': 'registration profile',
                'verbose_name_plural': 'registration profiles',
            },
        ),
    ]
|
[
"virgyperry@gmail.com"
] |
virgyperry@gmail.com
|
e89bf19c8f53855bfc3e1277c31a3391c13eeba9
|
33621e000244ef274de9f4da3f1afc83a130414f
|
/tests/test_version.py
|
231a11632b4ac9665f625fd51b51dbfbda2a92cb
|
[
"MIT"
] |
permissive
|
pombredanne/i18nspector
|
8901b7dd39b2a9e662edd0870906bbe683ba1960
|
abf543ccad9034e6278af0ba1bb6a384193c799b
|
refs/heads/master
| 2021-01-19T20:09:06.069438
| 2017-07-04T08:26:38
| 2017-07-04T08:26:38
| 83,740,280
| 0
| 0
| null | 2017-07-04T08:26:39
| 2017-03-03T00:50:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
# Copyright © 2012-2013 Jakub Wilk <jwilk@jwilk.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from nose.tools import (
assert_equal,
)
from lib.cli import __version__
here = os.path.dirname(__file__)
docdir = os.path.join(here, os.pardir, 'doc')
def test_changelog():
    """The first changelog entry's version must match lib.cli.__version__.

    The changelog's first line has the form 'name (version) ...', so the
    second whitespace-separated token, stripped of parentheses, is the
    version string.
    """
    path = os.path.join(docdir, 'changelog')
    with open(path, 'rt', encoding='UTF-8') as file:
        line = file.readline()
    changelog_version = line.split()[1].strip('()')
    assert_equal(changelog_version, __version__)
def test_manpage():
    """The ':version:' field of the manpage source must match __version__."""
    path = os.path.join(docdir, 'i18nspector.txt')
    manpage_version = None
    with open(path, 'rt', encoding='UTF-8') as file:
        for line in file:
            if line.startswith(':version:'):
                # Take the field's value and stop at the first match.
                manpage_version = line.split()[-1]
                break
    assert_equal(manpage_version, __version__)
# vim:ts=4 sts=4 sw=4 et
|
[
"jwilk@jwilk.net"
] |
jwilk@jwilk.net
|
8e281a5b517d5197c5e02452a34be7e31c26bc61
|
5191423bc86a4e56845c737b4ce6853f3faef90e
|
/pytautulli/const.py
|
705a83cdab7eff5546a0fac20c880319ed7c337e
|
[
"MIT"
] |
permissive
|
ludeeus/pytautulli
|
075d354d03a50dab0ffb7d9425bb4015c1ff443d
|
0cf5b826c2033882b582287bbd0056d81a30065f
|
refs/heads/main
| 2023-04-09T15:51:02.346720
| 2023-02-04T14:35:39
| 2023-02-04T14:35:39
| 141,796,012
| 2
| 5
|
MIT
| 2023-03-29T06:59:02
| 2018-07-21T08:46:33
|
Python
|
UTF-8
|
Python
| false
| false
| 374
|
py
|
"""Pytautulli constants."""
from enum import Enum
from logging import Logger, getLogger
API_HEADERS = {"Content-Type": "application/json"}
LOGGER: Logger = getLogger(__package__)
ATTR_RESPONSE = "response"
ATTR_DATA = "data"
class HTTPMethod(Enum):
    """HTTPMethod Enum."""

    # Values are the literal verb strings sent on the wire.
    GET = "GET"
    POST = "POST"
    PUT = "PUT"
    DELETE = "DELETE"
    PATCH = "PATCH"
|
[
"noreply@github.com"
] |
ludeeus.noreply@github.com
|
945cbd4408c837958ac33a2794a1fa4bf98b2e3e
|
0a3c85ca1388a6e9935509a0488f4027f40986f8
|
/tests/issues/test_477.py
|
db9dd56244047b7eaa042b6be812e4e239298667
|
[
"Apache-2.0"
] |
permissive
|
langitem/hgvs
|
cbf4c9f22f4e8bd0523a8948e63b3bc95599c7ff
|
0dac443b9dc565c7fdca5a4b8de40b3fea7624f4
|
refs/heads/master
| 2020-04-18T00:37:30.157853
| 2019-02-25T19:24:59
| 2019-02-25T19:24:59
| 167,084,812
| 0
| 0
|
Apache-2.0
| 2019-02-16T01:57:14
| 2019-01-22T23:42:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,213
|
py
|
import pytest
from hgvs.exceptions import HGVSInvalidIntervalError
# Each entry pairs a transcript-level (c.) variant with its genomic (g.)
# counterpart; "rs" labels the test case and "ex" marks pairs whose mapping
# must raise (positions just outside the transcript's aligned bounds).
tests = (
    # {"c": "", "g": "", "rs": "" },
    # GPHB5, GRCh37 https://www.ncbi.nlm.nih.gov/gene/122876
    {"c": "NM_145171.3:c.-63A>G", "g": "NC_000014.8:g.63785599T>C", "rs": "GPHB5/GRCh37/rs1299953722", "ex": HGVSInvalidIntervalError },
    {"c": "NM_145171.3:c.-56G>A", "g": "NC_000014.8:g.63785592C>T", "rs": "GPHB5/GRCh37/rs982881702" },
    {"c": "NM_145171.3:c.2T>C", "g": "NC_000014.8:g.63784562A>G", "rs": "GPHB5/GRCh37/rs1221379530" },
    {"c": "NM_145171.3:c.388A>G", "g": "NC_000014.8:g.63779647T>C", "rs": "GPHB5/GRCh37/rs1380832691" },
    {"c": "NM_145171.3:c.*4C>T", "g": "NC_000014.8:g.63779638G>A", "rs": "GPHB5/GRCh37/rs753041439" },
    {"c": "NM_145171.3:c.*84A>G", "g": "NC_000014.8:g.63779558T>C", "rs": "GPHB5/GRCh37/rs1204774077" },
    {"c": "NM_145171.3:c.*99G>A", "g": "NC_000014.8:g.63779543C>T", "rs": "GPHB5/GRCh37/rs144659601", "ex": HGVSInvalidIntervalError },
    # GPHB5, GRCh38 https://www.ncbi.nlm.nih.gov/gene/122876
    {"c": "NM_145171.3:c.-63A>G", "g": "NC_000014.9:g.63318885T>C", "rs": "GPHB5/GRCh38/rs1299953722", "ex": HGVSInvalidIntervalError },
    {"c": "NM_145171.3:c.-56G>A", "g": "NC_000014.9:g.63318878C>T", "rs": "GPHB5/GRCh38/rs982881702" },
    {"c": "NM_145171.3:c.2T>C", "g": "NC_000014.9:g.63317848A>G", "rs": "GPHB5/GRCh38/rs1221379530" },
    {"c": "NM_145171.3:c.388A>G", "g": "NC_000014.9:g.63312933T>C", "rs": "GPHB5/GRCh38/rs1380832691" },
    {"c": "NM_145171.3:c.*4C>T", "g": "NC_000014.9:g.63312924G>A", "rs": "GPHB5/GRCh38/rs753041439" },
    {"c": "NM_145171.3:c.*84A>G", "g": "NC_000014.9:g.63312844T>C", "rs": "GPHB5/GRCh38/rs1204774077" },
    {"c": "NM_145171.3:c.*99G>A", "g": "NC_000014.9:g.63312829C>T", "rs": "GPHB5/GRCh38/rs144659601", "ex": HGVSInvalidIntervalError },
    # COX6A2 https://www.ncbi.nlm.nih.gov/gene/1339
    {"c": "NM_005205.3:c.-106G>A", "g": "NC_000016.10:g.31428431C>T", "rs": "COX6A2/GRCh38/rs1033792906", "ex": HGVSInvalidIntervalError },
    {"c": "NM_005205.3:c.-96C>T", "g": "NC_000016.10:g.31428421G>A", "rs": "COX6A2/GRCh38/rs755670336" },
    {"c": "NM_005205.3:c.2T>C", "g": "NC_000016.10:g.31428324A>G", "rs": "COX6A2/GRCh38/rs200780049" },
    {"c": "NM_005205.3:c.293G>A", "g": "NC_000016.10:g.31427775C>T", "rs": "COX6A2/GRCh38/rs764753905" },
    {"c": "NM_005205.3:c.*3C>T", "g": "NC_000016.10:g.31427771G>A", "rs": "COX6A2/GRCh38/rs909673485" },
    {"c": "NM_005205.3:c.*42G>C", "g": "NC_000016.10:g.31427732C>G", "rs": "COX6A2/GRCh38/rs375688325" },
    {"c": "NM_005205.3:c.*43A>G", "g": "NC_000016.10:g.31427731T>C", "rs": "COX6A2/GRCh38/rs961248971" },
    {"c": "NM_005205.3:c.*44G>A", "g": "NC_000016.10:g.31427730C>T", "rs": "COX6A2/GRCh38/rs756406653", "ex": HGVSInvalidIntervalError },
)
@pytest.mark.parametrize("pair", tests, ids=[p["rs"] for p in tests])
def test_pair(parser, am38, pair):
var_c = parser.parse(pair["c"])
var_g = parser.parse(pair["g"])
if "ex" in pair:
with pytest.raises(pair["ex"]):
var_gtoc = am38.g_to_c(var_g, var_c.ac)
else:
var_gtoc = am38.g_to_c(var_g, var_c.ac)
assert pair["c"] == str(var_gtoc)
|
[
"reecehart@gmail.com"
] |
reecehart@gmail.com
|
3588bec4261cc0eb788f5aa02ad1e5db0f2d19ec
|
db9b3be76bb5502d0b11a23ee829efbe328d68b2
|
/python/decoretors/actual decorator/using_decorator.py
|
c32ca1660ad581167ad793785de47c1c3cd8ed78
|
[] |
no_license
|
NARESHSWAMI199/python
|
2cdf240a8610815b4fb64fcb3e8beeafd3191aab
|
4896345ea2af623c76b06083bcfb38d90f318c88
|
refs/heads/master
| 2020-12-01T16:47:49.974094
| 2019-12-29T04:32:03
| 2019-12-29T04:32:03
| 230,702,606
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
from functools import wraps
def decorator(func):
    """Decorator that announces each call before delegating to *func*.

    functools.wraps copies func's metadata (__name__, __doc__, ...) onto
    the wrapper, so introspection still reports the wrapped function.
    Equivalent to: func = decorator(func); calling func runs the wrapper.
    """
    @wraps(func)
    def wrapper(*call_args, **call_kwargs):
        """Announce the call, then forward arguments and result untouched."""
        print("this is a awsome function ")
        return func(*call_args, **call_kwargs)
    return wrapper
@decorator
def add(a, b):
    '''this is add function'''
    return a+b


print(add(2, 3))
# Without functools.wraps the wrapper would report its own __name__ and
# __doc__ here; wraps copies the wrapped function's metadata, so these
# two lines print add's docstring and name.
print(add.__doc__)
print(add.__name__)
|
[
"swaminaresh993@gmail.com"
] |
swaminaresh993@gmail.com
|
dc6194d9f9bf211787a355e6de94714e28514b5e
|
6b63845777e94a06ebd4c728fee3fb3127d97033
|
/setup.py
|
a32d9928d614164775df821d7305e5b6c3d84670
|
[
"BSD-2-Clause"
] |
permissive
|
mstaniszczak/python-redis-lock
|
86c517a6d8b825ac767c8f6cd06f59519e0bf973
|
5cfa2f48cb06940355fb7776b16742b32c779571
|
refs/heads/master
| 2020-07-26T08:13:31.863248
| 2019-11-18T14:45:35
| 2019-11-18T14:45:35
| 208,586,940
| 0
| 0
|
BSD-2-Clause
| 2019-09-15T11:56:11
| 2019-09-15T11:56:10
| null |
UTF-8
|
Python
| false
| false
| 2,945
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Return the text of a file located relative to this setup script.

    *names* are path components joined under the script's directory;
    pass encoding=... to override the utf8 default.
    """
    path = join(dirname(__file__), *names)
    encoding = kwargs.get('encoding', 'utf8')
    with io.open(path, encoding=encoding) as fh:
        return fh.read()
# Package metadata; long_description stitches README (badges stripped) and
# CHANGELOG together, rewriting Sphinx roles to plain reST literals.
setup(
    name='python-redis-lock',
    version='3.3.1',
    license='BSD 2-Clause License',
    description='Lock context manager implemented via redis SETNX/BLPOP.',
    long_description='%s\n%s' % (
        re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
        re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
    ),
    author='Ionel Cristian Mărieș',
    author_email='contact@ionelmc.ro',
    url='https://github.com/ionelmc/python-redis-lock',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        # uncomment if you test on these interpreters:
        # 'Programming Language :: Python :: Implementation :: IronPython',
        # 'Programming Language :: Python :: Implementation :: Jython',
        # 'Programming Language :: Python :: Implementation :: Stackless',
        'Topic :: Utilities',
    ],
    project_urls={
        'Documentation': 'https://python-redis-lock.readthedocs.io/',
        'Changelog': 'https://python-redis-lock.readthedocs.io/en/latest/changelog.html',
        'Issue Tracker': 'https://github.com/ionelmc/python-redis-lock/issues',
    },
    keywords=[
        # eg: 'keyword1', 'keyword2', 'keyword3',
    ],
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
    install_requires=[
        'redis>=2.10.0',
    ],
    extras_require={
        'django': [
            'django-redis>=3.8.0',
        ]
    }
)
|
[
"contact@ionelmc.ro"
] |
contact@ionelmc.ro
|
1934058aa2a961fccfeb79e210fbfa47f3df6f84
|
67cb31c6ac800dd8a3b6f9cfde21bf619871d0de
|
/two-sum-closest.py
|
7523709d919495e37dda24deade2815c905528a7
|
[] |
no_license
|
onestarshang/leetcode_onestar
|
93a5fbafaa49bb7f186eafdee5accc031c8893db
|
2d6f1235b0ce311a0a2e46f157521430f17140e1
|
refs/heads/master
| 2021-01-19T20:30:49.169149
| 2017-06-16T05:12:58
| 2017-06-16T05:12:58
| 88,514,095
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
# coding=utf8
'''
找到两个数字使得他们和最接近target
nums = [-1, 2, 1, -4],target = 4.
最接近值为 1
'''
class Solution:
    # @param {int[]} nums an integer array
    # @param {int} target an integer
    # @return {int} the difference between the closest pair sum and the target
    def twoSumClosest(self, nums, target):
        """Return min |a + b - target| over all pairs in nums; -1 if empty.

        Example: nums = [-1, 2, 1, -4], target = 4 -> 1 (closest sum is 3).
        Two-pointer scan over the sorted array: O(n log n) time, O(1) extra
        space. Note: sorts *nums* in place, as the original did.
        """
        import sys
        if not nums:
            return -1
        nums.sort()
        # sys.maxint was removed in Python 3 (the original crashed there);
        # sys.maxsize exists on both 2 and 3 and serves as the sentinel.
        diff = sys.maxsize
        start, end = 0, len(nums) - 1
        while start < end:
            pair_sum = nums[start] + nums[end]
            if pair_sum < target:
                diff = min(diff, target - pair_sum)
                start += 1
            else:
                diff = min(diff, pair_sum - target)
                end -= 1
        return diff
|
[
"onestar1967@gmail.com"
] |
onestar1967@gmail.com
|
dbaee83d67af86e8bc68165648dfd407ad7f3b3f
|
54ab0f79f5d68f4732ca7d205f72ecef99862303
|
/torch/distributed/fsdp/_fsdp_extensions.py
|
1f087f44b573970d1e27e823f8ddbd18f756a8ca
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
csarofeen/pytorch
|
a9dd0f8ffa0642d72df2d5e109a8b4d9c2389cbc
|
e8557ec5e064608577f81e51ccfe7c36c917cb0f
|
refs/heads/devel
| 2023-04-30T02:42:13.558738
| 2023-03-14T00:50:01
| 2023-03-14T00:50:01
| 88,071,101
| 35
| 10
|
NOASSERTION
| 2023-06-21T17:37:30
| 2017-04-12T16:02:31
|
C++
|
UTF-8
|
Python
| false
| false
| 3,174
|
py
|
from abc import ABC, abstractmethod
from typing import Any, List, Optional, Tuple
import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
from torch.distributed._shard.sharded_tensor.shard import Shard
from torch.distributed.fsdp._shard_utils import _create_chunk_sharded_tensor
class FSDPExtensions(ABC):
    """
    This enables some customizable hooks to enable composability with tensor
    parallelism. To activate these hooks, use :func:`_set_fsdp_extensions` to
    set a custom :class:`FSDPExtensions` that implements the hooks.
    """

    @abstractmethod
    def pre_flatten_transform(
        self,
        tensor: torch.Tensor,
    ) -> Tuple[torch.Tensor, Optional[Any]]:
        """E.g. converting ``DistributedTensor`` to local tensor.

        Returns the (possibly replaced) tensor plus opaque per-parameter
        extension state, or ``None`` state when no transform applies.
        """
        ...

    @abstractmethod
    def post_unflatten_transform(
        self,
        tensor: torch.Tensor,
        param_extension: Any,
    ) -> torch.Tensor:
        """E.g. converting local tensor to ``DistributedTensor``.

        ``param_extension`` is whatever :meth:`pre_flatten_transform`
        returned for this parameter.
        """
        ...

    @abstractmethod
    def chunk_tensor(
        self,
        tensor: torch.Tensor,
        rank: int,
        world_size: int,
        num_devices_per_node: int,
        pg: dist.ProcessGroup,
    ) -> torch.Tensor:
        """Shards a tensor to chunks and returns the local chunk."""
        ...

    @abstractmethod
    def pre_load_state_dict_transform(
        self,
        tensor: torch.Tensor,
    ) -> Tuple[torch.Tensor, List[Shard]]:
        """
        This is to be called before loading a *sharded* model state dict and
        should return the tensor and list of shards from which to load data.
        """
        ...
# Module-level singleton holding the active extensions implementation.
# None means "no extensions installed": every _ext_* helper then falls back
# to its default behavior.
_extensions: Optional[FSDPExtensions] = None


def _set_fsdp_extensions(flattener: FSDPExtensions) -> None:
    """Install *flattener* as the process-wide FSDP extensions hook."""
    global _extensions
    _extensions = flattener
def _ext_pre_flatten_transform(
    tensor: torch.Tensor,
) -> Tuple[torch.Tensor, Optional[Any]]:
    """Apply the registered pre-flatten hook, if any.

    Falls back to ``(tensor, None)`` both when no extension is installed
    and when the extension reports no per-parameter extension state.
    """
    if _extensions is None:
        return tensor, None
    new_tensor, extension = _extensions.pre_flatten_transform(tensor)
    if extension is None:
        return tensor, None
    return new_tensor, extension
def _ext_post_unflatten_transform(
    tensor: torch.Tensor,
    param_extension: Any,
) -> torch.Tensor:
    """Apply the registered post-unflatten hook when both an extensions
    object and per-parameter extension state are present; otherwise return
    the tensor unchanged."""
    if _extensions is None or param_extension is None:
        return tensor
    return _extensions.post_unflatten_transform(tensor, param_extension)
def _ext_chunk_tensor(
    tensor: torch.Tensor,
    rank: int,
    world_size: int,
    num_devices_per_node: int,
    pg: dist.ProcessGroup,
) -> torch.Tensor:
    """Shard ``tensor`` and return the local chunk, delegating to the
    registered extension's ``chunk_tensor`` or, absent one, to the default
    FSDP chunked-ShardedTensor helper."""
    if _extensions is not None:
        chunk_fn = _extensions.chunk_tensor
    else:
        chunk_fn = _create_chunk_sharded_tensor
    return chunk_fn(tensor, rank, world_size, num_devices_per_node, pg)
def _ext_pre_load_state_dict_transform(
    tensor: torch.Tensor,
) -> Tuple[torch.Tensor, List[Shard]]:
    """Return ``(tensor, shards)`` from which a sharded state-dict entry is
    loaded, via the registered extension when one is installed."""
    if _extensions is not None:
        return _extensions.pre_load_state_dict_transform(tensor)
    # Without an extension the input must already be a ShardedTensor
    # (exact type check, matching the original contract).
    assert type(tensor) is ShardedTensor
    return tensor, tensor.local_shards()
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
3cf3c1f1d35bc706b4bad88d510bd47402af2346
|
c487885e2b43f1dbaa8b06a6ad379550a6d8de16
|
/work/022-web-crawl/program.py
|
830209341c6466bf3b46af269bdcfc7aa8d367bb
|
[] |
no_license
|
tbrlpld/100daysofweb-with-python-course
|
3395a9d055e7e0d9b04785a48fe7dbd5e8d3a080
|
6b80c01a33d144107fe1bebe402f22cf23fc5408
|
refs/heads/master
| 2023-01-24T10:57:17.308107
| 2020-09-07T01:02:57
| 2020-09-07T01:02:57
| 215,710,767
| 2
| 0
| null | 2023-01-05T14:54:23
| 2019-10-17T05:34:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
import requests
import bs4
from colorama import Fore
def get_html(episode_number: int) -> str:
    """Download and return the raw HTML of one Talk Python episode page."""
    print(Fore.YELLOW + f"Getting HTML for episode {episode_number}", flush=True)
    resp = requests.get(f'https://talkpython.fm/{episode_number}')
    # Surface HTTP errors (404/5xx) immediately instead of parsing junk.
    resp.raise_for_status()
    return resp.text
def get_title(html: str, episode_number: int) -> str:
    """Extract the first <h1> text from an episode page, or "MISSING"."""
    print(Fore.CYAN + f"Getting TITLE for episode {episode_number}", flush=True)
    header = bs4.BeautifulSoup(html, 'html.parser').select_one('h1')
    return header.text.strip() if header else "MISSING"
def get_title_range():
    """Fetch and print titles for a small, hard-coded episode range."""
    # Please keep this range pretty small to not DDoS my site. ;)
    for episode in range(150, 170):
        html = get_html(episode)
        title = get_title(html, episode)
        print(Fore.WHITE + f"Title found: {title}", flush=True)
def main():
    # Entry point: crawl the configured episode range, then report completion.
    get_title_range()
    print("Done.")
if __name__ == '__main__':
main()
|
[
"tibor@lpld.io"
] |
tibor@lpld.io
|
f6719e445232e1c84992d697f3fcc76afaf4c267
|
7e96ba20c25c6fb56af6ccd36b3b6d68df6a081c
|
/Kyle_Marienthal/DJANGO/wishlist/apps/wish_app/urls.py
|
d928425723734900ca75a11317fa753b0ac948ee
|
[] |
no_license
|
CodingDojoDallas/python_september_2017
|
9d8cd74131a809bc6b13b7f465594cf8b1e2fd75
|
f9f2f7b39bf9c4fceda3df5dc7424164aa5d5df5
|
refs/heads/master
| 2021-01-23T08:52:22.899994
| 2017-10-30T17:00:55
| 2017-10-30T17:00:55
| 102,558,291
| 2
| 14
| null | 2018-01-13T05:28:34
| 2017-09-06T03:28:38
|
Python
|
UTF-8
|
Python
| false
| false
| 557
|
py
|
from django.conf.urls import url
from . import views
# Wishlist app routes: a dashboard plus CRUD-style actions on wish items,
# where <id> captures the numeric primary key of the targeted wish.
urlpatterns = [
    url(r'^dashboard$', views.dashboard, name='dashboard'),
    url(r'^wish_item$', views.wish_item, name='wish_item'),
    url(r'^create$', views.create_item, name='create_item'),
    url(r'^show_wish/(?P<id>\d+)$', views.show_wish, name='show_wish'),
    url(r'^delete_wish/(?P<id>\d+)$', views.delete_wish, name='delete_wish'),
    url(r'^remove_wish/(?P<id>\d+)$', views.remove_wish, name='remove_wish'),
    url(r'^add_wish/(?P<id>\d+)$', views.add_wish, name='add_wish'),
]
|
[
"kylemarienthal@gmail.com"
] |
kylemarienthal@gmail.com
|
2569b26ea00537888870b42898de01514eb98c50
|
a873f3cd46a10ad879fc56d78e1f533d8bf486c0
|
/vova_project/vova_resful/接口/loc3.py
|
1ebd4430df919e0ff85e243af0f32555921ed583
|
[] |
no_license
|
shenhaiyu0923/resful
|
d0301b39363e6b3d3659f62fa4a9b2532ebcd225
|
1e66cae7d68fa231794776953cc1a5e999bf36c6
|
refs/heads/master
| 2021-07-08T20:46:57.300298
| 2021-06-01T08:17:27
| 2021-06-01T08:17:27
| 244,308,016
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,299
|
py
|
from locust import HttpLocust, TaskSequence, task, TaskSet
from pprint import pprint
from random import randint
import os
'''
1. 实现登陆基本功能,输出响应,脚本正确
2. 多用户随机登陆:在doLogin方法里构造随机数据。 -LR:参数化 Jmeter : 参数化
3. 添加初始化方法on_start:类似于构造方法,每个用户只运行一次
4. 添加检查点:断言
- 在请求方法中设置catch_response参数为True
- 调用success和feature方法标注成功或失败
'''
# 任务类
#class TestLogin(TaskSet):TaskSequence 继承自TaskSet
class TestLogin(TaskSequence):
    """Login load-test task: POSTs randomly chosen credentials to the signin
    endpoint and manually marks the request success/failure (checkpoint)."""
    def on_start(self):
        # Runs once per simulated user before any @task executes.
        # The request body is a dict: username=byhy&password=88888888
        self.loginnData = [
            {'username': 'byhy', 'password': '88888888'},
            {'username': 'byhy1', 'password': '888888881'},]
        print('------on_start-------')
    @task
    def doLogin(self):
        # Pick a random credential set: random number modulo the list length.
        ranIndex = randint(1, 1000) % len(self.loginnData)
        # NOTE: the original also built an unused hard-coded `data` dict here;
        # it shadowed the real payload below and has been removed.
        url = 'http://127.0.0.1:8001/api/mgr/signin'
        # catch_response=True lets us decide success/failure ourselves below.
        response = self.client.post(url, data=self.loginnData[ranIndex], catch_response=True)
        # Checkpoint: the API signals success with '"ret": 0' in the body.
        if '"ret": 0' in response.text:
            response.success()
        else:
            response.failure(' Can not login!')
        print(self.loginnData[ranIndex]['username'])
        print(response.text)
class WebSite(HttpLocust):
    # Locust user class: binds the login task set and the per-user wait
    # interval (milliseconds) between consecutive tasks.
    task_set = TestLogin
    min_wait = 1000
    max_wait = 3000
if __name__ == "__main__":
os.system("locust -f loc3.py --web-host=127.0.0.1")
'''
class UserBehavior(TaskSequence):
@task(1)
def byhy(self):
data = {
"username": "byhy",
"password": "88888888",
}
url = 'http://127.0.0.1:8001/api/mgr/signin'
r = self.client.post(url, data=data)
pprint(r.json())
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 1000
max_wait = 3000
if __name__ == "__main__":
import os
os.system("locust -f loc3.py --web-host=127.0.0.1")
'''
|
[
"1802161998@qq.com"
] |
1802161998@qq.com
|
96702d0e44ae4a9a31a61c1639af7cc8917fd069
|
7e72e16f43170749dada023624a88fd622727639
|
/jdcloud_sdk/services/monitor/models/DescribeAlarmingRulesEnd.py
|
05a8f48f0d26c1478fdc96f2918611bc48513a30
|
[
"Apache-2.0"
] |
permissive
|
jdcloud-demo/jdcloud-sdk-python
|
4dc1e814217df16c5f60f5e4b3f8260b770f9d2b
|
fddc2af24031c597948b8b8091978ac7e01a2695
|
refs/heads/master
| 2020-07-11T18:19:59.688112
| 2019-08-23T05:55:18
| 2019-08-23T05:55:18
| 204,613,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,320
|
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class DescribeAlarmingRulesEnd(object):
    """Auto-generated response model: one page of alarm rules that have not
    yet recovered (see the generator notice in the file header)."""
    def __init__(self, alarmHistoryList=None, numberPages=None, numberRecords=None, pageNumber=None, pageSize=None):
        """
        :param alarmHistoryList: (Optional) alarms that have not yet recovered
        :param numberPages: (Optional) total number of pages
        :param numberRecords: (Optional) total number of records
        :param pageNumber: (Optional) current page number
        :param pageSize: (Optional) page size
        """
        self.alarmHistoryList = alarmHistoryList
        self.numberPages = numberPages
        self.numberRecords = numberRecords
        self.pageNumber = pageNumber
        self.pageSize = pageSize
|
[
"oulinbao@jd.com"
] |
oulinbao@jd.com
|
728e0e56dc869b0274f7b832fdc35d1ddddf9393
|
57da072d37d59f00301e7483fdee067a244f24ce
|
/autobots/spider/test.py
|
16cdf605c823d2c3aa45aba5b337bb417a119592
|
[] |
no_license
|
KonroyZhu/auto_project
|
47919879e5f4b78ef082e7e76be2c2bb958018d3
|
c0f10e8ee24342ede402a5694a20d160322eb8c1
|
refs/heads/master
| 2021-03-30T21:17:46.470105
| 2018-03-10T09:41:44
| 2018-03-10T09:41:44
| 124,640,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

url = "https://www.baidu.com/"
# Spoof a desktop Firefox user agent so the site serves a normal page to the
# headless PhantomJS browser.
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (" Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0")
# BUG FIX: the original instantiated webdriver.PhantomJS() once without
# capabilities and then immediately rebound `driver`, leaking a PhantomJS
# process. Create the driver exactly once, with the capabilities.
driver = webdriver.PhantomJS(desired_capabilities=dcap)
driver.implicitly_wait(20)  # seconds to poll for elements before timing out
try:
    driver.get(url=url)
finally:
    driver.quit()  # shut down the PhantomJS process (previously never closed)
|
[
"1433192948@qq.com"
] |
1433192948@qq.com
|
cee42ce181f4e1b5dfe899aa01006622846891c3
|
1a6c2be5ff1a8364c97a1ede23c824b2579ecf79
|
/tfx/tools/cli/kubeflow_v2/commands/run_test.py
|
46bb0730fdc04d5a2c3f65c8b83d3ee5b7b8f0ea
|
[
"Apache-2.0"
] |
permissive
|
418sec/tfx
|
fa1a4690df2178e9c6bd24f97df0bbde7436df95
|
df1529c91e52d442443eca5968ff33cf0a38dffa
|
refs/heads/master
| 2023-04-18T12:25:38.098958
| 2021-04-28T16:11:00
| 2021-04-28T16:11:00
| 333,769,030
| 2
| 1
|
Apache-2.0
| 2021-04-28T16:11:01
| 2021-01-28T13:35:14
| null |
UTF-8
|
Python
| false
| false
| 1,587
|
py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Kubeflow V2 run commands."""
# TODO(b/169094706): Add kokoro test coverage for this test.
import codecs
import locale
import os
import sys
from click import testing as click_testing
import mock
import tensorflow as tf
# Fake GCP project ID, API key, docker image tag and job name under test.
# _TEST_API_KEY = 'fake-api-key'
# _TEST_PROJECT_ID = 'fake-gcp-project'
# _TEST_IMAGE = 'gcr.io/fake-image:fake-tag'
# _TEST_JOB_NAME = 'taxi-pipeline-1'
# TODO(b/169094706): re-surrect the tests when the unified client becomes
# available.
class RunTest(tf.test.TestCase):
    """Test fixture for the Kubeflow V2 `run` CLI commands (currently only
    environment setup; the actual tests are pending — see TODOs above)."""
    def setUp(self):
        # Change the encoding for Click since Python 3 is configured to use ASCII as
        # encoding for the environment.
        super().setUp()
        if codecs.lookup(locale.getpreferredencoding()).name == 'ascii':
            os.environ['LANG'] = 'en_US.utf-8'
        self.runner = click_testing.CliRunner()
        # Stub out handler_factory so CLI imports need no real handlers.
        sys.modules['handler_factory'] = mock.Mock()
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow-extended-nonhuman@googlegroups.com"
] |
tensorflow-extended-nonhuman@googlegroups.com
|
689cb975bfecd8db5e093abf10d4ce088eda3841
|
77c2010bb9533ecbdfa46cd41c16ee5ae26e94fa
|
/blog/urls.py
|
f47fe3383b5e4c15a25bebd8f057c5476da21240
|
[] |
no_license
|
dimansion/portfolio-django
|
b2cbb28dff97dd03cdf795f0bc661d39bcfae83d
|
2dffe0e8579b2a426cb7aceb1ee085933b122d90
|
refs/heads/master
| 2020-05-23T08:15:38.205372
| 2017-03-05T14:44:14
| 2017-03-05T14:44:14
| 70,251,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
from django.conf.urls import url
from django.contrib import admin
from .views import (
post_list,
post_create,
post_detail,
post_update,
post_delete,
)
# Blog routes: list/create at fixed paths, then slug-addressed
# detail/edit/delete views for a single post.
urlpatterns = [
    url(r'^$', post_list, name='list'),
    url(r'^create/$', post_create),
    url(r'^(?P<slug>[\w-]+)/$', post_detail, name='detail'),
    url(r'^(?P<slug>[\w-]+)/edit/$', post_update, name='update'),
    url(r'^(?P<slug>[\w-]+)/delete/$', post_delete),
    #url(r'^<posts></posts>/$', "<appname>.views.<function_name>"),
]
|
[
"dimansional@gmail.com"
] |
dimansional@gmail.com
|
952a83fef5c7290c002d8325d0d09c4de2fc2f89
|
94d5467b1315791fa75165eb862fdd8fef300958
|
/yunyan_baotou/src/user_prob/bak/prob_trans.py
|
5c962bdbf3d75cd901bff55b83083745fea7fde2
|
[] |
no_license
|
scmsqhn/code
|
e31926174c247d49c1db8f121e3ec1b82f8a2d9d
|
b389d7dc5fafad8a4185a03cd6d5519ccf8f99df
|
refs/heads/master
| 2022-12-09T05:37:07.065840
| 2019-05-14T01:55:07
| 2019-05-14T01:55:07
| 185,903,771
| 1
| 0
| null | 2022-12-08T05:05:51
| 2019-05-10T02:22:28
|
Python
|
UTF-8
|
Python
| false
| false
| 448
|
py
|
#P={'B': {'E': -0.510825623765990, 'M': -0.916290731874155},
# 'E': {'B': -0.5897149736854513, 'S': -0.8085250474669937},
# 'M': {'E': -0.33344856811948514, 'M': -1.2603623820268226},
# 'S': {'B': -0.7211965654669841, 'S': -0.6658631448798212}}
# Transition log-probabilities between tagging states — presumably the
# B/M/E/S (begin/middle/end/single) word-segmentation states; confirm
# against the consumer of this table. Values are natural-log probabilities
# (closer to 0 is more likely); -1e+10 effectively forbids a transition.
# The commented-out table above is an earlier estimate kept for reference.
P={'B': {'E': -3.2770032528476309, 'M': -1.030296703073057},
'E': {'B': -3.2770032528476309 , 'S': -1e+10},
'M': {'E': -1.030296703073057, 'M': -1.3924866568570602},
'S': {'B': -1e+10, 'S': -1e+10}}
|
[
"2364839934@qq.com"
] |
2364839934@qq.com
|
d923053a034a799be752fd757cda0a0a54a7f8e4
|
b3c3b09a2abc71b35fc54da16f2b4d88b254fc4b
|
/zerver/views/development/integrations.py
|
fba4308be9525cf13491593a658ca06e12e346b9
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
yovanycunha/zulip
|
2fc57ba2f35736d13b609f763e1b36f6686c4a80
|
339f742578c15cc9c72d44963e621986629818bb
|
refs/heads/master
| 2020-09-03T04:12:41.356314
| 2019-11-03T22:02:37
| 2019-11-03T23:51:19
| 219,382,296
| 1
| 0
|
Apache-2.0
| 2019-11-03T23:51:44
| 2019-11-03T23:51:44
| null |
UTF-8
|
Python
| false
| false
| 5,681
|
py
|
import os
import ujson
from typing import Any, Dict, List
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.test import Client
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.response import json_success, json_error
from zerver.models import UserProfile, get_realm
from zerver.lib.webhooks.common import get_fixture_http_headers, \
standardize_headers
ZULIP_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../')
def get_webhook_integrations() -> List[str]:
    """Return the names of all registered webhook integrations."""
    return [webhook.name for webhook in WEBHOOK_INTEGRATIONS]
def dev_panel(request: HttpRequest) -> HttpResponse:
    """Render the development panel listing integrations and webhook bots."""
    context = {
        "integrations": get_webhook_integrations(),
        "bots": UserProfile.objects.filter(
            is_bot=True, bot_type=UserProfile.INCOMING_WEBHOOK_BOT),
    }
    return render(request, "zerver/integrations/development/dev_panel.html", context)
def send_webhook_fixture_message(url: str=REQ(),
                                 body: str=REQ(),
                                 is_json: bool=REQ(),
                                 custom_headers: Dict[str, Any]=REQ()) -> HttpResponse:
    """POST a fixture payload to a webhook URL via the Django test client.

    Custom headers are standardized first; HTTP_HOST defaults to the "zulip"
    realm's host, and the content type defaults according to ``is_json``.
    """
    client = Client()
    realm = get_realm("zulip")
    headers = standardize_headers(custom_headers)
    http_host = headers.pop("HTTP_HOST", realm.host)
    default_content_type = "application/json" if is_json else "text/plain"
    content_type = headers.pop("HTTP_CONTENT_TYPE", default_content_type)
    return client.post(url, body, content_type=content_type, HTTP_HOST=http_host,
                       **headers)
@has_request_variables
def get_fixtures(request: HttpResponse,
                 integration_name: str=REQ()) -> HttpResponse:
    """Return every fixture (body + HTTP headers) of a webhook integration.

    Responds with a 404 json_error when the integration name is unknown or
    the integration has no fixtures directory.
    """
    integrations = get_webhook_integrations()
    if integration_name not in integrations:
        return json_error("\"{integration_name}\" is not a valid webhook integration.".format(
            integration_name=integration_name), status=404)
    fixtures = {}
    fixtures_dir = os.path.join(ZULIP_PATH, "zerver/webhooks/{integration_name}/fixtures".format(
        integration_name=integration_name))
    if not os.path.exists(fixtures_dir):
        msg = ("The integration \"{integration_name}\" does not have fixtures.").format(
            integration_name=integration_name)
        return json_error(msg, status=404)
    for fixture in os.listdir(fixtures_dir):
        fixture_path = os.path.join(fixtures_dir, fixture)
        with open(fixture_path, 'r') as f:
            body = f.read()
        try:
            body = ujson.loads(body)
        except ValueError:
            pass  # The file extension will be used to determine the type.
        headers_raw = get_fixture_http_headers(integration_name,
                                               "".join(fixture.split(".")[:-1]))
        headers = {}
        for header in headers_raw:
            if header.startswith("HTTP_"):  # HTTP_ is a prefix intended for Django.
                # BUG FIX: strip the exact "HTTP_" prefix. str.lstrip() takes a
                # *set* of characters, so "HTTP_HOST".lstrip("HTTP_") == "OST",
                # silently corrupting header names.
                headers[header[len("HTTP_"):]] = headers_raw[header]
            else:
                headers[header] = headers_raw[header]
        fixtures[fixture] = {"body": body, "headers": headers}
    return json_success({"fixtures": fixtures})
@has_request_variables
def check_send_webhook_fixture_message(request: HttpRequest,
                                       url: str=REQ(),
                                       body: str=REQ(),
                                       is_json: bool=REQ(),
                                       custom_headers: str=REQ()) -> HttpResponse:
    """Send one fixture message and report the result.

    On HTTP 200 the webhook response is wrapped in a success payload;
    any other status is passed straight through to the caller.
    """
    try:
        custom_headers_dict = ujson.loads(custom_headers)
    except ValueError as ve:
        return json_error("Custom HTTP headers are not in a valid JSON format. {}".format(ve))  # nolint
    response = send_webhook_fixture_message(url, body, is_json,
                                            custom_headers_dict)
    if response.status_code != 200:
        return response
    responses = [{"status_code": response.status_code,
                  "message": response.content}]
    return json_success({"responses": responses})
@has_request_variables
def send_all_webhook_fixture_messages(request: HttpRequest,
                                      url: str=REQ(),
                                      integration_name: str=REQ()) -> HttpResponse:
    """Send every fixture of an integration to ``url``, collecting per-fixture
    status codes and response bodies."""
    fixtures_dir = os.path.join(ZULIP_PATH, "zerver/webhooks/{integration_name}/fixtures".format(
        integration_name=integration_name))
    if not os.path.exists(fixtures_dir):
        msg = ("The integration \"{integration_name}\" does not have fixtures.").format(
            integration_name=integration_name)
        return json_error(msg, status=404)
    responses = []
    for fixture in os.listdir(fixtures_dir):
        with open(os.path.join(fixtures_dir, fixture), 'r') as f:
            content = f.read()
        # Split "name.parts.ext" into the concatenated name and the extension.
        *name_parts, fixture_format = fixture.split(".")
        fixture_name = "".join(name_parts)
        headers = get_fixture_http_headers(integration_name, fixture_name)
        is_json = fixture_format == "json"
        response = send_webhook_fixture_message(url, content, is_json, headers)
        responses.append({"status_code": response.status_code,
                          "fixture_name": fixture,
                          "message": response.content})
    return json_success({"responses": responses})
|
[
"tabbott@zulipchat.com"
] |
tabbott@zulipchat.com
|
45d3ae300ef4fa451f0e6b75cff19ba93f979adc
|
aa0270b351402e421631ebc8b51e528448302fab
|
/sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/operations/_protection_policy_operation_results_operations.py
|
58b5481810d826165bfeac369e00feb3a9394861
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
fangchen0601/azure-sdk-for-python
|
d04a22109d0ff8ff209c82e4154b7169b6cb2e53
|
c2e11d6682e368b2f062e714490d2de42e1fed36
|
refs/heads/master
| 2023-05-11T16:53:26.317418
| 2023-05-04T20:02:16
| 2023-05-04T20:02:16
| 300,440,803
| 0
| 0
|
MIT
| 2020-10-16T18:45:29
| 2020-10-01T22:27:56
| null |
UTF-8
|
Python
| false
| false
| 7,201
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import RecoveryServicesBackupClientMixinABC, _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    vault_name: str, resource_group_name: str, policy_name: str, operation_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET HttpRequest for a protection-policy operation result.

    Interpolates the ARM resource path, pins the service api-version
    (default "2023-02-01"), and sets a JSON Accept header. Auto-generated
    by AutoRest — keep edits in sync with the generator.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2023-02-01"] = kwargs.pop("api_version", _params.pop("api-version", "2023-02-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}/operationResults/{operationId}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "vaultName": _SERIALIZER.url("vault_name", vault_name, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "policyName": _SERIALIZER.url("policy_name", policy_name, "str"),
        "operationId": _SERIALIZER.url("operation_id", operation_id, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class ProtectionPolicyOperationResultsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.recoveryservicesbackup.activestamp.RecoveryServicesBackupClient`'s
        :attr:`protection_policy_operation_results` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs):
        # AutoRest passes (client, config, serializer, deserializer) either
        # positionally or as keywords; accept both calling conventions.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def get(
        self, vault_name: str, resource_group_name: str, policy_name: str, operation_id: str, **kwargs: Any
    ) -> _models.ProtectionPolicyResource:
        """Provides the result of an operation.
        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :param policy_name: Backup policy name whose operation's result needs to be fetched. Required.
        :type policy_name: str
        :param operation_id: Operation ID which represents the operation whose result needs to be
         fetched. Required.
        :type operation_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProtectionPolicyResource or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionPolicyResource
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP statuses to SDK exception types; callers may
        # extend/override via the "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Per-call api-version override; defaults to the client configuration.
        api_version: Literal["2023-02-01"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[_models.ProtectionPolicyResource] = kwargs.pop("cls", None)
        request = build_get_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            policy_name=policy_name,
            operation_id=operation_id,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("ProtectionPolicyResource", pipeline_response)
        if cls:
            # Custom response hook: hand the caller the raw pipeline details.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}/operationResults/{operationId}"
    }
|
[
"noreply@github.com"
] |
fangchen0601.noreply@github.com
|
5ac37bff1e1c67144707a66c27ca55b95a253605
|
e1b8fb9a5500516f28d3d7e9a5f259c49ef35f14
|
/top/api/rest/ItempropvaluesGetRequest.py
|
1641824f67bf20947dfd5c1e6d7cc49197575d1e
|
[] |
no_license
|
htom78/taobao_comet_py
|
9224dbca1a413a54bcc5569873e4c7a9fc9ba059
|
ad8b2e983a14d3ab7665244449f79dd72f390815
|
refs/heads/master
| 2020-05-17T10:47:28.369191
| 2013-08-27T08:50:59
| 2013-08-27T08:50:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
'''
Created by auto_sdk on 2013-06-16 16:36:02
'''
from top.api.base import RestApi
class ItempropvaluesGetRequest(RestApi):
    """Auto-generated request wrapper for the taobao.itempropvalues.get API."""
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # API request parameters — populated by the caller before sending.
        # Exact semantics are defined by the Taobao Open Platform docs for
        # taobao.itempropvalues.get; confirm there before relying on them.
        self.cid = None
        self.fields = None
        self.pvs = None
        self.type = None
    def getapiname(self):
        # The remote method name this request maps to.
        return 'taobao.itempropvalues.get'
|
[
"tomhu@ekupeng.com"
] |
tomhu@ekupeng.com
|
ef5540685a093ff38d640fd18b6e7e5528ee2196
|
8292648c36f4b1e8eb70c0992eec3737dc7d7749
|
/exam_practice/Samples/Sample 6/sample_6.py
|
fb8d6c663edd847d65fb057da8c8f45486bd82f2
|
[] |
no_license
|
LukeElliman/Sandbox
|
6f4dc2b57db4475dab376fa4de8ec7b3a0cd238e
|
519ab171c121ca7f7dc22c484836314b816033be
|
refs/heads/master
| 2023-05-24T06:53:57.638685
| 2021-06-05T12:57:12
| 2021-06-05T12:57:12
| 344,040,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
def count_words(sentence):
    """Count the whitespace-separated words in *sentence*.

    Uses str.split() with no argument so consecutive spaces count as one
    separator and an empty/blank sentence yields 0. The original
    split(" ") produced empty "words" for repeated spaces and returned 1
    for the empty string.
    """
    return len(sentence.split())
print(count_words("This is a sentence with words in it"))
print(count_words(("Hello, my name is Luke, whats your's?")))
|
[
"luke.elliman@my.jcu.edu.au"
] |
luke.elliman@my.jcu.edu.au
|
e384b0b89ef2675764b8f90db90e2618a131954f
|
0e3d0ac18a0605c26ac004c6da904d06d1f93ad0
|
/decorators_exercise/execution_time.py
|
1c81a3a6fb46897c9dd2a914709b3568ab0a67a0
|
[] |
no_license
|
lion963/Python-OOP
|
a74c85918bf7400dc5ffc82ff4c02b699969b1b1
|
24d184030f6cac8288d27a17cecb64bd133c1cf6
|
refs/heads/main
| 2023-04-06T05:48:53.945037
| 2021-04-15T07:28:35
| 2021-04-15T07:28:35
| 333,083,111
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
import functools
import time
def exec_time(func):
    """Decorator: run *func* and return its wall-clock duration in seconds.

    Note that the wrapped function's own return value is intentionally
    discarded — callers receive the elapsed time instead (this module
    prints those timings). Fixes vs. the original: keyword arguments are
    now forwarded, and functools.wraps preserves the wrapped function's
    name and docstring.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        func(*args, **kwargs)
        return time.time() - start_time
    return wrapper
@exec_time
def loop(start, end):
    """Accumulate the integers in [start, end) one by one; @exec_time turns
    the call's result into the elapsed time."""
    total = 0
    for value in range(start, end):
        total += value
    return total
print(loop(1, 10000000))
@exec_time
def concatenate(strings):
    """Join *strings* via repeated ``+=`` — kept as the naive quadratic
    loop on purpose, since this module exists to time it."""
    result = ""
    for piece in strings:
        result += piece
    return result
print(concatenate(["a" for i in range(1000000)]))
|
[
"lion963@mail.bg"
] |
lion963@mail.bg
|
3b20d8bc8956d16dfbb697f43dec97305b5fa7df
|
8ef6dbdd3791dd7fbe1320483a22e0540c54359b
|
/Core Python/Dictionary/19Nested.py
|
83bae4d2c11b6adf2d1a3a4876f39b5fc4330d28
|
[] |
no_license
|
kundan4U/Python
|
8095eecba088910d2068a6375c907d47f2bb9c95
|
6b3fdbe66edb52e9f612352abb9c6563547b6297
|
refs/heads/main
| 2023-06-24T06:29:43.770282
| 2021-07-21T18:40:11
| 2021-07-21T18:40:11
| 388,213,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# Nested dictionaries
# An "empty" nested dictionary: three keys, each mapping to an empty dict.
z={
1:{},
2:{},
3:{}
}
# Accessing elements: a top-level value, a nested dict, and a nested value.
a = {'course':'python','fees':15000,1:{'course':'javaScript','fees':100000}}
print(a['course'])
print(a[1])
print(a[1]['fees'],"\n")
# Modifying an element in place.
print("Before Modifieng",a)
a['course']= 'Machine Learning'
print(" After modife ",a)
|
[
"kc946605@gmail.com"
] |
kc946605@gmail.com
|
04facd847cbb9cda0046a3abb22c61f30f0cee65
|
6f844abf7c436a4ae3444744c523f5f1fe6367a4
|
/UserBase/migrations/0016_auto_20171123_1931.py
|
fb52007d0c78de3f27659bd6a8992c3d97a6bb13
|
[] |
no_license
|
deepakbhamla/Halanx_Backend_Task
|
1dd02a1061eb4d4f6072a3276735cb606dfec752
|
acda98c6b79690689317585dd06cf0c1cfc65192
|
refs/heads/master
| 2022-04-12T10:02:29.740781
| 2020-03-12T12:17:22
| 2020-03-12T12:27:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-23 14:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes Customer.ProfilePic to a
    # nullable/blank TextField holding the image as Base64.
    dependencies = [
        ('UserBase', '0015_customer_profilepic'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customer',
            name='ProfilePic',
            field=models.TextField(blank=True, help_text='In Base64', null=True),
        ),
    ]
|
[
"patel.ayush08@gmail.com"
] |
patel.ayush08@gmail.com
|
84bf444bf1e614422ec13359d22a0d727da1ca4c
|
d9e0585e57b482d91e8af7514e683e2488e23381
|
/dbcog/models/leader_skill_model.py
|
804712af51304155ae45a7a870eda7e86f31d374
|
[
"MIT"
] |
permissive
|
TrendingTechnology/pad-cogs
|
d08abb8da8bf2763a4091a29139168d8c1d2333a
|
b913a4e16a6473b8b53fae4bda564bedcc82c876
|
refs/heads/master
| 2023-08-11T01:10:22.088761
| 2021-09-19T00:41:43
| 2021-09-19T00:41:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,847
|
py
|
import re
from .base_model import BaseModel
class LeaderSkillModel(BaseModel):
    """Leader-skill record: numeric stat payload plus localized names and
    descriptions (ja/en/ko)."""
    def __init__(self, **kwargs):
        self.leader_skill_id = kwargs['leader_skill_id']
        self.name_ja = kwargs['name_ja']
        self.name_en = kwargs['name_en']
        self.name_ko = kwargs['name_ko']
        self.max_hp = kwargs['max_hp']
        self.max_atk = kwargs['max_atk']
        self.max_rcv = kwargs['max_rcv']
        self.max_shield = kwargs['max_shield']
        self.max_combos = kwargs['max_combos']
        self.bonus_damage = kwargs['bonus_damage']
        self.mult_bonus_damage = kwargs['mult_bonus_damage']
        self.extra_time = kwargs['extra_time']
        # 'tags' arrives as a string of parenthesized numbers, e.g. "(1)(200)";
        # extract each number as an int.
        self.tags = [int(tag) for tag in re.findall(r'\((\d+)\)', kwargs['tags'])]
        self.desc_en = kwargs['desc_en']
        self.desc_ja = kwargs['desc_ja']
        self.desc_ko = kwargs['desc_ko']
    @property
    def data(self):
        # The numeric payload tuple; also what __eq__ compares.
        return (self.max_hp,
                self.max_atk,
                self.max_rcv,
                self.max_shield,
                self.max_combos,
                self.bonus_damage,
                self.mult_bonus_damage,
                self.extra_time)
    @property
    def desc(self):
        # Prefer the English description, falling back to Japanese.
        return self.desc_en or self.desc_ja
    @property
    def name(self):
        # Prefer the English name, falling back to Japanese.
        return self.name_en or self.name_ja
    @property
    def is_7x6(self):
        # Tag 200 marks this skill as a 7x6-board leader skill.
        return 200 in self.tags
    def to_dict(self):
        # Minimal serialization: id plus ja/en names only.
        return {
            'leader_skill_id': self.leader_skill_id,
            'name_ja': self.name_ja,
            'name_en': self.name_en,
        }
    def __eq__(self, other):
        # Equality: same id, same numeric payload, same English description.
        # NOTE(review): returns False (not NotImplemented) for foreign types,
        # and no __hash__ is defined here, so instances are unhashable unless
        # BaseModel provides one — confirm if that is intended.
        if isinstance(other, LeaderSkillModel):
            return self.leader_skill_id == other.leader_skill_id \
                   and self.data == other.data \
                   and self.desc_en == other.desc_en
        return False
|
[
"noreply@github.com"
] |
TrendingTechnology.noreply@github.com
|
46a00379b4971f43bbcd75efd489630c9201401e
|
470c6e447c7ee6daed90a0bf1216e2fb838282b6
|
/rtl/tasks/open.py
|
1b982debfdebfb9d21abbbfe2a35895eedbfcc43
|
[
"Apache-2.0"
] |
permissive
|
kelceydamage/raspi-tasks
|
d0300173b2eba274a5c0a974b7ecb8817586d22d
|
18aa323e3e2428c998b7472c226d05a00c8ae8c2
|
refs/heads/master
| 2020-07-02T11:55:27.276343
| 2019-08-10T06:42:42
| 2019-08-10T06:42:42
| 201,520,835
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,396
|
py
|
#!/usr/bin/env python3
# ------------------------------------------------------------------------ 79->
# Author: ${name=Kelcey Damage}
# Python: 3.5+
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Doc
# ------------------------------------------------------------------------ 79->
#
# Imports
# ------------------------------------------------------------------------ 79->
import zlib
import ast
import ujson as json
import numpy as np
from numpy import ndarray
from numpy import array
from rtl.common.task import Task
# Globals
# ------------------------------------------------------------------------ 79->
# Classes
# ------------------------------------------------------------------------ 79->
class Open(Task):
def __init__(self, kwargs, content):
super(Open, self).__init__(kwargs, content)
self.keys = [
'compression',
'delimiter',
'encoding'
]
self.defaults = {
'compression': False,
'delimiter': '\n',
'encoding': False
}
self.configuration = {}
self.mode = 'r'
for key in self.keys:
if key in kwargs:
self.configuration[key] = kwargs[key]
else:
self.configuration[key] = self.defaults[key]
def openfile(self):
if self.configuration['compression']:
self.mode = 'rb'
with open('{0}/{1}'.format(self.path, self.file), self.mode) as f:
r = f.read()
if self.configuration['compression']:
r = zlib.decompress(r).decode()
parts = r.replace('][', ']\n[').split('\n')
return parts
def decode(self, parts):
results = []
while parts:
item = parts.pop().strip('\n')
if item == '':
continue
if self.configuration['encoding']:
item = json.loads(item.rstrip())
else:
item = item.rstrip().split(self.configuration['delimiter'])
results.append(item)
return results
def open(self):
parts = self.openfile()
if parts == [''] or parts == '':
return [[False]]
results = self.decode(parts)
del parts
if self.mixed:
self.data = {i: results[i] for i in range(len(results))}
self.data['headers'] = self.headers
else:
self.ndata = np.ndarray(
(len(results), len(results[0])),
buffer=array(results),
dtype=np.dtype(int)
)
return self
# Functions
# ------------------------------------------------------------------------ 79->
def task_open(kwargs, contents):
return Open(kwargs, contents).open().getContents()
# Main
# ------------------------------------------------------------------------ 79->
|
[
"kdamage@palantir.com"
] |
kdamage@palantir.com
|
4d2c140c6db6f542dc0a49fd239d9fe840daa562
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/msData/datatypes/Facets/double/double_enumeration002.py
|
59133e9f2219248cc567d675c7fac346d16f2afd
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 273
|
py
|
from output.models.ms_data.datatypes.facets.double.double_enumeration002_xsd.double_enumeration002 import FooTypeFoo
from output.models.ms_data.datatypes.facets.double.double_enumeration002_xsd.double_enumeration002 import Test
obj = Test(
foo=FooTypeFoo.VALUE_1_1
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
5930d4f80c56d4ac4735ccaa84bd96cd822d5d74
|
13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab
|
/home--tommy--mypy/mypy/lib/python2.7/site-packages/statsmodels/graphics/tests/test_boxplots.py
|
503c5cc20c15d3e577a1709b13de1418c79ccd3c
|
[
"Unlicense"
] |
permissive
|
tommybutler/mlearnpy2
|
8ec52bcd03208c9771d8d02ede8eaa91a95bda30
|
9e5d377d0242ac5eb1e82a357e6701095a8ca1ff
|
refs/heads/master
| 2022-10-24T23:30:18.705329
| 2022-10-17T15:41:37
| 2022-10-17T15:41:37
| 118,529,175
| 0
| 2
|
Unlicense
| 2022-10-15T23:32:18
| 2018-01-22T23:27:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,315
|
py
|
import numpy as np
from numpy.testing import dec
from statsmodels.graphics.boxplots import violinplot, beanplot
from statsmodels.datasets import anes96
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_violinplot_beanplot():
# Test violinplot and beanplot with the same dataset.
data = anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
age = [data.exog['age'][data.endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels, jitter=True,
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels, jitter=True, side='right',
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels, jitter=True, side='left',
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels,
plot_opts={'bean_legend_text': 'text'})
plt.close(fig)
|
[
"tbutler.github@internetalias.net"
] |
tbutler.github@internetalias.net
|
3c2c612f1ce660f79c63116378101afddeb27721
|
cc4d7a0dc58e70379fda3d1f6b75c7b70ad1e205
|
/UDEMY/numbers.py
|
42aaed7267cc1885a724c942be82aff76500f3f3
|
[] |
no_license
|
evamaina/Python_practice
|
2da766d61c111135285323aa2b8fb50ee5b31be1
|
ecd6c255dc66b2dc6f2cd81ec79bc42a241bfca1
|
refs/heads/master
| 2020-03-24T00:08:17.911604
| 2018-07-25T09:17:41
| 2018-07-25T09:17:41
| 142,275,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Modify the method below to make sure only even numbers are returned.
def even_numbers():
evens = []
for number in numbers:
if number % 2 == 0:
evens.append(number)
return evens
# Modify the below method so that "Quit" is returned if the choice parameter is "q".
# Don't remove the existing code
def user_menu(choice):
if choice == "a":
return "Add"
elif choice == "q":
return "Quit"
else:
return None
|
[
"evajohnson714@gmail.com"
] |
evajohnson714@gmail.com
|
fd4b14a27556c53af09d1ab430e96ecf11199d9c
|
7c7c3a34b266e664cf63f710ae5aff5587672c91
|
/ALUS/countingBlobs/createTestingImagesFile.py
|
1c4ad82a1afd8acf6aee2de2c29ef1da109498f2
|
[] |
no_license
|
Schnei1811/PythonScripts
|
845594a886a1fecc81cf5d7c550abec325f006a3
|
89eb331357b7cea86f5b2d0b33089215b73f9481
|
refs/heads/main
| 2023-03-31T23:36:48.731570
| 2021-04-11T21:55:46
| 2021-04-11T21:55:46
| 356,950,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
import os
from glob import glob
from tqdm import tqdm
import cv2
# img_dir = "G:\\PythonData\\ALUS\\ALUS_Data\\"
# img_dir = "G:\\PythonData\\ALUS\\ALUS_Mixed_Test_Set\\"
img_dir = "C:\\Users\\Stefan\\Desktop\\CountBlobs\\LCFCN-master\\ALUS_BL\\"
# sizes = [1, 2, 3, 4, 5]
sizes = [5]
for div in sizes:
if not os.path.exists(img_dir + "full_data_test_div{}".format(div)):
os.makedirs(img_dir + "full_data_test_div{}".format(div))
div_path = img_dir + "full_data_test_div{}\\".format(div)
path_lst = []
#import ipdb;ipdb.set_trace()
for img_path in tqdm(glob(img_dir + "full_data_test\\*")):
img_name = div_path + img_path.split("\\")[-1][:-4] + ".JPG"
img = cv2.imread(img_path)
h, w, c = img.shape
resized_img = cv2.resize(img, (int(w / div), int(h / div)))
cv2.imwrite(img_name, resized_img)
path_lst.append(img_name)
with open(img_dir + "test_div{}.txt".format(div), "w") as f:
for path in path_lst:
f.write("%s\n" % path)
|
[
"stefan871@gmail.com"
] |
stefan871@gmail.com
|
3254f277b9f7bc17109e5f0eb6f62261ecc84387
|
9d278285f2bc899ac93ec887b1c31880ed39bf56
|
/ondoc/diagnostic/migrations/0143_merge_20190102_1610.py
|
600e06ca0f55228ae1e74f57b7435416c88120fa
|
[] |
no_license
|
ronit29/docprime
|
945c21f8787387b99e4916cb3ba1618bc2a85034
|
60d4caf6c52a8b70174a1f654bc792d825ba1054
|
refs/heads/master
| 2023-04-01T14:54:10.811765
| 2020-04-07T18:57:34
| 2020-04-07T18:57:34
| 353,953,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
# Generated by Django 2.0.5 on 2019-01-02 10:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('diagnostic', '0142_auto_20181229_0505'),
('diagnostic', '0139_auto_20181231_1658'),
]
operations = [
]
|
[
"navneetsingh@docprime.com"
] |
navneetsingh@docprime.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.