blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cec549844f429ed57e856702a1979e9d7ed82176
|
4574f5c8e491993dbb89b8a0abc63ede4b9adfc0
|
/src/rule_chains/dispatch.py
|
dd4fe6c45e938b70d29325a739976965815a027b
|
[
"Apache-2.0"
] |
permissive
|
deeso/rule-chains
|
93f472e79b2e2eb9a17618a11940b7fe9afb00bb
|
499e42626a4c1911be7916aabfcb76a7172a55cd
|
refs/heads/master
| 2021-01-21T08:11:53.675403
| 2018-03-13T05:37:23
| 2018-03-13T05:37:23
| 101,954,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,097
|
py
|
class ChainDispatchResult(object):
    """Container for the outcome of a single table dispatch.

    Carries the value extracted from the input, the extraction-rule match
    results and, once a chain has executed, the fields copied off that
    chain's own result object.
    """

    def __init__(self, table_name, chain_name=None, success=False,
                 chain_result=None,
                 block_results=None, chain_results=None, rvalue=None,
                 extraction_rule_results=None, outcome=False,
                 extraction_value=None, block_name=None):
        # NOTE: `success` and `chain_results` are accepted for interface
        # compatibility but are not stored (matching the original code).
        self.table_name = table_name
        self.chain_name = chain_name
        self.block_name = block_name
        self.block_results = block_results
        self.chain_result = chain_result
        self.chain_rvalue = rvalue
        self.extraction_rule_results = extraction_rule_results
        self.extraction_value = extraction_value
        # `outcome` is deliberately mirrored into two attributes.
        self.chain_outcome = outcome
        self.outcome = outcome

    def get_chain_results(self):
        """Return the stored chain result object (or None)."""
        return self.chain_result

    def get_rule_results(self):
        """Delegate to the chain result when one is present, else None."""
        if self.chain_result is None:
            return None
        return self.chain_result.get_rule_results()

    def get_rule_name(self):
        """Delegate to the chain result when one is present, else None."""
        if self.chain_result is None:
            return None
        return self.chain_result.get_rule_name()

    def update_from_chain_result(self, chain_result):
        """Copy the relevant fields off a freshly executed chain result."""
        self.chain_result = chain_result
        self.chain_name = chain_result.chain_name
        self.block_name = chain_result.block_name
        self.block_results = chain_result.block_results
        self.chain_rvalue = chain_result.rvalue
        self.chain_outcome = chain_result.outcome
        self.outcome = chain_result.outcome
class ChainDispatch(object):
    """Route a string to one of several rule chains.

    A value is extracted from the input (by matching ``extract_rule`` and
    feeding the match results through a small ``lambda``/``function``
    snippet) and used as a key into ``dispatch_table`` to pick the chain
    that is then executed.
    """

    def __init__(self, name, extract_rule, extract_type, extract_value,
                 all_blocks=None, any_blocks=None, none_blocks=None,
                 blocks=None, dispatch_table=None, perform_blocks=None):
        """Build a dispatcher.

        None sentinels replace the original mutable default arguments
        (``[]`` / ``{}``), which were shared across every instance.
        """
        self.name = name
        self.extract_rule = extract_rule
        self.dispatch_table = {} if dispatch_table is None else dispatch_table
        self.raw_value = extract_value
        self.perform_blocks = perform_blocks
        self.blocks = [] if blocks is None else blocks
        self.all_blocks = [] if all_blocks is None else all_blocks
        self.any_blocks = [] if any_blocks is None else any_blocks
        self.none_blocks = [] if none_blocks is None else none_blocks
        # execute_* methods fall back to self.frontend; initialise it so the
        # fallback hits the explicit "Missing frontend reference" path
        # instead of an AttributeError when update_frontend() was never run.
        self.frontend = None
        self.extract_value = self.code_factory(extract_type, extract_value)

    @classmethod
    def code_factory(cls, ctype, cvalue):
        """Compile an extractor snippet into a ``callable(state, results)``.

        SECURITY: eval() executes arbitrary code -- chain definitions must
        come from trusted sources only.
        """
        if ctype in ('lambda', 'function'):
            return eval(cvalue)
        # Unknown types degrade to an extractor that yields no value.
        return lambda state, res: None

    @classmethod
    def from_json(cls, json_data, block_objs=None, chains=None,
                  chains_def=None):
        """Build a ChainDispatch from its JSON definition.

        ``chains_def`` is kept for interface compatibility (unused).
        Raises Exception when a required field is missing.
        """
        block_objs = {} if block_objs is None else block_objs
        chains = {} if chains is None else chains
        name = json_data.get('name', None)
        extract_rule = json_data.get('extract_rule', None)
        etype = json_data.get('extract_type', None)
        evalue = json_data.get('extract_value', None)
        any_blocks = json_data.get('any', [])
        all_blocks = json_data.get('all', [])
        none_blocks = json_data.get('none', [])
        _blocks = any_blocks + all_blocks + none_blocks
        blocks = dict((c, block_objs.get(c)) for c in _blocks
                      if c in block_objs)
        perform_blocks = json_data.get('perform_blocks', [])
        if name is None or \
           extract_rule is None or \
           etype is None or \
           evalue is None:
            raise Exception("Missing required Block parameters")
        dispatch_table = {}
        entries = json_data.get('dispatch_table', [])
        # Robustness: accept either a JSON object ({value: chain_name})
        # or a list of [value, chain_name] pairs.
        if isinstance(entries, dict):
            entries = entries.items()
        for k, v in entries:
            dispatch_table[k] = chains.get(v, None)
        return cls(name, extract_rule, etype, evalue,
                   all_blocks=all_blocks, any_blocks=any_blocks,
                   none_blocks=none_blocks, blocks=blocks,
                   dispatch_table=dispatch_table,
                   perform_blocks=perform_blocks)

    def execute_value_extraction(self, string, frontend=None, state=None):
        """Match ``extract_rule`` against *string* and derive the dispatch value.

        Returns ``(value, match_results)``.  ``state`` defaults to a fresh
        dict per call (the original shared one mutable default).
        """
        state = {} if state is None else state
        frontend = frontend if frontend is not None else self.frontend
        results = frontend.match_pattern(self.extract_rule, string)
        value = self.extract_value(state, results.get('rule_results', {}))
        return value, results

    def execute_dispatch(self, string, frontend=None, state=None):
        """Extract a value from *string* and execute the chain it maps to.

        Always returns a ChainDispatchResult; when the extracted value has
        no entry in the dispatch table, the result carries only the
        extraction fields.
        """
        state = {} if state is None else state
        cdr = ChainDispatchResult(self.name)
        frontend = frontend if frontend is not None else self.frontend
        if frontend is None:
            raise Exception("Missing frontend reference")
        # TODO run a pre-check set of blocks or chains
        # before executing the value extraction
        value, rule_results = self.execute_value_extraction(string,
                                                            frontend, state)
        cdr.extraction_value = value
        cdr.extraction_rule_results = rule_results
        chains = self.dispatch_table.get(value, None)
        if chains is not None:
            chain_result = chains.execute_chains(string)
            cdr.update_from_chain_result(chain_result)
        return cdr

    def update_frontend(self, frontend):
        """Attach the frontend used when no explicit one is passed."""
        self.frontend = frontend
|
[
"adam.pridgen@thecoverofnight.com"
] |
adam.pridgen@thecoverofnight.com
|
879c72a749aa447e3cf0d98e4a0ad65d7b96ec4b
|
97ad602612adf894bdfab85c4867cac69b2d7c99
|
/learnpythonthehardway/erect-fence.py
|
c6590a9ee3c6e8e39d645b035aeca2371444cfce
|
[] |
no_license
|
cotncndy/leetcode-python
|
bebd15f3dd44e8ed1c5f33f29314977de4fcc141
|
591067d87209702c4d41e1a9fce88f9dd1815fed
|
refs/heads/master
| 2020-03-17T03:05:07.182353
| 2018-05-10T16:37:57
| 2018-05-10T16:37:57
| 133,219,257
| 0
| 3
| null | 2018-05-13T08:34:43
| 2018-05-13T08:34:43
| null |
UTF-8
|
Python
| false
| false
| 2,630
|
py
|
# There are some trees, where each tree is represented by (x,y) coordinate in a two-dimensional garden. Your job is
# to fence the entire garden using the minimum length of rope as it is expensive. The garden is well fenced only if
# all the trees are enclosed. Your task is to help find the coordinates of trees which are exactly located on the
# fence perimeter.
#
# Example 1:
# Input: [[1,1],[2,2],[2,0],[2,4],[3,3],[4,2]]
# Output: [[1,1],[2,0],[4,2],[3,3],[2,4]]
# Explanation:
#
# Example 2:
# Input: [[1,2],[2,2],[4,2]]
# Output: [[1,2],[2,2],[4,2]]
# Explanation:
#
# Even you only have trees in a line, you need to use rope to enclose them.
# Note:
#
# All trees should be enclosed together. You cannot cut the rope to enclose trees that will separate them in more
# than one group.
# All input integers will range from 0 to 100.
# The garden has at least one tree.
# All coordinates are distinct.
# Input points have NO order. No order required for output.
# Definition for a point.
class Point(object):
    """A 2-D point (LeetCode-style definition)."""

    def __init__(self, a=0, b=0):
        # Parameter names a/b kept to match the original LeetCode stub.
        self.x = a
        self.y = b
class Solution(object):
    """LeetCode 587 'Erect the Fence': convex hull with collinear boundary
    points kept, via a gift-wrapping (Jarvis march) sweep."""
    def outerTrees(self, points):
        """
        Return the points lying on the convex hull of *points*, including
        points that sit on the hull's edges (not only its corners).

        :type points: List[Point]
        :rtype: List[Point]
        """
        # res: hull points found so far; collinear: points on the current
        # candidate edge; start: anchor of the wrap.
        res, collinear, start = set(), set(), points[0]
        for p in points: # find the left most point
            if p.x < start.x:
                start = p
        res.add(start)
        cur = start
        while True:
            # NOTE(review): `next` shadows the builtin; kept as-is here.
            next = points[0]
            for p in points:
                if p == cur:
                    continue
                # dotProduct() actually computes the 2-D cross product of
                # (cur-next) and (p-next); sign gives p's side of the edge.
                cross = self.dotProduct(cur, next, p)
                if cross > 0:
                    # p is strictly more counter-clockwise: restart the
                    # collinear set for the new candidate edge.
                    next, collinear = p, set()
                elif cross == 0:
                    # p lies on the cur->next line; keep the farther point
                    # as the edge endpoint, remember the nearer one.
                    if self.dist(cur, next) < self.dist(cur, p):
                        collinear.add(next)
                        next = p
                    else:
                        collinear.add(p) # bugfixed
            for p in collinear:
                res.add(p)
            if next == start:
                # Wrapped all the way around the hull.
                break
            res.add(next)
            cur = next
        return list(res)
    def dotProduct(self, a, b, c):
        # Cross product of vectors (a - b) and (c - b), despite the name.
        baX, baY = a.x - b.x, a.y - b.y
        bcX, bcY = c.x - b.x, c.y - b.y
        return baX * bcY - baY * bcX
    def dist(self, a, b):
        # Squared Euclidean distance (no sqrt needed for comparisons).
        return (a.x - b.x) ** 2 + (a.y - b.y) ** 2
    def wrapper(self, a):
        # Convert [[x, y], ...] input into Point objects and solve.
        li = []
        for p in a:
            li.append(Point(p[0], p[1]))
        return self.outerTrees(li)
if __name__ == '__main__':
    # Smoke-test with the example from the problem statement.
    trees = [[1, 1], [2, 2], [2, 0], [2, 4], [3, 3], [4, 2]]
    s = Solution().wrapper(trees)
|
[
"xin_wei@intuit.com"
] |
xin_wei@intuit.com
|
f582bd1fe8a38bfb9010d139d33a72d4f79d7469
|
401aae63dde689f298c196b9063b6dca3ecf529b
|
/utils.py
|
4f3987e6e9f34679e576b4a3765a5a7ebe79e7e9
|
[] |
no_license
|
dunovank/Hierarchical-Attention-Network-1
|
1791c4afddaf786abd60e05bf3eb4b542c9a1306
|
558660c7030e41698b62702c88741f0a893c8509
|
refs/heads/master
| 2021-10-25T09:17:28.882012
| 2019-04-03T11:50:51
| 2019-04-03T11:50:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,591
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.lines import Line2D
def plot_grad_flow(named_parameters):
    '''Plot the mean and max gradient magnitude of each non-bias layer.

    Can be used for checking for possible gradient vanishing / exploding
    problems.  Usage: plug this function into the Trainer class after
    loss.backward() as "plot_grad_flow(self.model.named_parameters())"
    to visualize the gradient flow.

    Layers whose gradient stats cannot be read (typically because
    p.grad is None before any backward pass) are skipped entirely.
    '''
    ave_grads = []
    max_grads = []
    layers = []
    for n, p in named_parameters:
        if p.requires_grad and ("bias" not in n):
            try:
                grads = p.grad.abs()
                mean_grad = grads.mean()
                max_grad = grads.max()
            except Exception:
                # Best-effort: report and skip this layer.
                print("Encountered Exception at {}".format(n))
                continue
            # BUG FIX: append the layer name only after both stats were
            # computed -- the original appended `n` before the try block,
            # so a failing layer desynchronised the x-tick labels from
            # the plotted bars.
            layers.append(n)
            ave_grads.append(mean_grad)
            max_grads.append(max_grad)
    plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
    plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
    plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
    plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
    plt.xlim(left=0, right=len(ave_grads))
    plt.ylim(bottom=-0.001, top=0.5)  # zoom in on the lower gradient regions
    plt.xlabel("Layers")
    plt.ylabel("average gradient")
    plt.title("Gradient flow")
    plt.grid(True)
    plt.legend([Line2D([0], [0], color="c", lw=4),
                Line2D([0], [0], color="b", lw=4),
                Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
    plt.show()
|
[
"shikhar.chauhan@live.com"
] |
shikhar.chauhan@live.com
|
b69b52e5f9ea49a0fb6647fb95417e9d880c1ac1
|
5acc77c4d594c1750a9b7477499ee25b4c307bca
|
/ehpi_action_recognition/networks/action_recognition_nets/action_rec_net_ehpi.py
|
b0f025abbc592698624fe9a900a4b478838ee111
|
[
"MIT"
] |
permissive
|
noboevbo/ehpi_action_recognition
|
bc15a3c260c79b85a82844a2779c9b1ec9cf42fd
|
3b77eeb5103f0f11c8d4be993ec79dddad7e661c
|
refs/heads/master
| 2021-12-29T05:24:31.891044
| 2021-12-19T16:23:36
| 2021-12-19T16:23:36
| 180,351,212
| 113
| 23
| null | 2019-04-23T11:24:27
| 2019-04-09T11:22:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,202
|
py
|
from typing import List, Dict
import cv2
import numpy as np
import torch
from nobos_commons.data_structures.dimension import ImageSize
from nobos_commons.data_structures.human import Human
from nobos_commons.data_structures.humans_metadata.algorithm_output_buffer import AlgorithmOutputBuffer
from nobos_commons.data_structures.humans_metadata.algorithm_output_buffer_entry import AlgorithmOutputBufferEntry
from nobos_commons.feature_preparations.feature_vec_producers.from_skeleton_joints.feature_vec_producer_ehpi import \
FeatureVecProducerEhpi
from nobos_torch_lib.datasets.action_recognition_datasets.ehpi_dataset import RemoveJointsOutsideImgEhpi, NormalizeEhpi
from torch.autograd import Variable
class ActionRecNetEhpi(object):
def __init__(self, model, feature_vec_producer: FeatureVecProducerEhpi, image_size: ImageSize):
self.model = model
self.feature_vec_producer = feature_vec_producer
self.action_buffer: AlgorithmOutputBuffer = AlgorithmOutputBuffer(buffer_size=32)
self.remove = RemoveJointsOutsideImgEhpi(image_size)
self.normalize = NormalizeEhpi(image_size)
model.cuda()
model.eval()
def get_actions(self, humans: List[Human], frame_nr: int) -> Dict[str, np.ndarray]:
ehpi_vecs = []
for human in humans:
ehpi_vecs.append(
AlgorithmOutputBufferEntry(human.uid, self.feature_vec_producer.get_feature_vec(human.skeleton)))
self.action_buffer.add(ehpi_vecs, frame_nr)
humans_for_action_rec = self.action_buffer.get_all(only_full_buffer=True)
outputs: Dict[str, np.ndarray] = {}
for human_id, action_vecs in humans_for_action_rec.items():
ehpi_img = np.zeros((32, 15, 3), dtype=np.float32)
for frame_num, action_vec in enumerate(action_vecs):
if action_vec is None:
continue
ehpi_img[frame_num] = action_vec
ehpi_img = np.transpose(ehpi_img, (2, 0, 1))
# Set Blue Channel to zero
ehpi_img[2, :, :] = 0
# Normalize EHPI
tmp_dict = {'x': ehpi_img}
tmp_dict['x'] = self.remove(tmp_dict)['x']
ehpi_img = self.normalize(tmp_dict)['x']
# action_img = np.transpose(np.copy(ehpi_img), (2, 1, 0))
# action_img *= 255
# action_img = action_img.astype(np.uint8)
# # action_img = cv2.resize(action_img, (action_img.shape[1] * 30, action_img.shape[0] * 30), cv2.INTER_NEAREST)
# action_img = cv2.cvtColor(action_img, cv2.COLOR_BGR2RGB)
# cv2.imshow("ehpi", action_img)
# cv2.waitKey(1)
# cv2.imwrite(os.path.join(get_create_path("/media/disks/beta/dump/itsc_2019_imgs/ehpi"),
# "{}.png".format(str(frame_nr).zfill(5))), action_img)
net_input = np.zeros((1, 3, 32, 15), dtype=np.float32)
net_input[0] = ehpi_img
input_seq = Variable(torch.tensor(net_input, dtype=torch.float)).cuda()
tag_scores = self.model(input_seq).data.cpu().numpy()[0]
outputs[human_id] = tag_scores
return outputs
|
[
"Dennis.Ludl@reutlingen-university.de"
] |
Dennis.Ludl@reutlingen-university.de
|
f807ab26b6709ac4c4da5cd5a8fd8a42d2095e0d
|
289aaefe2f78bde474ea082afe55054f14932be4
|
/LearnPython3-master/14-各种if语句/test.py
|
f8ca0d1778a1946fc2b6debfc9952c9d347ba02f
|
[] |
no_license
|
13424010187/python
|
ed15dbad380164c846ef68692213dea7689b514e
|
aab8ba8565ed29e7d8b93d7ec3270fafb1294fe9
|
refs/heads/master
| 2023-07-17T07:57:47.694227
| 2021-08-15T09:57:06
| 2021-08-15T09:57:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
point = 5  # sample score driving the examples below

# Plain `if` (single branch):
# if point > 30:
#     print("MVP球星")

# `if`/`else`:
# if point > 20:
#     print("绝对球星")
# else:
#     print("未来球星")

# `if`/`elif`/`else`:
# if point > 30:
#     print("MVP球星")
# elif point >= 20:
#     print("绝对球星")
# else:
#     print("未来球星")

# `if`/`elif`/`elif`/`else`: choose one label, then print it once.
if point > 30:
    label = "MVP球星"
elif point >= 20:
    label = "绝对球星"
elif point >= 10:
    label = "未来球星"
else:
    label = "普通球员"
print(label)
|
[
"1393699623@qq.com"
] |
1393699623@qq.com
|
589cb44e6bd250ca99485e8a12bf3737b5cdbe43
|
0d0cf0165ca108e8d94056c2bae5ad07fe9f9377
|
/24_Image_Processing_with_Keras_in_Python/2_Using_Convolutions/trainingACNNToClassifyClothingTypes.py
|
a19cd93f8c468f435f23c4ea61341e3ec4a0d1ca
|
[] |
no_license
|
MACHEIKH/Datacamp_Machine_Learning_For_Everyone
|
550ec4038ebdb69993e16fe22d5136f00101b692
|
9fe8947f490da221430e6dccce6e2165a42470f3
|
refs/heads/main
| 2023-01-22T06:26:15.996504
| 2020-11-24T11:21:53
| 2020-11-24T11:21:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
# Training a CNN to classify clothing types
# Before training a neural network it needs to be compiled with the right cost function, using the right optimizer. During compilation, you can also define metrics that the network calculates and reports in every epoch. Model fitting requires a training data set, together with the training labels to the network.
# The Conv2D model you built in the previous exercise is available in your workspace.
# Instructions
# 100 XP
# Compile the network using the 'adam' optimizer and the 'categorical_crossentropy' cost function. In the metrics list define that the network to report 'accuracy'.
# Fit the network on train_data and train_labels. Train for 3 epochs with a batch size of 10 images. In training, set aside 20% of the data as a validation set, using the validation_split keyword argument.
# Compile the model.
# NOTE(review): `model`, `train_data` and `train_labels` are provided by
# the DataCamp exercise workspace -- this snippet is not runnable alone.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Fit the model on a training set: 3 epochs, mini-batches of 10 images,
# holding out 20% of the training data for validation.
model.fit(train_data, train_labels,
          validation_split=0.2,
          epochs=3, batch_size=10)
|
[
"noreply@github.com"
] |
MACHEIKH.noreply@github.com
|
35567216643280a09ad3bd8935dace54d5234dbd
|
2accbf8013faf4d879ebd5bf1de9767331d6c9ff
|
/Python/어서와 파이썬은 처음이지!/selection_sort.py
|
c2f36f2b8033bce395339903ad6ac0c18144ae19
|
[] |
no_license
|
egsu20/study
|
5e8f7fe149342edd43378f9ccf264346f32128fe
|
5a42d022f7402ee0354e3fd19769b5485205c55d
|
refs/heads/main
| 2023-07-17T22:40:42.039749
| 2021-08-27T14:19:22
| 2021-08-27T14:19:22
| 324,571,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
def selection_sort(aList):
    """Sort *aList* in place (ascending) using selection sort."""
    size = len(aList)
    for i in range(size):
        # Locate the smallest element of the unsorted tail aList[i:].
        smallest = i
        for j in range(i + 1, size):
            if aList[j] < aList[smallest]:
                smallest = j
        # Swap it into position i.
        aList[i], aList[smallest] = aList[smallest], aList[i]
# Demo: sort a small list in place and show the result.
list1 = [7, 8, 5, 1, 6]
selection_sort(list1)
print(list1)
|
[
"56678959+egsu20@users.noreply.github.com"
] |
56678959+egsu20@users.noreply.github.com
|
e6c64fa2264290c17763d19942f3cc545b1e6ebe
|
7089b86f90d855c7862d0b408e61a67725d0c254
|
/website/migrations/0037_auto_20210129_1257.py
|
9e69d14b79cb6b3aa11f06ebd0ac33962a22f43b
|
[] |
no_license
|
samozzy/codamotion
|
784fdeb99216f030a53f27a3c1f22041743b41f3
|
6a4a85852023d01b13459fa0cb644f1855eb752a
|
refs/heads/master
| 2023-03-06T23:39:12.876150
| 2021-02-23T19:47:53
| 2021-02-23T19:47:53
| 334,784,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
# Generated by Django 3.1.5 on 2021-01-29 12:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration 0037: adds the SiteMenu model, narrows
    TeamMember.person_type choices, and links Page to SiteMenu."""
    dependencies = [
        ('website', '0036_auto_20210125_2315'),
    ]
    operations = [
        # New SiteMenu model: a menu slot identified by one unique letter
        # ('H' = Header, 'F' = Footer).
        migrations.CreateModel(
            name='SiteMenu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(choices=[('H', 'Header'), ('F', 'Footer')], default='H', max_length=1, unique=True)),
            ],
            options={
                'verbose_name': 'Menu',
                'verbose_name_plural': 'Menus',
            },
        ),
        # Re-declare person_type with the current 4-letter choice codes.
        migrations.AlterField(
            model_name='teammember',
            name='person_type',
            field=models.CharField(choices=[('KEYC', 'Key Contact'), ('MGMT', 'Management'), ('ADVS', 'Advisors')], default='KEYC', max_length=4),
        ),
        # A page can appear in any number of menus.
        migrations.AddField(
            model_name='page',
            name='menu',
            field=models.ManyToManyField(to='website.SiteMenu'),
        ),
    ]
|
[
"sam.tosborne@googlemail.com"
] |
sam.tosborne@googlemail.com
|
e7d838441af4644f293ada41a9292abbeb913be1
|
b08d42933ac06045905d7c005ca9c114ed3aecc0
|
/src/coefSubset/evaluate/ranks/fifth/rank_2x9a_L.py
|
e66f7696ff621027b573962e362ee9ac48c79b60
|
[] |
no_license
|
TanemuraKiyoto/PPI-native-detection-via-LR
|
d148d53f5eb60a4dda5318b371a3048e3f662725
|
897e7188b0da94e87126a4acc0c9a6ff44a64574
|
refs/heads/master
| 2022-12-05T11:59:01.014309
| 2020-08-10T00:41:17
| 2020-08-10T00:41:17
| 225,272,083
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,204
|
py
|
# 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
# Rank the native complex of one test case (2x9a) among its decoys using a
# pre-trained classifier + scaler.  Runs entirely against absolute cluster
# paths -- not portable outside that environment.
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2x9a.csv'
identifier = 'L'
# Features whose |median coefficient| falls below this are dropped.
thresholdCoef = 0.2
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/fifth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/fifth/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
    clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
# Remove bookkeeping columns, index by pair name, then transpose so each
# column is a complex and each row a feature.
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
    scaler = pickle.load(g)
for i in range(len(df1)):
    # subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]].
    df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
    df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
    df2 = scaler.transform(df2)
    # Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
    predictions = clf.predict(df2)
    score = sum(predictions)
    scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
    result.to_csv(h)
|
[
"tanemur1@msu.edu"
] |
tanemur1@msu.edu
|
9a0efd92897fe170fc7b6274f6a4b78de5ba4563
|
52bb00d5e9dd936fb11aff2adb8c6d0f94849dc8
|
/Stack/stackBasics.py
|
02a1d2f1f7c25c4d306af354705e6860c171694f
|
[] |
no_license
|
anildhaker/DataStructures
|
1c7815d2bb1dc9d8ebd51e1ec868df8260557519
|
e333fa5b95ecfc08a036bbeadfc4244f64361d7d
|
refs/heads/master
| 2020-04-24T14:07:44.240689
| 2019-11-24T21:21:03
| 2019-11-24T21:21:03
| 172,009,569
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# Creating Stack using Arrays
from sys import maxsize
def createStack():
    """Return a new, empty stack (backed by a plain Python list)."""
    return []
def isEmpty(stack):
    """Return True when the stack holds no items."""
    return not stack
def push(stack, item):
    """Place *item* on top of the stack (in place)."""
    stack += [item]
def pop(stack):
    """Remove and return the top item; underflow yields a sentinel string."""
    if not stack:
        # Same sentinel value as the original: str(-sys.maxsize - 1).
        return str(-maxsize - 1)
    return stack.pop()
# Demo: push three values, then pop and report the top one.
stack = createStack()
for value in ("10", "20", "30"):
    push(stack, value)
print(pop(stack) + " popped from stack")
|
[
"anildhaker777@gmail.com"
] |
anildhaker777@gmail.com
|
8652501aa9d3ac641cd3ee899b3d58665f34fa0b
|
051f3b084f1df675338815a12abde6a70ee5e649
|
/locamapper.py
|
d1063feb7d8c84ecf7e11be5e316e0c669e62e8e
|
[] |
no_license
|
CaMeLCa5e/daily
|
d952b1de9e5cb1bcb805203b0b1cde859adb2314
|
e9ec40868e8085a521f009f9ccc19cd0c64c51c3
|
refs/heads/master
| 2020-04-26T11:25:10.130077
| 2015-04-19T19:30:45
| 2015-04-19T19:30:45
| 29,375,471
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
import sys
# Hadoop-streaming style mapper: for each CSV stadium record on stdin,
# emit "<turf>\t1" so a downstream reducer can count rows per turf type.
for line in sys.stdin:  # BUG FIX: the original was missing this colon
    # BUG FIX: the original unpacked `... team, opened weather = ...`
    # (missing comma) -- a SyntaxError.  Assumes each row carries exactly
    # these 8 comma-separated columns -- TODO confirm against the input data.
    fields = line.strip().split(",")
    stadium, capacity, location, surface, turf, team, opened, weather = fields
    results = [turf, "1"]
    print("\t".join(results))
|
[
"JM273606@gmail.com"
] |
JM273606@gmail.com
|
5f2ce7121fa780c80dfe8ccdb8e31d4f67ad3753
|
56abd8f94a511ae0d163161cb2f5e0a91d4b8bed
|
/datahub/investment/test/test_validate.py
|
1a5ffaa2fa0b35879bda1a796d8eb999cd128ab5
|
[
"MIT"
] |
permissive
|
cgsunkel/data-hub-api
|
994c58bd975d902bf2bc44b415a5892919ff4539
|
a92faabf73fb93b5bfd94fd465eafc3e29aa6d8e
|
refs/heads/develop
| 2023-05-31T22:35:56.344904
| 2021-06-30T11:23:06
| 2021-06-30T11:23:06
| 303,947,456
| 0
| 0
|
MIT
| 2021-06-30T10:34:50
| 2020-10-14T08:14:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
from datetime import date
import pytest
from freezegun import freeze_time
from datahub.investment.validate import (
_is_provided_and_is_date_in_the_past,
is_provided_and_is_date_less_than_a_year_ago,
)
# Cases pin the semantics at the frozen "today" (2019-02-01): tomorrow is
# not past, today and yesterday are, and None is treated as "not provided".
@pytest.mark.parametrize(
    'data_date,expected_result',
    (
        (
            date(2019, 2, 2),
            False,
        ),
        (
            date(2019, 2, 1),
            True,
        ),
        (
            date(2019, 1, 31),
            True,
        ),
        (
            None,
            False,
        ),
    ),
)
@freeze_time('2019-02-01')
def test_is_date_in_the_past(data_date, expected_result):
    """Tests that a given date is in the past (relative to frozen 2019-02-01)."""
    assert _is_provided_and_is_date_in_the_past(data_date) is expected_result
# With "today" frozen at 2019-02-01: dates within the last year pass,
# while future dates, dates over a year old, None, and non-date values
# (the {} case) are all rejected.
@pytest.mark.parametrize(
    'post_data,expected_result',
    (
        (
            date(2019, 2, 1),
            True,
        ),
        (
            date(2019, 2, 2),
            False,
        ),
        (
            date(2019, 1, 31),
            True,
        ),
        (
            date(2017, 9, 30),
            False,
        ),
        (
            None,
            False,
        ),
        (
            {},
            False,
        ),
    ),
)
@freeze_time('2019-02-01')
def test_is_date_less_than_a_year_ago(post_data, expected_result):
    """Tests if a given date is within the last year (frozen at 2019-02-01)."""
    assert is_provided_and_is_date_less_than_a_year_ago(post_data) is expected_result
|
[
"marcus.patino-pan@digital.trade.gov.uk"
] |
marcus.patino-pan@digital.trade.gov.uk
|
b2006cbb7580f686445bb729d89f9d2782cd9c57
|
3f5504aff203cc15ca8754353991208962a04a90
|
/src/the_tale/the_tale/game/heroes/shop_accessors.py
|
ee3911ac374e614437dcd8b8046989ebb6aca997
|
[
"BSD-3-Clause"
] |
permissive
|
Portal777/the-tale
|
39614e60fa817a1e00e948a2b465fcb6be35e0c6
|
1a98294f6ed45d26bf5f09bdd2b4a931dbbb72e3
|
refs/heads/master
| 2021-08-24T01:46:11.472205
| 2017-11-04T13:31:30
| 2017-11-04T13:31:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
# coding: utf-8
import random
from the_tale.game.balance.power import Power
from the_tale.game.artifacts.storage import artifacts_storage
from the_tale.game.cards import objects as cards_objects
class ShopAccessorsMixin(object):
    """Hero mixin exposing the mutators used by the in-game shop."""

    __slots__ = ()

    def purchase_energy_bonus(self, energy):
        """Credit purchased bonus energy to the hero."""
        self.add_energy_bonus(energy)

    def purchase_experience(self, experience):
        """Credit purchased experience to the hero."""
        self.add_experience(experience)

    def purchase_artifact(self, rarity, better):
        """Create a random artifact of the given rarity, give it to the hero
        and request a replane; returns the created artifact."""
        distribution = self.preferences.archetype.power_distribution
        # Power is rolled against the hero's archetype and level; `better`
        # selects the improved randomisation.
        if better:
            power = Power.better_artifact_power_randomized(distribution, self.level)
        else:
            power = Power.artifact_power_randomized(distribution, self.level)
        artifacts_storage.sync()
        template = random.choice(artifacts_storage.artifacts)
        artifact = template.create_artifact(level=self.level,
                                            power=power,
                                            rarity=rarity)
        self.put_loot(artifact, force=True)
        self.actions.request_replane()
        return artifact
|
[
"a.eletsky@gmail.com"
] |
a.eletsky@gmail.com
|
a65372cb81c7b03b6c39d5f50a614eb3fe350d61
|
ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31
|
/repos/flaskTs-master/app/email.py
|
9203880e1598384e4a0187a41bb57893fba8264d
|
[] |
no_license
|
babiato/flaskapp1
|
84de2d0b26a54f5820d3bbe97926782ad41e005c
|
530beb9e3b8516e0e93960b99521c23a523ef546
|
refs/heads/master
| 2023-02-26T16:36:49.760632
| 2021-02-04T09:08:40
| 2021-02-04T09:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
from flask_mail import Message
from app import mail
from flask import render_template
def send_email(to, subject, template, **kwargs):
    """Render *template* (.txt and .html variants) and mail it to *to*."""
    # NOTE: "[TecnologyDreamer]" spelling reproduced exactly (runtime string).
    full_subject = "[TecnologyDreamer]" + subject
    msg = Message(full_subject, sender='879651072@qq.com', recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    mail.send(msg)
|
[
"jinxufang@tencent.com"
] |
jinxufang@tencent.com
|
adf8bcf70a7abd41c6617653f5ac599ad6aff3cc
|
f260ff31ba63e9cd35e21b99c577107c46135a0d
|
/test005/test_reduce.py
|
2b49c6ecf3d3ee5062e62b927f5198f055b7b5fd
|
[] |
no_license
|
wscfan/pythoncode
|
e1cc882139931f4257528e274f443c3c8217ec8d
|
4bbe06f47b046a5078e8dd0f2ae9ccb9eeb01743
|
refs/heads/master
| 2021-03-20T21:12:58.580056
| 2020-12-19T16:38:10
| 2020-12-19T16:38:10
| 247,234,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
from functools import reduce
def get_sum_use_python(l):
    """Sum the sequence using the built-in sum()."""
    total = sum(l)
    return total
def f(m, n):
    """Binary addition helper (left operand first: m + n)."""
    result = m + n
    return result
def get_sum_use_reduce(l):
    """Fold the sequence with functools.reduce and the named helper f."""
    folded = reduce(f, l)
    return folded
def get_sum_use_lambda(l):
    """Fold the sequence with functools.reduce and an inline lambda."""
    return reduce(lambda a, b: a + b, l)
if __name__ == "__main__":
l = [1, 2, 3, 5, 7]
print(get_sum_use_python(l))
print('------------------')
print(get_sum_use_reduce(l))
print('+++++++++++++++++++')
print(get_sum_use_lambda(l))
|
[
"wshappyday@sina.com"
] |
wshappyday@sina.com
|
4f558c18905639160671530a35dbbf592d71058c
|
a15200778946f6f181e23373525b02b65c44ce6e
|
/Algoritmi/2019-06-25/all-CMS-submissions/2019-06-25.10:26:54.215386.VR432075.biancaneve.py
|
959fb7d91ea8ff4d2875e8281f77063d1dc829c4
|
[] |
no_license
|
alberto-uni/portafoglioVoti_public
|
db518f4d4e750d25dcb61e41aa3f9ea69aaaf275
|
40c00ab74f641f83b23e06806bfa29c833badef9
|
refs/heads/master
| 2023-08-29T03:33:06.477640
| 2021-10-08T17:12:31
| 2021-10-08T17:12:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
"""
* user: VR432075
* fname: BUSATTO
* lname: ALESSANDRO
* task: biancaneve
* score: 2.0
* date: 2019-06-25 10:26:54.215386
"""
from __future__ import print_function
import sys
if sys.version_info<(3,0):
input=raw_input
def scambia(nani, p1, p2):
    """Swap the dwarfs at 1-based positions p1 and p2 in place."""
    nani[p1 - 1], nani[p2 - 1] = nani[p2 - 1], nani[p1 - 1]
def check(nani, h1, h2):
    # Return 1 if some contiguous window of exactly (h2 - h1 + 1) dwarfs
    # has heights summing to h1 + (h1+1) + ... + h2, else 0.
    num_nani=h2-h1+1
    total=0
    y=len(nani)+1
    # prefix_sum[i] = sum of the first i heights.
    prefix_sum=[0]*y
    for i in range(0,y-1):
        prefix_sum[i+1]=prefix_sum[i]+nani[i]
    # Target sum: the arithmetic series h1..h2.
    for i in range(h1,h2+1):
        total += i
    # Scan candidate windows ending at position i, right to left.
    # NOTE(review): the early exit on prefix_sum[i] < total assumes the
    # heights are non-negative (prefix sums non-decreasing) -- TODO confirm.
    i=len(prefix_sum)-1
    while i-num_nani >= 0 and prefix_sum[i]>=total:
        if(prefix_sum[i]-prefix_sum[i-num_nani]==total):
            return 1
        i=i-1
    return 0
def main():
    """Read n, m and the dwarf heights, then process m queries.

    Query "1 p1 p2" swaps two positions; any other type prints YES/NO
    depending on whether positions p1..p2 could form a valid window
    (see check()).
    """
    n, m = map(int, input().split())
    # BUG FIX: materialise the heights into a list.  Under Python 3,
    # map() returns a one-shot iterator, so the original
    # `nani = map(int, ...)` broke both indexing in scambia() and
    # len()/iteration in check().
    nani = list(map(int, input().split()))
    for _ in range(m):
        t, p1, p2 = map(int, input().split())
        if t == 1:
            scambia(nani, p1, p2)
        else:
            res = check(nani, p1, p2)
            if res == 1:
                print("YES")
            else:
                print("NO")
if __name__ == '__main__':
    main()
|
[
"romeo.rizzi@univr.it"
] |
romeo.rizzi@univr.it
|
b5c3ee3c6030d006925a30c43b0ae563408aeda9
|
fe6775ca8c5b42710785e3a923974ae079f92c8f
|
/剑指offer/剑指 Offer 55 - I. 二叉树的深度.py
|
53759e4232eb424671a0c80016e248c07bbe847d
|
[] |
no_license
|
AiZhanghan/Leetcode
|
41bda6676fa1a25fa19e393553c1148ed51fdf72
|
101bce2fac8b188a4eb2f5e017293d21ad0ecb21
|
refs/heads/master
| 2021-06-28T10:48:07.865968
| 2020-11-20T09:45:15
| 2020-11-20T09:45:15
| 188,155,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x        # node payload
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
class Solution:
    """Offer 55-I: maximum depth of a binary tree."""

    def maxDepth(self, root):
        """Return the node count of the longest root-to-leaf path.

        Args:
            root: TreeNode (or None for an empty tree)
        Return:
            int -- 0 for an empty tree
        """
        if not root:
            return 0
        # Depth is one more than the deeper of the two subtrees.
        return 1 + max(self.maxDepth(root.left),
                       self.maxDepth(root.right))
|
[
"35103759+AiZhanghan@users.noreply.github.com"
] |
35103759+AiZhanghan@users.noreply.github.com
|
8cbe4de82e8973e9de229af929a25871f3a061bc
|
20d54e88dbdab0a0335f6ae4bad22117e14eb556
|
/src/py3/srtmTest.py
|
475a6f72cca4bfec1cea1366608b23bc1b003792
|
[] |
no_license
|
bjohan/GnuradioTransceiver
|
ebf0426aabf5be4e06a52ac7a8ce941e14341ea7
|
501d68f78e40931f2f9549ab1ae1982faae464c6
|
refs/heads/master
| 2021-01-11T03:36:48.162992
| 2020-05-17T21:15:19
| 2020-05-17T21:15:19
| 70,995,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
# Quick visual check of SRTM elevation data: fetch one tile, then render a
# resampled wide-area view.  NOTE(review): srtmData and heightResampler are
# project modules not shown here; their semantics are assumed from usage.
import matplotlib.pyplot as plt
import numpy as np
import srtmData
import heightResampler
sd = srtmData.SrtmData()
hr = heightResampler.HeightResampler(sd)
# Fetch the block at (12, 57) -- presumably lon/lat degrees; TODO confirm
# against SrtmData.getBlock.
e, x, y = sd.getBlock(12, 57)
print(x)
print(y)
#plt.figure(1)
#plt.imshow(np.clip(e, 0, 10000), extent=[x[0], x[-1], y[-1], y[0]])
plt.figure(2)
# Sample grid of 1000 x 500 points covering lon [-180, 180], lat [-60, 60];
# the commented alternatives below are smaller regional test grids.
xr = np.linspace(-180,180,500*2)
yr = np.linspace(-60,60,500)
#xr = np.linspace(12,14,500)
#yr = np.linspace(57,59,500)
#australia
#xr = np.linspace(110,155,10000)
#yr = np.linspace(-45,-10,5000)
rs = hr.get(xr, yr)
# Clip to [0, 10000] so extreme / no-data values do not dominate the colormap.
plt.imshow(np.clip(rs,0,10000), extent=[xr[0], xr[-1], yr[0], yr[-1]])
plt.show()
|
[
"you@example.com"
] |
you@example.com
|
6b8be3ce5a2307b8c3529e0eb27f4790497e0059
|
5eb29ce7104e10a399d9afd7e253f029bf8bc0ff
|
/scripts/tests/test_get_sim.py
|
fd8d224e2bcce56d015705e64cd8d67421aa30a1
|
[
"BSD-2-Clause"
] |
permissive
|
svebk/DeepSentiBank_memex
|
69789dc09316e97aad711edeb251837a60184e7e
|
4e69ce66e3a177817ff360ddc263f55c6e0b63f7
|
refs/heads/master
| 2021-01-18T18:55:10.870052
| 2017-10-19T22:51:29
| 2017-10-19T22:51:29
| 36,091,024
| 22
| 1
| null | 2017-02-09T20:31:20
| 2015-05-22T19:20:54
|
Python
|
UTF-8
|
Python
| false
| false
| 350
|
py
|
# One-off HBase probe (Python 2: note the print statement on the last line).
import happybase
if __name__=="__main__":
    tab_image = 'escorts_images_similar_row'
    conn = happybase.Connection(host='10.1.94.57')
    image_sha1s = '1000013C0A38D8DACAEC31360AFAFEB5DC3D712B'
    table = conn.table(tab_image)
    # Fetch only the 's' column family for this single row key.
    row = table.row(image_sha1s,columns=['s'])
    # Print the cell count and the column qualifiers (text after ':').
    print len(row.keys()),[x.split(':')[-1] for x in row.keys()]
|
[
"svebor.karaman@gmail.com"
] |
svebor.karaman@gmail.com
|
bfea27733e3baaa41e1cf44c97e610c37ea4f198
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03049/s868278541.py
|
b3f9160fdd19cecfb8e58fc70735fa3e21e93802
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# Count the maximum number of "AB" substrings obtainable by concatenating the
# n input strings in some order: pairs already inside each string, plus pairs
# created at the joins between an 'A'-ending and a 'B'-starting string.
n=int(input())
a_num=0    # strings ending in 'A' (and not starting with 'B')
b_num=0    # strings starting with 'B' (and not ending in 'A')
set_num=0  # strings that both start with 'B' and end with 'A'
ans=0
for i in range(n):
    s=list(input())
    # "AB" pairs already inside this string.
    for j in range(len(s)-1):
        if s[j]+s[j+1]=="AB":
            ans+=1
    if s[0]=="B" and s[-1]=="A":
        set_num+=1
    elif s[0]=="B":
        b_num+=1
    elif s[-1]=="A":
        a_num+=1
if set_num==0:
    # Each A-ending/B-starting pairing at a join yields one extra "AB".
    print(ans+min(a_num,b_num))
else:
    # Chain all B...A strings together first (set_num-1 new pairs); the chain
    # then behaves like one extra A-ender and one extra B-starter, provided
    # there is at least one other string to attach it to.
    ans+=set_num-1
    if not(a_num==0 and b_num==0):
        a_num+=1
        b_num+=1
    print(ans+min(a_num,b_num))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a59e67e2f7499c2085d18c176ceba106f3d6e308
|
ad0857eaba945c75e705594a53c40dbdd40467fe
|
/leetCode/number_of_papers_1780.py
|
dace9f5d455a28a9951f765e1c89a67e75850822
|
[
"MIT"
] |
permissive
|
yskang/AlgorithmPractice
|
c9964d463fbd0d61edce5ba8b45767785b0b5e17
|
3efa96710e97c8740d6fef69e4afe7a23bfca05f
|
refs/heads/master
| 2023-05-25T13:51:11.165687
| 2023-05-19T07:42:56
| 2023-05-19T07:42:56
| 67,045,852
| 0
| 0
| null | 2021-06-20T02:42:27
| 2016-08-31T14:40:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
# Title: 종이의 개수  (translation: "Number of Papers")
# Link: https://www.acmicpc.net/problem/1780
import sys
# number_of_papers recurses; raise the default 1000-frame limit defensively.
sys.setrecursionlimit(10 ** 6)
def read_list_int():
    """Read one stdin line of single-space-separated integers into a list."""
    return list(map(int, sys.stdin.readline().strip().split(' ')))
def read_single_int():
    """Read one integer from a single stdin line."""
    return int(sys.stdin.readline().strip())
def check_all_number(matrix, start_x, start_y, length):
    """Return the common value of the length x length square whose top-left
    corner is (start_x, start_y), or 9 if the square is not uniform."""
    expected = matrix[start_y][start_x]
    # A 1x1 square is trivially uniform.
    if length == 1:
        return expected
    for row in range(start_y, start_y + length):
        for col in range(start_x, start_x + length):
            if matrix[row][col] != expected:
                return 9  # sentinel: mixed contents
    return expected
def number_of_papers(matrix, start_x, start_y, length):
    """Count the -1, 0 and 1 papers produced by recursively cutting the
    length x length square at (start_x, start_y) into nine equal parts
    whenever it is not uniform.  Returns a dict {-1: n, 0: n, 1: n}."""
    totals = {-1: 0, 0: 0, 1: 0}
    first = matrix[start_y][start_x]
    # A 1x1 square is always uniform; larger squares must be scanned.
    uniform = True
    if length != 1:
        for yy in range(start_y, start_y + length):
            row = matrix[yy]
            if any(row[xx] != first for xx in range(start_x, start_x + length)):
                uniform = False
                break
    if uniform:
        totals[first] += 1
        return totals
    # Mixed square: recurse into the nine sub-squares and merge their counts.
    third = length // 3
    for dx in range(3):
        for dy in range(3):
            sub = number_of_papers(matrix, start_x + third * dx,
                                   start_y + third * dy, third)
            for key in totals:
                totals[key] += sub[key]
    return totals
if __name__ == '__main__':
    # Read N and the N x N board, then print the counts for -1, 0 and 1.
    N = read_single_int()
    matrix = []
    for _ in range(N):
        matrix.append(read_list_int())
    ret = number_of_papers(matrix, 0, 0, N)
    # Dict insertion order is -1, 0, 1, so the three counts print in order
    # (guaranteed on Python 3.7+).
    for i in ret:
        print(ret[i])
|
[
"yongsung.kang@gmail.com"
] |
yongsung.kang@gmail.com
|
d28596a3b41c60b5e98f6cde6ebd4cf085d4d579
|
42cbf381d6d12b29a5212f3e8482ebde2067758b
|
/3 - Estrutura de Repetição/9nv.py
|
c87bfb1840eb522246d2b97d6b305cbc5d05d381
|
[] |
no_license
|
loristron/PythonExercisesLearnPython
|
d76d0f7d0b96b76ca463c0d431814a6ba74bbe74
|
c98a903900b41733980a5a13017dc1901b1ecee7
|
refs/heads/master
| 2023-02-09T12:46:44.885145
| 2021-01-07T19:39:33
| 2021-01-07T19:39:33
| 327,707,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 13:11:08 2020
@author: loris
Print every odd number between 1 and 50.
(Original task, in Portuguese: "Faça um programa que imprima na tela apenas
os números ímpares entre 1 e 50.")
"""
lista = []
# range(1, 50) stops at 49, which is also the largest odd number below 50.
for n in range (1, 50):
    if n % 2 != 0:
        print(n)
        lista.append(n)
print(lista)
|
[
"loremmiranda@gmail.com"
] |
loremmiranda@gmail.com
|
db978cb55308c9705010fe8e0799d0f3dfcee515
|
62ccdb11daefaecc8e63f235c7519cc7594f705a
|
/images/google-cloud-sdk/lib/googlecloudsdk/command_lib/compute/instance_groups/managed/rolling_action.py
|
62c3d07cac5b5ba9303df7b4585f2a7c5fcf9414
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
hiday1979/kalabasa-mas
|
eccc869bfe259bb474f9d2a4dc4b8561a481f308
|
53a9818eb2a6f35ee57c4df655e7abaaa3e7ef5b
|
refs/heads/master
| 2021-07-05T16:34:44.962142
| 2018-07-10T10:22:24
| 2018-07-10T10:22:24
| 129,709,974
| 0
| 1
| null | 2020-07-24T22:15:29
| 2018-04-16T08:27:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,470
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create requests for rolling-action restart/recreate commands."""
from __future__ import absolute_import
from googlecloudsdk.api_lib.compute import managed_instance_groups_utils
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.instance_groups import flags as instance_groups_flags
from googlecloudsdk.command_lib.compute.managed_instance_groups import update_instances_utils
from googlecloudsdk.core.util import times
def CreateRequest(args,
                  cleared_fields,
                  client,
                  resources,
                  minimal_action,
                  max_surge=None):
  """Create request helper for compute instance-groups managed rolling-action.
  Args:
    args: argparse namespace
    cleared_fields: Fields which are left cleared, but should be send in request
    client: The compute client
    resources: The compute resources
    minimal_action: MinimalActionValueValuesEnum value
    max_surge: InstanceGroupManagerUpdatePolicy.maxSurge value
  Returns:
    ComputeInstanceGroupManagersPatchRequest or
    ComputeRegionInstanceGroupManagersPatchRequest instance
  """
  resource_arg = instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGER_ARG
  default_scope = compute_scope.ScopeEnum.ZONE
  scope_lister = flags.GetDefaultScopeLister(client)
  igm_ref = resource_arg.ResolveAsResource(
      args, resources, default_scope=default_scope, scope_lister=scope_lister)
  update_policy_type = (client.messages.InstanceGroupManagerUpdatePolicy.
                        TypeValueValuesEnum.PROACTIVE)
  max_unavailable = update_instances_utils.ParseFixedOrPercent(
      '--max-unavailable', 'max-unavailable', args.max_unavailable,
      client.messages)
  igm_info = managed_instance_groups_utils.GetInstanceGroupManagerOrThrow(
      igm_ref, client)
  # Fall back to a single version built from the IGM's current template when
  # the IGM declares no explicit versions.
  versions = (igm_info.versions or [
      client.messages.InstanceGroupManagerVersion(
          instanceTemplate=igm_info.instanceTemplate)
  ])
  current_time_str = str(times.Now(times.UTC))
  # Give every version a unique name of the form "<index>/<current UTC time>".
  for i, version in enumerate(versions):
    version.name = '%d/%s' % (i, current_time_str)
  update_policy = client.messages.InstanceGroupManagerUpdatePolicy(
      maxSurge=max_surge,
      maxUnavailable=max_unavailable,
      minReadySec=args.min_ready,
      minimalAction=minimal_action,
      type=update_policy_type)
  igm_resource = client.messages.InstanceGroupManager(
      instanceTemplate=None, updatePolicy=update_policy, versions=versions)
  # Build the zonal or regional patch request to match the IGM's scope.
  if igm_ref.Collection() == 'compute.instanceGroupManagers':
    service = client.apitools_client.instanceGroupManagers
    request = client.messages.ComputeInstanceGroupManagersPatchRequest(
        instanceGroupManager=igm_ref.Name(),
        instanceGroupManagerResource=igm_resource,
        project=igm_ref.project,
        zone=igm_ref.zone)
  elif igm_ref.Collection() == 'compute.regionInstanceGroupManagers':
    service = client.apitools_client.regionInstanceGroupManagers
    request = client.messages.ComputeRegionInstanceGroupManagersPatchRequest(
        instanceGroupManager=igm_ref.Name(),
        instanceGroupManagerResource=igm_resource,
        project=igm_ref.project,
        region=igm_ref.region)
  # Due to 'Patch' semantics, we have to clear either 'fixed' or 'percent'.
  # Otherwise, we'll get an error that both 'fixed' and 'percent' are set.
  if max_surge is not None:
    cleared_fields.append('updatePolicy.maxSurge.fixed' if max_surge.fixed is
                          None else 'updatePolicy.maxSurge.percent')
  if max_unavailable is not None:
    cleared_fields.append('updatePolicy.maxUnavailable.fixed'
                          if max_unavailable.fixed is None else
                          'updatePolicy.maxUnavailable.percent')
  return (service, 'Patch', request)
|
[
"accounts@wigitech.com"
] |
accounts@wigitech.com
|
d5b44d84be7c7901c1635edaa14a5f4c9d9321c6
|
fa04309288a0f8b2daae2fd73c8224a1c0ad4d95
|
/eventkit_cloud/utils/tests/test_wcs.py
|
14f1277c8a48f019d673a0273edd556c62f11626
|
[] |
no_license
|
jj0hns0n/eventkit-cloud
|
7bb828c57f29887621e47fe7ce0baa14071ef39e
|
2f749090baf796b507e79251a4c4b30cb0b4e126
|
refs/heads/master
| 2021-01-01T19:45:32.464729
| 2017-07-24T19:01:24
| 2017-07-24T19:01:24
| 98,675,805
| 0
| 0
| null | 2017-07-28T18:16:34
| 2017-07-28T18:16:34
| null |
UTF-8
|
Python
| false
| false
| 2,133
|
py
|
# -*- coding: utf-8 -*-
import logging
import os
from mock import Mock, patch
from django.conf import settings
from django.test import TransactionTestCase
from string import Template
from ..wcs import WCStoGPKG, WCStoGeotiff
from uuid import uuid4
logger = logging.getLogger(__name__)
class TestWCSToGPKG(TransactionTestCase):
    """Tests for WCStoGPKG: verifies the gdal_translate command it builds and
    that a non-zero converter exit code surfaces as an exception."""
    def setUp(self):
        # Patch TaskProcess so no external gdal_translate process is spawned.
        self.path = settings.ABS_PATH()
        self.task_process_patcher = patch('eventkit_cloud.utils.wcs.TaskProcess')
        self.task_process = self.task_process_patcher.start()
        self.addCleanup(self.task_process_patcher.stop)
        self.task_uid = uuid4()
    @patch('eventkit_cloud.utils.wcs.os.path.exists')
    def test_create_convert(self, exists):
        gpkg = '/path/to/sqlite.gpkg'
        bbox = [-45, -45, 45, 45]
        layer = 'awesomeLayer'
        name = 'Great export'
        service_url = 'http://my-service.org/some-server/wcs?'
        cmd = Template("gdal_translate -projwin $minX $maxY $maxX $minY -of GPKG -ot byte $wcs $out")
        exists.return_value = True
        self.task_process.return_value = Mock(exitcode=0)
        w2g = WCStoGPKG(out=gpkg,
                        bbox=bbox,
                        service_url=service_url,
                        layer=layer,
                        debug=False,
                        name=name,
                        service_type=None,
                        task_uid=self.task_uid)
        out = w2g.convert()
        self.task_process.assert_called_once_with(task_uid=self.task_uid)
        exists.assert_called_once_with(os.path.dirname(gpkg))
        cmd = cmd.safe_substitute({'out': gpkg, 'wcs': w2g.wcs_xml_path, 'minX': bbox[0], 'minY': bbox[1],
                                   'maxX': bbox[2], 'maxY': bbox[3]})
        self.task_process().start_process.assert_called_once_with(cmd, executable='/bin/sh', shell=True, stderr=-1,
                                                                  stdout=-1)
        # assertEqual: assertEquals is a deprecated alias, removed in Py 3.12.
        self.assertEqual(out, gpkg)
        # A non-zero exit code from the conversion must raise.
        self.task_process.return_value = Mock(exitcode=1)
        with self.assertRaises(Exception):
            w2g.convert()
|
[
"joseph.svrcek@rgi-corp.com"
] |
joseph.svrcek@rgi-corp.com
|
c7d90f35e98f547498e4ac58bd24c52bf0e03f4f
|
9404b743f04a87626f117e394ed0877445f88efe
|
/DK_Project/market/urls.py
|
0e353a9701ea3776ba2bdda7ad4315f8865da274
|
[
"Apache-2.0"
] |
permissive
|
xedporject/DK
|
3497ddfb03521d856e3e9a1874e310db30d64fee
|
af8f9521011ac1ee0256db4863220abbbf9699ac
|
refs/heads/master
| 2020-03-24T22:37:29.871222
| 2018-08-11T07:42:47
| 2018-08-11T07:42:47
| 143,094,839
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
from django.conf.urls import url
from rest_framework.routers import SimpleRouter
from market import views
# Auto-generate the standard CRUD routes for the three viewsets.
router = SimpleRouter()
router.register(r'^goods', views.GoodsApi)
router.register(r'brand', views.BrandApi)
router.register(r'category', views.CategoryApi)
urlpatterns = [
    url(r'^index/', views.index, name='index'),
    # NOTE(review): pattern lacks a leading '^' anchor, so it also matches any
    # URL merely containing "goods/details/<digits>/" -- confirm intent.
    url(r'goods/details/(\d+)/', views.details)
]
urlpatterns += router.urls
|
[
"1367000465@qq.com"
] |
1367000465@qq.com
|
d51e0b54497d62f9511db1030a8af93fea2fdc67
|
931a3304ea280d0a160acb87e770d353368d7d7d
|
/vendor/swagger_client/models/get_fw_leaderboards_characters_active_total.py
|
84445a28e19347e99ebfd6bf4119f7a6f4c946d2
|
[] |
no_license
|
LukeS5310/Broadsword
|
c44786054e1911a96b02bf46fe4bdd0f5ad02f19
|
3ba53d446b382c79253dd3f92c397cca17623155
|
refs/heads/master
| 2021-09-08T00:05:26.296092
| 2017-10-24T07:01:48
| 2017-10-24T07:01:48
| 105,143,152
| 0
| 1
| null | 2017-11-03T14:29:38
| 2017-09-28T12:03:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,883
|
py
|
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.6.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
# NOTE(review): generated by swagger-codegen -- prefer regenerating from the
# OpenAPI spec over hand-editing this model.
class GetFwLeaderboardsCharactersActiveTotal(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, amount=None, character_id=None):
        """
        GetFwLeaderboardsCharactersActiveTotal - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        self.swagger_types = {
            'amount': 'int',
            'character_id': 'int'
        }
        self.attribute_map = {
            'amount': 'amount',
            'character_id': 'character_id'
        }
        self._amount = amount
        self._character_id = character_id
    @property
    def amount(self):
        """
        Gets the amount of this GetFwLeaderboardsCharactersActiveTotal.
        Amount of kills
        :return: The amount of this GetFwLeaderboardsCharactersActiveTotal.
        :rtype: int
        """
        return self._amount
    @amount.setter
    def amount(self, amount):
        """
        Sets the amount of this GetFwLeaderboardsCharactersActiveTotal.
        Amount of kills
        :param amount: The amount of this GetFwLeaderboardsCharactersActiveTotal.
        :type: int
        """
        self._amount = amount
    @property
    def character_id(self):
        """
        Gets the character_id of this GetFwLeaderboardsCharactersActiveTotal.
        character_id integer
        :return: The character_id of this GetFwLeaderboardsCharactersActiveTotal.
        :rtype: int
        """
        return self._character_id
    @character_id.setter
    def character_id(self, character_id):
        """
        Sets the character_id of this GetFwLeaderboardsCharactersActiveTotal.
        character_id integer
        :param character_id: The character_id of this GetFwLeaderboardsCharactersActiveTotal.
        :type: int
        """
        self._character_id = character_id
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively serialize nested models, and lists/dicts of models.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, GetFwLeaderboardsCharactersActiveTotal):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"cyberlibertyx@gmail.com"
] |
cyberlibertyx@gmail.com
|
742ac4cb0de10696de786121651155b128a26bc5
|
1e4c7e1c949bd6c396454dccab5a17ed543c5546
|
/snippets/settings.py
|
76d838983684016722954ac2dda015c565ddf7b5
|
[] |
no_license
|
Kennedy-Njeri/Registration-Token-Based-Authentication
|
05f621f4e2a3445c6685af1e73921aee009f234b
|
464629b94a831bb8e41ddbbea913c6e37d9f8217
|
refs/heads/master
| 2020-05-09T19:10:45.680297
| 2019-04-14T21:04:09
| 2019-04-14T21:04:09
| 181,367,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,620
|
py
|
"""
Django settings for snippets project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2zp9f54aw1_l3ke!tr)687fpj)wk*p5vlujyc#bmh=!7ip=c+0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'account',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'snippets.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'snippets.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT= os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'account/static')
]
LOGIN_REDIRECT_URL = 'account'
LOGIN_URL = 'login'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
EMAIL_USE_TLS = True
SERVER_EMAIL = 'securesally@gmail.com'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'mistakenz123@gmail.com'
EMAIL_HOST_PASSWORD = 'qlwcpapjlisegoie'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
|
[
"mistakenz123@gmail.com"
] |
mistakenz123@gmail.com
|
b3ce66d5ab56ec78ae434f553712d4027996e0c8
|
7bf377472dea25a39933e34726dc581e8f7efb6f
|
/4_lr_analysis/get_lu_data.py
|
393f8e9613fef98a48f8ad3e0c721ad64001b5b4
|
[] |
no_license
|
gordonje/deadly_work
|
8fe655cca4fea522842609cfdc7fff1582cd4775
|
cdd8586eaf71b643b2076ef4389333a64e04da8e
|
refs/heads/master
| 2021-01-21T08:01:09.981286
| 2015-03-20T02:55:28
| 2015-03-20T02:55:28
| 21,902,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,707
|
py
|
import getpass
import psycopg2
import requests
# Interactive loader: downloads the BLS "lu" time-series flat files from
# download.bls.gov and loads them into a PostgreSQL schema named "lu".
# NOTE(review): Python 2 code (raw_input, print statements).
db = raw_input("Enter name of target database:")
user = raw_input("Enter your PostgreSQL username (this might just be 'postgres'):")
password = getpass.getpass("Enter your PostgreSQL user password:")
conn = psycopg2.connect("dbname=%(db)s user=%(user)s password=%(password)s" % {"db": db, "user": user, "password":password})
cur = conn.cursor()
session = requests.Session()
session.headers.update({"Connection": "keep-alive"})
# create the schema if it doesn't already exist
cur.execute('''CREATE SCHEMA IF NOT EXISTS lu;''')
conn.commit()
# if the current data table doesn't already exist, create and populate it
cur.execute('''SELECT * FROM information_schema.tables WHERE table_name = 'current_data' AND table_schema = 'lu';''')
has_current_data = cur.fetchone()
if has_current_data == None:
    print "Getting current data..."
    cur.execute('''CREATE TABLE lu.current_data (
                        series_id varchar(17)
                        , year int4
                        , period varchar(3)
                        , value numeric
                        , footnote_codes varchar(255)
                        , PRIMARY KEY (series_id, year)
                    );''')
    conn.commit()
    response = session.get("http://download.bls.gov/pub/time.series/lu/lu.data.0.Current")
    rows = response.content.split('\n')
    # Skip the header row; fields are tab-separated.
    for row in rows[1:]:
        values = row.split('\t')
        if len(values) > 1:
            # Parameterized INSERT: safe against quoting issues in the data.
            cur.execute('''INSERT INTO lu.current_data (series_id, year, period, value, footnote_codes)
                            VALUES (%s, %s, %s, %s, %s);''',
                        [values[0].strip(), values[1].strip(), values[2].strip(), values[3].strip(), values[4].strip()])
    conn.commit()
# if the all data table doesn't already exist, create and populate it
cur.execute('''SELECT * FROM information_schema.tables WHERE table_name = 'all_data' AND table_schema = 'lu';''')
has_all_data = cur.fetchone()
if has_all_data == None:
    print "Getting all data..."
    cur.execute('''CREATE TABLE lu.all_data (
                        series_id varchar(17)
                        , year int4
                        , period varchar(3)
                        , value numeric
                        , footnote_codes varchar(255)
                        , PRIMARY KEY (series_id, year)
                    );''')
    conn.commit()
    response = session.get("http://download.bls.gov/pub/time.series/lu/lu.data.1.AllData")
    rows = response.content.split('\n')
    for row in rows[1:]:
        values = row.split('\t')
        if len(values) > 1:
            cur.execute('''INSERT INTO lu.all_data (series_id, year, period, value, footnote_codes)
                            VALUES (%s, %s, %s, %s, %s);''',
                        [values[0].strip(), values[1].strip(), values[2].strip(), values[3].strip(), values[4].strip()])
    conn.commit()
# if the series table doesn't already exist, create and populate it
cur.execute('''SELECT * FROM information_schema.tables WHERE table_name = 'series' AND table_schema = 'lu';''')
has_series = cur.fetchone()
if has_series == None:
    print "Getting series..."
    cur.execute('''CREATE TABLE lu.series (
                        series_id varchar(17) PRIMARY KEY
                        , lfst_code varchar(2)
                        , fips_code varchar(2)
                        , series_description varchar(255)
                        , tdata_code varchar(2)
                        , pcts_code varchar(2)
                        , earn_code varchar(2)
                        , class_code varchar(2)
                        , unin_code varchar(1)
                        , indy_code varchar(4)
                        , occupation_code varchar(4)
                        , education_code varchar(2)
                        , ages_code varchar(2)
                        , race_code varchar(2)
                        , orig_code varchar(2)
                        , sexs_code varchar(2)
                        , seasonal varchar(2)
                        , footnote_codes varchar(255)
                        , begin_year int4
                        , begin_period varchar(3)
                        , end_year int4
                        , end_period varchar(3)
                    );''')
    conn.commit()
    response = session.get("http://download.bls.gov/pub/time.series/lu/lu.series")
    rows = response.content.split('\n')
    for row in rows[1:]:
        values = row.split('\t')
        if len(values) > 1:
            cur.execute('''INSERT INTO lu.series (series_id, lfst_code, fips_code, series_description, tdata_code, pcts_code, earn_code,
                                                    class_code, unin_code, indy_code, occupation_code, education_code, ages_code, race_code,
                                                    orig_code, sexs_code, seasonal, footnote_codes, begin_year, begin_period, end_year, end_period)
                            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);''',
                        [values[0].strip(), values[1].strip(), values[2].strip(), values[3].strip(), values[4].strip(),
                         values[5].strip(), values[6].strip(), values[7].strip(), values[8].strip(), values[9].strip(),
                         values[10].strip(), values[11].strip(), values[12].strip(), values[13].strip(), values[14].strip(),
                         values[15].strip(), values[16].strip(), values[17].strip(), values[18].strip(), values[19].strip(),
                         values[20].strip(), values[21].strip()])
    conn.commit()
# check to see if the columns are on the areas table, then add them.
cur.close()
conn.close()
|
[
"gordon.je@gmail.com"
] |
gordon.je@gmail.com
|
20d016d8c835bb90dcdfc491f101ee23416a4a9d
|
0ccb70cd22862f5c1617113cec62fb4438093ce7
|
/src/gamer/application/tests/fixtures.py
|
1667dcd3275df70908ece80cd9b9f344daf4c45a
|
[] |
no_license
|
socek/gamer
|
f4590a557819047158c1a8c0e9605632dbaac58c
|
040216b44d38f2ab5a111cb55981645d331c2ba3
|
refs/heads/master
| 2020-09-13T09:42:26.427433
| 2014-10-26T19:44:17
| 2014-10-26T22:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
from haplugin.toster.fixtures import Fixtures as Base
class Fixtures(Base):
    """Project test-fixture factory; currently registers no fixtures."""
    def __call__(self):
        # example:
        # self.create_nameless(Model, name=value)
        pass
|
[
"msocek@gmail.com"
] |
msocek@gmail.com
|
d7893781c8869541e806fcbcbc353555c39f40fe
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_280/ch39_2020_03_31_00_53_54_201792.py
|
47ea9f05865c88712d8501fe79b7fc7736d39472
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
def tamanho(x):
    """Return the length of the Collatz sequence starting at x.

    Counts x itself, every intermediate value, and the final 1.
    """
    passos = 1
    while x != 1:
        if x % 2 == 0:
            # Floor division keeps x an int; the original x/2 produced
            # floats, which lose precision once values exceed 2**53.
            x = x // 2
        else:
            x = 3 * x + 1
        passos += 1
    return passos

# Find the start value in [2, 999] with the longest Collatz sequence.
# ">=" keeps the LARGEST start value among ties (same as the original);
# tamanho is now called once per candidate instead of twice.
nms = 1
ms = 1
for x in range(2, 1000):
    comprimento = tamanho(x)
    if comprimento >= ms:
        ms = comprimento
        nms = x
print(nms)
|
[
"you@example.com"
] |
you@example.com
|
6b0a7bd2d974d61447504b9f0b45adcff16dc291
|
f4d710f68d715470905daa1245f3b9f4f4c4cef5
|
/local_settings.py
|
da93c18c7fe1d80df9288fb1a2a37e53e9ee22de
|
[] |
no_license
|
powellc/findhistory_me
|
c3044a894840e62f12bb2ee4dc0ad7dbe8a524fd
|
d72eb449eb0e15f0d62a46986ad8551ab1cb66ca
|
refs/heads/master
| 2016-09-15T20:55:57.994241
| 2014-04-15T03:56:18
| 2014-04-15T03:56:18
| 10,428,262
| 0
| 0
| null | 2014-04-15T03:54:33
| 2013-06-01T20:46:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 658
|
py
|
DEBUG = True
# NOTE(review): live-looking AWS credentials are committed to source control.
# They must be revoked and loaded from the environment / a secrets store.
AWS_ACCESS_KEY_ID = 'AKIAIC6KSWVHASDPKERQ'
AWS_SECRET_ACCESS_KEY = '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'
DATABASES = {
    "default": {
        # Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
        "ENGINE": "django.db.backends.sqlite3",
        # DB name or path to database file if using sqlite3.
        "NAME": "dev.db",
        # Not used with sqlite3.
        "USER": "",
        # Not used with sqlite3.
        "PASSWORD": "",
        # Set to empty string for localhost. Not used with sqlite3.
        "HOST": "",
        # Set to empty string for default. Not used with sqlite3.
        "PORT": "",
    }
}
|
[
"colin.powell@gmail.com"
] |
colin.powell@gmail.com
|
83b6521cbb69e918d5adf86d3847e2be974e7380
|
33c1c5d0f48ad952776fe546a85350a441d6cfc2
|
/ABC/125/B.py
|
8f6837817b70aa2610cc83b398e4c54f113aa439
|
[] |
no_license
|
hisyatokaku/Competition
|
985feb14aad73fda94804bb1145e7537b057e306
|
fdbf045a59eccb1b2502b018cab01810de4ea894
|
refs/heads/master
| 2021-06-30T18:48:48.256652
| 2020-11-16T11:55:12
| 2020-11-16T11:55:12
| 191,138,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# Replace every character of S that differs from its K-th character with '*'.
N = int(input())  # length of S; read for the input format but otherwise unused
S = input()
K = int(input())  # 1-based index of the reference character
tar = S[K-1]
ans = ""
for c in S:
    if c != tar:
        ans += "*"
    else:
        ans += c
print(ans)
|
[
"hisyatokaku2005@yahoo.co.jp"
] |
hisyatokaku2005@yahoo.co.jp
|
1c8511d43344a1e4d70820bed6125f0579cc50c8
|
c112831974be5aa036a74bbe1bf3798a4f9a5907
|
/Python基础教程学习代码/venv/Scripts/pip3.7-script.py
|
5cd47a7d356bee3623bf1d548ec06ba97a352e59
|
[] |
no_license
|
MuSaCN/PythonLearning_old1
|
5e1cb069d80cbe9527c179877b0d2026072c45c0
|
c9aa0938875959526cf607344c1094a8fbf76400
|
refs/heads/master
| 2020-07-31T23:51:08.591550
| 2019-09-24T10:59:46
| 2019-09-24T10:59:46
| 210,792,257
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 443
|
py
|
#!C:\Users\i2011\PycharmProjects\Python»ù´¡½Ì³Ìѧϰ\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
# NOTE(review): auto-generated setuptools entry-point wrapper for pip 10.0.1;
# the mojibake in the shebang is an encoding artifact of a non-ASCII path.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip any "-script.py(w)"/".exe" suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
|
[
"39754824+MuSaCN@users.noreply.github.com"
] |
39754824+MuSaCN@users.noreply.github.com
|
06dc48c81124fea793ef637fde3fec4caa144662
|
8f5ee885986e9a0ec8816c32a9ad2966fb747f7d
|
/src/aido_schemas/estimation_demo.py
|
eb7931f53cb1b8c34e57b5ea740582f2ec41d9cf
|
[] |
no_license
|
duckietown/aido-protocols
|
3cca7564738d645785a5cc242bb39fd53936af0a
|
47b551d80151a76aba05f76a13e516f9fa06749c
|
refs/heads/daffy
| 2023-04-13T08:57:28.079004
| 2022-11-29T13:18:35
| 2022-11-29T13:18:35
| 169,989,925
| 1
| 1
| null | 2021-10-31T22:48:30
| 2019-02-10T15:00:05
|
Python
|
UTF-8
|
Python
| false
| false
| 523
|
py
|
from .basics import InteractionProtocol
__all__ = ["protocol_simple_predictor"]
protocol_simple_predictor = InteractionProtocol(
description="""
An estimator receives a stream of values and must predict the next value.
""".strip(),
inputs={"observations": float, "seed": int, "get_prediction": type(None)},
outputs={"prediction": float},
language="""
in:seed? ;
(in:observations |
(in:get_prediction ; out:prediction)
)*
""",
)
|
[
"acensi@ethz.ch"
] |
acensi@ethz.ch
|
afbe0a36ff1f83a9c1ebd24646a5d41fef49fe65
|
73c05ee0cbc54dd77177b964f3a72867138a1f0f
|
/interview/CyC2018_Interview-Notebook/剑指offer/41_2.py
|
6ab655dbf45728e3718151f3849c4ee7f6f8943e
|
[] |
no_license
|
tb1over/datastruct_and_algorithms
|
8be573953ca1cdcc2c768a7d9d93afa94cb417ae
|
2b1c69f28ede16c5b8f2233db359fa4adeaf5021
|
refs/heads/master
| 2020-04-16T12:32:43.367617
| 2018-11-18T06:52:08
| 2018-11-18T06:52:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
# -*- coding: utf-8 -*-
"""Problem description (translated from Chinese):
Implement a function that finds the first character that has appeared exactly
once in a character stream so far.  For example, after reading the first two
characters of "go" the answer is "g"; after reading the first six characters
"google" the answer is "l".
"""
class Solution:
    """Streaming first-unique-character tracker.

    Insert() feeds one character at a time; FirstAppearingOnce() returns the
    first character seen exactly once so far, or '#' when every character
    seen so far repeats.  (The original stubs had comment-only bodies and
    did not even parse; this implements the documented behavior.)
    """
    def __init__(self):
        self._counts = {}  # char -> number of times seen so far
        self._order = []   # distinct chars in first-seen order
    # 返回对应char
    def FirstAppearingOnce(self):
        # Scan distinct characters in arrival order; first with count 1 wins.
        for ch in self._order:
            if self._counts[ch] == 1:
                return ch
        return '#'  # convention: no unique character yet
    def Insert(self, char):
        # Record one more occurrence of char, remembering first-seen order.
        if char in self._counts:
            self._counts[char] += 1
        else:
            self._counts[char] = 1
            self._order.append(char)
|
[
"mitree@sina.com"
] |
mitree@sina.com
|
59abb941173fc174e7c1871a202d8b4af137e040
|
68ee9027d4f780e1e5248a661ccf08427ff8d106
|
/extra/unused/qgisRasterColorscale.py
|
1ced8315150eba200810e34a79bad3ffd8fa1c6c
|
[
"MIT"
] |
permissive
|
whyjz/CARST
|
87fb9a6a62d39fd742bb140bddcb95a2c15a144c
|
4fc48374f159e197fa5a9dbf8a867b0a8e0aad3b
|
refs/heads/master
| 2023-05-26T20:27:38.105623
| 2023-04-16T06:34:44
| 2023-04-16T06:34:44
| 58,771,687
| 17
| 4
|
MIT
| 2021-03-10T01:26:04
| 2016-05-13T20:54:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
#!/usr/bin/python
# qgisRasterColorscale.py
# Author: Andrew Kenneth Melkonian
# All rights reserved
def qgisRasterColorscale(qgs_path, qml_path):
assert os.path.exists(qgs_path), "\n***** ERROR: " + qgs_path + " does not exist\n";
assert os.path.exists(qml_path), "\n***** ERROR: " + qml_path + " does not exist\n";
raster_renderer = "";
infile = open(qml_path);
for line in infile:
raster_renderer += line;
infile.close();
import re;
raster_renderer = raster_renderer[re.search("\s*<raster",raster_renderer).start(0) : re.search("</rasterrenderer>",raster_renderer).end(0)];
raster_section = False;
outfile = open("temp", "w");
infile = open(qgs_path, "r");
for line in infile:
if line.find("<rasterrenderer") > -1:
raster_section = True;
outfile.write(raster_renderer + "\n");
elif line.find("</rasterrenderer") > -1:
raster_section = False;
elif raster_section == False:
outfile.write(line);
outfile.close();
infile.close();
return;
if __name__ == "__main__":
import os;
import sys;
assert len(sys.argv) > 2, "\n***** ERROR: qgisRasterColorscale.py requires 2 arguments, " + str(len(sys.argv) - 1) + " given\n";
assert os.path.exists(sys.argv[1]), "\n***** ERROR: " + sys.argv[1] + " does not exist\n";
assert os.path.exists(sys.argv[2]), "\n***** ERROR: " + sys.argv[2] + " does not exist\n";
qgisRasterColorscale(sys.argv[1], sys.argv[2]);
exit();
|
[
"wz278@cornell.edu"
] |
wz278@cornell.edu
|
2bfb834c61e5fd67368ad0fbc61cdbb04f3ac348
|
1a4bc1a11fdb3f714f22f5e0e826b47aa0569de2
|
/lab/lab09/tests/q3_2.py
|
ca45e1c7feda06578903f5453e3fdb3f09c5adcf
|
[] |
no_license
|
taylorgibson/ma4110-fa21
|
201af7a044fd7d99140c68c48817306c18479610
|
a306e1b6e7516def7de968781f6c8c21deebeaf5
|
refs/heads/main
| 2023-09-05T21:31:44.259079
| 2021-11-18T17:42:15
| 2021-11-18T17:42:15
| 395,439,687
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
test = { 'name': 'q3_2',
'points': None,
'suites': [ { 'cases': [ { 'code': ">>> # Make sure your column labels are correct.\n>>> set(faithful_predictions.labels) == set(['duration', 'wait', 'predicted wait'])\nTrue",
'hidden': False,
'locked': False},
{'code': '>>> abs(1 - np.mean(faithful_predictions.column(2))/100) <= 0.35\nTrue', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
|
[
"taylorgibson@gmail.com"
] |
taylorgibson@gmail.com
|
c4ac861f2ee0b8e2fc382f3d37a11fd699b479ca
|
a1bffcd8854e1843e56bb812d4d83b3161a5211e
|
/plugins/lookup/cyberarkpassword.py
|
79e855c22d4b5573ba40e8c231017a3b2e10e868
|
[] |
no_license
|
goneri/ansible.community
|
1a71f9d98c164b77f8ed2ed7f558b4963005ff8f
|
f26f612dd0a3154050d90b51a75502018c95f6e4
|
refs/heads/master
| 2020-12-29T07:47:35.353515
| 2020-01-22T17:43:18
| 2020-01-22T17:43:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,196
|
py
|
# (c) 2017, Edward Nunez <edward.nunez@cyberark.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
lookup: cyberarkpassword
short_description: get secrets from CyberArk AIM
requirements:
- CyberArk AIM tool installed
description:
- Get secrets from CyberArk AIM.
options :
_command:
description: Cyberark CLI utility.
env:
- name: AIM_CLIPASSWORDSDK_CMD
default: '/opt/CARKaim/sdk/clipasswordsdk'
appid:
description: Defines the unique ID of the application that is issuing the password request.
required: True
query:
description: Describes the filter criteria for the password retrieval.
required: True
output:
description:
- Specifies the desired output fields separated by commas.
- "They could be: Password, PassProps.<property>, PasswordChangeInProcess"
default: 'password'
_extra:
description: for extra_parms values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide"
note:
- For Ansible on windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe
'''
EXAMPLES = """
- name: passing options to the lookup
debug: msg={{ lookup("cyberarkpassword", cyquery)}}
vars:
cyquery:
appid: "app_ansible"
query: "safe=CyberArk_Passwords;folder=root;object=AdminPass"
output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess"
- name: used in a loop
debug: msg={{item}}
with_cyberarkpassword:
appid: 'app_ansible'
query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess'
"""
RETURN = """
password:
description:
- The actual value stored
passprops:
description: properties assigned to the entry
type: dictionary
passwordchangeinprocess:
description: did the password change?
"""
import os
import subprocess
from subprocess import PIPE
from subprocess import Popen
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.parsing.splitter import parse_kv
from ansible_collections.ansible.community.plugins.module_utils._text import to_bytes, to_text, to_native
from ansible.utils.display import Display
display = Display()
CLIPASSWORDSDK_CMD = os.getenv('AIM_CLIPASSWORDSDK_CMD', '/opt/CARKaim/sdk/clipasswordsdk')
class CyberarkPassword:
def __init__(self, appid=None, query=None, output=None, **kwargs):
self.appid = appid
self.query = query
self.output = output
# Support for Generic parameters to be able to specify
# FailRequestOnPasswordChange, Queryformat, Reason, etc.
self.extra_parms = []
for key, value in kwargs.items():
self.extra_parms.append('-p')
self.extra_parms.append("%s=%s" % (key, value))
if self.appid is None:
raise AnsibleError("CyberArk Error: No Application ID specified")
if self.query is None:
raise AnsibleError("CyberArk Error: No Vault query specified")
if self.output is None:
# If no output is specified, return at least the password
self.output = "password"
else:
# To avoid reference issues/confusion to values, all
# output 'keys' will be in lowercase.
self.output = self.output.lower()
self.b_delimiter = b"@#@" # Known delimiter to split output results
def get(self):
result_dict = {}
try:
all_parms = [
CLIPASSWORDSDK_CMD,
'GetPassword',
'-p', 'AppDescs.AppID=%s' % self.appid,
'-p', 'Query=%s' % self.query,
'-o', self.output,
'-d', self.b_delimiter]
all_parms.extend(self.extra_parms)
b_credential = b""
b_all_params = [to_bytes(v) for v in all_parms]
tmp_output, tmp_error = Popen(b_all_params, stdout=PIPE, stderr=PIPE, stdin=PIPE).communicate()
if tmp_output:
b_credential = to_bytes(tmp_output)
if tmp_error:
raise AnsibleError("ERROR => %s " % (tmp_error))
if b_credential and b_credential.endswith(b'\n'):
b_credential = b_credential[:-1]
output_names = self.output.split(",")
output_values = b_credential.split(self.b_delimiter)
for i in range(len(output_names)):
if output_names[i].startswith("passprops."):
if "passprops" not in result_dict:
result_dict["passprops"] = {}
output_prop_name = output_names[i][10:]
result_dict["passprops"][output_prop_name] = to_native(output_values[i])
else:
result_dict[output_names[i]] = to_native(output_values[i])
except subprocess.CalledProcessError as e:
raise AnsibleError(e.output)
except OSError as e:
raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror))
return [result_dict]
class LookupModule(LookupBase):
"""
USAGE:
"""
def run(self, terms, variables=None, **kwargs):
display.vvvv("%s" % terms)
if isinstance(terms, list):
return_values = []
for term in terms:
display.vvvv("Term: %s" % term)
cyberark_conn = CyberarkPassword(**term)
return_values.append(cyberark_conn.get())
return return_values
else:
cyberark_conn = CyberarkPassword(**terms)
result = cyberark_conn.get()
return result
|
[
"ansible_migration@example.com"
] |
ansible_migration@example.com
|
27f10dff9fe70eb67bbbd8be5e27c8ee089b46f9
|
65dce36be9eb2078def7434455bdb41e4fc37394
|
/454 4Sum II.py
|
c5d654a137c6a70d3df07a7fcec921b7407065cd
|
[] |
no_license
|
EvianTan/Lintcode-Leetcode
|
9cf2d2f6a85c0a494382b9c347bcdb4ee0b5d21a
|
d12dd31e98c2bf24acc20c5634adfa950e68bd97
|
refs/heads/master
| 2021-01-22T08:13:55.758825
| 2017-10-20T21:46:23
| 2017-10-20T21:46:23
| 92,607,185
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
'''
Given four lists A, B, C, D of integer values, compute how many tuples (i, j, k, l) there are such that A[i] + B[j] + C[k] + D[l] is zero.
To make problem a bit easier, all A, B, C, D have same length of N where 0 ≤ N ≤ 500. All integers are in the range of -228 to 228 - 1 and the result is guaranteed to be at most 231 - 1.
Example:
Input:
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
Output:
2
Explanation:
The two tuples are:
1. (0, 0, 0, 1) -> A[0] + B[0] + C[0] + D[1] = 1 + (-2) + (-1) + 2 = 0
2. (1, 1, 0, 0) -> A[1] + B[1] + C[0] + D[0] = 2 + (-1) + (-1) + 0 = 0
'''
class Solution(object):
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
dic={}
res=0
for a in A:
for b in B:
if a+b not in dic:
dic[a+b]=1
else:
dic[a+b]+=1
for c in C:
for d in D:
if -c-d in dic:
res+=dic[-c-d]
return res
|
[
"yiyun.tan@uconn.edu"
] |
yiyun.tan@uconn.edu
|
9b782c688e0dd74223de5b199c0bc92e6fa39895
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2007.1/server/openldap/actions.py
|
cb88cc11802e70a1cca7ea9961bec056dfadb4c4
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,360
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005, 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
shelltools.echo("include/ldap_defaults.h", "#define LDAPI_SOCK \"/var/run/openldap/slapd.sock\"")
autotools.configure("--prefix=/usr \
--enable-bdb \
--with-ldbm-api=berkeley \
--enable-hdb=mod \
--enable-slapd \
--enable-slurpd \
--enable-ldbm \
--enable-passwd=mod \
--enable-phonetic=mod \
--enable-dnssrv=mod \
--enable-ldap \
--enable-wrappers \
--enable-meta=mod \
--enable-monitor=mod \
--enable-null=mod \
--enable-shell=mod \
--enable-rewrite \
--enable-rlookups \
--enable-aci \
--enable-modules \
--enable-cleartext \
--enable-lmpasswd \
--enable-spasswd \
--enable-slapi \
--enable-dyngroup \
--enable-proxycache \
--enable-perl \
--enable-syslog \
--enable-dynamic \
--enable-local \
--enable-proctitle \
--enable-overlay \
--with-tls \
--with-cyrus-sasl \
--enable-crypt \
--enable-ipv6")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("ANNOUNCEMENT", "CHANGES", "COPYRIGHT", "README", "LICENSE")
pisitools.dodir("/var/run/openldap")
pisitools.dodir("/var/run/openldap/slapd")
pisitools.dodir("/etc/openldap/ssl")
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
7b8466f0376f6de64cf039644fc1465308b1e644
|
e1c5b001b7031d1ff204d4b7931a85366dd0ce9c
|
/EMu/2016/plot_fake/check_data.py
|
285e253d884fcbc8e17661669330414a85534585
|
[] |
no_license
|
fdzyffff/IIHE_code
|
b9ff96b5ee854215e88aec43934368af11a1f45d
|
e93a84777afad69a7e63a694393dca59b01c070b
|
refs/heads/master
| 2020-12-30T16:03:39.237693
| 2020-07-13T03:06:53
| 2020-07-13T03:06:53
| 90,961,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
import ROOT
try:
tchain=ROOT.TChain('tap')
tchain.Add('data_2016B_DoubleEG.root')
except:
print "errors!"
run_list = []
n_passed1 = 0
totalEntry = tchain.GetEntries()
for iEntry in range(0, tchain.GetEntries()):
tchain.GetEntry(iEntry)
if tchain.ev_run_out not in run_list:run_list.append(tchain.ev_run_out)
if iEntry%50000==0 and iEntry > 0:
print '%d / %d Prossed'%(iEntry,totalEntry)
if 60<=tchain.M_ee and tchain.M_ee<=120 :
if (tchain.t_region == 1 and tchain.heep2_region == 1) or (tchain.t_region == 3 and tchain.heep2_region == 3) or (tchain.t_region == 1 and tchain.heep2_region == 3) or (tchain.t_region == 3 and tchain.heep2_region == 1):
n_passed1+=tchain.w_PU_combined
print 'n total : ', n_passed1
run_list.sort()
for run in run_list:
print run
|
[
"1069379433@qq.com"
] |
1069379433@qq.com
|
e97e4caa02a91f4185685942cc774181c4259b6c
|
caaf56727714f8c03be38710bc7d0434c3ec5b11
|
/homeassistant/components/avri/__init__.py
|
3165b6ee87a77f41cca449f635f51943bbe62923
|
[
"Apache-2.0"
] |
permissive
|
tchellomello/home-assistant
|
c8db86880619d7467901fd145f27e0f2f1a79acc
|
ed4ab403deaed9e8c95e0db728477fcb012bf4fa
|
refs/heads/dev
| 2023-01-27T23:48:17.550374
| 2020-09-18T01:18:55
| 2020-09-18T01:18:55
| 62,690,461
| 8
| 1
|
Apache-2.0
| 2023-01-13T06:02:03
| 2016-07-06T04:13:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
"""The avri component."""
import asyncio
from datetime import timedelta
import logging
from avri.api import Avri
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import (
CONF_COUNTRY_CODE,
CONF_HOUSE_NUMBER,
CONF_HOUSE_NUMBER_EXTENSION,
CONF_ZIP_CODE,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
SCAN_INTERVAL = timedelta(hours=4)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Avri component."""
hass.data[DOMAIN] = {}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Avri from a config entry."""
client = Avri(
postal_code=entry.data[CONF_ZIP_CODE],
house_nr=entry.data[CONF_HOUSE_NUMBER],
house_nr_extension=entry.data.get(CONF_HOUSE_NUMBER_EXTENSION),
country_code=entry.data[CONF_COUNTRY_CODE],
)
hass.data[DOMAIN][entry.entry_id] = client
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
[
"noreply@github.com"
] |
tchellomello.noreply@github.com
|
961cc26729281f013409614ad41160edb6caace8
|
a97f789530412fc1cb83170a11811f294b139ee8
|
/疯狂Python讲义/codes/10/10.8/dict_vs_defaultdict.py
|
271ff0662c308adfa488ad5cc5242ed3fdbd5820
|
[] |
no_license
|
baidongbin/python
|
3cebf2cc342a15b38bf20c23f941e6887dac187a
|
1c1398bff1f1820afdd8ddfa0c95ccebb4ee836f
|
refs/heads/master
| 2021-07-21T19:23:32.860444
| 2020-03-07T11:55:30
| 2020-03-07T11:55:30
| 195,909,272
| 0
| 1
| null | 2020-07-21T00:51:24
| 2019-07-09T01:24:31
|
Python
|
UTF-8
|
Python
| false
| false
| 244
|
py
|
from collections import defaultdict
my_dict = {}
# 使用 int 作为 defaultdict 的 default_factory
# 当 key 不存在时,将会返回 int 函数的返回值
my_defaultdict = defaultdict(int)
print(my_defaultdict['a'])
print(my_dict['a'])
|
[
"baidongbin@thunisoft.com"
] |
baidongbin@thunisoft.com
|
a5230b855b505b17f14791a0061759b8f1b21930
|
fa27b2e9668484959772c6ac37622a7442396347
|
/sharing/app/api_1_0/register.py
|
039c61f8e235d2d4e7f5478a4cc4115a74de729a
|
[] |
no_license
|
tangxiangru/2017-sharing-backend
|
5a3cc9ba6c22944046ae99221bee70245e326ffd
|
0905e38c9a30296cf01950efa6eed2708807f957
|
refs/heads/master
| 2021-01-13T11:30:24.026822
| 2017-02-11T18:56:29
| 2017-02-11T18:56:29
| 81,680,679
| 1
| 0
| null | 2017-02-11T20:37:08
| 2017-02-11T20:37:08
| null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
#coding:utf-8
from flask import jsonify, redirect, request, url_for, flash
from ..models import User
from .. import db
from . import api
#注册
@api.route('/register/',methods = ['POST'])
def register():
if request.method == 'POST':
email = request.get_json().get("email")
password = request.get_json().get("password")
username = request.get_json().get("username")
user = User ( username= username,email=email ,password=password)
#user = User.from_json(request.json)
db.session.add(user)
db.session.commit()
user_id=User.query.filter_by(email=email).first().id
#token = user.generate_confirmation_token()
#send_email(user.email,'请确认你的账户',
# 'auth/email/confirm',user = user,token = token)
#flash(u'确认邮件已经发往了你的邮箱')
return jsonify({
"created":user_id
})
|
[
"504490160@qq.com"
] |
504490160@qq.com
|
a5e994b745288becf5f7c50b640bea1b03d4ad05
|
ae88dd2493c2329be480030f87e6e2a91470e255
|
/src/python/DQIS/Client/CommandLine.py
|
15d7a3b7f6f2e7ddc217564c71ac050dab93013c
|
[] |
no_license
|
dmwm/DQIS
|
a48da3841ab6a086247ae8e437e2b5eb9e1c5048
|
bd861954c2531df1bd2e9dceb2585b9acd4cbbdc
|
refs/heads/master
| 2021-01-23T08:15:06.804525
| 2010-05-11T19:30:33
| 2010-05-11T19:30:33
| 4,423,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,840
|
py
|
'''
Created on 7 May 2010
@author: metson
'''
from DQIS.API.Database import Database
from optparse import OptionParser
import json
D_DATABASE_NAME = 'dqis'
D_DATABASE_ADDRESS = 'localhost:5984'
def do_options():
op = OptionParser(version="%prog 0.1")
op.add_option("-u", "--url",
type="string",
action="store",
dest="db_address",
help="Database url. Default address %s" % D_DATABASE_ADDRESS,
default=D_DATABASE_ADDRESS)
op.add_option("-d", "--database",
type="string",
action="store",
dest="db_name",
help="Database name. Default: '%s'" % D_DATABASE_NAME,
default=D_DATABASE_NAME)
op.add_option("-k", "--key",
action="append",
nargs=2,
type="string",
dest="keys",
help="Key Value pair (e.g.-k ecal True)")
op.add_option("--startrun",
action="store",
type="int",
dest="start_run",
help="Run value")
op.add_option("--endrun",
action="store",
type="int",
dest="end_run",
help="Run value")
op.add_option("--lumi",
action="store",
type="int",
dest="lumi",
help="Lumi value")
op.add_option("--dataset",
action="store",
type="string",
dest="dataset",
help="Dataset value")
op.add_option("--bfield", "-b",
action="store",
type="int",
dest="bfield",
help="Magnetic field value")
op.add_option("--id",
type="string",
action="store",
dest="doc_id",
help="Document ID",) #TODO: validate
op.add_option("--crab",
"-c",
action="store_true",
dest='crab',
help='Create a CRAB lumi.json file in the current directory.',
default=False)
return op.parse_args()
options, args = do_options()
db = Database(dbname = options.db_name, url = options.db_address, size = 1000)
map = {}
for k,v in options.keys:
map[k] = bool(v)
if options.crab:
data = db.crab(options.start_run, options.end_run, map, options.bfield)
f = open('lumi.json', 'w')
json.dump(data, f)
f.close()
elif options.doc_id:
print db.getDoc(doc_id)
else:
print db.search(options.start_run, options.end_run, map, options.bfield)
|
[
"metson@4525493e-7705-40b1-a816-d608a930855b"
] |
metson@4525493e-7705-40b1-a816-d608a930855b
|
f721745c59dfa425155103c807994cc344f7ce31
|
93039551fbdef0a112a9c39181d30b0c170eb3a6
|
/day03/day03HomeWork.py
|
a399b93ad6e0b969a53525cf311c62598023889e
|
[] |
no_license
|
wenzhe980406/PythonLearning
|
8714de8a472c71e6d02b6de64efba970a77f6f4a
|
af0e85f0b11bf9d2f8e690bac480b92b971c01bb
|
refs/heads/master
| 2020-07-14T20:46:45.146134
| 2020-05-28T12:16:21
| 2020-05-28T12:16:21
| 205,398,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,561
|
py
|
# _*_ coding : UTF-8 _*_
# 开发人员 : ChangYw
# 开发时间 : 2019/7/17 17:33
# 文件名称 : day03HomeWork.PY
# 开发工具 : PyCharm
#1
# #1)
# if __name__ == "__main__":
# score = []
#
# #2)
# if __name__ == "__main__":
# score.append(68)
# score.append(87)
# score.append(92)
# score.append(100)
# score.append(76)
# score.append(88)
# score.append(54)
# score.append(89)
# score.append(76)
# score.append(61)
#
# #3)
# if __name__ == "__main__":
# print(score[2])
#
# #4)
# if __name__ == "__main__":
# print(score[:6])
#
# #5)
# if __name__ == "__main__":
# score.insert(3,59)
# print(score)
#
# #6)
# if __name__ == "__main__":
# num = score.count(76)
# print(num)
#
# #7)
# if __name__ == "__main__":
# print(55 in score)
#
# #8)
# if __name__ == "__main__":
# print(score.index(68)+19000100)
# print(score.index(87)+19000100)
# print(score.index(92)+19000100)
# print(score.index(100)+19000100)
#
# #9)
# if __name__ == "__main__":
# score[3] = score[3] + 1
# print(score[3])
#
# #10)
# if __name__ == "__main__":
# del score[0]
# print(score)
#11)
# if __name__ == "__main__":
# print(score.__len__())
# print(len(score))
# #12)
# if __name__ == "__main__":
# score.sort()
# print(score)
# print(min(score))
# print(max(score))
# #13)!!
# if __name__ == "__main__":
# print(list(reversed(score)))
#14)
# if __name__ == "__main__":
# del score[-1]
# print(score)
# #15)???如何定位第一个值为88的字符?
# if __name__ == "__main__":
# score.append(88)
# del score[6]
# #16)
# if __name__ == "__main__":
# score1 = [80,61]
# score2 = [71,95,82]
# score = score1.append(score2)
# print(score)
#17)
# if __name__ == "__main__":
# score1 = [80,61]
# score2 = score1 *5
# print(score2)
#2)
import random
# if __name__ == "__main__":
# #1)入栈(先入后出,后入先出)
# score1 = [70,45,15,48,25,70,75,35,76,88]
# score2 = [22,84,63]
# score = score1 + score2
# # 2)出栈
# del score[0:2]
# print(score)
# #3)查看栈顶的元素
# print(score[-1])
# print(score.pop())
# #4)查看栈的长度
# print(len(score))
# #5)判断栈是否为空
# if score is None :
# print("score is null")
# else:
# print("score is not null")
# #6)退出程序。
# exit(0)
#3)
# if __name__ == "__main__":
# comm_list = ["T恤", "长裤", "鞋子", "饮料", "餐巾纸", "手机", "电脑", "防晒霜", "疯狂Python书", "椅子"]
# comm_price = ["88", "108", "168", "38", "18", "6288", "5288", "108", "68", "228"]
#
# money = input("请输入你的余额:")
# if (not money.isdigit()):
# print("请输入一个正整数:")
# print("输入成功,即将进入主界面")
# # money = int(money)
# while True :
# print("--------------")
# print("您的余额为:",money)
# print("--------------")
# print("1.显示余额")
# print("2.充值")
# print("3.显示商品")
# print("4.显示商品价格")
# print("5.购买商品")
# print("6.退出程序")
#
# choice = input("请输入你的选择:")
# if (not choice.isdigit()):
# print("请输入一个正整数:")
# choice = int(choice)
# if choice <0 and choice > 5 :
# print("请正确输入界面选项!")
#
# if choice == 1 :
# continue
# elif choice == 2 :
# money_invest = input("请输入充值金额:(充值金额为正整数)")
# if (not money_invest.isdigit()):
# print("请正确输入您的充值金额,金额为正整数:")
# else:
# # money_invest = int(money_invest)
# money = int(money)
# money_invest = int(money_invest)
# money = money + money_invest
# print("本次充值金额为:", money_invest, "元", "充值后的金额为:", money, "元")
# elif choice == 3:
# print("商品列表为:")
# for i in comm_list :
# print(i,end=" ")
# print()
# elif choice == 4:
# print("商品列表对应价格为:")
# for i in comm_price :
# print(i,end=" ")
# print()
# elif choice == 5:
# comm_choice = input("请输入要购买的商品:")
# money = int(money)
# if not comm_choice in comm_list:
# print("没有",comm_choice,"这款的商品,请按照列表重新输入")
# for i in comm_list:
# print(i, end=" ")
# print()
# elif comm_choice in comm_list:
# print("您选中了", comm_choice)
# comm_buy_num = int(comm_list.index(comm_choice))
# comm_buy_money = int(comm_price[comm_buy_num])
# if int(money) < int(comm_buy_money) :
# print("您的余额不足,请及时充值!")
# else:
# print("您的余额为:", money, "购买", comm_choice, "即将扣除", comm_buy_money, "元,请稍后")
# money = int(money - comm_buy_money)
# print("购买成功,您的余额还剩余:",money,"元")
# elif comm_choice is None :
# print("既然选择要买了,那可就要买一个哟!")
# elif (comm_choice.isdigit()):
# print("请正确输入您想要购买的商品:")
# elif choice == 6 :
# print("谢谢光临,欢迎下次光临!")
# exit(0)
#
#4
# if __name__ == "__main__":
# #7.3 True
# print('abc' in ('abcdefg'))
# #7.4 True
# print('abc' in ('abcdefg'))
# #7.5 True
# print('\x41'=='A')
# #7.6 hello world!
# print(''.join(list('hello world!')))
# #7.7 换行
# # print('\n')
# #7.8 为啥是3
# x = ['11','2','3']
# print(max(x))
# #7.9 11
# print(min(['11','2','3']))
#7.10 11
# x = ['11', '2', '3']
# print(max(x,key=len))
# #7.11 c:\test.htm
# path = r'c:\test.html'
# print(path[:-4]+'htm')
# #7.12 False
# print(list(str([1,2,3])) == [1,2,3])
# #7.13 [1,2,3]
# print(str([1,2,3]))
# #7.14 (1,2,3)
# print(str((1,2,3)))
# #7.15 1+3+5+7+9=25
# print(sum(range(1,10,2)))
# #7.16 1+2+3+4+5+6+7+8+9=45
# print(sum(range(1,10)))
# #7.17 A
# print('%c'%65)
# #7.18 65
# print('%s'%65)
# #7.19 65,A
# print('%d,%c'%(65,65))
# #7.20 The first:97,the second is 65
# print('The first:{1},the second is {0}'.format(65,97))
# #7.21 65,0x41,0o101
# print('{0:#d},{0:#x},{0:#o}'.format(65))
# #7.22 True
# print(isinstance('abcdefg',str))
# #7.23 True
# print(isinstance('abcdefg',object))
# #7.24 True
# print(isinstance(3,object))
# #7.25 6
# print('abcabcabc'.rindex('abc'))
# #7.26 ab:efg
# print(':'.join('abcdefg'.split('cd')))
# #7.27 -1
# print('Hello world.I like Python.'.rfind('python'))
# #7.28 3
# print('abcabcabc'.count('abc'))
# #7.29 1
# print('apple.peach,banana,pear'.find('p'))
# #7.30 -1
# print('apple.peach,banana,pear'.find('ppp'))
# #7.31 ['abcdefg']
# print('abcdefg'.split(','))
# #7.32 1:2:3:4:5
# print(':'.join('1,2,3,4,5'.split(',')))
# #7.33 a,b,ccc,ddd
# print(','.join('a b ccc\n\n\nddd '.split()))
# #7.34 ??? 345
# x = {i:str(i+3) for i in range(3)}
# print(''.join([item[1] for item in x.items()]))
# #7.35 HELLO WORLD
# print('Hello world'.upper())
# #7.36 hello world
# print('Hello world'.lower())
# #7.37 HELLO WORLD
# print('Hello world'.lower().upper())
# #7.38 Hello world
# print('Hello world'.swapcase().swapcase())
# #7.39 True
# print(r'c:\windows\notepad.exe'.endswith('.exe'))
# #7.40
# print(r'c:\windows\notepad.exe'.endswith('.jpg','.exe'))
# #7.41 True
# print(r'C:\\Windows\\notepad.exe'.startswith('C:'))
# #7.42 20
# print(len('Hello world!'.ljust(20)))
|
[
"noreply@github.com"
] |
wenzhe980406.noreply@github.com
|
72e99ab5f865b18e80a5fd7dbe9e887b0bcfcdbc
|
8e8273a3c9b87e58e46dd6ab575a33eb6fde9f62
|
/version_manager/options_set.py
|
e9aa203b1975a30aac22e2e044a66baa54686947
|
[] |
no_license
|
mdrotthoff/version-manager-py
|
69ddd1308f1f1c896739f583f372d1af09d3d384
|
e5f388ff3856f7f4f1818215422610233b2dcb1d
|
refs/heads/master
| 2020-12-07T07:07:02.762375
| 2020-01-08T22:52:38
| 2020-01-08T22:52:38
| 232,666,355
| 0
| 0
| null | 2020-01-08T21:46:51
| 2020-01-08T21:46:50
| null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
from typing import Dict, List, Optional
import yaml
def get_parameter_values(parameter_values: Dict[str, str],
values_list: Optional[List[str]]) -> Dict[str, str]:
"""
Override the parameter values that are given in the list.
It assumes each parameter is in the 'KEY=VALUE' format.
"""
if not values_list:
return parameter_values
for value in values_list:
tokens = value.split('=', 2)
parameter_values[tokens[0]] = tokens[1]
return parameter_values
def get_parameters_from_file(file_name: Optional[str]) -> Dict[str, str]:
if not file_name:
return dict()
with open(file_name, 'r', encoding='utf-8') as stream:
result = list(yaml.safe_load_all(stream))[0]
return result
|
[
"bogdan.mustiata@gmail.com"
] |
bogdan.mustiata@gmail.com
|
12e3936893568ce3f48ea41898acde3506eb4f06
|
52855d750ccd5f2a89e960a2cd03365a3daf4959
|
/ABC/ABC52_B.py
|
6a5c62026f989697559e55494fbdcbc27af93a36
|
[] |
no_license
|
takuwaaan/Atcoder_Study
|
b15d4f3d15d48abb06895d5938bf8ab53fb73c08
|
6fd772c09c7816d147abdc50669ec2bbc1bc4a57
|
refs/heads/master
| 2021-03-10T18:56:04.416805
| 2020-03-30T22:36:49
| 2020-03-30T22:36:49
| 246,477,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
N = int(input())
S = input()
l = [0]
x = 0
for i in range(N):
if S[i] == "I":
x+=1
else:
x-=1
l.append(x)
print(max(l))
|
[
"takutotakuwan@gmail.com"
] |
takutotakuwan@gmail.com
|
3a4162c73e2895e4844d4a8ce5c4a057e8fa230e
|
cb703e45cf56ec816eb9203f171c0636aff0b99c
|
/Dzien06/loger.py
|
0e546809184bbae08d85b4ec2e6a1b2e188b982b
|
[] |
no_license
|
marianwitkowskialx/Enzode
|
dc49f09f086e4ca128cd189852331d3c9b0e14fb
|
67d8fd71838d53962b4e58f73b92cb3b71478663
|
refs/heads/main
| 2023-06-04T20:58:17.486273
| 2021-06-24T16:37:53
| 2021-06-24T16:37:53
| 366,424,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
# Przykład logowania w Pythonie
import logging
log_format="%(asctime)s:%(levelname)s:%(filename)s:%(message)s"
logging.basicConfig(
format=log_format,
handlers= [
logging.StreamHandler(),
logging.FileHandler("app1.log")
],
level=logging.DEBUG,
#filename="app.log",
datefmt="%Y-%m-%dT%H:%M:%S%z",
)
logging.debug("debug message")
logging.info("info message")
logging.warning("warning message")
logging.error("error message")
logging.fatal("fatal message")
try:
y = 1/0
except Exception as exc:
logging.critical(exc, exc_info=True)
|
[
"marian.witkowski@gmail.com"
] |
marian.witkowski@gmail.com
|
99c7a87a5d8431b21888a5a8c5512f6f205f3704
|
fd7598754b87536d3072edee8e969da2f838fa03
|
/chapter3_programming17.py
|
257697649b65df0980c01265e13efa08d4a817ce
|
[] |
no_license
|
dorabelme/Python-Programming-An-Introduction-to-Computer-Science
|
7de035aef216b2437bfa43b7d49b35018e7a2153
|
3c60c9ecfdd69cc9f47b43f4a8e6a13767960301
|
refs/heads/master
| 2020-05-02T23:19:44.573072
| 2019-03-28T21:27:20
| 2019-03-28T21:27:20
| 178,261,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
# Program using Newton's method to approximate square root
import math
def main():
# Title and description of the Program
print("\nSquare Root Approximator\n")
print("This program calculates the approximation of the square root of " +\
"a number using Newton's method.")
# Obtain the number to take the square root of, the number of times to improve
# the 'guess', and the initial guess itself
num = int(input("\nEnter the number whose square root you'd like to calculate: "))
n = int(input("Enter the number of times Newton's method should iterate: "))
guess = float(input("Enter your initial guess of what the square root should be: "))
# Calculate the square root using Newton's method
for i in range(n):
guess = (guess + num / guess) / 2
# Display result for the user
print("\nThe approximate square root of ", num, " is ", guess, ".", sep="")
print("\nThe error in this approximation is ", math.sqrt(num) - guess, ".", sep="")
main()
|
[
"contact.dorabelme@gmail.com"
] |
contact.dorabelme@gmail.com
|
e43289d08b2bee5b02db3fd8e63c0ab77b14b898
|
4f793320d5d2d003b8e32d7d0204bc152f703d31
|
/hypercane/hfilter/containing_pattern.py
|
948a41498d104eff2c5521e4ca164b1533d8e629
|
[
"MIT"
] |
permissive
|
himarshaj/hypercane
|
77ea458e75033a51fa452c557e82eb8ff5e0f887
|
99ac84834e2aad57cdf4687469a63b6305d20e47
|
refs/heads/master
| 2023-03-29T22:10:48.123857
| 2021-04-13T23:17:45
| 2021-04-13T23:17:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
import logging
import concurrent.futures
import re
from ..utils import match_pattern
module_logger = logging.getLogger('hypercane.hfilter.patterns')
def filter_pattern(input_urims, cache_storage, regex_pattern, include):
filtered_urims = []
compiled_pattern = re.compile(regex_pattern)
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_urim = {executor.submit(match_pattern, urim, cache_storage, compiled_pattern): urim for urim in input_urims }
for future in concurrent.futures.as_completed(future_to_urim):
urim = future_to_urim[future]
try:
match = future.result()
if include == True and match is not None:
filtered_urims.append(urim)
elif include == False and match is None:
filtered_urims.append(urim)
except Exception as exc:
module_logger.exception('URI-M [{}] generated an exception: [{}]'.format(urim, exc))
module_logger.critical("failed to perform pattern match for [{}], skipping...".format(urim))
return filtered_urims
|
[
"jones.shawn.m@gmail.com"
] |
jones.shawn.m@gmail.com
|
bac612e62bec9e76a649f5be726257ddc8ce1646
|
1395576291c1e8b34981dbcbfcd0fdda020083b8
|
/dist_cts/dist_fleet/thirdparty/simnet_bow/dataset_generator.py
|
4e3b7a3565639c5154043bbc8179ef3b0a6d635f
|
[] |
no_license
|
gentelyang/scripts
|
a8eb8a3cc5cc5bac753f1bb12033afaf89f03404
|
e3562ab40b574f06bba68df6895a055fa31a085d
|
refs/heads/master
| 2023-06-06T12:38:37.002332
| 2021-06-15T05:09:06
| 2021-06-15T05:09:06
| 262,957,519
| 0
| 4
| null | 2021-01-10T08:28:11
| 2020-05-11T06:28:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# @Time : 2019-09-26 17:30
# @Author : liyang109
from __future__ import print_function
import paddle.fluid.incubate.data_generator as dg
import random
class PairwiseReader(dg.MultiSlotDataGenerator):
    # Pairwise (query, positive title, negative title) sample generator for
    # SimNet-BOW training, built on Paddle's MultiSlot data-generator protocol.
    #
    # Input line format (fields separated by ';'):
    #   <label>;<pos_num> <neg_num>;<query ids>;<pos titles ...>;<neg titles ...>
    # where each title / the query is a space-separated list of integer ids.

    def init_reader(self, max_len, sampling_rate):
        # np.random.seed(1)
        # max_len: maximum sequence length (stored but not used in this file).
        # sampling_rate: probability of emitting each (pos, neg) pair.
        self.max_len = max_len
        self.sampling_rate = sampling_rate
        self.query_buffer = None
        self.pos_title_buffer = None
        self.neg_title_buffer = None

    def infer_reader(self, infer_filelist, batch, buf_size):
        # Build a shuffled, batched reader over the given files for inference.
        def local_iter():
            for fname in infer_filelist:
                with open(fname, "r") as fin:
                    for line in fin:
                        items = line.strip("\t\n").split(";")
                        pos_num, neg_num = [int(i) for i in items[1].split(" ")]
                        query = [int(j) for j in items[2].split(" ")]
                        # Emit every positive/negative title combination.
                        # Positive titles occupy items[3 : 3 + pos_num],
                        # negatives items[3 + pos_num : 3 + pos_num + neg_num].
                        for i in range(pos_num):
                            for j in range(neg_num):
                                pos_title_int = [int(x) for x in items[3 + i].split(" ")]
                                neg_title_int = [int(x) for x in items[3 + pos_num + j].split(" ")]
                                yield query, pos_title_int, neg_title_int
        import paddle
        batch_iter = paddle.batch(
            paddle.reader.shuffle(local_iter, buf_size=buf_size),
            batch_size=batch)
        return batch_iter

    def generate_sample(self, line):
        # Training-side callback: return an iterator of named slots for `line`.
        def get_rand(low=0.0, high=1.0):
            # NOTE: low/high are ignored; this is just random.random() in [0, 1).
            return random.random()

        def pairwise_iterator():
            items = line.strip("\t\n").split(";")
            pos_num, neg_num = [int(i) for i in items[1].split(" ")]
            query = [int(j) for j in items[2].split(" ")]
            for i in range(pos_num):
                for j in range(neg_num):
                    # Randomly subsample pairs to thin the training stream.
                    prob = get_rand()
                    if prob < self.sampling_rate:
                        pos_title_int = [int(x) for x in items[3 + i].split(" ")]
                        neg_title_int = [int(x) for x in items[3 + pos_num + j].split(" ")]
                        yield ("query", query), \
                              ("pos_title", pos_title_int), \
                              ("neg_title", neg_title_int)
        return pairwise_iterator


if __name__ == "__main__":
    # Run as a Paddle dataset generator: read raw lines from stdin and
    # write MultiSlot-formatted samples to stdout.
    pairwise_reader = PairwiseReader()
    pairwise_reader.init_reader(10000, 0.02)
    pairwise_reader.run_from_stdin()
|
[
"liyang109@baidu.com"
] |
liyang109@baidu.com
|
cce4b96a715d43f53534c19733cad518beb38e8e
|
0d5c77661f9d1e6783b1c047d2c9cdd0160699d1
|
/python/paddle/fluid/tests/test_lod_tensor.py
|
f7a9dd4129027417a06a6c25ff9a801fff259c5e
|
[
"Apache-2.0"
] |
permissive
|
xiaoyichao/anyq_paddle
|
ae68fabf1f1b02ffbc287a37eb6c0bcfbf738e7f
|
6f48b8f06f722e3bc5e81f4a439968c0296027fb
|
refs/heads/master
| 2022-10-05T16:52:28.768335
| 2020-03-03T03:28:50
| 2020-03-03T03:28:50
| 244,155,581
| 1
| 0
|
Apache-2.0
| 2022-09-23T22:37:13
| 2020-03-01T13:36:58
|
C++
|
UTF-8
|
Python
| false
| false
| 4,649
|
py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor
import numpy as np
import unittest
class TestLoDTensor(unittest.TestCase):
    """Unit tests for LoDTensor recursive-sequence-length validation and the
    create_lod_tensor / create_random_int_lodtensor helpers."""

    def test_pybind_recursive_seq_lens(self):
        # Exercise the pybind-level setter and validity checks directly.
        tensor = fluid.LoDTensor()
        recursive_seq_lens = []
        tensor.set_recursive_sequence_lengths(recursive_seq_lens)
        # Empty sub-lists and zero-length sequences must be rejected.
        recursive_seq_lens = [[], [1], [3]]
        self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
                          recursive_seq_lens)
        recursive_seq_lens = [[0], [2], [3]]
        self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
                          recursive_seq_lens)
        recursive_seq_lens = [[1, 2, 3]]
        tensor.set_recursive_sequence_lengths(recursive_seq_lens)
        self.assertEqual(tensor.recursive_sequence_lengths(),
                         recursive_seq_lens)
        # Lengths sum to 6, so height 6 is valid while height 9 is not.
        tensor.set(np.random.random([6, 1]), fluid.CPUPlace())
        self.assertTrue(tensor.has_valid_recursive_sequence_lengths())
        tensor.set(np.random.random([9, 1]), fluid.CPUPlace())
        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())

        # Each level's sum should be equal to the number of items in the next level
        # Moreover, last level's sum should be equal to the tensor height
        recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 2]]
        tensor.set_recursive_sequence_lengths(recursive_seq_lens)
        self.assertEqual(tensor.recursive_sequence_lengths(),
                         recursive_seq_lens)
        tensor.set(np.random.random([8, 1]), fluid.CPUPlace())
        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
        recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 1]]
        tensor.set_recursive_sequence_lengths(recursive_seq_lens)
        self.assertTrue(tensor.has_valid_recursive_sequence_lengths())
        tensor.set(np.random.random([9, 1]), fluid.CPUPlace())
        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())

    def test_create_lod_tensor(self):
        # Create LoDTensor from a list
        data = [[1, 2, 3], [3, 4]]
        wrong_recursive_seq_lens = [[2, 2]]
        correct_recursive_seq_lens = [[3, 2]]
        self.assertRaises(AssertionError, create_lod_tensor, data,
                          wrong_recursive_seq_lens, fluid.CPUPlace())
        tensor = create_lod_tensor(data, correct_recursive_seq_lens,
                                   fluid.CPUPlace())
        self.assertEqual(tensor.recursive_sequence_lengths(),
                         correct_recursive_seq_lens)

        # Create LoDTensor from numpy array
        data = np.random.random([10, 1])
        recursive_seq_lens = [[2, 1], [3, 3, 4]]
        tensor = create_lod_tensor(data, recursive_seq_lens, fluid.CPUPlace())
        self.assertEqual(tensor.recursive_sequence_lengths(),
                         recursive_seq_lens)

        # Create LoDTensor from another LoDTensor, they are differnt instances
        # (the source tensor's lengths must remain unchanged).
        new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]]
        new_tensor = create_lod_tensor(tensor, new_recursive_seq_lens,
                                       fluid.CPUPlace())
        self.assertEqual(tensor.recursive_sequence_lengths(),
                         recursive_seq_lens)
        self.assertEqual(new_tensor.recursive_sequence_lengths(),
                         new_recursive_seq_lens)

    def test_create_random_int_lodtensor(self):
        # The shape of a word, commonly used in speech and NLP problem, is [1]
        shape = [1]
        recursive_seq_lens = [[2, 3, 5]]
        dict_size = 10000
        low = 0
        high = dict_size - 1
        tensor = create_random_int_lodtensor(recursive_seq_lens, shape,
                                             fluid.CPUPlace(), low, high)
        self.assertEqual(tensor.recursive_sequence_lengths(),
                         recursive_seq_lens)
        # 2 + 3 + 5 sequence items, each of shape [1] -> overall [10, 1].
        self.assertEqual(tensor.shape(), [10, 1])


if __name__ == '__main__':
    unittest.main()
|
[
"xiaoyichao@haohaozhu.com"
] |
xiaoyichao@haohaozhu.com
|
abcb6982c6f5dd12b149025215077f5e7fde1359
|
1959350ca45f43806e925907c298cfae2f3f355f
|
/test/programytest/parser/pattern/nodes_tests/test_root.py
|
eca847aa29bdf9535f4c7d605bb69293e405357c
|
[
"MIT"
] |
permissive
|
tomliau33/program-y
|
8df17ff4078a0aa292b775ef869930d71843682a
|
30a3715c8501b4c2f1b4de698b679cb4bac168b1
|
refs/heads/master
| 2021-09-06T01:56:08.053131
| 2018-01-31T15:10:10
| 2018-01-31T15:10:10
| 114,656,850
| 0
| 0
| null | 2018-02-01T13:27:30
| 2017-12-18T15:27:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,716
|
py
|
from programytest.parser.pattern.base import PatternTestBaseClass
from programy.parser.exceptions import ParserException
from programy.parser.pattern.nodes.word import PatternWordNode
from programy.parser.pattern.nodes.base import PatternNode
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.pattern.nodes.template import PatternTemplateNode
from programy.parser.pattern.nodes.root import PatternRootNode
from programy.parser.pattern.nodes.topic import PatternTopicNode
from programy.parser.pattern.nodes.that import PatternThatNode
class PatternRootNodeTests(PatternTestBaseClass):
    """Tests for PatternRootNode: identity predicates, child bookkeeping,
    and the can_add() rules restricting what may attach to a root node."""

    def test_init(self):
        # A fresh root node identifies only as root, with no children.
        node = PatternRootNode()
        self.assertIsNotNone(node)
        self.assertTrue(node.is_root())
        self.assertFalse(node.is_priority())
        self.assertFalse(node.is_wildcard())
        self.assertFalse(node.is_zero_or_more())
        self.assertFalse(node.is_one_or_more())
        self.assertFalse(node.is_set())
        self.assertFalse(node.is_bot())
        self.assertFalse(node.is_template())
        self.assertFalse(node.is_that())
        self.assertFalse(node.is_topic())
        self.assertFalse(node.is_wildcard())
        self.assertIsNotNone(node.children)
        self.assertFalse(node.has_children())
        self.assertTrue(node.equivalent(PatternRootNode()))
        self.assertEqual(node.to_string(), "ROOT [P(0)^(0)#(0)C(0)_(0)*(0)To(0)Th(0)Te(0)]")
        # Adding a child bumps the C(...) count in the string form.
        node.add_child(PatternNode())
        self.assertEqual(len(node.children), 1)
        self.assertEqual(node.to_string(), "ROOT [P(0)^(0)#(0)C(1)_(0)*(0)To(0)Th(0)Te(0)]")

    def test_multiple_roots(self):
        # A root cannot be attached beneath another root.
        node1 = PatternRootNode()
        node2 = PatternRootNode()
        with self.assertRaises(ParserException) as raised:
            node1.can_add(node2)
        self.assertTrue(str(raised.exception).startswith("Cannot add root node to existing root node"))

    def test_root_added_to_child(self):
        # A root cannot be attached beneath an ordinary word node either.
        node1 = PatternWordNode("test")
        node2 = PatternRootNode()
        with self.assertRaises(ParserException) as raised:
            node1.can_add(node2)
        self.assertTrue(str(raised.exception).startswith("Cannot add root node to child node"))

    def test_root_to_root(self):
        node1 = PatternRootNode()
        node2 = PatternRootNode()
        with self.assertRaises(ParserException) as raised:
            node1.can_add(node2)
        self.assertEqual(str(raised.exception), "Cannot add root node to existing root node")

    def test_template_to_root(self):
        # Templates may only hang off pattern leaves, never the root.
        node1 = PatternRootNode()
        node2 = PatternTemplateNode(TemplateNode())
        with self.assertRaises(ParserException) as raised:
            node1.can_add(node2)
        self.assertEqual(str(raised.exception), "Cannot add template node to root node")

    def test_topic_to_root(self):
        node1 = PatternRootNode()
        node2 = PatternTopicNode()
        with self.assertRaises(ParserException) as raised:
            node1.can_add(node2)
        self.assertEqual(str(raised.exception), "Cannot add topic node to root node")

    def test_that_to_root(self):
        node1 = PatternRootNode()
        node2 = PatternThatNode()
        with self.assertRaises(ParserException) as raised:
            node1.can_add(node2)
        self.assertEqual(str(raised.exception), "Cannot add that node to root node")

    def test_multiple_templates(self):
        # Two template nodes cannot be chained together.
        node1 = PatternTemplateNode(TemplateNode())
        node2 = PatternTemplateNode(TemplateNode())
        with self.assertRaises(ParserException) as raised:
            node1.can_add(node2)
        self.assertEqual(str(raised.exception), "Cannot add template node to template node")
|
[
"keith@keithsterling.com"
] |
keith@keithsterling.com
|
10adb072e9eabcec4d2ad0f61ee7d6d29b38c97c
|
fdcb2cdee4d5b398eed4eefc830213234e3e83a5
|
/01_MIT_Learning/week_2/lectures_and_examples/3_guess_my_number.py
|
12e1fa8891345cf687bad33b67bd047e96880487
|
[] |
no_license
|
daftstar/learn_python
|
be1bbfd8d7ea6b9be8407a30ca47baa7075c0d4b
|
4e8727154a24c7a1d05361a559a997c8d076480d
|
refs/heads/master
| 2021-01-20T08:53:29.817701
| 2018-01-15T22:21:02
| 2018-01-15T22:21:02
| 90,194,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
# #######################################################
# Create a program that guesses a secret number!
# The program works as follows:
# you (user) thinks of an integer between 0 (inclusive) and 100 (not inclusive).
# The computer makes guesses, and you give it input -
# is its guess too high or too low?
# Using bisection search, the computer will guess the user's
# secret number
# #######################################################
low = 0
high = 100
correct = False

print("Please think of a number between 0 and 100!")

# Bisection search: guess the midpoint of [low, high] and halve the
# remaining interval on each 'h'/'l' answer until the user confirms
# with 'c'.
while not correct:
    guess = (high + low) // 2
    print("Is your secret number %s?" % guess)
    response = input("Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. ")
    if response == 'c':
        # BUG FIX: this was `correct == True` — a no-op comparison whose
        # result was discarded; the loop only exited because of `break`.
        # Assigning makes the loop condition itself terminate the game.
        correct = True
    elif response == 'l':
        # we guessed too low. Set the floor to the current guess (midpoint)
        low = guess
    elif response == 'h':
        # we guessed too high. Set the ceiling to the current guess (midpoint)
        high = guess
    else:
        print("Sorry, I did not understand your input.")

print('Game over. Your secret number was: %s' % guess)
|
[
"nikdaftary@gmail.com"
] |
nikdaftary@gmail.com
|
b2830436f10dd100a76995d67b0f77827b8fa308
|
c19bcbc98555ef06276f9f0dcffc9ac35942a7c4
|
/tests/test_proc_pid_maps.py
|
295254aa85fb9c2904a4fc24b52c440ba608763e
|
[
"MIT"
] |
permissive
|
kellyjonbrazil/jc
|
4e81a5421cd20be5965baf375f4a5671c2ef0410
|
4cd721be8595db52b620cc26cd455d95bf56b85b
|
refs/heads/master
| 2023-08-30T09:53:18.284296
| 2023-07-30T17:08:39
| 2023-07-30T17:08:39
| 215,404,927
| 6,278
| 185
|
MIT
| 2023-09-08T14:52:22
| 2019-10-15T22:04:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
import os
import unittest
import json
from typing import Dict
import jc.parsers.proc_pid_maps
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
    # Fixture text and expected JSON output, keyed by fixture name.
    f_in: Dict = {}
    f_json: Dict = {}

    @classmethod
    def setUpClass(cls):
        # Load each raw fixture and its expected JSON once for all tests.
        fixtures = {
            'proc_pid_maps': (
                'fixtures/linux-proc/pid_maps',
                'fixtures/linux-proc/pid_maps.json')
        }

        for file, filepaths in fixtures.items():
            with open(os.path.join(THIS_DIR, filepaths[0]), 'r', encoding='utf-8') as a, \
                 open(os.path.join(THIS_DIR, filepaths[1]), 'r', encoding='utf-8') as b:
                cls.f_in[file] = a.read()
                cls.f_json[file] = json.loads(b.read())

    def test_proc_pid_maps_nodata(self):
        """
        Test 'proc_pid_maps' with no data
        """
        # Empty input must parse to an empty list, not raise.
        self.assertEqual(jc.parsers.proc_pid_maps.parse('', quiet=True), [])

    def test_proc_pid_maps(self):
        """
        Test '/proc/<pid>/maps'
        """
        self.assertEqual(jc.parsers.proc_pid_maps.parse(self.f_in['proc_pid_maps'], quiet=True),
                         self.f_json['proc_pid_maps'])


if __name__ == '__main__':
    unittest.main()
|
[
"kellyjonbrazil@gmail.com"
] |
kellyjonbrazil@gmail.com
|
67d7445176b628d391bca470696e3b4247bc6228
|
aabfe137db175f0e070bd9342e6346ae65e2be32
|
/RecoEcal/EgammaClusterProducers/python/islandClusteringSequence_cff.py
|
57c2add9f6396084fa60b771d75bfcb922cb8181
|
[] |
no_license
|
matteosan1/cmssw
|
e67b77be5d03e826afd36a9ec5a6dc1b3ee57deb
|
74f7c9a4cf24913e2a9f4e6805bb2e8e25ab7d52
|
refs/heads/CMSSW_7_0_X
| 2021-01-15T18:35:33.405650
| 2013-07-30T14:59:30
| 2013-07-30T14:59:30
| 11,789,054
| 1
| 1
| null | 2016-04-03T13:48:46
| 2013-07-31T11:06:26
|
C++
|
UTF-8
|
Python
| false
| false
| 797
|
py
|
import FWCore.ParameterSet.Config as cms
#
# $Id: islandClusteringSequence.cff,v 1.7 2007/03/13 17:21:44 futyand Exp $
#
#------------------
#Island clustering:
#------------------
# Island BasicCluster producer
from RecoEcal.EgammaClusterProducers.islandBasicClusters_cfi import *
# Island SuperCluster producer
from RecoEcal.EgammaClusterProducers.islandSuperClusters_cfi import *
# Energy scale correction for Island SuperClusters
from RecoEcal.EgammaClusterProducers.correctedIslandBarrelSuperClusters_cfi import *
from RecoEcal.EgammaClusterProducers.correctedIslandEndcapSuperClusters_cfi import *
# create sequence for island clustering
# Full island clustering chain, in order: basic clusters, supercluster
# building, then the barrel and endcap energy-scale corrections.
islandClusteringSequence = cms.Sequence(islandBasicClusters*islandSuperClusters*correctedIslandBarrelSuperClusters*correctedIslandEndcapSuperClusters)
|
[
"sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch"
] |
sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch
|
c27074644766ba4228e511a9a1c884d8ec0e431b
|
ea262de505a1dd5ae1c7b546b85184309c3fdd35
|
/src/models/modules/scales.py
|
78234592ac2641e1791aa6240573f86204bde16e
|
[
"MIT"
] |
permissive
|
Runki2018/CvPytorch
|
306ff578c5f8d3d196d0834e5cad5adba7a89676
|
1e1c468e5971c1c2b037334f7911ae0a5087050f
|
refs/heads/master
| 2023-08-25T09:48:48.764117
| 2021-10-15T05:11:21
| 2021-10-15T05:11:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# !/usr/bin/env python
# -- coding: utf-8 --
# @Time : 2021/3/29 9:22
# @Author : liumin
# @File : scales.py
import torch
import torch.nn as nn
class Scale(nn.Module):
    """A learnable scalar multiplier.

    Wraps a single float in an ``nn.Parameter`` so the scale factor is
    optimised together with the rest of the model.  The forward pass
    multiplies the input element-wise (broadcasting over any shape).
    """
    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        # Register the scalar as a trainable parameter.
        initial = torch.tensor(scale, dtype=torch.float)
        self.scale = nn.Parameter(initial)

    def forward(self, x):
        return self.scale * x
|
[
"569793357@qq.com"
] |
569793357@qq.com
|
3835bd462d27894a5442d6a412b2dd67de3d593d
|
675cdd4d9d2d5b6f8e1383d1e60c9f758322981f
|
/supervised_learning/0x03-optimization/2-shuffle_data.py
|
1fc0ce20d6f011ea71c6f64624e3d65b15d7e653
|
[] |
no_license
|
AndresSern/holbertonschool-machine_learning-1
|
5c4a8db28438d818b6b37725ff95681c4757fd9f
|
7dafc37d306fcf2ea0f5af5bd97dfd78d388100c
|
refs/heads/main
| 2023-07-11T04:47:01.565852
| 2021-08-03T04:22:38
| 2021-08-03T04:22:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
#!/usr/bin/env python3
"""
Shuffles the data points in two matrices the same way
"""
import numpy as np
def shuffle_data(X, Y):
    """Shuffle the rows of ``X`` and ``Y`` with the same random order.

    A single random permutation of the row indices is drawn and applied
    to both arrays, so corresponding rows stay paired.
    """
    indices = np.arange(X.shape[0])
    order = np.random.permutation(indices)
    return X[order], Y[order]
|
[
"bouzouitina.hamdi@gmail.com"
] |
bouzouitina.hamdi@gmail.com
|
0b90df7dbd721ecc641998896bff6d7087d4c28c
|
ac0a583e4765f2b5b97e898f30d6df0fc71ea8f6
|
/pyros_msgs/opt_as_nested/__init__.py
|
4beab2c01e9a05fe2344eb3a0f0e64941a108eae
|
[
"MIT"
] |
permissive
|
pyros-dev/pyros-msgs
|
5ce9efaa246ffa94396552fd6034c0eeacddeb76
|
28d9d6aa3cfbb42d154360f16eea1900be518f74
|
refs/heads/master
| 2022-07-06T15:53:16.764600
| 2018-02-17T15:03:36
| 2018-02-17T15:03:36
| 67,676,303
| 1
| 3
|
MIT
| 2022-06-21T21:19:34
| 2016-09-08T06:45:37
|
Python
|
UTF-8
|
Python
| false
| false
| 347
|
py
|
"""
pyros_msgs.opt_as_nested is a module that declares optional fields as a specific message type.
This is useful if you want to express an optional field in a message without any ambiguity.
"""
# FIX: the module docstring originally appeared *after* the imports, so it was
# an ordinary string statement and ``__doc__`` was never set.  It must be the
# first statement (the ``__future__`` imports are still legal after it).
from __future__ import absolute_import
from __future__ import print_function

from .opt_as_nested import duck_punch

# Explicit public API of this package.
__all__ = [
    'duck_punch',
]
|
[
"asmodehn@gmail.com"
] |
asmodehn@gmail.com
|
d0dade868cb00ef5e103594ae46c0d072fcbd126
|
e94d22cdb7c73b8a55262d5a6c2c7b0d75f3b63e
|
/snussum/analytics/management/commands/createanalytics.py
|
6aae31665b387f93092ff96a50482be7c680c3e8
|
[] |
no_license
|
dobestan/snussum
|
594d1169cc6a0a799c8104135dc028d65a3967d0
|
4f1f092a4c5cebd913a64c5a0d7f12b3e061552f
|
refs/heads/master
| 2021-01-18T18:25:00.237448
| 2015-06-01T06:03:29
| 2015-06-01T06:03:29
| 34,576,643
| 8
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
from django.core.management.base import BaseCommand, CommandError
from analytics.models.demographic import Demographic
class Command(BaseCommand):
    # Django management command: `python manage.py createanalytics`.
    help = "Create Analytics Data"

    def add_arguments(self, parser):
        # No command-line arguments are accepted.
        pass

    def handle(self, *args, **options):
        # Delegate the aggregation work to the Demographic model manager.
        demograhic = Demographic.objects.create_analytics()
        self.stdout.write('Successfully created demograhic analytics data ...')
|
[
"dobestan@gmail.com"
] |
dobestan@gmail.com
|
d457f176565b80c978bfb00733dec4d02f4861d8
|
256644d14bd15f8e1a3e92c95b1655fd36681399
|
/pure_python/ga+ppm/main/utilities.py
|
a07447f3183438919021284b04c4c34a872f020c
|
[] |
no_license
|
mfbx9da4/neuron-astrocyte-networks
|
9d1c0ff45951e45ce1f8297ec62b69ee4159305a
|
bcf933491bdb70031f8d9c859fc17e0622e5b126
|
refs/heads/master
| 2021-01-01T10:13:59.099090
| 2018-06-03T12:32:13
| 2018-06-03T12:32:13
| 12,457,305
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,262
|
py
|
import random
import math
from pylab import zeros, where, array, empty_like
def crossover(m1, m2, NN):
    # Maybe could be sped up using flatten/reshape output?
    # Single-point crossover over two parents' weight matrices.
    # m1/m2 are [input-weights, output-weights] matrix pairs; NN (the network
    # class) is instantiated only to obtain the weight-array shapes.
    # NOTE: Python 2 module (xrange).
    net = NN()
    # Crossover point drawn over the total number of weights.
    r = random.randint(0, net.wi.size + net.wo.size)
    output1 = [empty_like(net.wi), empty_like(net.wo)]
    output2 = [empty_like(net.wi), empty_like(net.wo)]
    for i in xrange(len(m1)):
        for j in xrange(len(m1[i])):
            for k in xrange(len(m1[i][j])):
                # Before the crossover point (r still >= 0) each child copies
                # its own parent; after it the parents are swapped.
                if r >= 0:
                    output1[i][j][k][:] = m1[i][j][k]
                    output2[i][j][k][:] = m2[i][j][k]
                elif r < 0:
                    output1[i][j][k][:] = m2[i][j][k]
                    output2[i][j][k][:] = m1[i][j][k]
                r -= 1
    return output1, output2
def mutate(m, mutation_rate):
    # Variation: could include a constant to control
    # how much the weight is mutated by
    # Replace each weight with a fresh uniform value in [-2, 2] with
    # probability `mutation_rate`, mutating `m` in place.
    for i in xrange(len(m)):
        for j in xrange(len(m[i])):
            for k in xrange(len(m[i][j])):
                if random.random() < mutation_rate:
                    m[i][j][k] = random.uniform(-2.0,2.0)
def percentAcc(all_aos, targets):
    """Fraction of samples whose actual outputs exactly match their targets.

    ``all_aos[i]`` is compared element-wise against ``targets[i]``; a
    sample counts as correct only when every element matches.
    """
    hits = 0
    for idx, target in enumerate(targets):
        matches = where(target == array(all_aos[idx]), True, False)
        if matches.all():
            hits += 1
    return float(hits) / len(all_aos)
def sigmoid(x):
    """Activation function: despite the name, this is tanh(x), range (-1, 1)."""
    return math.tanh(x)
def randomizeMatrix(matrix, a, b):
    """Fill ``matrix`` (a list of equal-length rows) in place with uniform
    random values drawn from [a, b], row by row."""
    rows = len(matrix)
    cols = len(matrix[0])
    for r in range(rows):
        for c in range(cols):
            matrix[r][c] = random.uniform(a, b)
def roulette(fitnessScores):
    """Fitness-proportionate (roulette-wheel) selection.

    Draws one random point in [0, 1) and returns the index of the first
    individual whose cumulative fitness exceeds it.  Falls through to
    ``None`` when the scores sum to less than the drawn point (e.g. an
    all-zero fitness list).
    """
    spin = random.random()
    running = 0.0
    for index, score in enumerate(fitnessScores):
        running += score
        if running > spin:
            return index
def calcFit(numbers):
    """each fitness is a fraction of the total error"""
    # POTENTIAL IMPROVEMENTS:
    # maybe give the better scores much higher weighting?
    # maybe use the range to calculate the fitness?
    # maybe do ind / range of accuracies?
    # NOTE: Python 2 module (print statement below).  With integer inputs,
    # `numbers[i] / total` floor-divides under Python 2 — presumably the
    # inputs are floats; verify against callers.
    total, fitnesses = sum(numbers), []
    for i in range(len(numbers)):
        try:
            fitness = numbers[i] / total
        except ZeroDivisionError:
            # All-zero scores: every fitness degrades to 0 rather than raising.
            print 'individual outputted zero correct responses'
            fitness = 0
        fitnesses.append(fitness)
    return fitnesses
|
[
"dalberto.adler@gmail.com"
] |
dalberto.adler@gmail.com
|
a775e4b0f818ac2bdd927c36c645d58aea22d114
|
389d95ee1f8d4ba992114e36c5fc427d02ba2a6c
|
/flexmessage_project/settings.py
|
ef5e0ea9c78442b5078ec03584e4b733d9fc65ac
|
[
"MIT"
] |
permissive
|
adepeter/sleekmessage
|
d7a6b4279f6a60659cf8a98897136ca22c1b830a
|
64621842cb9b0d707523e87f8bd6549d4e2d8433
|
refs/heads/master
| 2022-11-16T23:58:18.477628
| 2020-07-15T15:50:16
| 2020-07-15T15:50:16
| 265,276,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,406
|
py
|
"""
Django settings for flexmessage_project project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SETTINGS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@k0lop(x*yo$2jm03k)@2c3$ch0@4l=)0)0ab+(10)5sn#llx@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'messages.apps.MessagesConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'flexmessage_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'string_if_invalid': '%s is not a valid template variable',
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'flexmessage_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
|
[
"adepeter26@gmail.com"
] |
adepeter26@gmail.com
|
f53f414f7ee5fbc8b13847a32418970ec312c561
|
4af454bced0f99e4ed8269d71e97284f0ef13afb
|
/loginserver/keys/rsa.py
|
02c4c9811d4e423b0a531fa48b9e687d9ba12cbd
|
[] |
no_license
|
L2jBrasil/L2py
|
c46db78238b4caf272a2399f4e4910fc256b3cca
|
d1c2e7bddb54d222f9a3d04262c09ad70329a226
|
refs/heads/master
| 2022-11-19T01:39:02.019777
| 2020-07-24T20:07:15
| 2020-07-24T20:07:15
| 292,115,581
| 1
| 1
| null | 2020-09-01T21:53:54
| 2020-09-01T21:53:54
| null |
UTF-8
|
Python
| false
| false
| 2,315
|
py
|
from Cryptodome.PublicKey import RSA
from M2Crypto import BIO
from M2Crypto import RSA as M2RSA
from common.helpers.bytearray import ByteArray
class L2RsaKey(RSA.RsaKey):
    """RSA key with Lineage 2 login-protocol modulus (un)scrambling.

    The login server sends clients a 1024-bit RSA modulus obfuscated with
    four byte-swap/XOR steps; scramble_mod/unscramble_mod implement that
    transform and its exact inverse.  The order of steps is significant.
    """

    def scramble_mod(self) -> bytes:
        # Obfuscate the 128-byte modulus; the four steps below must run in
        # this order (unscramble_mod applies their inverses in reverse).
        n = ByteArray(self.n_bytes)
        # step 1: 0x4d - 0x50 <-> 0x00 - 0x04
        for i in range(4):
            n[i], n[0x4d + i] = n[0x4d + i], n[i]
        # step 2 : xor first 0x40 bytes with last 0x40 bytes
        for i in range(0x40):
            n[i] = n[i] ^ n[0x40 + i]
        # step 3 : xor bytes 0x0d-0x10 with bytes 0x34-0x38
        for i in range(4):
            n[0x0d + i] = n[0x0d + i] ^ n[0x34 + i]
        # step 4 : xor last 0x40 bytes with first 0x40 bytes
        for i in range(0x40):
            n[0x40 + i] = n[0x40 + i] ^ n[i]
        return bytes(n)

    @classmethod
    def unscramble_mod(cls, n: bytes) -> int:
        # Invert scramble_mod: undo step 4, 3, 2, then 1, and decode the
        # recovered buffer as a big-endian integer.
        n = ByteArray(n)
        for i in range(0x40):
            n[0x40 + i] = n[0x40 + i] ^ n[i]
        for i in range(4):
            n[0x0d + i] = n[0x0d + i] ^ n[0x34 + i]
        for i in range(0x40):
            n[i] = n[i] ^ n[0x40 + i]
        for i in range(4):
            temp = n[0x00 + i]
            n[0x00 + i] = n[0x4d + i]
            n[0x4d + i] = temp
        return int.from_bytes(bytes(n), "big")

    @property
    def n_bytes(self):
        # Modulus as a fixed-width 128-byte big-endian buffer (1024-bit key).
        return self.n.to_bytes(128, "big")

    @classmethod
    def from_scrambled(cls, data) -> "L2RsaKey":
        # Build a public key (e = 65537) from a scrambled modulus buffer.
        modulus = cls.unscramble_mod(data)
        key = RSA.construct((modulus, 65537))
        key.__class__ = L2RsaKey
        return key

    @classmethod
    def generate(cls, bits=1024, randfunc=None, e=65537) -> "L2RsaKey":
        # Generate a fresh RSA key pair and rebrand it as L2RsaKey.
        key = RSA.generate(bits, randfunc, e)
        key.__class__ = cls
        return key

    def __repr__(self):
        return "L2" + super().__repr__()

    @property
    def m2crypto_key(self):
        # Re-export through a PEM memory buffer so M2Crypto can load the
        # same key (private if present, otherwise public).
        key_bio = BIO.MemoryBuffer(self.export_key())
        if self.has_private():
            return M2RSA.load_key_bio(key_bio)
        else:
            return M2RSA.load_pub_key_bio(key_bio)

    @property
    def scrambled_key(self):
        # M2Crypto key whose modulus is the scrambled form — presumably the
        # form transmitted to game clients; confirm against the protocol code.
        scrambled_key = RSA.construct((int.from_bytes(self.scramble_mod(), "big"), self.e))
        key_bio = BIO.MemoryBuffer(scrambled_key.export_key())
        return M2RSA.load_key_bio(key_bio)
|
[
"yurzs@icloud.com"
] |
yurzs@icloud.com
|
3ed03f450ecd93b825fa1583fb79154b40c83ff4
|
70d4ef0863906b3ca64f986075cd35b8412b871e
|
/packages/blueking/component/apis/sops.py
|
9446f23f6bd7a4629b842b45ea8ea69b7a4e32f0
|
[
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
selinagyan/bk-sops
|
72db0ac33d9c307f51769e4baa181ceb8e1b279e
|
39e63e66416f688e6a3641ea8e975d414ece6b04
|
refs/heads/master
| 2020-05-07T16:44:33.312442
| 2019-04-11T02:09:25
| 2019-04-11T02:09:25
| 180,696,241
| 0
| 0
| null | 2019-04-11T02:07:11
| 2019-04-11T02:07:10
| null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from ..base import ComponentAPI
class CollectionsSOPS(object):
    """Collections of SOPS APIS"""

    def __init__(self, client):
        self.client = client
        # Each attribute wraps one SOPS component API endpoint.  The
        # `description` values are user-facing strings and are kept as-is;
        # English glosses are given in the comments below.
        # create task (from a template)
        self.create_task = ComponentAPI(
            client=self.client, method='POST',
            path='/api/c/compapi{bk_api_ver}/sops/create_task/',
            description=u'创建任务'
        )
        # query task or node status
        self.get_task_status = ComponentAPI(
            client=self.client, method='GET',
            path='/api/c/compapi{bk_api_ver}/sops/get_task_status/',
            description=u'查询任务或节点状态'
        )
        # query a single template's details
        self.get_template_info = ComponentAPI(
            client=self.client, method='GET',
            path='/api/c/compapi{bk_api_ver}/sops/get_template_info/',
            description=u'查询单个模板详情'
        )
        # query the template list
        self.get_template_list = ComponentAPI(
            client=self.client, method='GET',
            path='/api/c/compapi{bk_api_ver}/sops/get_template_list/',
            description=u'查询模板列表'
        )
        # operate on a task (e.g. pause/resume/revoke)
        self.operate_task = ComponentAPI(
            client=self.client, method='POST',
            path='/api/c/compapi{bk_api_ver}/sops/operate_task/',
            description=u'操作任务'
        )
        # query task counts grouped by category
        self.query_task_count = ComponentAPI(
            client=self.client, method='POST',
            path='/api/c/compapi{bk_api_ver}/sops/query_task_count/',
            description=u'查询任务分类统计'
        )
        # start a task
        self.start_task = ComponentAPI(
            client=self.client, method='POST',
            path='/api/c/compapi{bk_api_ver}/sops/start_task/',
            description=u'开始任务'
        )
|
[
"pagezhou@tencent.com"
] |
pagezhou@tencent.com
|
602d5661471469217459de0236ec43a9a1f0e8de
|
81344c55ed60bf12818d1a0ec246f3c24c79cb4c
|
/力扣习题/8字符串转整数/atoi.py
|
73fb9e626ac6852e2287bfbded03dddd0161775c
|
[
"MIT"
] |
permissive
|
lollipopnougat/AlgorithmLearning
|
7d5c4a37bd5c814c5caea6963e81fbe0cb44b7b7
|
cb13caa0159f0179d3c1bacfb1801d156c7d1344
|
refs/heads/master
| 2023-05-11T04:47:09.758889
| 2023-05-07T06:55:48
| 2023-05-07T06:55:48
| 194,078,151
| 7
| 2
|
MIT
| 2023-03-25T01:23:44
| 2019-06-27T10:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 144
|
py
|
class Solution:
    def myAtoi(self, str: str) -> int:
        """Convert a string to a 32-bit signed integer (LeetCode 8, atoi).

        Leading whitespace is skipped, an optional sign and the following
        digits are parsed, and the result is clamped to
        [-2**31, 2**31 - 1].  Returns 0 when no leading integer exists.
        """
        # BUG FIX: `re` was used here without ever being imported in this
        # module; import it locally so the method is self-contained.
        import re
        # findall yields at most one anchored match; int(*[]) degrades to
        # int() == 0 when nothing matches.
        return max(min(int(*re.findall(r'^[\+\-]?\d+', str.lstrip())), 2**31 - 1), -2**31)
|
[
"ab2defg145@gmail.com"
] |
ab2defg145@gmail.com
|
889a29dd98a7786a22e8d2fbde68e5a1ce2d4137
|
a6ed0c42659f54f88024a9171c353e7cbe51328e
|
/Python/flask_MySQL/emailval/server.py
|
1d9d6e7aa490bb6f47f766d7b83b106c0677f317
|
[] |
no_license
|
tomama1/Practice
|
c4a44a044fe67b3f4eb34dca0a0dd9ea38f4c766
|
8adecd0ee985db06497578a11d067ac16502da7b
|
refs/heads/master
| 2021-09-05T04:32:42.020673
| 2018-01-24T05:51:16
| 2018-01-24T05:51:16
| 104,159,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
from flask import Flask, request, redirect, render_template, flash
from mysqlconnection import MySQLConnector
import re

app = Flask(__name__)
app.secret_key = ("CodingDojo")
mysql = MySQLConnector(app,'listserv')


@app.route('/')
def index():
    # Landing page with the email-entry form.
    return render_template('index.html')


@app.route('/process', methods=['POST'])
def create():
    # grabbing user input from form
    email = request.form['emailcheck']
    # query for checking the database
    # NOTE: parameterized (:echeck) — user input is never interpolated.
    query = "SELECT email from listserv WHERE email = :echeck"
    data = {
        'echeck':email
    }
    emailcheck = mysql.query_db(query, data)
    # if email exists in database
    if emailcheck:
        flash("Valid Email")
        return redirect('/success')
    else:
        # regex check for valid email string
        if re.match(r"[^@]+@[^@]+\.[^@]+",email):
            # insert query into database
            query = "INSERT INTO listserv (email, created_at, updated_at) VALUES (:emailtobeinserted, NOW(), NOW())"
            # mysql.query_db("INSERT INTO listserv(email, created_at, updated_at) VALUES (:emailtobeinserted, NOW(), NOW())",{'emailtobeinserted':email})"
            data = {
                'emailtobeinserted': request.form['emailcheck']
            }
            mysql.query_db(query, data)
            flash("Email has been Inserted!")
        else:
            # not a valid email string ( no @ sign)
            flash("Not a valid email")
    # Reached only when the email was newly inserted or rejected as invalid.
    return redirect('/')


@app.route('/success')
def success():
    # display all rows in the listserv table
    emails = mysql.query_db("SELECT * FROM listserv")
    return render_template('success.html', all_emails = emails)


@app.route('/goback')
def goback():
    return redirect('/')


app.run(debug=True)
|
[
"matthewtoma123@gmail.com"
] |
matthewtoma123@gmail.com
|
ea0207d1f4614c56c66b011cec3e7d9ecefe2d10
|
58f6184fbfe4782bccf7803fbb978b5a5f93bb50
|
/src/scs_analysis/cmd/cmd_sample_tally.py
|
a7dff9ca4f518978eee941ce646bb2796fd1ea4b
|
[
"MIT"
] |
permissive
|
seoss/scs_analysis
|
d41db35a1c7d97d75776a797df099749dbced824
|
c203093fd6728eafe576a1798bd9040ca18c73f8
|
refs/heads/master
| 2020-04-04T20:14:48.026665
| 2018-11-05T12:51:23
| 2018-11-05T12:51:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
"""
Created on 22 Aug 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdSampleTally(object):
"""unix command line handler"""
def __init__(self):
"""
Constructor
"""
self.__parser = optparse.OptionParser(usage="%prog [-t TALLY] [-p PRECISION] [-v] [PATH]", version="%prog 1.0")
# optional...
self.__parser.add_option("--tally", "-t", type="int", nargs=1, action="store", dest="tally",
help="generate a rolling aggregate for TALLY number of data points (default all)")
self.__parser.add_option("--prec", "-p", type="int", nargs=1, action="store", default=None, dest="precision",
help="precision (default 0 decimal places)")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
def is_valid(self):
if self.tally is not None and self.tally < 1:
return False
return True
# ----------------------------------------------------------------------------------------------------------------
@property
def tally(self):
return self.__opts.tally
@property
def precision(self):
return self.__opts.precision
@property
def verbose(self):
return self.__opts.verbose
@property
def path(self):
return self.__args[0] if len(self.__args) > 0 else None
@property
def args(self):
return self.__args
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdSampleTally:{tally:%s, tally:%s, verbose:%s, path:%s, args:%s}" % \
(self.tally, self.precision, self.verbose, self.path, self.args)
|
[
"bruno.beloff@southcoastscience.com"
] |
bruno.beloff@southcoastscience.com
|
d470117b87c20044939b34206f9e9d67c89cc690
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=2.0_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=76/params.py
|
29fdaca73a9d96a41ddea9479708049d1a27dfc2
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '2.010214',
'max_util': '2.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 76,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
629585562843f773778c17fec9276488963e4e18
|
515e7d6e5756e3922df0b874b241c8b0744b4570
|
/packs/python_packs.py
|
1d34ff441b4097d542aca3c6d08a9dd2b0ef7e4d
|
[] |
no_license
|
mjgpy3/udm_script
|
d77f4904df62e33c72f690cdf4049a1118be105b
|
d04802d21797fa6ed03cfc35c955bcc6d028f1c2
|
refs/heads/master
| 2021-01-23T11:40:25.415072
| 2013-07-30T16:53:31
| 2013-07-30T16:53:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
#!/usr/bin/env python
# Created by Michael Gilliland
# Date: Fri Jan 25 16:47:44 EST 2013
#
#
from package_container import PackageContainer
packages = {'Pygame': 'python-pygame',
'Sympy': 'python-sympy',
'Numpy': 'python-numpy',
'Scipy': 'python-scipy',
'Virtualenv': 'python-virtualenv',
'PIP': 'python-pip',
'Django': 'python-django',
'Pychecker': 'pychecker',
'IPython': 'ipython',
'IDLE': 'idle',
'Epydoc': 'python-epydoc',
'Sphinx': 'python-sphinx',
'SQLAlchemy': 'python-sqlalchemy',
'Requests': 'python-requests',
'Flask': 'python-flask',
'Python Dev': 'python-dev',
'Beautiful Soup': 'python-beautifulsoup',
'Jython': 'jython',
'Cython': 'cython',
'PyPy': 'pypy',
'Python Openoffice': 'python-openoffice',
'CX Freeze': 'cx-freeze'}
special_package_instructions = {'sh': ['pip install sh'],
'Selenium': ['pip install selenium']}
container = PackageContainer("Python", 'python', packages, special_package_instructions)
|
[
"mjg.py3@gmail.com"
] |
mjg.py3@gmail.com
|
306169c51708eb9ebd6f3a4715d52aaf5b2f46c0
|
09c87fe780df6d1f9eb33799ed516a0bbd7ab1e3
|
/Research/async play/wxasync1.py
|
938fa1c468981bdc521f7644434f52312729c2b3
|
[] |
no_license
|
abulka/pynsource
|
8ad412b85dc1acaeb83d7d34af8cc033c6baba91
|
979436525c57fdaeaa832e960985e0406e123587
|
refs/heads/master
| 2023-04-13T12:58:02.911318
| 2023-04-11T09:56:32
| 2023-04-11T09:56:32
| 32,249,425
| 271
| 46
| null | 2022-10-10T04:36:57
| 2015-03-15T07:21:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
import wx
from wxasync import AsyncBind, WxAsyncApp, StartCoroutine
import asyncio
from asyncio.events import get_event_loop
import time
class TestFrame(wx.Frame):
def __init__(self, parent=None):
super(TestFrame, self).__init__(parent)
vbox = wx.BoxSizer(wx.VERTICAL)
button1 = wx.Button(self, label="Submit")
self.edit = wx.StaticText(self, style=wx.ALIGN_CENTRE_HORIZONTAL|wx.ST_NO_AUTORESIZE)
self.edit_timer = wx.StaticText(self, style=wx.ALIGN_CENTRE_HORIZONTAL|wx.ST_NO_AUTORESIZE)
vbox.Add(button1, 2, wx.EXPAND|wx.ALL)
vbox.AddStretchSpacer(1)
vbox.Add(self.edit, 1, wx.EXPAND|wx.ALL)
vbox.Add(self.edit_timer, 1, wx.EXPAND|wx.ALL)
self.SetSizer(vbox)
self.Layout()
AsyncBind(wx.EVT_BUTTON, self.async_callback, button1)
StartCoroutine(self.update_clock, self)
async def async_callback(self, event):
self.edit.SetLabel("Button clicked")
await asyncio.sleep(1)
self.edit.SetLabel("Working")
await asyncio.sleep(1)
self.edit.SetLabel("Completed")
async def update_clock(self):
while True:
self.edit_timer.SetLabel(time.strftime('%H:%M:%S'))
await asyncio.sleep(0.5)
app = WxAsyncApp()
frame = TestFrame()
frame.Show()
app.SetTopWindow(frame)
loop = get_event_loop()
loop.run_until_complete(app.MainLoop())
|
[
"abulka@gmail.com"
] |
abulka@gmail.com
|
24f2de63f6fe12b2e69518221df7bc7cef282fb6
|
078e35f6b03e4e7a9616f2335a740109d8292176
|
/examples/adwords/v201609/advanced_operations/add_ad_customizer.py
|
f3c8da4ffc6854a0fdba2a28bd13a0f160fd0adb
|
[
"Apache-2.0"
] |
permissive
|
parander/googleads-python-lib
|
5f5b09e8adf7d733bddca314f6aa624b60c5abde
|
bc1bdff2d58fdc7cf4f09b879c68757c5b9b3abc
|
refs/heads/master
| 2021-01-12T16:36:44.861582
| 2017-02-27T04:27:18
| 2017-02-27T04:27:18
| 71,418,777
| 0
| 0
| null | 2016-10-20T02:38:33
| 2016-10-20T02:38:32
| null |
UTF-8
|
Python
| false
| false
| 7,140
|
py
|
#!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad customizer feed.
Associates the feed with customer and adds an ad that uses the feed to populate
dynamic data.
"""
from datetime import datetime
from uuid import uuid4
# Import appropriate classes from the client library.
from googleads import adwords
from googleads import errors
FEED_NAME = 'Interplanetary Feed Name %s' % uuid4()
ADGROUPS = [
'INSERT_ADGROUP_ID_1_HERE',
'INSERT_ADGROUP_ID_2_HERE'
]
def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):
"""Creates ExpandedTextAds that use ad customizations for specified AdGroups.
Args:
client: an AdWordsClient instance.
adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.
feed_name: the name of the feed used to apply customizations.
Raises:
GoogleAdsError: if no ExpandedTextAds were added.
"""
# Get the AdGroupAdService
adgroup_ad_service = client.GetService('AdGroupAdService')
expanded_text_ad = {
'xsi_type': 'ExpandedTextAd',
'headlinePart1': 'Luxury Cruise to {=%s.Name}' % feed_name,
'headlinePart2': 'Only {=%s.Price}' % feed_name,
'description': 'Offer ends in {=countdown(%s.Date)}!' % feed_name,
'finalUrls': ['http://www.example.com'],
}
# We add the same ad to both ad groups. When they serve, they will show
# different values, since they match different feed items.
operations = [{
'operator': 'ADD',
'operand': {
'adGroupId': adgroup,
'ad': expanded_text_ad
}
} for adgroup in adgroup_ids]
response = adgroup_ad_service.mutate(operations)
if response and 'value' in response:
for ad in response['value']:
print ('Created an ad with ID \'%s\', type \'%s\', and status \'%s\'.'
% (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
raise errors.GoogleAdsError('No ads were added.')
def CreateCustomizerFeed(client, feed_name):
"""Creates a new AdCustomizerFeed.
Args:
client: an AdWordsClient instance.
feed_name: the name for the new AdCustomizerFeed.
Returns:
The new AdCustomizerFeed.
"""
# Get the AdCustomizerFeedService
ad_customizer_feed_service = client.GetService('AdCustomizerFeedService')
customizer_feed = {
'feedName': feed_name,
'feedAttributes': [
{'type': 'STRING', 'name': 'Name'},
{'type': 'STRING', 'name': 'Price'},
{'type': 'DATE_TIME', 'name': 'Date'}
]
}
feed_service_operation = {
'operator': 'ADD',
'operand': customizer_feed
}
response = ad_customizer_feed_service.mutate([feed_service_operation])
if response and 'value' in response:
feed = response['value'][0]
feed_data = {
'feedId': feed['feedId'],
'nameId': feed['feedAttributes'][0]['id'],
'priceId': feed['feedAttributes'][1]['id'],
'dateId': feed['feedAttributes'][2]['id']
}
print ('Feed with name \'%s\' and ID %s was added with:\n'
'\tName attribute ID %s and price attribute ID %s and date attribute'
'ID %s') % (feed['feedName'], feed['feedId'], feed_data['nameId'],
feed_data['priceId'], feed_data['dateId'])
return feed
else:
raise errors.GoogleAdsError('No feeds were added')
def CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed):
"""Creates FeedItems for the specified AdGroups.
These FeedItems contain values to use in ad customizations for the AdGroups.
Args:
client: an AdWordsClient instance.
adgroup_ids: a list containing two AdGroup Ids.
ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
with.
Raises:
GoogleAdsError: if no FeedItems were added.
"""
# Get the FeedItemService
feed_item_service = client.GetService('FeedItemService')
now = datetime.now()
mars_date = datetime(now.year, now.month, 1, 0, 0)
venus_date = datetime(now.year, now.month, 15, 0, 0)
time_format = '%Y%m%d %H%M%S'
feed_item_operations = [
CreateFeedItemAddOperation(
'Mars', '$1234.56', mars_date.strftime(time_format), adgroup_ids[0],
ad_customizer_feed),
CreateFeedItemAddOperation(
'Venus', '$1450.00', venus_date.strftime(time_format),
adgroup_ids[1], ad_customizer_feed)
]
response = feed_item_service.mutate(feed_item_operations)
if 'value' in response:
for feed_item in response['value']:
print 'Added FeedItem with ID %d.' % feed_item['feedItemId']
else:
raise errors.GoogleAdsError('No FeedItems were added.')
def CreateFeedItemAddOperation(name, price, date, adgroup_id,
ad_customizer_feed):
"""Creates a FeedItemOperation.
The generated FeedItemOperation will create a FeedItem with the specified
values and AdGroupTargeting when sent to FeedItemService.mutate.
Args:
name: the value for the name attribute of the FeedItem.
price: the value for the price attribute of the FeedItem.
date: the value for the date attribute of the FeedItem.
adgroup_id: the ID of the ad_group to target with the FeedItem.
ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
with.
Returns:
A new FeedItemOperation for adding a FeedItem.
"""
feed_item = {
'feedId': ad_customizer_feed['feedId'],
'adGroupTargeting': {
'TargetingAdGroupId': adgroup_id
},
'attributeValues': [
{
'feedAttributeId': ad_customizer_feed['feedAttributes'][0]['id'],
'stringValue': name
},
{
'feedAttributeId': ad_customizer_feed['feedAttributes'][1]['id'],
'stringValue': price
},
{
'feedAttributeId': ad_customizer_feed['feedAttributes'][2]['id'],
'stringValue': date
}
]
}
return {'operator': 'ADD', 'operand': feed_item}
def main(client, adgroup_ids, feed_name=FEED_NAME):
# Create a customizer feed. One feed per account can be used for all ads.
ad_customizer_feed = CreateCustomizerFeed(client, feed_name)
# Add feed items containing the values we'd like to place in ads.
CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed)
# All set! We can now create ads with customizations.
CreateAdsWithCustomizations(client, adgroup_ids, feed_name)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUPS)
|
[
"msaniscalchi@users.noreply.github.com"
] |
msaniscalchi@users.noreply.github.com
|
f160905d816728acf5ab28b38fe37cd56249ef23
|
a95aebf977058d32fa4298e35939fb5813f11276
|
/nn/layers.py
|
f339ba6e01b645a013632b3b8d3cd2e47a1ae2a2
|
[
"MIT"
] |
permissive
|
CV-IP/uqvi
|
f6e595c60ab86eb00c3b221d24f7300a4f872839
|
2534c26c41a4745e98d4b12d66270691002d1a5f
|
refs/heads/master
| 2022-12-22T20:47:44.140964
| 2020-10-03T17:40:17
| 2020-10-03T17:40:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,452
|
py
|
import os
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _triple
from nn.bayes_conv import BayesConv3d, BayesConv2d
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel, stride, padding=1, bayes = False):
super(ConvBlock, self).__init__()
if bayes:
self.conv = nn.Sequential(
nn.InstanceNorm3d(in_channels),
nn.ReLU(inplace=True),
BayesConv3d(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=padding, bias=False))
else:
self.conv = nn.Sequential(
nn.InstanceNorm3d(in_channels),
nn.ReLU(inplace=True),
nn.Conv3d(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=padding, bias=False))
def forward(self, x):
x = self.conv(x)
return x
class BasicDownBlock(nn.Module):
def __init__(self, in_ch, out_ch, downsample, bayes=False):
super(BasicDownBlock, self).__init__()
if downsample:
str = 2
else:
str = 1
self.conv_1 = ConvBlock(in_ch, out_ch, kernel=3, stride=str, bayes=bayes)
self.conv_2 = ConvBlock(out_ch, out_ch, kernel=3, stride=1, bayes=bayes)
self.down = None
if downsample:
self.down = ConvBlock(in_ch, out_ch, kernel=1, stride=2, padding=0, bayes=False)
def forward(self, inp):
x = self.conv_1(inp)
x = self.conv_2(x)
if self.down is not None:
return x + self.down(inp)
else:
return x + inp
class BasicUpBlock(nn.Module):
def __init__(self, in_ch, out_ch, bayes=False):
super(BasicUpBlock, self).__init__()
self.upsample = nn.Sequential(
ConvBlock(in_ch, out_ch, kernel=1, stride=1, padding=0, bayes=False),
nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)
)
self.conv_1 = ConvBlock(out_ch, out_ch, kernel=3, stride=1, bayes=bayes)
self.conv_2 = ConvBlock(out_ch, out_ch, kernel=3, stride=1, bayes=bayes)
def forward(self, inp, skip_connection=None):
x = self.upsample(inp)
if skip_connection is not None:
x = x + skip_connection
x1 = self.conv_1(x)
x1 = self.conv_2(x1)
return x1 + x
|
[
"noreply@github.com"
] |
CV-IP.noreply@github.com
|
a0e0bfbddd2d9003785d592b78d9b8475e63b70c
|
097eae4e0190da97570ae7db748fca306f977fbd
|
/py/learn/test/class/example.py
|
f8642e40064bba601cac875200d08370551f363f
|
[] |
no_license
|
jiaolj/other
|
42257c593495d97ab98b9a9af00d3791ccce7a57
|
78d0366cbd599f4dde7bf6e44ca4cfc373132418
|
refs/heads/master
| 2021-05-24T04:14:03.829126
| 2016-08-28T07:40:49
| 2016-08-28T07:40:49
| 64,064,262
| 0
| 1
| null | 2020-07-23T17:05:36
| 2016-07-24T12:25:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
# -*- coding: utf-8 -*-
class b(object):
def __init__(self):
self.t=2
def getb(self):
self.t+=1
class a(b):
#----如果不声明init函数,会继承基类init属性。声明init是为了加一些自定义属性
def __init__(self):
b.__init__(self)
def get(self):
print 1
temp=a()
temp.getb()
print temp.t
|
[
"841232468@qq.com"
] |
841232468@qq.com
|
54bbd219f19c1ed9466ccdbb26db23e887394dba
|
6cb11cb804f316d16efa083effb3def1c2cab57c
|
/22.py
|
c55af12e976c5a84557d4af19a98af4e455b732f
|
[] |
no_license
|
davemolk/python_practice
|
8879cd5bdcb77c3d84ff5c7f961fda1cd48b2f93
|
91d3e411b32f3a4a29d60148b352b91ce8e1d11b
|
refs/heads/main
| 2023-08-01T12:57:45.779824
| 2021-09-18T16:54:11
| 2021-09-18T16:54:11
| 400,767,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
'''
return True if array has two consecutive 2s, otherwise return false
'''
import re
def has22(nums):
pattern = r"[2]{2}"
regex = re.compile(pattern)
match = regex.findall(("".join(str(el) for el in nums)))
return True if match else False
print(has22([1, 2, 2]))
|
[
"davemolk@gmail.com"
] |
davemolk@gmail.com
|
f3d109ee8baa41ca18eaa3f3d511d490209b0c12
|
0619b1ba176456c4b62d78d6a72fc4d9a9084287
|
/thesite/communication_app/forms.py
|
4eabf764b1d1037b42e5497319e87205eb1f6f36
|
[
"Apache-2.0"
] |
permissive
|
jacinda/petwitter
|
c13dd43a5b76786f5d5c5c3f29420153cb5a16c7
|
ea7ffa16b8d8b1207f04ace619b31dba4efc45bc
|
refs/heads/master
| 2021-01-13T06:38:31.439749
| 2015-04-15T17:25:03
| 2015-04-15T17:25:03
| 33,678,730
| 0
| 0
| null | 2015-04-09T16:02:42
| 2015-04-09T16:02:40
|
Python
|
UTF-8
|
Python
| false
| false
| 699
|
py
|
from django import forms
import communication_app.models
class PetForm(forms.ModelForm):
class Meta:
model = communication_app.models.Pet
fields = ['name']
def __init__(self, *args, **kwargs):
super(PetForm, self).__init__(*args, **kwargs)
self.fields['name'].widget = forms.TextInput(attrs={
'class': 'form-control'})
class UpdateForm(forms.ModelForm):
class Meta:
model = communication_app.models.Update
fields = ['text']
def __init__(self, *args, **kwargs):
super(UpdateForm, self).__init__(*args, **kwargs)
self.fields['text'].widget = forms.TextInput(attrs={
'class': 'form-control'})
|
[
"asheesh@asheesh.org"
] |
asheesh@asheesh.org
|
7e1029ad59d5a3c4e3e7636aa5802f22953086cd
|
e15d63ccde04e7458bff5af1bdad63a5c699b489
|
/example/Transformer_vision/2dpose/vit/multi_branch/config.py
|
5582a68fa5d82c8142ce319cab34a1901077d3e7
|
[
"WTFPL"
] |
permissive
|
ddddwee1/TorchSUL
|
775b6a2b1e4ab7aac25a3f0411de83affc257af5
|
6c7cd41b14fc8b746983e8b981d1ba4d08370ca2
|
refs/heads/master
| 2023-08-21T15:21:24.131718
| 2023-08-18T09:37:56
| 2023-08-18T09:37:56
| 227,628,298
| 13
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
import numpy as np
# size
inp_size = 224
out_size = 56
base_sigma = 2.5
num_pts = 17
pairs = [[0,1], [1,2],[2,3], [0,4], [4,5],[5,6], [0,7],[7,8],[8,9],[9,10], [8,11],[11,12],[12,13],[8,14],[14,15],[15,16]]
# augmentation
rotation = 0
min_scale = 1 # this controls largest size
max_scale = 1 # this controls smallest sise
max_translate = 0
blur_prob = 0.0
blur_size = [7, 11, 15, 21]
blur_type = ['vertical','horizontal','mean']
# training
data_root = '/data/pose/mpii/images/'
max_epoch = 300
init_lr = 0.0005
decay = 0.0001
momentum = 0.9
lr_epoch = [150,250]
save_interval = 1
# extra
distributed = True
scale_var = 19.2
angle_var = np.pi
|
[
"cy960823@outlook.com"
] |
cy960823@outlook.com
|
10b0d6c77a5a22b76ba2d6593ccd3657539ce9fd
|
4a36b5979b0753b32cff3956fd97fb8ed8b11e84
|
/1.0/_downloads/469209d8040c0923f6b4f925074d58d7/evoked_topomap.py
|
f677e3d7f02abfe8f6f3546a99379b408253479f
|
[] |
permissive
|
mne-tools/mne-tools.github.io
|
8aac7ae10bf2faeeb875b9a351a5530dc0e53154
|
495e878adc1ef3374e3db88604504d7542b01194
|
refs/heads/main
| 2023-09-03T07:06:00.660557
| 2023-09-03T04:10:18
| 2023-09-03T04:10:18
| 35,639,371
| 12
| 16
|
BSD-3-Clause
| 2023-05-05T19:04:32
| 2015-05-14T22:04:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 5,921
|
py
|
# -*- coding: utf-8 -*-
"""
.. _ex-evoked-topomap:
========================================
Plotting topographic maps of evoked data
========================================
Load evoked data and plot topomaps for selected time points using multiple
additional options.
"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
# Tal Linzen <linzen@nyu.edu>
# Denis A. Engeman <denis.engemann@gmail.com>
# Mikołaj Magnuski <mmagnuski@swps.edu.pl>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
# %%
# sphinx_gallery_thumbnail_number = 5
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
print(__doc__)
path = sample.data_path()
fname = path / 'MEG' / 'sample' / 'sample_audvis-ave.fif'
# load evoked corresponding to a specific condition
# from the fif file and subtract baseline
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
# %%
# Basic :func:`~mne.viz.plot_topomap` options
# -------------------------------------------
#
# We plot evoked topographies using :func:`mne.Evoked.plot_topomap`. The first
# argument, ``times`` allows to specify time instants (in seconds!) for which
# topographies will be shown. We select timepoints from 50 to 150 ms with a
# step of 20ms and plot magnetometer data:
times = np.arange(0.05, 0.151, 0.02)
evoked.plot_topomap(times, ch_type='mag', time_unit='s')
# %%
# If times is set to None at most 10 regularly spaced topographies will be
# shown:
evoked.plot_topomap(ch_type='mag', time_unit='s')
# %%
# We can use ``nrows`` and ``ncols`` parameter to create multiline plots
# with more timepoints.
all_times = np.arange(-0.2, 0.5, 0.03)
evoked.plot_topomap(all_times, ch_type='mag', time_unit='s',
ncols=8, nrows='auto')
# %%
# Instead of showing topographies at specific time points we can compute
# averages of 50 ms bins centered on these time points to reduce the noise in
# the topographies:
evoked.plot_topomap(times, ch_type='mag', average=0.05, time_unit='s')
# %%
# We can plot gradiometer data (plots the RMS for each pair of gradiometers)
evoked.plot_topomap(times, ch_type='grad', time_unit='s')
# %%
# Additional :func:`~mne.viz.plot_topomap` options
# ------------------------------------------------
#
# We can also use a range of various :func:`mne.viz.plot_topomap` arguments
# that control how the topography is drawn. For example:
#
# * ``cmap`` - to specify the color map
# * ``res`` - to control the resolution of the topographies (lower resolution
# means faster plotting)
# * ``outlines='skirt'`` to see the topography stretched beyond the head circle
# * ``contours`` to define how many contour lines should be plotted
evoked.plot_topomap(times, ch_type='mag', cmap='Spectral_r', res=32,
outlines='skirt', contours=4, time_unit='s')
# %%
# If you look at the edges of the head circle of a single topomap you'll see
# the effect of extrapolation. There are three extrapolation modes:
#
# - ``extrapolate='local'`` extrapolates only to points close to the sensors.
# - ``extrapolate='head'`` extrapolates out to the head circle.
# - ``extrapolate='box'`` extrapolates to a large box stretching beyond the
# head circle.
#
# The default value ``extrapolate='auto'`` will use ``'local'`` for MEG sensors
# and ``'head'`` otherwise. Here we show each option:
extrapolations = ['local', 'head', 'box']
fig, axes = plt.subplots(figsize=(7.5, 4.5), nrows=2, ncols=3)
# Here we look at EEG channels, and use a custom head sphere to get all the
# sensors to be well within the drawn head surface
for axes_row, ch_type in zip(axes, ('mag', 'eeg')):
for ax, extr in zip(axes_row, extrapolations):
evoked.plot_topomap(0.1, ch_type=ch_type, size=2, extrapolate=extr,
axes=ax, show=False, colorbar=False,
sphere=(0., 0., 0., 0.09))
ax.set_title('%s %s' % (ch_type.upper(), extr), fontsize=14)
fig.tight_layout()
# %%
# More advanced usage
# -------------------
#
# Now we plot magnetometer data as topomap at a single time point: 100 ms
# post-stimulus, add channel labels, title and adjust plot margins:
evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
size=6, res=128, title='Auditory response',
time_unit='s')
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
# %%
# We can also highlight specific channels by adding a mask, to e.g. mark
# channels exceeding a threshold at a given time:
# Define a threshold and create the mask
mask = evoked.data > 1e-13
# Select times and plot
times = (0.09, 0.1, 0.11)
evoked.plot_topomap(times, ch_type='mag', time_unit='s', mask=mask,
mask_params=dict(markersize=10, markerfacecolor='y'))
# %%
# Or by manually picking the channels to highlight at different times:
times = (0.09, 0.1, 0.11)
_times = ((np.abs(evoked.times - t)).argmin() for t in times)
significant_channels = [
('MEG 0231', 'MEG 1611', 'MEG 1621', 'MEG 1631', 'MEG 1811'),
('MEG 2411', 'MEG 2421'),
('MEG 1621')]
_channels = [np.in1d(evoked.ch_names, ch) for ch in significant_channels]
mask = np.zeros(evoked.data.shape, dtype='bool')
for _chs, _time in zip(_channels, _times):
mask[_chs, _time] = True
evoked.plot_topomap(times, ch_type='mag', time_unit='s', mask=mask,
mask_params=dict(markersize=10, markerfacecolor='y'))
# %%
# Animating the topomap
# ---------------------
#
# Instead of using a still image we can plot magnetometer data as an animation,
# which animates properly only in matplotlib interactive mode.
# sphinx_gallery_thumbnail_number = 9
times = np.arange(0.05, 0.151, 0.01)
fig, anim = evoked.animate_topomap(
times=times, ch_type='mag', frame_rate=2, time_unit='s', blit=False)
|
[
"dan@mccloy.info"
] |
dan@mccloy.info
|
c157b99f15cf4b7b2d4bd05ea5b0e5f89507cf3a
|
07bb913fea5e0f1e65e35a7ca5c594fa1d144eb8
|
/publishconf.py
|
ab389e79f3df4349f62293bf934b3def399eb94a
|
[] |
no_license
|
jbzdak/pwzn-lessons
|
8373552fabb260593cf612a27bf821d7b70b452d
|
5ca58dba6220259b170c8a689a10338122c4eefd
|
refs/heads/master
| 2021-04-05T20:48:56.447870
| 2020-03-19T20:36:08
| 2020-03-19T20:36:08
| 248,600,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
#ITEURL = 'http://pwzn.s3-website-us-east-1.amazonaws.com'
SITEURL = 'http://db.fizyka.pw.edu.pl/pwzn'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
|
[
"jbzdak@gmail.com"
] |
jbzdak@gmail.com
|
86a3a8b7517688b3d439381f7baf7469c0eb82a9
|
9f2a231557a9aabc181ed388faaf2f0b3b59c530
|
/Testcode/spellCheck.py
|
5be1f5b0fa649c1d809ee849a078538109829c13
|
[] |
no_license
|
abhashjain/DIC_Project
|
7e379cd5ef99d1fc31d414985e1b04388b475fe0
|
329f8da2f61e95410292a3062c68ed06845ec6ac
|
refs/heads/master
| 2020-04-25T14:49:58.508999
| 2018-12-11T04:36:09
| 2018-12-11T04:36:09
| 172,855,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
import os, time, re
os.getcwd()
startTime = time.time()
wordFile = open("..\\src\\words.txt","r")
words = wordFile.read()
print("Words in dictionary:",len(words))
inputDoc = open("..\\src\\guten.txt", "r", encoding="utf-8")
doc = inputDoc.read().split()
print("Words in file:",len(doc))
## Processing the input document
def is_number(x):
#checking if number is int or not
try:
int(x)
return True
except (TypeError, ValueError):
pass
return False
processedInput = list()
for word in doc:
if not is_number(word):
if not "@" in word:
if not "www." in word:
if len(re.sub('[^A-Za-z0-9]+', '', word)) > 1:
processedInput.append(re.sub('[^A-Za-z0-9]+', '', word))
misspelledWords = list()
i = 0
for word in processedInput:
# i += 1
# print(i, end=", ")
if word.lower() not in words:
misspelledWords.append(word)
print("Total misspelled words =",len(misspelledWords))
print("Total execution time = %s sec"%(time.time() - startTime))
with open("..//results//outputPython.txt", "w") as outFile:
for word in misspelledWords:
outFile.write(word)
outFile.write("\n")
print ("Incorrect words written to outputPython.txt")
|
[
"nobody@ncsu.edu"
] |
nobody@ncsu.edu
|
44d7b163937a1cc756b6f3918b58cb04e955dc93
|
04aacfdb9944e6d796671198835394e07db98ecf
|
/pythonz/commands/locate.py
|
939aedb0faee04842dfa3a3a10a968e88396ce8c
|
[] |
no_license
|
rmoorman/pythonz
|
ea86f302c70b67440c2829d4a0a9161d4a006ccc
|
3d43172cae190284cf0b620aa28c0f794f770497
|
refs/heads/master
| 2021-01-12T19:51:39.057258
| 2014-10-16T07:20:06
| 2014-10-16T07:20:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
import os
from pythonz.commands import Command
from pythonz.define import PATH_PYTHONS
from pythonz.util import Package, is_installed
from pythonz.log import logger
class LocateCommand(Command):
name = "locate"
usage = "%prog [options] VERSION"
summary = "Locate the given version of python"
def __init__(self):
super(LocateCommand, self).__init__()
self.parser.add_option(
"-t", "--type",
dest="type",
default="cpython",
help="Type of Python version: cpython, stackless, pypy, pypy3 or jython."
)
def run_command(self, options, args):
if not args or len(args) > 1:
self.parser.print_help()
return
pkg = Package(args[0], options.type)
pkgname = pkg.name
if not is_installed(pkg):
logger.error("`%s` is not installed." % pkgname)
return
logger.log(os.path.join(PATH_PYTHONS, pkgname, 'bin', 'python'))
LocateCommand()
|
[
"saghul@gmail.com"
] |
saghul@gmail.com
|
d7e6fb902bb4d82e45d61c4cff79935749eb6882
|
60f75884ced267a5f0f09a0b43f68e7d8c5c7a14
|
/tester/test_handlers/test_page_handler.py
|
25b3ed3218c1b9b75b71fae6e6b25697c3bb7901
|
[
"MIT"
] |
permissive
|
liusheng6982/TorCMS
|
b0fa1fe96a814c10dc7163b127672e1076d19e02
|
cb5ee651ece0cff28eae1dcde9013edf28387073
|
refs/heads/master
| 2021-01-19T04:31:54.221405
| 2017-04-04T11:41:50
| 2017-04-04T11:41:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
# -*- coding:utf-8 -*-
from torcms.handlers.page_handler import PageHandler
from torcms.handlers.page_ajax_handler import PageAjaxHandler
def Test():
urls = [
("/label/(.*)", PageAjaxHandler, dict()),
("/label/(.*)", PageHandler, dict()),
]
assert urls
|
[
"bukun@osgeo.cn"
] |
bukun@osgeo.cn
|
845e06146026e7a00fd10824220dd35e50e2ccab
|
127d8c209b00978f4f660534363e95eca3f514f2
|
/backend/home/migrations/0002_load_initial_data.py
|
110b21901b630cf3f96ad807523e091bfc8ac157
|
[] |
no_license
|
crowdbotics-apps/sitespace-19938
|
afd070e64d32ab455f9b2b05e376152e9e28e5ad
|
416b5cef0bdb25018ec3b634bf3096e61fe8b662
|
refs/heads/master
| 2022-12-10T15:52:58.601025
| 2020-09-02T15:20:15
| 2020-09-02T15:20:15
| 292,319,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "Sitespace"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
    # Seed the landing-page body shown on the default home page.
    model = apps.get_model("home", "HomePage")
    body = """
<h1 class="display-4 text-center">Sitespace</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    model.objects.create(body=body)
def create_site(apps, schema_editor):
    # Point the Site record (pk=1) at the deployed custom domain.
    site_model = apps.get_model("sites", "Site")
    custom_domain = "sitespace-19938.botics.co"
    defaults = {"name": "Sitespace"}
    if custom_domain:
        defaults["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    # Data migration seeding initial CustomText, HomePage and Site records.
    # NOTE(review): no reverse_code is supplied to RunPython, so this
    # migration is irreversible — confirm that is intended.

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
348fc47cef3dc9dc96c748af7cf91394fd8222e7
|
2d7c21a793c8080a090ce8c9f05df38f6477c7c7
|
/tests/data_templates/test_field_definitions.py
|
c4f05eb9f57000460d8661f4d47b2a554f7826ea
|
[
"Apache-2.0"
] |
permissive
|
kids-first/kf-api-study-creator
|
c40e0a8a514fd52a857e9a588635ef76d16d5bc7
|
ba62b369e6464259ea92dbb9ba49876513f37fba
|
refs/heads/master
| 2023-08-17T01:09:38.789364
| 2023-08-15T14:06:29
| 2023-08-15T14:06:29
| 149,347,812
| 3
| 0
|
Apache-2.0
| 2023-09-08T15:33:40
| 2018-09-18T20:25:38
|
Python
|
UTF-8
|
Python
| false
| false
| 5,204
|
py
|
import os
import json
import pytest
import pandas
from marshmallow import ValidationError
from pprint import pprint
from creator.data_templates.models import TemplateVersion
from creator.data_templates.field_definitions_schema import (
coerce_number,
coerce_bool,
FieldDefinitionSchema,
FieldDefinitionsSchema
)
@pytest.mark.parametrize(
    "in_value, expected_out",
    [
        ("0.0", 0.0),
        (0.0, 0.0),
        ("0", 0),
        (0, 0),
        ("10.0", 10),
        (10.0, 10),
        ("200", 200),
        (200, 200),
        ("1.234", 1.234),
        (1.234, 1.234),
        ("foo", "foo"),
        (None, None),
    ]
)
def test_coerce_number(in_value, expected_out):
    """
    Test helper function that coerces strings to float/int

    Per the cases above: numeric strings become numbers, whole-valued
    floats collapse to ints, and non-numeric values (including None)
    pass through unchanged.
    """
    assert coerce_number(in_value) == expected_out
@pytest.mark.parametrize(
    "in_value, expected_out",
    [
        (True, True),
        (False, False),
        ("foo", "foo"),
        ("0.0", False),
        ("1", True),
        ("True", True),
        ("FALSE", False),
        ("Yes", True),
        ("no", False),
        ("Required", True),
        ("Not Required", False),
        (None, False),
    ]
)
def test_coerce_bool(in_value, expected_out):
    """
    Test helper function that coerces strings to booleans

    Per the cases above: boolean-ish strings ("yes"/"no", "true"/"false",
    numeric strings, "Required"/"Not Required") are coerced case-insensitively,
    None maps to False, and unrecognized strings pass through unchanged.
    """
    assert coerce_bool(in_value) == expected_out
def test_schema_clean():
    """
    Test FieldDefinitionSchema.clean method

    Exercises the pre-validation normalization: snake_casing of keys,
    defaulting/casing of data_type, parsing of comma-separated lists,
    and empty-string handling.
    """
    schema = FieldDefinitionSchema()
    # Test keys are all snake cased
    in_data = {
        "Label": None,
        "Data Type": None,
    }
    out_data = schema.clean(in_data)
    assert {"label", "data_type"} == set(out_data.keys())
    # Test data_type default
    assert out_data["data_type"] == "string"
    # Test data_type casing
    in_data["data_type"] = "Number"
    out_data = schema.clean(in_data)
    assert out_data["data_type"] == "number"
    # Test accepted_values
    in_data["accepted_values"] = None
    out_data = schema.clean(in_data)
    assert out_data["accepted_values"] is None
    # A comma-separated accepted_values string forces data_type to enum
    # and is parsed into a list of strings.
    in_data["data_type"] = "foobar"
    in_data["accepted_values"] = "1.0, 2.0, 3.0"
    out_data = schema.clean(in_data)
    assert out_data["accepted_values"] == ["1.0", "2.0", "3.0"]
    assert out_data["data_type"] == "enum"
    # Test missing values
    in_data["missing_values"] = None
    out_data = schema.clean(in_data)
    assert out_data["missing_values"] is None
    in_data["missing_values"] = "None, Unknown"
    out_data = schema.clean(in_data)
    assert ["None", "Unknown"] == out_data["missing_values"]
    # Test empty strings handled properly (treated like absent values)
    in_data["accepted_values"] = " "
    in_data["missing_values"] = ""
    in_data["required"] = " "
    in_data["data_type"] = " "
    out_data = schema.clean(in_data)
    assert out_data["accepted_values"] is None
    assert out_data["missing_values"] is None
    assert out_data["required"] == False # noqa
    assert out_data["data_type"] == "string"
def test_validation_error():
    """
    Test custom handling of validation errors

    Errors for individual field definitions should be re-keyed by a
    human-readable identifier (index, or the Label when present) rather
    than the generic "fields" key.
    """
    in_fields = {
        "fields": [
            {
                "Key": "person.id",
                "Label": "Person ID",
                # Missing description, but has required keys
            },
            {
                "Key": "specimen.id",
                "Description": "Identifier for specimen"
                # Missing label but has other required keys
            }
        ]
    }
    schema = FieldDefinitionsSchema()
    # Test custom validation message
    with pytest.raises(ValidationError) as e:
        schema.load(in_fields)
    errors = e.value.messages[0]
    assert "fields" not in errors
    assert "Field Definition [1]" in errors
    assert "Field Definition [Person ID]" in errors
    # Test normal validation message (non-mapping input falls back to
    # marshmallow's default schema-level error)
    with pytest.raises(ValidationError) as e:
        schema.load("foo")
    assert {'_schema': ['Invalid input type.']} == e.value.messages
def test_schema_load():
    """
    End to end test using the field definitions schema to clean and validate
    input data
    """
    input_payload = {
        "fields": [
            {
                "Key": "person.id",
                "Label": "Person ID",
                "Description": "Identifier for person"
            },
            {
                "Key": "specimen.id",
                "Label": "Specimen ID",
                "Description": "Identifier for specimen"
            }
        ]
    }
    schema = FieldDefinitionsSchema()
    result = schema.load(input_payload)
    loaded_fields = result["fields"]

    # The embedded schema version must match the schema class's version
    assert result["schema_version"]["number"] == schema.SCHEMA_VERSION["number"]

    # Every input field definition appears in the output
    assert len(loaded_fields) == len(input_payload["fields"])

    # Defaults were applied and every component of a field definition is
    # present on each loaded instance
    for field_def in loaded_fields:
        assert set(FieldDefinitionsSchema.key_order) == set(field_def.keys())
        assert field_def["data_type"] == "string"
        assert field_def["required"] == False  # noqa
        assert field_def["accepted_values"] is None
        assert field_def["instructions"] is None
|
[
"dukedesi22@gmail.com"
] |
dukedesi22@gmail.com
|
19caab41b1e7e5822d71d8e70217b1ac6dda3b67
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/Sudoku_II_005_20180620141234.py
|
396d0dea7f396d2fdc9165bfceb7cd75b20f3c37
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424
| 2018-07-04T17:21:13
| 2018-07-04T17:21:13
| 139,749,483
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,622
|
py
|
from random import randint
# Three nearly-solved 9x9 charts. Cells hold ints; the few blank cells the
# player must fill are the string " " (mixed types — summing a row with a
# blank raises TypeError, which the game loop relies on below).
sudoku1 = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, 6, 1, 4, 3, 7, 8, " "]
]
sudoku2 = [
    [9, 8, 7, 4, 3, 2, 5, 6, 1],
    [2, 4, 3, 5, 1, 6, 8, 7, 9],
    [5, 6, 1, 7, 9, 8, 4, 3, 2],
    [3, 9, 5, 6, 4, 7, 2, 1, 8],
    [8, 2, 4, 3, 5, 1, 6, 9, 7],
    [1, 7, 6, 2, 8, 9, 3, 4, 5],
    [7, 1, 2, 8, 6, 3, 9, 5, 4],
    [4, 3, 8, 9, 7, 5, 1, 2, 6],
    [' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
# NOTE(review): sudoku3 is an exact copy of sudoku2 — possibly a placeholder
# for a third puzzle; confirm intent.
sudoku3 = [
    [9, 8, 7, 4, 3, 2, 5, 6, 1],
    [2, 4, 3, 5, 1, 6, 8, 7, 9],
    [5, 6, 1, 7, 9, 8, 4, 3, 2],
    [3, 9, 5, 6, 4, 7, 2, 1, 8],
    [8, 2, 4, 3, 5, 1, 6, 9, 7],
    [1, 7, 6, 2, 8, 9, 3, 4, 5],
    [7, 1, 2, 8, 6, 3, 9, 5, 4],
    [4, 3, 8, 9, 7, 5, 1, 2, 6],
    [' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
def printSudoku():
    """Render the module-global `sudoku` chart as an ASCII grid.

    Prints a column-number header first, a horizontal rule every three
    rows, and vertical bars every three columns.
    """
    i = 0
    while i < 10:
        if i == 0:
            # Column header and top border before the first row.
            print(" 1 2 3 4 5 6 7 8 9")
            print(" -------------------------")
        elif i == 3 or i == 6 or i == 9:
            print(" -------------------------")
        line = "|"
        if i < 9:
            print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku[i], line, i+1))
        i = i + 1
    print(" ")
# ASCII-art banner followed by the chart-selection menu.
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
# s remembers which numbered chart was chosen so "r" (reset) can restore it.
# NOTE(review): s stays 0 when a random chart is picked, so reset does
# nothing in that case — confirm intended.
s = 0
if choice == "R" or choice == "r":
    listaSudoku = [sudoku1, sudoku2, sudoku3]
    sudoku_number = randint(0, 2)
    print("dupa", sudoku_number)
    sudoku = listaSudoku[sudoku_number]
    #print("ktore = ", sudoku)
elif int(choice) == 1:
    s = 1
    sudoku = sudoku1
elif int(choice) == 2:
    s = 2
    sudoku = sudoku2
elif int(choice) == 3:
    s = 3
    sudoku = sudoku3
while True:  # main game loop: redraw the chart and accept moves until solved
    print("Your sudoku to solve:")
    printSudoku()
    print("Input 3 numbers in format a b c, np. 4 5 8")
    print(" a - row number")
    print(" b - column number ")
    print(" c - value")
    # vprint(" r - reset chart to start\n ")
    x = input("Input a b c: ")
    print("")
    numbers = " 0123456789"  # conditions of entering the numbers !
    if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
            str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
        if x == "r":  # reset the board to the originally chosen chart
            # BUG FIX: the original chain assigned sudoku1 in every branch
            # and contained a bare `s == 1 :` line, which was a syntax error.
            # NOTE(review): reset re-binds the original chart object, which
            # moves have already mutated in place — a deep copy would be
            # needed for a true reset; preserved as-is for now.
            if s == 1:
                sudoku = sudoku1
            elif s == 2:
                sudoku = sudoku2
            elif s == 3:
                sudoku = sudoku3
            print(" Function reset() will be ready in Next Week")
        else:
            print("Error - wrong number format \n ")
        continue
    # Apply the move: row a, column b (1-based) get value c.
    sudoku[int(x[0])-1][int(x[2])-1] = int(x[4])
    try:
        # Sum every column; a completed column of 1..9 sums to 45.
        i = 0
        column_sums = []
        while i < 9:
            column = 0
            for item in sudoku:
                column = column + item[i]
            column_sums.append(column)
            i += 1
        is45 = 0
        for total in column_sums:
            if total == 45:
                is45 = is45 + 1
        # Count completed rows; the puzzle is solved when all 9 rows and
        # all 9 columns sum to 45.
        i = 0
        for item in sudoku:
            if sum(item) == 45 and is45 == 9:
                i = i + 1
        if i == 9:
            printSudoku()
            print("@@@@@@@@@@ YOU WIN @@@@@@@@@@")
            break
    except TypeError:
        # Blank cells are " " strings, so summing raises until they are
        # all filled in — just skip the win check.
        print()
|
[
"inz.kamil.wos@gmail.com"
] |
inz.kamil.wos@gmail.com
|
ba508a2958f5325258855671103405bc641ebe97
|
a5e591dc09e11e88af56fb5a881fae064fb9c495
|
/recruitment/recruitment/doctype/interview/interview.py
|
0449ed7ff48f9261f3c429e7522f6aad25c3b49d
|
[
"MIT"
] |
permissive
|
barathprathosh/recruitment
|
6b61dd1ee9c0b9d7851b0b3e5bab307f7ee2d1b5
|
9660944856e72288e47960e6802ec97a220a656d
|
refs/heads/master
| 2020-04-29T03:03:51.722972
| 2019-03-15T08:58:32
| 2019-03-15T08:58:32
| 175,794,797
| 0
| 0
|
NOASSERTION
| 2019-03-15T10:00:32
| 2019-03-15T10:00:31
| null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Interview(Document):
	"""Controller for the Interview DocType (no custom logic yet)."""
	pass
|
[
"abdulla.pi@voltechgroup.com"
] |
abdulla.pi@voltechgroup.com
|
cac8cca8bbafc756a771cbbd21f316a640e98cd7
|
6b4a48fb6142789326654c48d32acda3eb5e7b08
|
/formationproject/wsgi.py
|
a9ea3c0a982ffb7af95cba5e2211d90796a89dd1
|
[] |
no_license
|
mwesterhof/formationproject
|
0d9795c218b5010bfbb716216d3d8f4fa5bd4799
|
1b4a057b996829609e308c78721aca840ec58ee7
|
refs/heads/master
| 2023-08-19T00:08:58.282341
| 2021-10-08T16:19:18
| 2021-10-08T16:19:18
| 401,425,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
WSGI config for formationproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Select the settings module before Django loads; an externally supplied
# DJANGO_SETTINGS_MODULE takes precedence over this dev default.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "formationproject.settings.dev")

# Module-level WSGI callable picked up by WSGI servers (e.g. gunicorn).
application = get_wsgi_application()
|
[
"m.westerhof@lukkien.com"
] |
m.westerhof@lukkien.com
|
433dc5780c6bf966236e507e8947e87df83870a2
|
43e900f11e2b230cdc0b2e48007d40294fefd87a
|
/Amazon/VideoOnsite/926.flip-string-to-monotone-increasing.py
|
d4efde64ddbe2e4540f93d5acfa3516e947730ab
|
[] |
no_license
|
DarkAlexWang/leetcode
|
02f2ed993688c34d3ce8f95d81b3e36a53ca002f
|
89142297559af20cf990a8e40975811b4be36955
|
refs/heads/master
| 2023-01-07T13:01:19.598427
| 2022-12-28T19:00:19
| 2022-12-28T19:00:19
| 232,729,581
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
#
# @lc app=leetcode id=926 lang=python3
#
# [926] Flip String to Monotone Increasing
#
# @lc code=start
class Solution:
    def minFlipsMonoIncr(self, s: str) -> int:
        """Return the minimum flips so s becomes monotone increasing.

        For every '1' kept as the start of the trailing ones-block, the
        cost is (ones seen before it) + (zeros remaining after it); the
        answer is the minimum over all such splits, bounded above by
        flipping every '1' to '0'.
        """
        zeros_remaining = s.count('0')
        ones_seen = 0
        # Baseline: turn the whole string into zeros (flip every '1').
        best = len(s) - zeros_remaining
        for ch in s:
            if ch == '0':
                zeros_remaining -= 1
            elif ch == '1':
                best = min(best, ones_seen + zeros_remaining)
                ones_seen += 1
        return best
# @lc code=end
|
[
"wangzhihuan0815@gmail.com"
] |
wangzhihuan0815@gmail.com
|
89b1685f529264b86004c272eb59419b27a1315b
|
4a42fefd8945c73402ddf36f8943e011cd9c4151
|
/projects/myhellowebapp/hellowebapp/wsgi.py
|
2b6fe00b6b8875c39ed849cf147b0eb94f51d25b
|
[] |
no_license
|
momentum-cohort-2018-10/hello-web-app-SowmyaAji
|
c2c1374b460232822ff91fc1d034f1d89a400332
|
2cfe7fd6d22db4f9b9ac0d8fdc611787cb1372c5
|
refs/heads/master
| 2020-04-06T11:53:35.991478
| 2018-11-18T20:48:49
| 2018-11-18T20:48:49
| 157,434,877
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for hellowebapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Select the settings module before Django loads; an externally supplied
# DJANGO_SETTINGS_MODULE takes precedence over this default.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hellowebapp.settings")

# Module-level WSGI callable picked up by WSGI servers.
application = get_wsgi_application()
|
[
"sowmya.aji@gmail.com"
] |
sowmya.aji@gmail.com
|
c718408ccc29e4bca88b5deef7e84bb586acddfc
|
ea0c0b8d67a42086f840149b3dbe1c0e4f58e56f
|
/members_area/forms.py
|
06d19b868f16f535ae4172f3cc5f191a2c75b8b0
|
[
"MIT"
] |
permissive
|
AzeezBello/raodoh
|
78b27e0886f8882144a4def160d9c3f53bcc6af9
|
296bd44069bd750557bf49995374601f5052d695
|
refs/heads/master
| 2022-05-03T05:07:21.632642
| 2020-02-26T10:16:08
| 2020-02-26T10:16:08
| 235,878,080
| 0
| 0
|
MIT
| 2022-04-22T23:01:27
| 2020-01-23T20:15:39
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 194
|
py
|
from django.forms import ModelForm
from .models import Lesson
class LessonForm(ModelForm):
    """ModelForm for creating/editing a Lesson (title, course, body, url, video)."""
    class Meta:
        model = Lesson
        fields = ('title', 'course', 'body', 'url', 'video')
|
[
"azeez@scholarx.co"
] |
azeez@scholarx.co
|
e78a07d5a9ac0d6375bab50be733a669fac273ff
|
e5b6d2e79d6593587fa8f5854def9ebf4d47a9e1
|
/djangocli/wsgi.py
|
8e9c0ba06187289fb8d23d2abffc8b6bcf5721d6
|
[] |
no_license
|
redeyed-archive/DjangoSiteCheckerExample
|
35756664f0b9667e151d4608c6ebd5d279523534
|
e53b2fad15d2a768e75bc853c69113c0d54c2ed2
|
refs/heads/master
| 2023-03-17T06:22:46.129989
| 2019-02-17T05:48:43
| 2019-02-17T05:48:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for djangocli project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Select the settings module before Django loads; an externally supplied
# DJANGO_SETTINGS_MODULE takes precedence over this default.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangocli.settings')

# Module-level WSGI callable picked up by WSGI servers.
application = get_wsgi_application()
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
16d37fe91e6e6174ecc5ebf06d10063687980ee8
|
97e54e4b18c1d696926678f1e320b2fc9cef5436
|
/jaraco/text/strip-prefix.py
|
761717a9b9e1f837eeacf0e888822f6fad881361
|
[
"MIT"
] |
permissive
|
jaraco/jaraco.text
|
8ff2d7d49b3af0ca5e98c1cb337562bde9d3ba72
|
460dc329b799b88adb32ea95435d3a9e03cbdc00
|
refs/heads/main
| 2023-09-04T06:57:23.624303
| 2023-07-30T01:01:42
| 2023-07-30T01:01:42
| 48,551,451
| 15
| 8
|
MIT
| 2023-07-30T14:52:20
| 2015-12-24T17:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 412
|
py
|
import sys
import autocommand
from jaraco.text import Stripper
def strip_prefix():
    r"""
    Strip any common prefix from stdin.

    >>> import io, pytest
    >>> getfixture('monkeypatch').setattr('sys.stdin', io.StringIO('abcdef\nabc123'))
    >>> strip_prefix()
    def
    123
    """
    sys.stdout.writelines(Stripper.strip_prefix(sys.stdin).lines)


# Register strip_prefix as a CLI entry point: autocommand derives the
# argparse interface from the function signature and runs it when this
# module is executed as a script.
autocommand.autocommand(__name__)(strip_prefix)
|
[
"jaraco@jaraco.com"
] |
jaraco@jaraco.com
|
2bc4f1ab2384a7e76f74641976a53715c495cc2a
|
b0c528e2650dec1ff011215537fc5ea536627966
|
/main/urls.py
|
58a80f586c83f786718a9f83bb105e9b11210f7e
|
[] |
no_license
|
trinhgliedt/Python_Great_number_game
|
9cb84a1bd95333df15140cc2e1c466d0911b7b19
|
8358c84012981b8dfaafb9017fc9a92450a98e7b
|
refs/heads/master
| 2023-02-08T21:14:23.124896
| 2021-01-01T06:18:02
| 2021-01-01T06:18:02
| 325,926,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
from django.urls import path
from . import views
# Routes for this app: the index page and the form-processing result page.
urlpatterns = [
    path('', views.index),
    path('result/', views.process_form),
]
|
[
"chuot2008@gmail.com"
] |
chuot2008@gmail.com
|
dc72573a696b1184ae2cf899bda0ecd956d49f9d
|
0931b32140ba932b3ba02f5109a087c6c70a244d
|
/frappe/desk/desk_page.py
|
fc7281e06c18d9766c2efcb8f939fa6938c5c494
|
[
"MIT"
] |
permissive
|
cstkyrilos/frappe
|
b60ed4e95ce929c74c2fc46000080d10b343190e
|
27d9306bc5924c11c2749503454cc6d11a8cc654
|
refs/heads/main
| 2023-03-23T10:35:42.732385
| 2021-03-22T21:55:58
| 2021-03-22T21:55:58
| 350,292,784
| 0
| 0
|
MIT
| 2021-03-22T10:01:08
| 2021-03-22T10:01:07
| null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.translate import send_translations
@frappe.whitelist()
def get(name):
	"""
	Return the :term:`doclist` of the `Page` specified by `name`.

	Sets a 403 response flag and raises PermissionError when the current
	user may not read the page.
	"""
	page = frappe.get_doc('Page', name)
	if page.is_permitted():
		page.load_assets()
		docs = frappe._dict(page.as_dict())
		if getattr(page, '_dynamic_page', None):
			docs['_dynamic_page'] = 1

		return docs
	else:
		frappe.response['403'] = 1
		# BUG FIX: the old `raise X, 'msg'` comma form is Python-2-only
		# syntax; the call form below works on both Python 2 and 3.
		raise frappe.PermissionError('No read permission for Page %s' %
			(page.title or name))
@frappe.whitelist(allow_guest=True)
def getpage():
	"""
	Load the page named in `frappe.form_dict` and append its doclist to
	`frappe.response`, sending translations for non-English sessions.
	"""
	page_name = frappe.form_dict.get('name')
	doc = get(page_name)

	# load translations
	if frappe.lang != "en":
		send_translations(frappe.get_lang_dict("page", page_name))

	frappe.response.docs.append(doc)
def has_permission(page):
	"""Return whether the current session user may read *page*.

	NOTE(review): when every check passes for a role-restricted page the
	function falls through and returns None (falsy) — preserved from the
	original; confirm callers treat None as intended.
	"""
	# Administrators and System Managers always have access.
	if frappe.session.user == "Administrator" or "System Manager" in frappe.get_roles():
		return True

	allowed_roles = [entry.role for entry in page.get("roles")]
	if not allowed_roles:
		# hack for home pages! if no Has Roles, allow everyone to see!
		return True

	if frappe.session.user == "Guest" and "Guest" not in allowed_roles:
		return False
	if not set(allowed_roles).intersection(set(frappe.get_roles())):
		# check if roles match
		return False
	if not frappe.has_permission("Page", ptype="read", doc=page):
		# check if there are any user_permissions
		return False
|
[
"cst.kyrilos@gmail.com"
] |
cst.kyrilos@gmail.com
|
c9a91552c1b8f4b8a2ff609676b81cd11cf08ead
|
48df99f4358be7a51becd3d685e1ec825d295ba4
|
/dentalstate/models.py
|
36c642462ac4cabb367d2fe592fdd0be94d557a6
|
[
"Apache-2.0"
] |
permissive
|
kuyesu/tscharts
|
21d2aedeea4aad3b126defaa1703f60f44f14de6
|
9ed4e4bb0a6d296e1156afca5b55d0f71dfb894b
|
refs/heads/master
| 2023-06-03T04:50:15.282855
| 2021-06-12T19:50:51
| 2021-06-12T19:50:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,258
|
py
|
#(C) Copyright Syd Logan 2020
#(C) Copyright Thousand Smiles Foundation 2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import unicode_literals
from django.db import models
from patient.models import Patient
from clinic.models import Clinic
from dentalcdt.models import DentalCDT
class DentalState(models.Model):
    """One dental-chart entry: a CDT-coded state recorded for a tooth (or
    the whole mouth) of a patient at a clinic visit."""
    clinic = models.ForeignKey(Clinic)
    patient = models.ForeignKey(Patient)
    username = models.CharField(max_length=64, default = "") # user supplied name
    time = models.DateTimeField(auto_now=True)
    '''
    tooth location is relative to location (top or bottom). Zero
    indicates the treatment applies to whole mouth (and location
    is ignored
    '''
    tooth = models.IntegerField(default = 0)
    DENTAL_LOCATION_TOP = 't'
    DENTAL_LOCATION_BOTTOM = 'b'
    DENTAL_LOCATION_CHOICES = ((DENTAL_LOCATION_TOP, "top"), (DENTAL_LOCATION_BOTTOM, "bottom"))
    location = models.CharField(max_length = 1, choices = DENTAL_LOCATION_CHOICES, default = DENTAL_LOCATION_TOP)
    # CDT procedure code associated with this state.
    code = models.ForeignKey(DentalCDT)
    DENTAL_STATE_NONE = 'n'
    DENTAL_STATE_UNTREATED = 'u'
    DENTAL_STATE_TREATED = 't'
    DENTAL_STATE_OTHER = 'o'
    DENTAL_STATE_MISSING = 'm'
    DENTAL_STATE_CHOICES = ((DENTAL_STATE_MISSING, "missing"), (DENTAL_STATE_NONE, "none"), (DENTAL_STATE_UNTREATED, "untreated"), (DENTAL_STATE_TREATED, "treated"), (DENTAL_STATE_OTHER, "other"))
    state = models.CharField(max_length = 1, choices = DENTAL_STATE_CHOICES, default = DENTAL_STATE_NONE)
    DENTAL_SURFACE_NONE = 'n'
    DENTAL_SURFACE_BUCCAL = 'b'
    DENTAL_SURFACE_LINGUAL = 'u'
    DENTAL_SURFACE_MESIAL = 'm'
    DENTAL_SURFACE_OCCLUSAL = 'c'
    DENTAL_SURFACE_LABIAL = 'a'
    DENTAL_SURFACE_INCISAL = 'i'
    DENTAL_SURFACE_WHOLE_MOUTH_OR_VISIT = 'w'
    DENTAL_SURFACE_OTHER = 'o'
    DENTAL_SURFACE_CHOICES = ((DENTAL_SURFACE_NONE, "none"), (DENTAL_SURFACE_BUCCAL, "buccal"), (DENTAL_SURFACE_LINGUAL, "lingual"), (DENTAL_SURFACE_MESIAL, "mesial"), (DENTAL_SURFACE_OCCLUSAL, 'occlusal'), (DENTAL_SURFACE_LABIAL, 'labial'), (DENTAL_SURFACE_INCISAL, 'incisal'), (DENTAL_SURFACE_WHOLE_MOUTH_OR_VISIT, 'whole_mouth_or_visit'), (DENTAL_SURFACE_OTHER, 'other'))
    # here we define a charfield as a string to hold a set of surfaces
    # this won't work with forms, but since we are just a REST API, doesn't
    # matter much. The DENTAL_STATE_CHOICES tuple will be useful as we
    # serialize/unserialize values between the client and the model. We
    # could also have done this as an integer bitmask, but a string of chars
    # facilitates debugging.
    surface = models.CharField(max_length = 10, choices = DENTAL_SURFACE_CHOICES, default = DENTAL_SURFACE_NONE)
    comment = models.TextField(default = "")
|
[
"slogan621@gmail.com"
] |
slogan621@gmail.com
|
a650fcc83f32dd0898f953ec683b1b54eb77b733
|
233f97c6f360d478bf975016dd9e9c2be4a64adb
|
/guvi3.py
|
6dd143242eb16cf5b6ec3091f1ddba172fd1f82f
|
[] |
no_license
|
unknownboyy/GUVI
|
3dbd1bb2bc6b3db52f5f79491accd6c56a2dec45
|
d757dd473c4f5eef526a516cf64a1757eb235869
|
refs/heads/master
| 2020-03-27T00:07:12.449280
| 2019-03-19T12:57:03
| 2019-03-19T12:57:03
| 145,595,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
def check(n):
    """Return the digit sum of n if its decimal form starts with '1', else False."""
    digit_sum = sum(int(d) for d in str(n))
    if str(digit_sum).startswith('1'):
        return digit_sum
    return False
# Read the upper bound, then walk curr through 800, 802, 806, 812, ...
# (gaps growing by 2 each step) collecting digit sums whose decimal form
# starts with '1'.
n = int(input())
# Seed value 8 — presumably the digit sum of the starting term 800;
# TODO confirm against the problem statement.
l = [8]
c = 0
diff = 2
curr = 800
while curr+diff<=n:
    curr+=diff
    w = check(curr)
    if w!=False:
        l.append(w)
    diff+=2
    c+=1
# Print the collected digit sums, then the number of steps taken.
print(*l)
print(c)
|
[
"ankitagrawal11b@gmail.com"
] |
ankitagrawal11b@gmail.com
|
24ebdd333e00edb3f74ccd4677e9ab43d5c096e3
|
c03d7a4e03c581d4be98b6363003cddb9c213ec0
|
/pets/migrations/0007_auto_20180910_0016.py
|
6228879999e3df790cc687d09ad854b059402325
|
[] |
no_license
|
hernandavidc/plataforma
|
b333e4f06290713072d8dc609c27d4ce8af1d9df
|
4316e2a59db76e74f1e6106958631ad4a7a653c7
|
refs/heads/master
| 2020-04-06T17:08:21.019355
| 2019-04-09T04:41:00
| 2019-04-09T04:41:00
| 157,648,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 771
|
py
|
# Generated by Django 2.1 on 2018-09-10 05:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Replaces `mascota.dueno`: the field is dropped and re-added with
    # PROTECT deletion and a related_name. Existing rows are backfilled
    # with user id 3; preserve_default=False keeps that default out of
    # the final schema.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pets', '0006_auto_20180910_0011'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='mascota',
            name='dueno',
        ),
        migrations.AddField(
            model_name='mascota',
            name='dueno',
            field=models.ForeignKey(default=3, on_delete=django.db.models.deletion.PROTECT, related_name='get_pets', to=settings.AUTH_USER_MODEL, verbose_name='Dueños'),
            preserve_default=False,
        ),
    ]
|
[
"hernandavidc@hotmail.com"
] |
hernandavidc@hotmail.com
|
a3eefa3f23a8dfe00c158170d73f421c29d1e373
|
c79737296bdf4b3a969ab5ceb69198daf66def0e
|
/python/solutii/bogdan_iacoboae/caesar/caesar.py
|
315bde89ddbea8afd9d78e0152861ba4b9c51fa0
|
[
"MIT"
] |
permissive
|
ilieandrei98/labs
|
96c749072b6455b34dc5f0bd3bb20f7a0e95b706
|
cda09cbf5352e88909f51546c2eb360e1ff2bec1
|
refs/heads/master
| 2020-04-26T03:23:48.220151
| 2019-03-01T08:56:43
| 2019-03-01T08:56:43
| 173,265,757
| 0
| 0
|
MIT
| 2019-03-01T08:37:14
| 2019-03-01T08:37:14
| null |
UTF-8
|
Python
| false
| false
| 1,939
|
py
|
# coding=utf-8
# from __future__ import print_function
"""Împăratul a primit serie de mesaje importante pe care este
important să le descifreze cât mai repede.
Din păcate mesagerul nu a apucat să îi spună împăratul care au fost
cheile alese pentru fiecare mesaj și tu ai fost ales să descifrezi
misterul.
Informații:
În criptografie, cifrul lui Caesar este o metodă simplă de a cripta
un mesaj prin înlocuirea fiecărei litere cu litera de pe poziția aflată
la un n pași de ea în alfabet (unde este n este un număr întreg cunoscut
"""
def afla_pasul(mesaj):
    """Derive the Caesar key, assuming the first plaintext letter is 'a'."""
    return ord(mesaj[0]) - ord('a')
def real_letter(character, key):
    """Shift an alphabetic character back by `key`, wrapping within a-z;
    non-alphabetic characters pass through unchanged."""
    if not character.isalpha():
        return character
    shifted = ord(character) - key
    if shifted < ord('a'):
        # Wrap around the 26-letter alphabet
        # (equivalent to the original ord('z') - (ord('a') - shifted) + 1).
        shifted += 26
    return chr(shifted)
def decripteaza_mesajul(mesaj, fisier):
    """Decrypt one message line, printing it and writing it to `fisier`.

    (Python 2: uses print statements.) The key is inferred from the first
    character via afla_pasul. Dots are counted: the second "." printed on
    a line ends the message and starts a new line in the output file.
    """
    key = afla_pasul(mesaj)
    puncte = 0.
    for index in mesaj:
        if index == ".":
            if puncte == 1:
                print ".\n"
                fisier.write("\n")
            else:
                puncte = puncte + 1
                print ".",
                fisier.write(".")
        else:
            print real_letter(index, key),
            fisier.write(real_letter(index, key))
def main():
    """Read the encrypted messages file and decrypt it line by line."""
    try:
        fisier = open("../../../date_intrare/mesaje.secret", "r")
        towrite = open("../../../date_iesire/mesaje.decodat", "w")
        mesaje = fisier.read()
        fisier.close()
    except IOError:
        print "Nu am putut obtine mesajele."
        return
    # NOTE(review): `towrite` is never explicitly closed/flushed.
    for mesaj in mesaje.splitlines():
        decripteaza_mesajul(mesaj, towrite)

if __name__ == "__main__":
    main()
|
[
"mmicu@cloudbasesolutions.com"
] |
mmicu@cloudbasesolutions.com
|
989528ac7820dca22e21aec571ce43ed89e4c1a0
|
e3e3071e5f01f75ba3716ac229abef484e8c051a
|
/mnist.py
|
f9639c963cb10a4bdcfc9a82659ccfe73a01289c
|
[] |
no_license
|
schmit/dictlearning
|
9efc9e15e73a99f840db71d81925dbe7c0bd22d0
|
14c37631aa4d330d58fc174b2294866e2484d5d0
|
refs/heads/master
| 2021-01-10T04:06:33.899917
| 2013-03-15T18:40:33
| 2013-03-15T18:40:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,943
|
py
|
import numpy as np
import scipy.io as sio
import dictionary
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from multiOGD import *
from kernels import *
import sys
import argparse
import utility
# Tee stdout through the project logger so the run is recorded.
sys.stdout = utility.Logger()
print 'Starting run of MNIST.py'

# CLI: five positional hyperparameters for the dictionary and the classifier.
parser = argparse.ArgumentParser(description=\
    'MNIST: Encode sparse dictionary and fit model')
parser.add_argument('dict_fit',\
    help="model for fitting dictionary (linreg, lasso, lars)")
parser.add_argument('dict_init',\
    help='initialization of dictionary')
parser.add_argument('dict_atoms',\
    help='nr of atoms in dictionary')
parser.add_argument('dict_reg',\
    help='regularization in sparse encoding')
parser.add_argument('mod_reg', \
    help='regularization svm fit')
params = parser.parse_args(sys.argv[1:])

DICT_FIT = params.dict_fit
DICT_INIT = params.dict_init
DICT_ATOMS = int(params.dict_atoms)
DICT_REG = float(params.dict_reg)
MOD_REG = float(params.mod_reg)
print params
def showimage(x):
    """Display a flattened 28x28 MNIST digit (column-major / Fortran order)."""
    img = np.reshape(x, (28, 28), order = 'F')
    imgplot = plt.imshow(img)
    plt.show()
# Load the MATLAB-format MNIST splits; features are transposed to
# samples-by-pixels.
mnist_train = sio.loadmat('./data/mnist/MNIST_train.mat')
mnist_test = sio.loadmat('./data/mnist/MNIST_test.mat')
X_train = mnist_train['X'][0][0][2].transpose()
y_train = mnist_train['y']
X_test = mnist_test['Xtest'].transpose()
y_test = mnist_test['ytest']
dim = X_train.shape[1]

## Dictionary
lasso_d = dictionary.Dictionary(dim, DICT_ATOMS, DICT_FIT, DICT_REG, \
    DICT_INIT)
# BUG FIX: this call was missing its closing parenthesis, which made the
# whole script a syntax error.
lasso_d.batchtrain(X_train)
# Save dictionary atoms as images
#lasso_d.dimagesave((28, 28), 'mnist')
# Find sparse-code reconstructions of both splits
alphas_train = lasso_d.batchreconstruction(X_train, \
    'mnist_train_s')
alphas_test = lasso_d.batchreconstruction(X_test, \
    'mnist_test_s')

## Classification: 10-class online gradient descent on the sparse codes
ogd_m = multiOGD(10, DICT_ATOMS, MOD_REG)
ogd_m.train(alphas_train, y_train)
ogd_m.predict(alphas_test, y_test)

# Parenthesized print works on both Python 2 and 3.
print('Run of MNIST.py is complete!')

'''
Atoms: 200
Reg: 0.05 too much
'''
|
[
"schmit@stanford.edu"
] |
schmit@stanford.edu
|
a98677c79904384ea4f9182f45560317822060b0
|
1eb50735e3861cde4bca8f4feab5afc730003078
|
/future/flags_threadpool.py
|
68c2812cf5337155961672ac7f2d7ec0945eca02
|
[] |
no_license
|
chinaylssly/fluent-python
|
442e6458215e3c5a74c4d34d020b714da108f81d
|
126c1d3e7853628c4a2c0e6ff475362b7d7fe33a
|
refs/heads/master
| 2020-04-17T13:58:03.534184
| 2019-02-01T14:40:42
| 2019-02-01T14:40:42
| 166,637,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
# -*- coding: utf-8 -*-
import random,sys
from time import sleep,time
from concurrent import futures
# Cap on pool size; do_many uses min(len(tl), MAX_WORKERS) workers.
MAX_WORKERS= 20
# Default workload: 20 sleep durations from 0.00s to 0.19s.
tl=[i*0.01 for i in range(20)]
def do_one(t=0.2):
    """Block for `t` seconds, then echo the duration back."""
    sleep(t)
    return t
def do_many(tl=tl):
    """Run do_one over every duration in `tl` on a thread pool.

    Returns the number of results collected. Consuming the map iterator
    re-raises any exception a task threw, just as calling next() on it
    would.
    """
    worker_count = min(len(tl), MAX_WORKERS)
    with futures.ThreadPoolExecutor(worker_count) as executor:
        # Leaving the with-block calls executor.shutdown(wait=True),
        # blocking until every submitted task has finished.
        outcomes = executor.map(do_one, tl)
        return len(list(outcomes))
def main(do_many=do_many):
    """Time one do_many run and print the task count and elapsed seconds."""
    started = time()
    task_count = do_many()
    elapsed = time() - started
    msg = 'execute {:2d} task cost {:.2f} s'
    print(msg.format(task_count, elapsed))


if __name__ == '__main__':
    main()
|
[
"chinaylssly@qq.com"
] |
chinaylssly@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.