Column schema for this data slice (name: type, observed range or number of distinct values):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2 to 616
- content_id: string, length 40
- detected_licenses: list, 0 to 69 entries
- license_type: string, 2 distinct values
- repo_name: string, length 5 to 118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4 to 63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k to 686M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 23 distinct values
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 distinct values
- src_encoding: string, 30 distinct values
- language: string, 1 distinct value
- is_vendor: bool
- is_generated: bool
- length_bytes: int64, 2 to 10.3M
- extension: string, 246 distinct values
- content: string, length 2 to 10.3M
- authors: list, exactly 1 entry
- author_id: string, length 0 to 212

Each record below gives these fields in this order, separated by `|`, with the full file text in the `content` field.
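For orientation, one row can be read as a single record with the fields above. Below is a minimal sketch of such a record as a Python dict; the field names come from the schema, the values are copied or abbreviated from the first record that follows, and only a subset of fields is shown for illustration.

record = {
    "blob_id": "13d5ff501856936d48c7481261ed51a44a688cff",
    "path": "/experiments/rgb_pretrainedFlownet_laplace_ep400/params.py",
    "detected_licenses": [],
    "license_type": "no_license",
    "repo_name": "CORaisch/DeepVO-pytorch",
    "branch_name": "refs/heads/master",
    "star_events_count": 0,
    "fork_events_count": 0,
    "src_encoding": "UTF-8",
    "language": "Python",
    "is_vendor": False,
    "is_generated": False,
    "length_bytes": 6743,
    "extension": "py",
    "content": "import os\nclass Parameters(): ...",  # truncated; in the data this holds the full source file
    "authors": ["claudio.raisch@hotmail.de"],
}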
13d5ff501856936d48c7481261ed51a44a688cff
|
faf005f0a01920f38ef9971e21e97d39da19e6ee
|
/experiments/rgb_pretrainedFlownet_laplace_ep400/params.py
|
8f9b70e839a9a7db00a14330987eafc9efe8d7a6
|
[] |
no_license
|
CORaisch/DeepVO-pytorch
|
8524040042225e4b1c406ef343feeb89ad3db15f
|
7ac019cc60971dd45840ab1bab38a6e516afb429
|
refs/heads/master
| 2020-12-11T11:47:41.568603
| 2020-08-24T11:11:36
| 2020-08-24T11:11:36
| 233,840,415
| 0
| 0
| null | 2020-01-14T12:50:14
| 2020-01-14T12:50:13
| null |
UTF-8
|
Python
| false
| false
| 6,743
|
py
|
import os
class Parameters():
def __init__(self, unpack_to = None):
self.n_processors = 4
# Path
self.data_dir = '/media/claudio/1AC5-C2D4/Datasets/KITTI/DeepVO-pytorch/'
self.image_dir = os.path.join(self.data_dir, 'images/')
self.pose_dir = os.path.join(self.data_dir, 'poses_gt/')
self.train_video = ['00', '01', '02', '05', '08', '09']
self.valid_video = ['04', '06', '07', '10']
self.partition = None # partition videos in 'train_video' to train / valid dataset #0.8
# Data Preprocessing
self.grayscale = False # specify whether grayscale images should be used for training/testing
self.laplace_preprocessing = True # enable Laplace preprocessing; NOTE: instead of normalizing the inputs with mean and std, images will be Laplace filtered
self.resize_mode = 'rescale' # choice: 'crop' 'rescale' None
self.img_w = 608 # original size is about 1226
self.img_h = 184 # original size is about 370
# means and std for kitti rgb image sequence [00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10]
self.img_means = (-0.15116102640573548, -0.1322411015338543, -0.13887598313286317)
self.img_stds = (0.31308950448998596, 0.3176070324487968, 0.3232656266278995)
# means and std for kitti grayscale image sequence [00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10]
# self.img_means = (-0.14046805191159092, -0.14046805191159092, -0.14046805191159092)
# self.img_stds = (0.3128824310845576, 0.3128824310845576, 0.3128824310845576)
self.minus_point_5 = True
self.seq_len = (5, 7)
self.sample_times = 3
# Data info path
self.train_data_info_path = 'datainfo/train_df_t{}_v{}_p{}_seq{}x{}_sample{}.pickle'.format(''.join(self.train_video), ''.join(self.valid_video), self.partition, self.seq_len[0], self.seq_len[1], self.sample_times)
self.valid_data_info_path = 'datainfo/valid_df_t{}_v{}_p{}_seq{}x{}_sample{}.pickle'.format(''.join(self.train_video), ''.join(self.valid_video), self.partition, self.seq_len[0], self.seq_len[1], self.sample_times)
# Model
self.rnn_hidden_size = 1000
self.conv_dropout = (0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.5)
self.rnn_dropout_out = 0.5
self.rnn_dropout_between = 0 # 0: no dropout
self.clip = None
self.batch_norm = True
# Training
self.epochs = 400
self.batch_size = 8
self.pin_mem = True
self.optim = {'opt': 'Adagrad', 'lr': 0.0005}
# Choice:
# {'opt': 'Adagrad', 'lr': 0.001}
# {'opt': 'Adam'}
# {'opt': 'Cosine', 'T': 100 , 'lr': 0.001}
# Pretrain, Resume training
# self.pretrained_flownet = None
self.pretrained_flownet = 'pretrained/flownets_EPE1.951.pth.tar'
# Choice:
# None
# './pretrained/flownets_bn_EPE2.459.pth.tar'
# './pretrained/flownets_EPE1.951.pth.tar'
# self.resume = True # resume training
self.resume = False
self.resume_t_or_v = '.train'
self.experiment_name = '/rgb_pretrainedFlownet_laplace_ep400/'
self.load_model_path = 'models{}t{}_v{}_im{}x{}_s{}x{}_b{}_rnn{}_{}.model{}'.format(self.experiment_name, ''.join(self.train_video), ''.join(self.valid_video), self.img_h, self.img_w, self.seq_len[0], self.seq_len[1], self.batch_size, self.rnn_hidden_size, '_'.join([k+str(v) for k, v in self.optim.items()]), self.resume_t_or_v)
self.load_optimizer_path = 'models{}t{}_v{}_im{}x{}_s{}x{}_b{}_rnn{}_{}.optimizer{}'.format(self.experiment_name, ''.join(self.train_video), ''.join(self.valid_video), self.img_h, self.img_w, self.seq_len[0], self.seq_len[1], self.batch_size, self.rnn_hidden_size, '_'.join([k+str(v) for k, v in self.optim.items()]), self.resume_t_or_v)
self.record_path = 'records{}t{}_v{}_im{}x{}_s{}x{}_b{}_rnn{}_{}.txt'.format(self.experiment_name, ''.join(self.train_video), ''.join(self.valid_video), self.img_h, self.img_w, self.seq_len[0], self.seq_len[1], self.batch_size, self.rnn_hidden_size, '_'.join([k+str(v) for k, v in self.optim.items()]))
self.save_model_path = 'models{}t{}_v{}_im{}x{}_s{}x{}_b{}_rnn{}_{}.model'.format(self.experiment_name, ''.join(self.train_video), ''.join(self.valid_video), self.img_h, self.img_w, self.seq_len[0], self.seq_len[1], self.batch_size, self.rnn_hidden_size, '_'.join([k+str(v) for k, v in self.optim.items()]))
self.save_optimzer_path = 'models{}t{}_v{}_im{}x{}_s{}x{}_b{}_rnn{}_{}.optimizer'.format(self.experiment_name, ''.join(self.train_video), ''.join(self.valid_video), self.img_h, self.img_w, self.seq_len[0], self.seq_len[1], self.batch_size, self.rnn_hidden_size, '_'.join([k+str(v) for k, v in self.optim.items()]))
self.results_dir = 'results{}'.format(self.experiment_name)
if not os.path.isdir(os.path.dirname(self.record_path)):
os.makedirs(os.path.dirname(self.record_path))
if not os.path.isdir(os.path.dirname(self.save_model_path)):
os.makedirs(os.path.dirname(self.save_model_path))
if not os.path.isdir(os.path.dirname(self.save_optimzer_path)):
os.makedirs(os.path.dirname(self.save_optimzer_path))
if not os.path.isdir(os.path.dirname(self.train_data_info_path)):
os.makedirs(os.path.dirname(self.train_data_info_path))
def set_remote_dir(self, remote_dir):
if remote_dir:
self.data_dir = remote_dir
self.image_dir = os.path.join(self.data_dir, 'images/')
self.pose_dir = os.path.join(self.data_dir, 'poses_gt/')
def set_home_dir(self, home_dir):
if home_dir:
self.train_data_info_path = os.path.join(home_dir, self.train_data_info_path)
self.valid_data_info_path = os.path.join(home_dir, self.valid_data_info_path)
if self.pretrained_flownet:
self.pretrained_flownet = os.path.join(home_dir, self.pretrained_flownet)
self.load_model_path = os.path.join(home_dir, self.load_model_path)
self.load_optimizer_path = os.path.join(home_dir, self.load_optimizer_path)
self.record_path = os.path.join(home_dir, self.record_path)
self.save_model_path = os.path.join(home_dir, self.save_model_path)
self.save_optimzer_path = os.path.join(home_dir, self.save_optimzer_path)
self.results_dir = os.path.join(home_dir, self.results_dir)
def set_resume(self, val):
self.resume = val
par = Parameters()
|
[
"claudio.raisch@hotmail.de"
] |
claudio.raisch@hotmail.de
|
94db2d21c6ea193c15fc2e11340acffb4bf510df
|
f7ded947c1ba3aed5e4f8c32a14d26ca66ad837a
|
/cogs/games/instances.py
|
df416d10e0347f715234188f6126cf1174988be9
|
[] |
no_license
|
Project-Cerebrus/cerebrus-v2
|
307a5d881d404d76b805f7ffeaa4a35363078d46
|
214f9dbbf7387f91f5bc6ec32c2bbab954344af4
|
refs/heads/master
| 2023-07-02T14:17:42.381524
| 2021-08-04T05:16:33
| 2021-08-04T05:16:33
| 379,460,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
import discord, random
from discord.ext import commands
import os
import subprocess
import asyncio
import pickle
devs = ['775198018441838642', '750755612505407530', '746904488396324864']
try:
    duckusers = pickle.load(open("static/duckusers/duckbuck.dat", "rb"))
except FileNotFoundError:
    print("run _instance <action> <instance> to start instance module")
    duckusers = []
# both lists must exist, since addduckuser() appends to each of them
duckbuck = []
def addduckuser(id):
duckbuck.append(id)
duckusers.append(id)
os.system("rm -rf static/duckbuck.dat")
pickle.dump(duckbuck,open("static/duckusers/duckbuck.dat", "wb"))
pickle.dump(duckusers,open("static/duckusers/duckbuck.dat", "wb"))
return
class instances(commands.Cog, name='Instances'):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=["instance"])
async def instances(self,ctx,action=None,instance=None):
if action == "add":
if instance != None:
if instance == "duckbucks":
msg = await ctx.send(f"adding {ctx.author.mention} to duckbucks instance...")
addduckuser(ctx.author.id)
await msg.edit(content=f"successfully added {ctx.author.mention} to {instance} instance\n type `_dhelp` to get started")
def setup(bot):
bot.add_cog(instances(bot))
|
[
"57133270+Eris9@users.noreply.github.com"
] |
57133270+Eris9@users.noreply.github.com
|
4faf8ce733700a649ba464af173b8566c1d2ca73
|
e670abf195fff3c4e6415775feb456b0cd38f42f
|
/Exercise.5.3.py
|
b6046d8cdb920973af1fade5fa32f025e2858095
|
[] |
no_license
|
jryan0004/pythonBasicProjects
|
eae0af9429d429b4618b98b8e2dc49899966e9d9
|
9125baa96bc55bd001cc12a4a980ad3ff7187394
|
refs/heads/master
| 2021-08-31T12:07:26.343708
| 2017-12-21T08:02:38
| 2017-12-21T08:02:38
| 114,514,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
def is_triangle(x ,y, z):
sum=x+z
if(sum<y):
print('No')
sum=y+x
if(sum<z):
print('No')
sum=y+z
if(sum<x):
print('No')
else:
print('Yes')
def prompts_user():
sideone=input('Please enter side one')
sidetwo=input('Please enter side two')
sidethree=input('Please enter side three')
sideone=int(sideone)
sidetwo=int(sidetwo)
sidethree=int(sidethree)
is_triangle(sideone,sidetwo,sidethree)
prompts_user()
|
[
"jryan0004@gmail.com"
] |
jryan0004@gmail.com
|
0b0ef11edb76922f627a568b5dc906cb5f7935dc
|
1de2ba9743e8b3f63f46c30bdeb57c65569e47e5
|
/train.py
|
abcfa6c7e9c6c1369a3dbb9d613454092104271f
|
[
"MIT"
] |
permissive
|
ZuchniakK/bw_imagenet
|
b74a1a5eefe27553d7410c4fafbe8224cfb6c7aa
|
37d4d90d8e1985f2b0c03e9b00077fef1522464a
|
refs/heads/main
| 2023-01-23T16:37:13.672517
| 2020-11-19T17:21:31
| 2020-11-19T17:21:31
| 314,097,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,155
|
py
|
import tensorflow as tf
import numpy as np
from os.path import join
import os
import sys
import json
from datasets_generator import BWImageNetDataGenerator
from custom_calbacks import BackupAndRestore
# DATA_DIRECTORY = '/net/scratch/datasets/AI/imagenet/data'
DATA_DIRECTORY = 'data'
MODEL_DIRECTORY = 'models'
MAX_TRAIN_EPOCH = 1000
EPOCH_PER_DATASET = 0.2
VALIDATION_SPLIT = 0.2
TRAIN_VAL_N_SAMPLES = 1281167
TEST_N_SAMPLES = 50000
def get_bw_model(model_name):
model_builder = getattr(tf.keras.applications, model_name)
model = model_builder(
include_top=True, weights=None, input_tensor=None, input_shape=None,
pooling=None, classes=1000)
input_shape = model.layers[0]._batch_input_shape
print(input_shape)
x, y = 0, 1
if input_shape[0] is None:
x += 1
y += 1
new_shape = (input_shape[x], input_shape[y], 1)
target_size = (input_shape[x], input_shape[y])
model = model_builder(
include_top=True, weights=None, input_tensor=None, input_shape=new_shape,
pooling=None, classes=1000)
print(model.summary())
return model, target_size
def train(model_name, batch_size):
model, target_size = get_bw_model(model_name)
bw_gen = BWImageNetDataGenerator(directory=DATA_DIRECTORY,
batch_size=batch_size,
target_size=target_size,
validation_split=VALIDATION_SPLIT)
model_dir = join(MODEL_DIRECTORY, model_name.lower() + '_' + str(batch_size))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# backup = tf.keras.callbacks.experimental.BackupAndRestore(join(model_dir, 'backup'))
backup = BackupAndRestore(join(model_dir, 'backup'))
lr_reducer = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_lr=0.00005)
early_stoping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=30, restore_best_weights=True)
csv_logger = tf.keras.callbacks.CSVLogger(join(model_dir, 'train_log.csv'), append=True)
model.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(learning_rate=0.005),
metrics=['accuracy'])
model.fit(
x=bw_gen.train_flow(),
epochs=MAX_TRAIN_EPOCH,
callbacks=[backup, lr_reducer, early_stoping, csv_logger],
validation_data=bw_gen.val_flow(),
steps_per_epoch=int((TRAIN_VAL_N_SAMPLES * (1 - VALIDATION_SPLIT)) / batch_size * EPOCH_PER_DATASET),
validation_steps=int((TRAIN_VAL_N_SAMPLES * VALIDATION_SPLIT) / batch_size * EPOCH_PER_DATASET))
result = model.evaluate(
x=bw_gen.test_flow(),
steps=int((TRAIN_VAL_N_SAMPLES * VALIDATION_SPLIT) / batch_size),  # number of evaluation batches
return_dict=True)
with open(join(model_dir, 'evaluation.txt'), 'w') as file:
file.write(json.dumps(result)) # use `json.loads` to do the reverse
model.save(model_dir)
if __name__ == '__main__':
model_name = sys.argv[1]
batch_size = int(sys.argv[2])
train(model_name, batch_size=batch_size)
# 'ResNet50'
|
[
"konrad.zuchniak@gmail.com"
] |
konrad.zuchniak@gmail.com
|
8171f232b9307faed6cd6e928dd2c6672bcbc15a
|
2cd81d13739e7ae6b6e31a4ff96fb27a2f7621b5
|
/mbfmri/models/__init__.py
|
4df28220332af09c6d7eaeb24fb5232079fcc30e
|
[] |
no_license
|
CCS-Lab/project_model_based_fmri
|
5076afebd6140afe8577127e84c308f6bec2b50d
|
5b4b5cc301585d614458edf12053d34258512db6
|
refs/heads/main
| 2023-07-25T20:52:45.536368
| 2021-09-09T05:22:51
| 2021-09-09T05:22:51
| 289,861,786
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 70
|
py
|
__all__ = ["tf_cnn","tf_mlp","elasticnet", "mvpa_general", "tf_base"]
|
[
"cjfwndnsl@snu.ac.kr"
] |
cjfwndnsl@snu.ac.kr
|
df3574704ca6de6c81f9b6c51a22941941257d42
|
1424812c4f211d3d5e356e8b3889a689162062f3
|
/arcade/core/11_extra_number.py
|
22d5506bb65bd3268eccb335015a9603f73e2e72
|
[] |
no_license
|
nazomeku/codefights
|
cb7d3c40be0809695ec524a87c88dbebcf5b47bc
|
b23f6816f9b5b0720feac1c49c31163923e0a554
|
refs/heads/master
| 2021-01-22T12:49:35.905165
| 2017-11-21T19:03:37
| 2017-11-21T19:03:37
| 102,357,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
"""You're given three integers, a, b and c. It is guaranteed that two of these
integers are equal to each other. What is the value of the third integer?"""
def extra_number(a, b, c):
if a == b:
return c
elif b == c:
return a
else:
return b
|
[
"cols.nazo@gmail.com"
] |
cols.nazo@gmail.com
|
74d5a6d33f7179d09e915ed96b207a22933ba675
|
ad408c6244d3474d7631acbbf38476a16ff80730
|
/LaonSill/src/log/decodeHotLog.py
|
c56eca330d01e8124ebcc8d6eb657ffa2fe06e15
|
[
"Apache-2.0"
] |
permissive
|
alice965/LaonSillv2
|
e229cc9351bd4befb23a50820108f489125e18e7
|
826f664d5fd8a8625f2abfb9cd566e4f700c16d1
|
refs/heads/main
| 2023-02-16T16:57:06.699971
| 2020-11-30T06:44:43
| 2020-11-30T06:44:43
| 354,684,554
| 1
| 0
| null | 2021-04-05T01:25:49
| 2021-04-05T01:25:43
| null |
UTF-8
|
Python
| false
| false
| 5,858
|
py
|
#!/usr/bin/env python
"""decodeHotCode.py: """
import json
import sys
import os
import struct
def checkEventProperty(hotCodeDic, hotCode, propertyName):
if not propertyName in hotCodeDic[hotCode]:
print "ERROR: hotCode %s does not have %s property" % (hotCode, propertyName)
exit(-1)
# XXX: we only consider the Linux 64-bit platform.
# XXX: python does not support long double format..
typeFmtDic = {\
"UINT8" : "B", "INT8" : "b",\
"UINT16" : "H", "INT16" : "h",\
"UINT32" : "I", "INT32" : "i",\
"UINT64" : "Q", "INT64" : "q",\
"BOOL" : "?", "FLOAT" : "f",\
"DOUBLE" : "d", "LONGDOUBLE" : "d",\
}
def getValueSize(typeStr):
if typeStr in ["UINT8", "INT8"]:
return 1
elif typeStr in ["UINT16", "INT16"]:
return 2
elif typeStr in ["UINT32", "INT32"]:
return 4
elif typeStr in ["UINT64", "INT64"]:
return 8
elif typeStr in ["FLOAT"]:
return 4
elif typeStr in ["DOUBLE"]:
return 8
elif typeStr in ["LONGDOUBLE"]:
return 16
elif typeStr in ["BOOL"]:
return 1
return 0
def decodeFile(srcFileName, hotCodeDic):
try:
srcFile = open(srcFileName, 'rb')
eventCount = 0
print '[ decode ' + srcFileName + ' starts ]'
print "================================================"
while True:
chunk = srcFile.read(4)
if chunk == '':
break
codeId = struct.unpack('i', chunk)[0]
if codeId == 0:
chunk = srcFile.read(4)
failCount = struct.unpack('i', chunk)[0]
print "================================================"
print " - event count=%d" % eventCount
print " - fail count=%d" % failCount
print "================================================\n"
break
eventCount = eventCount + 1
hotCode = str(codeId)
if hotCode not in hotCodeDic:
print 'ERROR: hotcode (%s) is not defined in hotCodeDic' % hotCode
exit(-1)
paramList = hotCodeDic[hotCode]['ARGS']
paramSize = 0
t = ()
for param in paramList:
arrayString = ""
foundNull = False
if param in typeFmtDic:
paramSize = int(getValueSize(param))
fmt = typeFmtDic[param]
elif "CHAR" in param:
# XXX: needs error-check
arrayCount = int(param.replace(")", "$").replace("(", "$").split("$")[1])
paramSize = arrayCount
fmt = '%ds' % paramSize
else:
print "ERROR: invalid hotCode type(%s) for hotCode(%s)" % (param, hotCode)
exit(-1)
chunk = srcFile.read(paramSize)
if chunk == '':
print 'ERROR: data is truncated'
exit(-1)
# change fmt if there is '\0' middle of chunk
if "CHAR" in param:
nullOffset = 0
for char in chunk:
if char == '\0':
fmt = '%ds' % nullOffset
foundNull = True
break
nullOffset = nullOffset + 1
if foundNull == True:
t = t + struct.unpack(fmt, chunk[:nullOffset])
else:
t = t + struct.unpack(fmt, chunk)
print hotCodeDic[hotCode]['FMT'] % t
except Exception as e:
print str(e)
exit(-1)
finally:
srcFile.close()
def printUsage():
print "USAGE: ./decodeHotLog hotCodeDefFilePath hotLogTopDir pid"
print "USAGE: ./decodeHotLog hotCodeDefFilePath hotLogTopDir pid tid"
exit(0)
# (1) parsing argument
try:
if len(sys.argv) < 4:
printUsage()
elif len(sys.argv) == 4:
defFilePath = sys.argv[1]
hotLogDir = sys.argv[2]
pid = int(sys.argv[3])
tid = -1
elif len(sys.argv) == 5:
defFilePath = sys.argv[1]
hotLogDir = sys.argv[2]
pid = int(sys.argv[3])
tid = int(sys.argv[4])
else:
printUsage()
except Exception as e:
print str(e)
exit(-1)
# (2) loading hotCodeDef json file into hotCodeDic
try:
jsonFile = open(defFilePath, 'r')
hotCodeDic = json.load(jsonFile)
for hotCode in hotCodeDic:
checkEventProperty(hotCodeDic, hotCode, "FMT")
checkEventProperty(hotCodeDic, hotCode, "ARGS")
except Exception as e:
print str(e)
exit(-1)
finally:
jsonFile.close()
# (3) search target file(s)
# XXX: deep in depth
try:
targetFilePathList = []
if tid == -1:
pidFilePrefixName = 'hot.%d.' % pid
pidFilePrefixNameLen = len(pidFilePrefixName)
for searchPath, searchDirs, searchFiles in os.walk(hotLogDir):
if searchFiles:
for searchFile in searchFiles:
if pidFilePrefixName == searchFile[:pidFilePrefixNameLen]:
targetFilePathList.append(os.path.join(searchPath, searchFile))
else:
pidTidFileName = 'hot.%d.%d' % (pid, tid)
for searchPath, searchDirs, searchFiles in os.walk(hotLogDir):
if searchFiles:
for searchFile in searchFiles:
if pidTidFileName == searchFile:
targetFilePathList.append(os.path.join(searchPath, searchFile))
except Exception as e:
print str(e)
exit(-1)
# (4) decode file(s)
for targetFilePath in targetFilePathList:
print 'target file : ' + targetFilePath
decodeFile(targetFilePath, hotCodeDic)
|
[
"12prog@naver.com"
] |
12prog@naver.com
|
9e6f6d233693b155fa65d04a75733acc84d9bc0a
|
24b874d65c92487cf70bbb64d60b8f3979adb6eb
|
/app/user/views.py
|
784cf564da7335889f411ce23e952da9db5dfc03
|
[
"MIT"
] |
permissive
|
JOsorio01/recipe-app-api
|
7e461ee535cb59ef1f7084e623d80873da4501ed
|
4ea9f8f178bf4f76c11553b0f328791f847ff91c
|
refs/heads/main
| 2021-07-02T19:13:53.064450
| 2021-05-10T19:17:20
| 2021-05-10T19:17:20
| 233,429,653
| 0
| 0
|
MIT
| 2020-01-12T19:20:22
| 2020-01-12T17:20:51
|
Python
|
UTF-8
|
Python
| false
| false
| 873
|
py
|
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework import generics, authentication, permissions
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
"""Create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""Create a new auth token for user"""
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
"""Manage the authenticated user"""
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
return self.request.user
|
[
"joseosorio001@outlook.com"
] |
joseosorio001@outlook.com
|
d8d386e51d85e26f35ba5f58f8c383ce5dfd47cc
|
4fa1af506bca9d3f5b66bad379c49ec8b7f6e3a2
|
/src/meas_daily_roc.py
|
530d80ec477254004660a7f50f433e0a86626eb1
|
[] |
no_license
|
SutronPyto/LinkPython
|
9e31dc9820fdbf678ac1bf7b62b828b32a184d6d
|
a7128da73012c0ba5d91133ab551bbadd5e704f2
|
refs/heads/master
| 2023-05-13T19:14:27.135039
| 2023-05-05T18:37:15
| 2023-05-05T18:37:15
| 146,764,795
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,020
|
py
|
# Example: Rain during the last 24 hours, Rate of change measurements
from sl3 import *
@MEASUREMENT
def precip_last_24_hours(inval):
"""
Computes rainfall during the last 24 hours.
If called at 5PM today, it tells you how much rain fell since 5PM yesterday.
If called at 8:15AM today, gives rain since 8:15AM yesterday.
* Reads the current precip accumulation.
* Reads the precip accumulation from 24 hours ago.
* Computes result based on difference.
Must have a measurement setup to log precip accumulation on same schedule
as this measurement. It must have a specific label (see below).
Here is the required setup (interval may be adjusted)::
!M2 Active=On
!M2 Label=RAIN ACCU
!M2 Meas Type=Precip Accumulation
!M2 Accumulation Reset=Off
!M2 Meas Interval=00:15:00
!M3 Active=On
!M3 Label=RAIN 24h
!M3 Meas Type=Manual Entry
!M3 Meas Interval=00:15:00
!M3 Use Script=On
!M3 Script Function=precip_last_24_hours
:param inval: this value is ignored
:return: precip during last 24 hours
:rtype: float
"""
# the precip accumulation measurement must have this label:
precip_accu_label = "RAIN ACCU"
# what index is the precip accumulation measurement?
precip_accu_index = meas_as_index(precip_accu_label)
# current reading of precip accumulation
precip_current = measure(precip_accu_index)
# compute previous time based on precip reading's timestamp
# routine is made for 24 hours, but any interval could be used
time_24_hours_ago = precip_current.time - 24 * 60 * 60
# Read the log, starting with the newest precip reading
# and going backwards until we find the oldest reading within the time bounds.
# That allows us to produce a result before the first 24 hours pass.
oldest_reading = Reading(value=0.0)
try:
logthing = Log(oldest=time_24_hours_ago,
newest=precip_current.time,
match=precip_accu_label,
pos=LOG_NEWEST)
for itero in logthing:
oldest_reading = itero
except LogAccessError:
print('No logged readings found. Normal until recording starts.')
return 0.0
rain_24_hour = precip_current.value - oldest_reading.value
if rain_24_hour < 0.0:
# If the difference is negative, precip accumulation has been reset.
# Use the current precip accumulation value as the 24 hour value
rain_24_hour = precip_current.value
return rain_24_hour
def differential_reading(meas_label, period_sec, allow_negative):
"""
Computes the difference between the most recent reading of the specified measurement,
and an older reading of the same measurement.
Routine reads the log looking for the older reading.
This is a generic version of the precip_last_24_hours routine.
:param meas_label: the label of the measurement in question
:type meas_label: str
:param period_sec: how long ago the old reading was made in seconds
:type period_sec: int
:param allow_negative: should a negative difference be allowed? set to False for rain accumulation
:type allow_negative: bool
:return: the difference between the two readings
:rtype: float
"""
# current reading
current = measure(meas_as_index(meas_label))
# compute previous time based on current reading and period_sec
time_old = current.time - period_sec
# Read the log, looking for the measurement starting with the newest
# and going backwards until we find the oldest reading within the time bounds.
oldest_reading = Reading(value=0.0)
try:
logthing = Log(oldest=time_old,
newest=current.time,
match=meas_label,
pos=LOG_NEWEST)
for itero in logthing:
oldest_reading = itero
except LogAccessError:
print('No logged readings found. Normal until recording starts.')
return 0.0
# if both readings are valid, compute the difference
if (current.quality == 'G') and (oldest_reading.quality == 'G'):
result = current.value - oldest_reading.value
if (result < 0.0) and (not allow_negative):
# If the difference is negative, the measurement has been reset.
print('Negative change not allowed')
return current.value
else:
print('Change computed successfully')
return result
else:
print('Readings were not valid')
return 0.0
@MEASUREMENT
def precip_last_hour(inval):
"""
Computes the precipitation during the last hour.
Please see precip_last_24_hours.
This example uses differential_reading.
Another measurement labeled RAIN ACCU must be recording precip accumulation.
"""
return differential_reading("RAIN ACCU", 3600, False) # 3600 sec = 1 hour. False means no negative readings.
def rate_of_change_2meas_setup(meas_index_or_label):
"""
Computes the change between the current and the previous readings.
This script needs to reference another measurement which logs sensor readings.
Please note that it takes two measurements to compute rate of change.
The first measurement needs to log the sensor values.
The second measurement will compute the difference of two sensor values.
"""
# find the index of the measurement
meas_index = meas_as_index(meas_index_or_label)
# find out this measurement's interval
interval_text = setup_read("M{} Meas Interval".format(meas_index))
interval_sec = sl3_hms_to_seconds(interval_text)
meas_label = meas_find_label(meas_index)
# Find the difference in two readings. True means allow negative change.
change = differential_reading(meas_label, interval_sec, True)
return change
@MEASUREMENT
def roc_m1(inval):
"""
Computes rate of change for measurement M1.
This script must be associated with a measurement other than M1.
"""
return rate_of_change_2meas_setup(1)
""" The variables below are used to compute the rate of change"""
roc_valid = False
roc_previous = 0.0
@MEASUREMENT
def rate_of_change_1meas_setup(inval):
"""
Computes rate of change for the measurement setup with this script.
This never logs the value of the sensor. Instead, it remembers the sensor
reading and uses it the next time it computes the rate of change.
"""
global roc_valid
global roc_previous
# If we have the previous reading, compute the rate of change.
# If not, return zero.
if roc_valid:
result = inval - roc_previous
else:
result = 0.0
roc_valid = True
# Remember the current value. It gets used next measurement cycle.
roc_previous = inval
return result
def measurement_previous():
"""
Gets the previous measurement from the log using the measurement schedule
Must be called by an @MEASUREMENT function
Make sure to check the quality of the returned reading!
:return: previously logged measurement
:rtype: Reading
"""
# find the previous reading of this measurement in the log
time_previous = time_scheduled() - 1 # anything older than current reading
meas_label = meas_find_label(index())
# find out this measurement's interval to compute time of previous
interval_text = setup_read("M{} Meas Interval".format(index()))
interval_sec = sl3_hms_to_seconds(interval_text)
try:
previous_reading = Log(
oldest=time_previous - interval_sec,
newest=time_previous,
match=meas_label).get_newest()
print("got one from log")
return previous_reading
except LogAccessError:
# could not find it. create a bad reading
print("did not find one")
return Reading(time=time_previous, label=meas_label, value=-999.0, quality="B")
|
[
"adi@sutron.com"
] |
adi@sutron.com
|
a9143811595cdaa7b4a3a7168623fb0ccda77175
|
859b2f7c2426044c9d049e3cbf94907981fa6d33
|
/products/migrations/0002_offer.py
|
79ae44d253e9619baa14f089fd2245885f212a6a
|
[] |
no_license
|
sjortiz/PyShop
|
984908c2003b2cba2bf0f74a251c7004c3086c3f
|
4b9656b39051931be453419bf9e589a70f1235d1
|
refs/heads/master
| 2022-04-25T13:30:56.194261
| 2020-04-01T17:07:39
| 2020-04-01T17:07:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
# Generated by Django 2.1 on 2020-03-29 16:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Offer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=255)),
('description', models.CharField(max_length=255)),
('discount', models.FloatField()),
],
),
]
|
[
"paola.rom.d@gmail.com"
] |
paola.rom.d@gmail.com
|
7e8e9f7f360955e59b325d5aecb3d2f81b94fe51
|
9b41bd4d829b7b4b5fc7ea2f375089793f34beb0
|
/lib/googlecloudsdk/core/exceptions.py
|
5583df4ffd56b139284ece43063e8bfba35a3ac8
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
eyalev/gcloud
|
20a596f9cbf7873eaea652a0b2ad080678f1598c
|
421ee63a0a6d90a097e8530d53a6df5b905a0205
|
refs/heads/master
| 2020-12-25T14:48:11.142544
| 2016-06-22T08:43:20
| 2016-06-22T08:43:20
| 61,703,392
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,161
|
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base exceptions for the Cloud SDK."""
import os
from googlecloudsdk.core.util import platforms
class _Error(Exception):
"""A base exception for all Cloud SDK errors.
This exception should not be used directly.
"""
pass
class InternalError(_Error):
"""A base class for all non-recoverable internal errors."""
pass
class Error(_Error):
"""A base exception for all user recoverable errors.
Any exception that extends this class will not be printed with a stack trace
when running from CLI mode. Instead it will be shown with a message of how
the user can correct this problem.
All exceptions of this type must have a message for the user.
"""
def __init__(self, *args, **kwargs):
"""Initialize a core.Error.
Args:
*args: positional args for exceptions.
**kwargs: keyword args for exceptions, and additional arguments:
- exit_code: int, The desired exit code for the CLI.
"""
super(Error, self).__init__(*args)
self.exit_code = kwargs.get('exit_code', 1)
class RequiresAdminRightsError(Error):
"""An exception for when you don't have permission to modify the SDK.
This tells the user how to run their command with administrator rights so that
they can perform the operation.
"""
def __init__(self, sdk_root):
message = (
'You cannot perform this action because you do not have permission '
'to modify the Google Cloud SDK installation directory [{root}].\n\n'
.format(root=sdk_root))
if (platforms.OperatingSystem.Current() ==
platforms.OperatingSystem.WINDOWS):
message += (
'Click the Google Cloud SDK Shell icon and re-run the command in '
'that window, or re-run the command with elevated privileges by '
'right-clicking cmd.exe and selecting "Run as Administrator".')
else:
# Specify the full path because sudo often uses secure_path and won't
# respect the user's $PATH settings.
gcloud_path = os.path.join(sdk_root, 'bin', 'gcloud')
message += (
'Re-run the command with sudo: sudo {0} ...'.format(gcloud_path))
super(RequiresAdminRightsError, self).__init__(message)
class NetworkIssueError(Error):
"""An error to wrap a general network issue."""
def __init__(self, message):
super(NetworkIssueError, self).__init__(
'{message}\n'
'This may be due to network connectivity issues. Please check your '
'network settings, and the status of the service you are trying to '
'reach.'.format(message=message))
|
[
"eyalev@gmail.com"
] |
eyalev@gmail.com
|
462f4904a74ccfa7602b49f6f264443431f251d9
|
42d90670a17eca8d842f91064693d11cbbdf0055
|
/Day_19/main.py
|
7337b88bf86a60a0b8ff072daa0b9f673e0d916f
|
[] |
no_license
|
Smithwill1/100_Days_Of_Python
|
9b7448ed3a930806f7b32775ab9c2cfb34b6bfa0
|
1e7323b371fbff14252c1a16537f3b9708e861c1
|
refs/heads/main
| 2023-05-07T09:08:27.402304
| 2021-05-24T13:42:13
| 2021-05-24T13:42:13
| 359,936,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
from turtle import Turtle, Screen
bob = Turtle()
screen = Screen()
def move_forward():
bob.fd(10)
def turn_left():
bob.lt(10)
def turn_right():
bob.rt(10)
def move_backward():
bob.bk(10)
def reset():
bob.clear()
bob.reset()
screen.listen()
screen.onkey(key='w', fun=move_forward)
screen.onkey(key='s', fun=move_backward)
screen.onkey(key='a', fun=turn_left)
screen.onkey(key='d', fun=turn_right)
screen.onkey(key='c', fun=reset)
screen.exitonclick()
|
[
"williamssmith526@gmail.com"
] |
williamssmith526@gmail.com
|
05c3b18d77e9010b88570d85d73ed0243daa71e4
|
d39a237efe0e722482772ca924b35fe08afb495f
|
/neptune_call.py
|
1431bd766f5ebefb071e1949b120b2680c9afb96
|
[] |
no_license
|
rafajak/training_cnn
|
80374166f45651128d89b9a238fc71ccfead9cc5
|
2ff6a08cb7b6fd0e029eea322e1ca37f31f84278
|
refs/heads/master
| 2020-03-07T09:55:14.016020
| 2018-03-30T09:04:41
| 2018-03-30T09:04:41
| 126,157,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
from keras.callbacks import Callback
from deepsense import neptune
ctx = neptune.Context()
class NeptuneCallback(Callback):
def __init__(self, x_test, y_test, images_per_epoch=-1):
self.epoch_id = 0
self.images_per_epoch = images_per_epoch
self.x_test = x_test
self.y_test = y_test
def on_epoch_end(self, epoch, logs={}):
self.epoch_id += 1
ctx.channel_send('Log-loss training', self.epoch_id, logs['loss'])
ctx.channel_send('Log-loss validation', self.epoch_id, logs['val_loss'])
ctx.channel_send('Accuracy training', self.epoch_id, logs['acc'])
ctx.channel_send('Accuracy validation', self.epoch_id, logs['val_acc'])
|
[
"paulina.knut@pascal01.intra.codilime.com"
] |
paulina.knut@pascal01.intra.codilime.com
|
920c9431ac19add6412fbf5a3cb1b54ba1426ec3
|
f6bba50fccc6fb0dae2f046193434cfb4b9d32d5
|
/M-SOLUTIONS/C.py
|
d6366993e299e934fa4ded2c7800c14cd8bee32f
|
[] |
no_license
|
seven320/AtCoder
|
4c26723d20004fe46ce118b882faabc05066841c
|
45e301e330e817f1ace4be4088d3babe18588170
|
refs/heads/master
| 2021-11-22T22:57:32.290504
| 2021-10-24T09:15:12
| 2021-10-24T09:15:12
| 162,827,473
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
# encoding:utf-8
import copy
import random
import bisect # bisect_left: binary search over sorted lists
import fractions # least common multiple and related helpers live here
import math
import sys
mod = 10**9+7
sys.setrecursionlimit(mod) # the recursion depth limit defaults to 1000
N,A,B,C = map(int,input().split())
|
[
"yosyuaomenw@yahoo.co.jp"
] |
yosyuaomenw@yahoo.co.jp
|
36f32166bf2196d2e242e489ddee2954524f846d
|
735bd7bde06afa5999a2a76ba0b5a11c70159524
|
/tensor_network/neural_net/fully_connected.py
|
0c5136777c57a53588fa4250fedb16fd003f6581
|
[
"MIT"
] |
permissive
|
prithvinambiar/tensor_network
|
8f3c25e237153c600dc7c1ee6a7a4c38aea63d14
|
565cc834c2b7e59769e8fa30adae959f257bf85f
|
refs/heads/master
| 2021-01-02T23:08:37.383408
| 2017-09-10T06:51:28
| 2017-09-10T06:51:28
| 99,473,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,260
|
py
|
import logging
import tempfile
import numpy as np
import tensorflow as tf
def initialize_network(input, arch, is_training):
hidden_layer_output = input
for output in arch[0:-1]:
h2 = tf.contrib.layers.batch_norm(hidden_layer_output,
center=True, scale=True,
is_training=is_training)
hidden_layer_output = tf.contrib.layers.fully_connected(h2, output, activation_fn=tf.nn.relu
, weights_regularizer=tf.contrib.layers.l2_regularizer)
last_layer_output = tf.contrib.layers.fully_connected(hidden_layer_output, arch[-1], activation_fn=tf.nn.sigmoid
, weights_regularizer=tf.contrib.layers.l2_regularizer)
return last_layer_output
def log_loss(actual_y, predicted_y):
return tf.reduce_mean(-tf.reduce_sum(
(actual_y * tf.log(predicted_y + 1e-10) + ((1 - actual_y) * tf.log(1 - predicted_y + 1e-10))), axis=[1]))
class FullyConnected:
def __init__(self, number_of_features, number_of_output, neurons_list):
self.session = tf.Session()
self.x = tf.placeholder(tf.float32, [None, number_of_features])
self.y = tf.placeholder(tf.float32, [None, number_of_output])
self.is_training = tf.placeholder(tf.bool)
self.network = initialize_network(self.x, neurons_list, self.is_training)
self.cost_function = log_loss(self.y, self.network)
self.global_step = tf.Variable(0, trainable=False)
def __del__(self):
self.session.close()
def __enter__(self):
self.session.__enter__()
return self
def __exit__(self, exec_type, exec_value, exec_tb):
self.session.__exit__(exec_type, exec_value, exec_tb)
tf.reset_default_graph()
def train(self, train_data, validation_data=None, iterations=10000,
optimiser=tf.train.GradientDescentOptimizer(learning_rate=0.05), import_prev_model=False,
model_save_frequency=0, log_frequency=10, folder=tempfile.gettempdir() + "/tensorflow",
reg_lambda=0.0001, batch_size_pct=0.2):
(train_input, train_output) = train_data
(validation_input, validation_output) = train_data if validation_data is None else validation_data
tensorflow_dir = folder
log_dir = tensorflow_dir + "/log"
model_file = tensorflow_dir + "/model/model_data"
logging.info("Logging TensorFlow data to %s " % log_dir)
writer = tf.summary.FileWriter(log_dir)
writer.add_graph(self.session.graph)
with tf.name_scope("train"):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_step = optimiser.minimize(self.cost_function, name="train_step", global_step=self.global_step)
saver = tf.train.Saver(max_to_keep=1)
if import_prev_model:
saver.restore(self.session, model_file)
else:
self.session.run(tf.global_variables_initializer())
tf.summary.scalar('cost', self.cost_function)
merged_summary = tf.summary.merge_all()
if len(train_input) * .2 < 1:
batch_size = len(train_input)
else:
batch_size = int(len(train_input) * batch_size_pct)
print("Number of rows in train data = ", len(train_input))
print("Number of rows in batch data = ", batch_size)
for i in range(iterations):
j = i + 1
indices = np.random.choice(range(len(train_input)), size=batch_size, replace=False)
batch_input = train_input[indices]
batch_output = train_output[indices]
if (log_frequency != 0 and j % log_frequency == 0) or j == 1:
validation_accuracy = self.cost(validation_input, validation_output)
train_accuracy = self.cost(train_input, train_output)
cost = self.session.run(self.cost_function,
feed_dict={self.x: batch_input, self.y: batch_output, self.is_training: 0})
print("Iterations = %s and Cost = %s and Train accuracy = %s and Validation accuracy = %s" % (
j, cost, train_accuracy, validation_accuracy))
self.session.run(train_step,
feed_dict={self.x: batch_input, self.y: batch_output, self.is_training: 1})
if model_save_frequency != 0 and j % model_save_frequency == 0:
summary = self.session.run(merged_summary, feed_dict={self.x: train_input, self.y: train_output, self.is_training: 1})
writer.add_summary(summary, j)
print("Saving model")
saver.save(self.session, model_file)
def predict(self, test_input_data):
return self.session.run(self.network, feed_dict={self.x: test_input_data, self.is_training: 0})
def cost(self, input_data, output_data):
with tf.name_scope("validation"):
return self.session.run(self.cost_function,
feed_dict={self.y: output_data, self.x: input_data, self.is_training: 0})
|
[
"prithvinambiar@gmail.com"
] |
prithvinambiar@gmail.com
|
07414b27e5c330d92b6289b40133449889b81b6e
|
48bcb11f671f983392f57d9a68a6716af54bdfbb
|
/D3/soundmaking4676.py
|
90acdfeecb7425e73af88b4fea6898e92fbfc852
|
[] |
no_license
|
KimYeong-su/sw_expert
|
49d66f891da4f171b82201b8ad2c950c1e79949c
|
e5557f6df5dc4aa1ef1aa6a94cab0b58d2f77f58
|
refs/heads/master
| 2021-07-13T12:07:51.299710
| 2021-03-13T12:20:47
| 2021-03-13T12:20:47
| 235,712,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
T = int(input())
for tc in range(1,T+1):
s = input()
H = int(input())
place = list(sorted(map(int,input().split()),reverse=True))
for i in place:
s = s[:i]+'-'+s[i:]
print('#{} {}'.format(tc,s))
|
[
"suwon0521@naver.com"
] |
suwon0521@naver.com
|
24d1ca99fb8dc86ffac3c88e0b1f4b7184893bae
|
2ffd6167697a417dfd78de0fe5a188768d93dab6
|
/ytpomodoroweb/ytpomodoroweb/urls.py
|
58cc57d16fa8268680c6d732b95e9ae0077dd204
|
[] |
no_license
|
alikhundmiri/ytpomodoro
|
ef58904267c515b522cae21d0366cf9201cd4b47
|
d2234996b9a8cac1450d03a3fa3b402b1d41f605
|
refs/heads/master
| 2022-12-05T03:04:08.067489
| 2019-06-15T14:49:01
| 2019-06-15T14:49:01
| 191,381,468
| 0
| 0
| null | 2022-12-03T13:13:10
| 2019-06-11T13:50:54
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 812
|
py
|
"""ytpomodoroweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('core.urls', namespace='core')),
]
|
[
"salikhundmiri@gmail.com"
] |
salikhundmiri@gmail.com
|
edbec072d7e749e091aeb1a3300e433f4fd278bd
|
35c1a31e21cf43dc594e5f11f3b67e2fa63bd2a9
|
/utils/temp.py
|
221c958b0fd706b74a9cacb5be402470db2da8cf
|
[] |
no_license
|
smallsharp/mPython
|
6877cec933e1533c42636fdec4e725e22292a605
|
4696d592864d8f8c7e7ffcf72ab27ac396236811
|
refs/heads/master
| 2022-12-05T13:36:48.894360
| 2020-08-29T16:01:35
| 2020-08-29T16:01:35
| 119,930,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
import Form2Json
Form2Json.test()
|
[
"kai.li@baozun.com"
] |
kai.li@baozun.com
|
4c65be3170be56f8abcc246658328d38d1c027df
|
1b6e7e55935c7ebe51674748a3405e29e666dd97
|
/agent.py
|
d39d0231358512913eeccebcde104a3f5376a830
|
[] |
no_license
|
tanzeyy/DDPG
|
f13d2d04e06ac99277ebb370a0f2378e30f895cd
|
de32cfefd1b969616fd828614bc88ccf89e861e3
|
refs/heads/main
| 2023-01-06T05:42:10.653556
| 2020-11-09T13:08:47
| 2020-11-09T13:08:47
| 311,338,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class DDPGAgent(object):
def __init__(self, model, config, device):
self.model = model.to(device)
self.target_model = deepcopy(self.model).to(device)
self.policy_opt = optim.Adam(params=self.model.policy.parameters(),
lr=config.policy_lr)
self.value_opt = optim.Adam(params=self.model.value.parameters(),
lr=config.value_lr)
self.gamma = config.gamma
self.polyak = config.polyak
self.noise = config.noise
self.device = device
def predict(self, obs):
obs = torch.from_numpy(obs).to(self.device)
out = self.model.policy(obs)
return np.squeeze(out.cpu().detach().numpy())
def learn(self, obs, act, reward, next_obs, terminal):
obs = torch.from_numpy(obs).to(self.device)
act = torch.from_numpy(act).to(self.device)
reward = torch.from_numpy(reward).to(self.device)
next_obs = torch.from_numpy(next_obs).to(self.device)
terminal = torch.from_numpy(terminal).to(self.device)
value_loss = self._learn_value(obs, act, reward, next_obs, terminal)
policy_loss = self._learn_policy(obs)
self._update_target()
return value_loss.cpu().detach().numpy(), \
policy_loss.cpu().detach().numpy()
def _learn_value(self, obs, act, reward, next_obs, terminal):
# Compute target
with torch.no_grad():
next_act = self.target_model.policy(next_obs)
next_Q = self.target_model.value(next_obs, next_act)
target_Q = reward + self.gamma * (1.0 - terminal.float()) * next_Q
# Minimize MSBE
Q = self.model.value(obs, act)
loss = F.mse_loss(Q, target_Q)
self.value_opt.zero_grad()
loss.backward()
self.value_opt.step()
return loss
def _learn_policy(self, obs):
act = self.model.policy(obs)
Q = self.model.value(obs, act)
loss = torch.mean(-1.0 * Q)
self.policy_opt.zero_grad()
loss.backward()
self.policy_opt.step()
return loss
def _update_target(self):
# Polyak updating
for target_param, param in zip(self.target_model.value.parameters(),
self.model.value.parameters()):
target_param.data.copy_(self.polyak * target_param.data +
(1.0 - self.polyak) * param.data)
for target_param, param in zip(self.target_model.policy.parameters(),
self.model.policy.parameters()):
target_param.data.copy_(self.polyak * target_param.data +
(1.0 - self.polyak) * param.data)
def save(self, path):
torch.save(self.model.state_dict(), path)
def load(self, path):
self.model.load_state_dict(torch.load(path))
self.target_model = deepcopy(self.model)
|
[
"tan_ze@outlook.com"
] |
tan_ze@outlook.com
|
25737e432d9c21e059ad74753a8928679d5184e3
|
03e321ac2f9a1282b1c31e39508f1b21e3d2342d
|
/day 17 python.py
|
8595e263c5ed78d20636f6f3e3c3c05045a89f29
|
[] |
no_license
|
Magendra-ssn/30-day-challenge-python-
|
2c0d3c58e9963b9d614520f66469040f47592e3e
|
8ca313aaac503aa6d082230873556d53458c3a61
|
refs/heads/main
| 2023-06-30T07:39:12.774076
| 2021-08-03T08:24:16
| 2021-08-03T08:24:16
| 376,813,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
import mysql.connector
mydb=mysql.connector.connect(host="localhost",user="root",passwd="Magendra6the",database="employee")
dbse=mydb.cursor()
dbse.execute("SELECT * FROM emp")
for i in dbse:
print(i)
dbse.close()
|
[
"noreply@github.com"
] |
Magendra-ssn.noreply@github.com
|
734b32bb5662b6675f25830ad27aa18770ea7d46
|
d115ddc8be0086cf94ec3998a53616bea7b625da
|
/common/EmailMethod.py
|
c4bcaf98ff61f1f5efd3c2cc374139c3c351358a
|
[] |
no_license
|
DaQianJiang/python-request
|
918d12bf12cd35e84d40d664f73468105787fde8
|
966b5c4b2d815afa773e98fc2cd93c4290665deb
|
refs/heads/master
| 2023-01-03T04:51:44.479230
| 2020-11-04T14:57:20
| 2020-11-04T14:57:20
| 257,204,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,553
|
py
|
from common.config_reader import configReader
from common.log import Log
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import datetime
from common.config_reader import ROOT_PATH
import os
REPORT_FILE = os.path.join(ROOT_PATH,"testReport")
class Email():
config_email = configReader().get_email()
logger = Log().get_log()
def __init__(self):
self.smtp_host = self.config_email['stmp_host']
self.smtp_port = self.config_email['stmp_port']
self.smtp_password = self.config_email['stmp_password']
self.smtp_user = self.config_email['mail_user']
self.mail_sender = self.config_email["sender"]
self.mail_receivers = self.config_email["receiver"]
self.revicer = []
if self.mail_receivers:
self.revicer = str(self.mail_receivers).split("/")
else:
self.logger.info("---邮件接收者为空-----")
print(self.mail_receivers)
print(self.revicer)
self.mail_title = self.config_email["title"]
self.mail_content = self.config_email["content"]
self.mail_test_user = self.config_email["testuser"]
self.mail_on_off = self.config_email["on_off"]
self.data = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.mail_subject = self.mail_title +" "+self.data
# set up the top-level (multipart) message object
self.msg_root = MIMEMultipart("mixed")
def set_mail_header(self):
self.msg_root["subject"] = Header(self.mail_subject,"utf-8")
self.msg_root["From"] = self.mail_sender
self.msg_root["To"] = ";".join(self.revicer)
def set_mail_content(self):
self.content = MIMEText(self.mail_content,"plain","utf-8")
self.msg_root.attach(self.content)
# attach the report file
def set_mail_file(self):
if os.path.exists(REPORT_FILE):
self.report = os.path.join(REPORT_FILE,"report.html")
#self.testlist = os.path.join(ROOT_PATH,"testFile/case/user.xls")
self.file = open(self.report,"rb").read()
self.att_file = MIMEText(self.file,"base64","utf-8")
self.att_file["Content-Type"]= 'application/octet-stream'
self.att_file["Content-Disposition"] = 'attachment; filename="report.html"'
self.msg_root.attach(self.att_file)
else:
raise FileNotFoundError("testReport文件未找到")
def send_email(self):
self.set_mail_header()
self.set_mail_content()
self.set_mail_file()
try:
self.smtp_server = smtplib.SMTP_SSL(self.smtp_host,self.smtp_port)
except smtplib.SMTPConnectError as e:
self.logger.error("服务器链接失败:",e)
else:
try:
self.smtp_server.login(self.smtp_user,self.smtp_password)
except smtplib.SMTPAuthenticationError as e:
self.logger.error("服务器登陆失败:",e)
else:
try:
self.smtp_server.sendmail(self.mail_sender,self.revicer,self.msg_root.as_string())
self.smtp_server.quit()
self.logger.info("----邮件发送成功----")
self.logger.info("发件人%s,收件人%s",self.mail_sender,self.revicer)
except smtplib.SMTPException as e:
self.logger.error("---发送邮件失败---:",e)
if __name__ == '__main__':
email = Email()
email.send_email()
|
[
"1404482005@qq.com"
] |
1404482005@qq.com
|
3730fe7e084859788829b841d8f659cb3e767ca7
|
99e32ab1325a303f92457fc6e0615786c4944ef2
|
/accounts/migrations/0045_admin.py
|
89f9551bcb1290310266ca8d274723897489610f
|
[] |
no_license
|
mafohoT/Threeness-Intelect
|
3936a43ed96ba5cefdcae965e923ccf003bb54b2
|
df64f02e9774b9e620eec1f0b15c21aa04818bf7
|
refs/heads/master
| 2023-08-17T22:46:09.742200
| 2021-10-20T13:33:27
| 2021-10-20T13:33:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,285
|
py
|
# Generated by Django 3.2.7 on 2021-10-10 07:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0044_remove_appointment_user'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=20, null=True)),
('id_number', models.CharField(max_length=13, null=True)),
('phonenumber', phonenumber_field.modelfields.PhoneNumberField(max_length=128, null=True, region=None)),
('name', models.CharField(max_length=200, null=True)),
('employeenumber', models.CharField(max_length=200, null=True, unique=True)),
('appointment_reason', models.CharField(max_length=200, null=True)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"thabo3111@gmail.com"
] |
thabo3111@gmail.com
|
e3232181ab26b3496489c74e74fc52babb6b3a76
|
7eb7c263634a40b7397727854346f703e2be5a63
|
/core/estimators/models/omar_cnn.py
|
3296eb27919aeb8d258fde9727b86ff4d6df584c
|
[] |
no_license
|
Iqra350/krock2_traversability
|
7b745d1664d9d14facc0d3fbff324bbaac2c391e
|
7dd5e03d768fd3218838417ac0f195b25a1b8efa
|
refs/heads/master
| 2022-07-17T04:59:35.529123
| 2019-09-02T14:07:40
| 2019-09-02T14:07:40
| 205,813,838
| 0
| 0
| null | 2022-06-21T22:42:16
| 2019-09-02T08:42:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
import torch
import torch.nn as nn
def conv_block(in_channels, out_channels):
return nn.Sequential(nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
padding=1,
bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU())
class OmarCNN(nn.Module):
"""
From original paper https://arxiv.org/abs/1709.05368
"""
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(conv_block(1, 5),
conv_block(5, 5),
nn.MaxPool2d(kernel_size=2),
conv_block(5, 5))
self.decoder = nn.Sequential(nn.Linear(7605, 128),
nn.ReLU(),
nn.Dropout(),
nn.Linear(128, 2),
# nn.Sigmoid()
)
def forward(self, x):
x = self.encoder(x)
x = x.view(x.size(0), -1)
x = self.decoder(x)
return x
|
[
"noorsyen@gmail.com"
] |
noorsyen@gmail.com
|
bf1d1240627f1a4c67140e7e6a2fd53532cd3d68
|
d812e86676c88dd70d0d829cf507de7777502d72
|
/Django/zqxt_form2/zqxt_form2/urls.py
|
bc54e18f005ef54dc230520ba7b8f9011af35f4e
|
[] |
no_license
|
MikeNg2018/PythonCode
|
020777d3620d1aeb7c03dd90d1452934eee9663f
|
683a6d3cb529ec2a92adc92c6cd6db6d1604fc3b
|
refs/heads/master
| 2021-08-22T08:15:50.083878
| 2018-07-23T15:05:05
| 2018-07-23T15:05:05
| 104,327,895
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
"""zqxt_form2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from tools import views as tools_views
admin.autodiscover()
urlpatterns = [
path('', tools_views.index, name='home'),
path('admin/', admin.site.urls),
]
|
[
"mikeng83@gmail.com"
] |
mikeng83@gmail.com
|
071bfee000566985c17e29e87d6d225d8ee2deb3
|
16e3bda8d377753f14aaa58812fd36d6a7abb7cf
|
/examples/durations/components/takeslongtime/component.py
|
961733e9ca60e8085619eb9c25f6904cc2235c68
|
[
"BSD-2-Clause"
] |
permissive
|
flyingcircusio/batou
|
942c08debd8a011bc7fe36fc630c6b8552ad9f3e
|
f87a00acc3ce256014d49bef2eac581e91009275
|
refs/heads/main
| 2023-08-31T07:20:17.450557
| 2023-04-14T12:14:52
| 2023-04-14T12:14:52
| 206,573,151
| 42
| 28
|
NOASSERTION
| 2023-09-12T13:32:40
| 2019-09-05T13:40:03
|
Python
|
UTF-8
|
Python
| false
| false
| 167
|
py
|
from time import sleep
from batou.component import Component
from batou.lib.file import File
class Takeslongtime(Component):
def verify(self):
sleep(2)
|
[
"ct@flyingcircus.io"
] |
ct@flyingcircus.io
|
f82cf90fe60daaa0440a23c3fff77d3f36ea6f32
|
21899ea0e94cb58f8ac99b7c731f59e0232839eb
|
/src/python/T0/WMBS/Oracle/SMNotification/UpdateOfflineFileStatus.py
|
d3bfae8976ed4a4f7831ef497cd82c240dac52e8
|
[
"Apache-2.0"
] |
permissive
|
dmwm/T0
|
a6ee9d61abc05876fc24f8af69fe932a2f542d21
|
1af91d0b1971b7d45ea7378e754f2218ff9a8474
|
refs/heads/master
| 2023-08-16T10:55:27.493160
| 2023-08-11T09:38:03
| 2023-08-11T09:38:03
| 4,423,801
| 9
| 54
|
Apache-2.0
| 2023-09-14T11:43:30
| 2012-05-23T18:33:56
|
Python
|
UTF-8
|
Python
| false
| false
| 687
|
py
|
"""
_UpdateOfflineFileStatus_
Oracle implementation of UpdateOfflineFileStatus
"""
from WMCore.Database.DBFormatter import DBFormatter
class UpdateOfflineFileStatus(DBFormatter):
def execute(self, binds, conn = None, transaction = False):
sql = """UPDATE file_transfer_status_offline
SET t0_repacked_time = CURRENT_TIMESTAMP,
repacked_retrieve = 1
WHERE p5_fileid = :P5_ID
AND t0_repacked_time IS NULL
AND repacked_retrieve is NULL
"""
self.dbi.processData(sql, binds, conn = conn,
transaction = transaction)
return
|
[
"Dirk.Hufnagel@cern.ch"
] |
Dirk.Hufnagel@cern.ch
|
37f310a3f5ec60c0871d9232dd93a28a7b4b7d30
|
af70d6e37c3b95c6a497ecb6b6b54dc56a9aced3
|
/config.py
|
638aeaba248783af2a124a63ae24391fac4311bd
|
[
"Apache-2.0"
] |
permissive
|
TuringKi/Quantize_Project
|
b4357b9e1946e29318ce90ef72bca2f6aded4454
|
e3ba6bae8fff7e1981c93ce954a79987feddfe33
|
refs/heads/master
| 2021-08-30T20:02:24.938319
| 2017-12-19T08:04:12
| 2017-12-19T08:04:12
| 125,996,907
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,031
|
py
|
#!/usr/bin/python
all_para =['Variable',
'Variable_1',
'Variable_2',
'Variable_3',
'Variable_4',
'Variable_5',
'Variable_6',
'Variable_7',
'Variable_8',
'Variable_9',
'Variable_10',
'Variable_11',
'Variable_12',
'Variable_13',
'Variable_14',
'Variable_15',
'Variable_16',
'Variable_17',
'Variable_18',
'Variable_19',
'Variable_20',
'Variable_21',
'Variable_22',
'Variable_23',
'Variable_24',
'Variable_25',
'Variable_26',
'Variable_27',
'Variable_28',
'Variable_29',
'Variable_30',
'Variable_31',
'Variable_32',
'Variable_33',
'Variable_34',
'Variable_35',
'Variable_36',
'Variable_37',
'Variable_38',
'Variable_39']
exc_para =[#'Variable',
#'Variable_1',
'Variable_2',
'Variable_3',
'Variable_4',
'Variable_5',
'Variable_6',
'Variable_7',
'Variable_8',
'Variable_9',
'Variable_10',
'Variable_11',
'Variable_12',
# 'Variable_13',
'Variable_14',
'Variable_15',
'Variable_16',
'Variable_17',
'Variable_18',
'Variable_19',
'Variable_20',
'Variable_21',
'Variable_22',
'Variable_23',
'Variable_24',
'Variable_25',
# 'Variable_26',
'Variable_27',
'Variable_28',
'Variable_29',
'Variable_30',
'Variable_31',
'Variable_32',
'Variable_33',
'Variable_34',
'Variable_35',
'Variable_36',
'Variable_37',
'Variable_38']
# 'Variable_39']
trans_layer = ['Variable_13','Variable_26']
block_1 = [#'Variable_1',
'Variable_2',
'Variable_3',
'Variable_4',
'Variable_5',
'Variable_6',
'Variable_7',
'Variable_8',
'Variable_9',
'Variable_10',
'Variable_11',
'Variable_12',]
block_2 = ['Variable_14',
'Variable_15',
'Variable_16',
'Variable_17',
'Variable_18',
'Variable_19',
'Variable_20',
'Variable_21',
'Variable_22',
'Variable_23',
'Variable_24',
'Variable_25',]
block_3 = ['Variable_27',
'Variable_28',
'Variable_29',
'Variable_30',
'Variable_31',
'Variable_32',
'Variable_33',
'Variable_34',
'Variable_35',
'Variable_36',
'Variable_37',
'Variable_38']
fc_layer = ['Variable_39']
prune_para = {}
for k in all_para:
prune_para[k] = 0.75
prune_para['Variable'] = 0.1
prune_para['Variable_1'] = 0.45
prune_para['Variable_3'] = 0.65
prune_para['Variable_5'] = 0.7
prune_para['Variable_6'] = 0.65
prune_para['Variable_13'] = 0.35
prune_para['Variable_26'] =0.5
prune_para['Variable_34'] = 0.85
prune_para['Variable_35'] = 0.9
prune_para['Variable_36'] = 0.95
prune_para['Variable_37'] = 0.95
prune_para['Variable_38'] = 0.95
prune_para['Variable_39'] = 0.9
dns_para =[#'Variable',
'Variable_1',
'Variable_2',
'Variable_3',
'Variable_4',
'Variable_5',
'Variable_6',
'Variable_7',
'Variable_8',
'Variable_9',
'Variable_10',
'Variable_11',
'Variable_12',
'Variable_13',
'Variable_14',
'Variable_15',
'Variable_16',
'Variable_17',
'Variable_18',
'Variable_19',
'Variable_20',
'Variable_21',
'Variable_22',
'Variable_23',
'Variable_24',
'Variable_25',
'Variable_26',
'Variable_27',
'Variable_28',
'Variable_29',
'Variable_30',
'Variable_31',
'Variable_32',
'Variable_33',
'Variable_34',
'Variable_35',
'Variable_36',
'Variable_37',
'Variable_38',
'Variable_39']
crate = {}
for k in all_para:
crate[k] = 3
crate['Variable'] = 0
crate['Variable_1'] = 1
crate['Variable_13'] = 1
crate['Variable_26'] =1
inqpercen_para = {}
for k in all_para:
inqpercen_para[k] = 1.0
inq_para = {}
for k in all_para:
inq_para[k] = 16
#inq_para['Variable'] = 256
#inq_para['Variable_1'] = 128
#inq_para['Variable_13'] = 128
#inq_para['Variable_26'] =128
inqprune_para = {}
for k in all_para:
inqprune_para[k] = 1-0.75
inqprune_para['Variable'] = 1-0.1
inqprune_para['Variable_1'] = 1-0.45
inqprune_para['Variable_3'] = 1-0.65
inqprune_para['Variable_5'] = 1-0.7
inqprune_para['Variable_6'] = 1-0.65
inqprune_para['Variable_13'] = 1-0.35
inqprune_para['Variable_26'] =1-0.5
inqprune_para['Variable_34'] = 1-0.85
inqprune_para['Variable_35'] = 1-0.9
inqprune_para['Variable_36'] = 1-0.95
inqprune_para['Variable_37'] = 1-0.95
inqprune_para['Variable_38'] = 1-0.95
inqprune_para['Variable_39'] = 1-0.9
kmeans_para = {}
for k in all_para:
kmeans_para[k] = 64
len_dict = {'Variable': 432,
'Variable_1': 1728,
'Variable_10': 13392,
'Variable_11': 14688,
'Variable_12': 15984,
'Variable_13': 25600,
'Variable_14': 17280,
'Variable_15': 18576,
'Variable_16': 19872,
'Variable_17': 21168,
'Variable_18': 22464,
'Variable_19': 23760,
'Variable_2': 3024,
'Variable_20': 25056,
'Variable_21': 26352,
'Variable_22': 27648,
'Variable_23': 28944,
'Variable_24': 30240,
'Variable_25': 31536,
'Variable_26': 92416,
'Variable_27': 32832,
'Variable_28': 34128,
'Variable_29': 35424,
'Variable_3': 4320,
'Variable_30': 36720,
'Variable_31': 38016,
'Variable_32': 39312,
'Variable_33': 40608,
'Variable_34': 41904,
'Variable_35': 43200,
'Variable_36': 44496,
'Variable_37': 45792,
'Variable_38': 47088,
'Variable_39': 4480,
'Variable_4': 5616,
'Variable_5': 6912,
'Variable_6': 8208,
'Variable_7': 9504,
'Variable_8': 10800,
'Variable_9': 12096}
|
[
"noreply@github.com"
] |
TuringKi.noreply@github.com
|
5517b266df69423eb51103ed7b51cea7cdb833ec
|
df8eeeb91a0dce2eb27911a61b40041da0ea55a3
|
/honda_janken/4_pygame.py
|
ba530b20dc2a57a7884f471ac55a738d21010d16
|
[] |
no_license
|
dannyso16/Play-IT
|
89e8c60a3069ac6e678c8f9849a58d6cb9e38a79
|
14efa295e12ba208313beb655f57fb1ae8051a49
|
refs/heads/master
| 2020-06-23T21:38:28.183648
| 2020-02-26T05:03:37
| 2020-02-26T05:03:37
| 198,760,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
import pygame
from pygame.locals import *
import sys
pygame.init()
screen = pygame.display.set_mode((1200, 700))
image = pygame.image.load('assets/honda_paper.jpg')
def main():
while True:
screen.fill((0,0,0))
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
screen.blit(image, (0, 0))
pygame.display.update()
if __name__ == '__main__':
main()
|
[
"osatoshi.11@gmail.com"
] |
osatoshi.11@gmail.com
|
9167dd129fd4984452144aa2a908ee2f67d171e1
|
1267670526be8a4669b0e724e2e0ebcd20c85a15
|
/users/apps.py
|
97315a96a3e7a750bb82d4b564b77f2967f63791
|
[
"MIT"
] |
permissive
|
DPsalmist/Damios-Blog
|
46ba2bf2294d821e6656f7232170f389ca55c6b7
|
af711fc9ad8e581df166beb6c61c5514a14310f2
|
refs/heads/main
| 2023-05-06T08:24:39.208531
| 2021-05-24T02:40:30
| 2021-05-24T02:40:30
| 340,534,096
| 0
| 0
|
MIT
| 2021-02-20T02:35:05
| 2021-02-20T01:39:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 133
|
py
|
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
def ready(self):
import users.signals
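# Note (added comment, assumption about intent): importing users.signals inside ready()
# is the usual Django pattern for registering signal receivers once the app registry is
# fully loaded; a module-level import could run too early during startup.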
|
[
"sdamilare420@gmail.com"
] |
sdamilare420@gmail.com
|
f18610e3ece9eeea21f5e15acef6c46718b270c4
|
87415616ce962e5ae09ce2afea14e821631232fb
|
/meeting_point/response.py
|
fd036fe9d55095ca8c80d993e42eee690e427de2
|
[] |
no_license
|
proabhishek/meeting_point
|
33bbf715c965dc6c9ff406e46f8689784668912f
|
6f8166ef09b229e35fe966a249c675fd19895e4e
|
refs/heads/master
| 2022-05-11T08:09:54.802826
| 2021-06-23T16:52:16
| 2021-06-23T16:52:16
| 204,439,826
| 0
| 1
| null | 2022-04-22T22:13:59
| 2019-08-26T09:16:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
import traceback
from rest_framework.views import exception_handler
from rest_framework.response import Response
def meeting_exception_handler(exc, context):
response = exception_handler(exc, context)
try:
data = response.data
message = ""
for key, value in data.items():
message += "%s: %s. " % (key, value)
except:
message = ""
if response is not None:
response.data = {
'success': False,
'error': message,
'message': message,
'data': {}
}
return response
def api_response(func):
def wrapper(*args, **kwargs):
try:
res = func(*args, **kwargs)
if res['success']:
return Response({
'data': res.get('data', {}),
'success': True,
'message': res.get('message', 'success'),
'error': res.get('error', ''),
'statusCode': res.get('statusCode', 200)
}, status=res.get('status', 200))
else:
return Response({
'data': res.get('data', {}),
'success': False,
'message': res.get('message', ''),
'error': res.get('error', ''),
'statusCode': res.get('statusCode', 400)
}, status=res.get('status', 400))
except Exception as e:
return Response({
'data': {},
'success': False,
'error': "Something wrong. Please try again after sometime.\nDev Hint(hidden in production): %s" % str(e),
'exception': str(e),
'traceback': traceback.format_exc(),
'statusCode': 500
}, status=500)
return wrapper
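# Usage sketch (hypothetical view, not part of this module): the decorated function is
# expected to return a dict with at least a 'success' flag, plus the optional 'data',
# 'message', 'error', 'statusCode' and 'status' keys that wrapper() reads above, e.g.
#
#   @api_response
#   def list_meetings(request):
#       return {'success': True, 'data': {'meetings': []}, 'message': 'fetched'}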
|
[
"theabhishek.srm@gmail.com"
] |
theabhishek.srm@gmail.com
|
defb13f9aa31c99e117b8eacc4b05d56d393dc48
|
bf79252a8cbd297f3f8392f0a9c57eda61f336cc
|
/3 Triangle.py
|
b81a05e5efb8e8c70a7e7e3948c5afc7a7424144
|
[] |
no_license
|
besteffects/Python_tutorials
|
5af8df8d6458d37f9d2f12e4b40b0cff2d07442b
|
cb7a525d70b87446a68e32ecb42bc56e3639d532
|
refs/heads/master
| 2020-05-07T05:50:46.458059
| 2019-05-28T02:57:24
| 2019-05-28T02:57:24
| 180,289,270
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,041
|
py
|
Python 3.4.3 (v3.4.3:9b73f1c3e601, Feb 24 2015, 22:44:40) [MSC v.1600 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> def f(x)
SyntaxError: invalid syntax
>>> def f(x): return x**2
>>> f(3)
9
>>> result = f(3)
>>> result
9
>>> def area(base, height):
return base*height/2
>>> area(3,4)
6.0
>>> area (10,7.45)
37.25
>>> ================================ RESTART ================================
>>> area(4,4)
Traceback (most recent call last):
File "<pyshell#11>", line 1, in <module>
area(4,4)
NameError: name 'area' is not defined
>>> ================================ RESTART ================================
>>>
>>> area(10,2)
10.0
>>> perimeter(3,4,5)
Traceback (most recent call last):
File "<pyshell#13>", line 1, in <module>
perimeter(3,4,5)
NameError: name 'perimeter' is not defined
>>> ================================ RESTART ================================
>>>
>>> perimeter(3,4,5)
12
>>> help(round)
Help on built-in function round in module builtins:
round(...)
round(number[, ndigits]) -> number
Round a number to a given precision in decimal digits (default 0 digits).
This returns an int when called with one argument, otherwise the
same type as the number. ndigits may be negative.
>>> round(45,8)
45
>>> round()
Traceback (most recent call last):
File "<pyshell#17>", line 1, in <module>
round()
TypeError: Required argument 'number' (pos 1) not found
>>> round(45.345,2)
45.34
>>> round(45)
45
>>> round(45.345,2,5)
Traceback (most recent call last):
File "<pyshell#20>", line 1, in <module>
round(45.345,2,5)
TypeError: round() takes at most 2 arguments (3 given)
>>> round()
Traceback (most recent call last):
File "<pyshell#21>", line 1, in <module>
round()
TypeError: Required argument 'number' (pos 1) not found
>>> help(id)
Help on built-in function id in module builtins:
id(...)
id(object) -> integer
Return the identity of an object. This is guaranteed to be unique among
simultaneously existing objects. (Hint: it's the object's memory address.)
>>> x=8/4
>>> x
2.0
>>> x=12//3
>>> x
4
>>> x=3
>>> y=5
>>> x=y
>>> x
5
>>> x=3
>>> y=5
>>> x=y
>>> y
5
>>> 8=x
SyntaxError: can't assign to literal
>>> round(45.342)
45
>>> def bigger(x):
return x**x
bigger(12)
SyntaxError: invalid syntax
>>>
>>> bigger(12)
Traceback (most recent call last):
File "<pyshell#41>", line 1, in <module>
bigger(12)
NameError: name 'bigger' is not defined
>>> ================================ RESTART ================================
>>> def bigger(x):
return x**x
>>> bigger(12)
8916100448256
>>> HELP(ID)
Traceback (most recent call last):
File "<pyshell#46>", line 1, in <module>
HELP(ID)
NameError: name 'HELP' is not defined
>>> help(id)
Help on built-in function id in module builtins:
id(...)
id(object) -> integer
Return the identity of an object. This is guaranteed to be unique among
simultaneously existing objects. (Hint: it's the object's memory address.)
>>>
|
[
"besteffects@gmail.com"
] |
besteffects@gmail.com
|
5c12dac608c49d1b596a81739abc27e3105323b7
|
ae93357b563cf602b207df92bce4cacaeb690534
|
/thisAndThat/387b.py
|
22bd1cc8a1a0850da802a59c1164dba3eafe4561
|
[
"MIT"
] |
permissive
|
SnowOnion/CodeForcesLee
|
aed472769e6b1449f60b561d31d6150cdd027042
|
c2e964432ce9c2f6ca0c9038934c10f1bf1268b3
|
refs/heads/master
| 2020-04-06T06:58:25.082842
| 2016-06-21T14:00:00
| 2016-06-21T14:00:00
| 16,841,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# @status AC
n,m=map(int,raw_input().split(' '))
a=map(int,raw_input().split(' '))
b=map(int,raw_input().split(' '))
# a.reverse()
# b.reverse()
aptr,bptr=len(a)-1,len(b)-1
comeup=0
while True:
if aptr<0: # end
break
elif bptr<0: # not enough
aptr-=1
comeup+=1
elif a[aptr]>b[bptr]: # can't satisfy
aptr-=1
comeup+=1
else: # normal
aptr-=1
bptr-=1
print comeup
|
[
"snowonionlee@gmail.com"
] |
snowonionlee@gmail.com
|
33f29496027b2d49f9264bab1239e924a9f86347
|
a29c5402b81fe3026408755f793636ed389d6316
|
/tests/test_utils.py
|
2b7f181cb6e3219533d64c7cd92a2e978d15fd32
|
[
"MIT"
] |
permissive
|
Originator2019/requestz
|
66f90b1ec38d89f77c2a5e60e8f385501ca37417
|
6ec81e4bf520ade2ed6537f24dc0a395b3af754b
|
refs/heads/master
| 2023-03-15T15:08:08.525057
| 2020-11-23T12:19:05
| 2020-11-23T12:19:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
import pytest
from requestz.utils import double_split, parse_cookies, params_check
def test_double_split():
line = 'BAIDUID=1874C759F48209DB07521C82EF9B6A77:FG=1; expires=Thu, 31-Dec-37 23:55:55 GMT; max-age=2147483647; path=/; domain=.baidu.com'
result = double_split(line, ';', '=')
print(dict(result))
def test_parse_cookies():
line = 'BAIDUID=7F9E96B69ECAA4758C3981749DDCFBC2:FG=1; expires=Thu, 31-Dec-37 23:55:55 GMT; max-age=2147483647; path=/; domain=.baidu.com'
result = parse_cookies(line)
print(result)
def test_check_type():
@params_check(a=int)
def func(a):
print(a)
with pytest.raises(TypeError):
func('1')
|
[
"hanzhichao@secoo.com"
] |
hanzhichao@secoo.com
|
26e7e3cf9621bfefc6acced6c9d8091d231ec685
|
a30588adcdaca87777142556e428e354b651de8c
|
/lib/config.py
|
8d04fd35e030189b042b4a989451e453544a85d9
|
[
"MIT"
] |
permissive
|
ardegra/ardegra
|
8abde2aa4e13b51553e6a8ff10aa1e769346fc06
|
96b7840faa41d43fd5c20e0a26f4cd7cf58f3aa7
|
refs/heads/master
| 2022-12-10T17:32:17.520324
| 2018-02-14T03:26:34
| 2018-02-14T03:26:34
| 120,867,420
| 0
| 0
|
MIT
| 2022-06-28T14:37:23
| 2018-02-09T06:36:02
|
Python
|
UTF-8
|
Python
| false
| false
| 349
|
py
|
class Config:
PRODUCTION = {"BASE_EXTRACT_API": "http://159.65.9.37:8000", "DATABASE_ADDRESS": "35.198.212.145:27017"}
DEVELOPMENT = {"BASE_EXTRACT_API": "http://13.250.161.228:8000", "DATABASE_ADDRESS": "35.198.212.145:27017"}
STAGE = PRODUCTION
BASE_EXTRACT_API = STAGE["BASE_EXTRACT_API"]
DATABASE_ADDRESS = STAGE["DATABASE_ADDRESS"]
|
[
"frans@zannete.com"
] |
frans@zannete.com
|
afe785822353007a0e383a753e0a33698f083809
|
402d45a429fae0649bcbf82858e891cb3032da68
|
/migrations/versions/9b5b92f1c540_tasks.py
|
c9461593abe1f15847daabfd207d3ef7110fa65c
|
[] |
no_license
|
awei82/microblog
|
548f2dae0709d3fce976a8ee9f8e8cab4e5ebdeb
|
b7b7c09563ae5b079c5acf7101c02b192d27d308
|
refs/heads/main
| 2023-02-12T12:14:10.585844
| 2021-01-11T22:03:49
| 2021-01-11T22:03:49
| 318,905,127
| 0
| 0
| null | 2020-12-23T03:36:37
| 2020-12-05T22:49:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
"""tasks
Revision ID: 9b5b92f1c540
Revises: ddd5814a8c2f
Create Date: 2020-12-13 19:47:11.847133
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9b5b92f1c540'
down_revision = 'ddd5814a8c2f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('task',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('description', sa.String(length=128), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('complete', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_task_name'), 'task', ['name'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_task_name'), table_name='task')
op.drop_table('task')
# ### end Alembic commands ###
|
[
"awei34@gatech.edu"
] |
awei34@gatech.edu
|
27286200eb7bf4c3f812531f1265ffac62c024fd
|
17d5736896e79d4b8a11ed8d8ecddd6ede56b2a6
|
/day_30_PrimesNumberCount.py
|
444f0850b3aa82b07ef8dbe663d71ddb940aa701
|
[] |
no_license
|
parulsharma-121/CodingQuestions
|
e733e5b24c30f137593267d8464721a83df3f241
|
9c3a99501878edd22052505b8bda9863e5855fd7
|
refs/heads/master
| 2021-04-23T19:19:13.651753
| 2020-10-22T16:30:29
| 2020-10-22T16:30:29
| 249,979,493
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
'''
Count the number of prime numbers less than a non-negative number, n.
Example:
Input: 10
Output: 4
Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.
'''
def countPrimes(n):
if(n<=2):return 0
primes = [True]*(n)
primes[0]=False
primes[1]=False
count = 0
for i in range(2,n):
if(primes[i]==True):
count +=1
j=2
while((j*i)<n):
primes[j*i]=False
j +=1
return count
n = 10
print(countPrimes(n))
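# Note (added comment): this is a Sieve of Eratosthenes; for n = 10 it prints 4
# (primes 2, 3, 5, 7). Starting the inner loop at j = i instead of j = 2 would skip
# multiples already marked by smaller primes without changing the count.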
|
[
"contactparulsharma11@gmail.com"
] |
contactparulsharma11@gmail.com
|
ec05d34177c32d090ae98a11348615565109dae7
|
c72ecc8414497d5e0711886737088de7f5ec4021
|
/Computer-Science/Lens_Slice.py
|
eb43afe21e72127937afb40af80832e375e5bd88
|
[] |
no_license
|
gjholman/Codecademy
|
bf3557e6c64be5f3eb4b53ed2ffbcc8b1961ee3d
|
1f424ac39040c56bacab5a3925dc8926477b3342
|
refs/heads/master
| 2020-04-05T09:33:44.991984
| 2019-05-06T19:52:43
| 2019-05-06T19:52:43
| 156,763,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
toppings = ['pepperoni', 'pineapple', 'cheese', 'sausage', 'olives', 'anchovies', 'mushrooms']
prices = [2, 6, 1, 3, 2, 7, 2]
pizzas = list(zip(prices, toppings))
num_pizzas = len(toppings)
print('We sell %s different kinds of pizzas' % num_pizzas)
pizzas.sort()
cheapest_pizza = pizzas[0]
priciest_pizzas = pizzas[-1]
three_cheapest = pizzas[:3]
num_two_dollar_slices = prices.count(2)
print(num_two_dollar_slices)
|
[
"gholman@wpi.edu"
] |
gholman@wpi.edu
|
0fc8150de357aa26ebfde427639c7a4f5377e2b0
|
eea7d5b8b745c4865a32f1f14f2d0e80f7d1a1f0
|
/fetch-pmid-date_year.py
|
e101c0ae0d252debdad7b682f25a59b598642496
|
[
"Unlicense"
] |
permissive
|
asadrazaa1/emails-extraction
|
c404372bc10115b082c578e40707878faa4bd04c
|
bb2b7b9f4caa9f62a81e6d9588c1c652d074dfde
|
refs/heads/master
| 2022-12-10T19:04:42.146796
| 2020-08-28T12:55:03
| 2020-08-28T12:55:03
| 291,040,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,347
|
py
|
import psycopg2
import sys
from nltk.tokenize import sent_tokenize
import re
import csv
try:
#connecting with database
connection = psycopg2.connect(user="database-user",
password="database-password",
host="host-ip-address",
port="5432",
database="data-base-name")
#using divide rule method to access data between some range
starting_pmid = 21
intermediate_pmid = 100000
maximum_pmid = 32078260
base_url = 'https://pubmed.ncbi.nlm.nih.gov/'
while (1):
if(intermediate_pmid<maximum_pmid):
with connection.cursor() as cursor:
#query to extract specific data from database
temp_query = """select a.pmid, a.forename, a.lastname, a.affiliation, b.date_year from pm2o.author a
inner join pm2o.studies b on a.pmid = b.pmid
where a.affiliation is not null and
a.pmid between '{}' and '{}';""".format(starting_pmid, intermediate_pmid )
cursor.execute(temp_query)
temp_data = cursor.fetchall()
print("Fetched data from db ...")
print(str(starting_pmid) + " - " + str(intermediate_pmid))
#making csv files
with open('pmid [%s - %s].csv' % (starting_pmid, intermediate_pmid), mode='w') as csv_file:
#creating header for csv file
fieldnames = ['pmid', 'forename', 'lastname', 'email', 'date_year', 'urls']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for data in temp_data:
#using regex on data[3] column i.e original data
match = re.findall(r'[\w\.-]+@[\w\.-]+', str(data[3]))
if len(match) != 0:
#csv write operation
writer.writerow({'pmid': data[0], 'forename': data[1], 'lastname': data[2], 'email':match[0], 'date_year': data[4],'urls': base_url + str(data[0])})
print("For loop ended ...")
print("Data written into file ...")
starting_pmid = intermediate_pmid + 1
intermediate_pmid = 100000 + intermediate_pmid
else:
print("Entering base case")
with connection.cursor() as cursor:
#query to extract specific data from database
temp_query = """select a.pmid, a.forename, a.lastname, a.affiliation, b.date_year from pm2o.author a
inner join pm2o.studies b on a.pmid = b.pmid
where a.affiliation is not null and
a.pmid between '{}' and '{}';""".format(starting_pmid, maximum_pmid )
cursor.execute(temp_query)
temp_data = cursor.fetchall()
print("Fetched data from db ...")
print(str(starting_pmid) + " - " + str(maximum_pmid))
#making csv file
with open('pmid [%s - %s].csv' % (starting_pmid, maximum_pmid), mode='w') as csv_file:
#creating header for csv file
fieldnames = ['pmid', 'forename', 'lastname', 'email', 'date_year', 'urls']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for data in temp_data:
#using regex on data[3] column i.e original data
match = re.findall(r'[\w\.-]+@[\w\.-]+', str(data[3]))
if len(match) != 0:
#csv write operation
writer.writerow({'pmid': data[0], 'forename': data[1], 'lastname': data[2], 'email':match[0], 'date_year': data[4],'urls': base_url + str(data[0])})
print("For loop ended ...")
print("Data written into file ...")
break
#94357012, total rows
#51556076, null affiliation
#42800936, not null affiliation
#21, minimum pmid
#32078260, maximum pmid
# print(len(temp_data))
sys.exit('Script completed')
except (Exception, psycopg2.Error) as error:
print("connection error: ", error)
sys.exit('Script failed')
sys.exit(0)
|
[
"noreply@github.com"
] |
asadrazaa1.noreply@github.com
|
69ee3f8346b35e78155451d2a3a4225518fd34a0
|
40a20dee8c96ce4d3a8f351e33763c30f9dc3bd3
|
/apps/projects/migrations/0002_auto_20151205_2101.py
|
f6960971c3372875ce92ec9ac094efd2bcbc7d3a
|
[
"Apache-2.0"
] |
permissive
|
theSoloBrewer/Project_Management
|
236553a4bbce46ea2c6928e02f09d5772f296f40
|
20db0abc400568da593bdea075b414dfde89f564
|
refs/heads/master
| 2020-05-25T14:20:06.311620
| 2016-10-06T09:53:05
| 2016-10-06T09:53:05
| 61,943,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='project',
name='lat',
field=models.DecimalField(default=34.397, max_digits=8, decimal_places=3),
preserve_default=False,
),
migrations.AddField(
model_name='project',
name='lng',
field=models.DecimalField(default=150.644, max_digits=8, decimal_places=3),
preserve_default=False,
),
]
|
[
"computerzombie@msn.com"
] |
computerzombie@msn.com
|
8e1f86b970140ffd868c55fa1c235d8495f21f60
|
d5401d70cd380adf0f57553d5873ea7bf4b1f943
|
/bpproject/education/urls.py
|
50c3189a3384b2c883f030ebf843624e21fb5f0d
|
[] |
no_license
|
Nima-Niroumand/bpproject
|
854b1192456f795bc87e736541b2256964d0f5e1
|
06489c29345674a734c35b4060b3d34074c8d1d0
|
refs/heads/main
| 2023-03-21T12:00:30.794387
| 2021-03-07T17:53:20
| 2021-03-07T17:53:20
| 344,255,737
| 0
| 0
| null | 2021-03-07T17:53:20
| 2021-03-03T20:31:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
from django.conf.urls import url,include
from django.urls import path
from education.views import dashboard, exerciseIndex ,exerciseUpload,VideoUpload,videoIndex,downloadExerciseFiles,sendExercise,videoPlay
from education.views import submitExerciseIndex,sendExercise,downloadSubmitedExerciseFiles,accessdenied
urlpatterns = [
url(r"^dashboard/", dashboard, name="dashboard"),
url(r"^accounts/", include("django.contrib.auth.urls")),
url(r"^exercise/index/",exerciseIndex,name="exerciseIndex"),
url(r"^exercise/upload/",exerciseUpload,name="exerciseUpload"),
url(r"^video/index/", videoIndex, name="videoIndex"),
url(r"^video/index", videoIndex, name="videoIndex"),
url(r"^video/upload/",VideoUpload,name="VideoUpload"),
path("exercise/download/exerciseFiles/<str:path>", downloadExerciseFiles, name="downloadExerciseFiles"),
path("video/play/<int:id>", videoPlay, name="videoPlay"),
path("submitExercise/index/",submitExerciseIndex,name="submitExerciseIndex"),
path("submitExercise/sendExercise/<int:ExeId>",sendExercise,name="sendExercise"),
path("submitExercise/sendExercise/<int:ExeId>/",sendExercise,name="sendExercise"),
path("submitExercise/download/exerciseFiles/<str:path>", downloadExerciseFiles, name="downloadExerciseFiles"),
path("error", accessdenied, name="accessdenied"),
path("/", dashboard, name="dashboard"),
path("" ,dashboard, name="dashboard"),
path( " ",dashboard, name="dashboard"),
]
|
[
"nimaniroumand80@gmail.com"
] |
nimaniroumand80@gmail.com
|
1a1372d2a403d52f3d6ef2b3ffc42325961a89fe
|
609b6c5f1e5663055e3df96e75b58e7a7ad87e4f
|
/finddjangofiles.py
|
99b51e04ec353994f116dfbf5019c64fd56ed553
|
[] |
no_license
|
rroyales/testing
|
a4028fe4b9ba6dafb8f1681a905d1f6fae9465b8
|
a011510c375cffefb08c0a0542967e8f4f9a4e14
|
refs/heads/master
| 2016-09-06T21:42:53.859393
| 2013-12-08T04:57:13
| 2013-12-08T04:57:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
import sys
sys.path = sys.path[1:]
import django
print(django.__path__)
|
[
"rroyales@gmail.com"
] |
rroyales@gmail.com
|
ac78fc59ed4c8357d5b5b442db111b99b0a4b56c
|
7df9872f1975f8a7b9f61b511c30b789b5462ebf
|
/examples/signal.py
|
7a1a8bb381a9c572a0f75038603dcec16e01d7cf
|
[
"Apache-2.0"
] |
permissive
|
rtyler/graphy
|
02bab359b51e58657bcc7b793e329dda1fe6f591
|
b0a83588217fc15ffc728fa33de880a7e6658058
|
refs/heads/master
| 2020-04-06T03:42:19.531845
| 2009-06-26T22:35:13
| 2009-06-26T22:35:13
| 567,805
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from graphy.backends import google_chart_api
from graphy import bar_chart
left_channel = []
right_channel = []
for i in xrange(0, 360, 3):
left_channel.append(100.0 * math.sin(math.radians(i)))
right_channel.append(100.0 * math.sin(math.radians(i + 30)))
chart = google_chart_api.BarChart()
chart.AddBars(left_channel, color='0000ff')
chart.AddBars(right_channel, color='ff8040')
chart.display.enhanced_encoding = True
print '<html><head><title>Audio Signal</title></head><body>'
print '<h1>Separate</h1>'
chart.stacked = False
chart.style = bar_chart.BarChartStyle(None, 0, 1)
print chart.display.Img(640, 120)
print '<h1>Joined</h1>'
chart.stacked = True
chart.style = bar_chart.BarChartStyle(None, 1)
print chart.display.Img(640, 120)
print '</body></html>'
|
[
"zovirl@zovirl.com@30582518-8026-11dd-8d1c-71c7e1663bfb"
] |
zovirl@zovirl.com@30582518-8026-11dd-8d1c-71c7e1663bfb
|
c7a18bf956c7f59fd07ed23f4cdcbf46870edcb1
|
6e0972dba67d369041894d9f59d22fa463528ff9
|
/orders/migrations/0016_auto_20200525_1610.py
|
dd927d61cad845c944d760b00dc57bc4f995331c
|
[] |
no_license
|
tonywh/cs50w-project3
|
5d0efb4579d5dc6f30c3813322a52e3fb47d585b
|
7c5bf4df9a80658501091cbc18836a5870c26dc1
|
refs/heads/master
| 2023-05-27T23:42:38.764391
| 2020-06-10T06:54:06
| 2020-06-10T06:54:06
| 265,222,097
| 0
| 0
| null | 2021-06-10T22:56:57
| 2020-05-19T10:56:56
|
Python
|
UTF-8
|
Python
| false
| false
| 709
|
py
|
# Generated by Django 2.0.3 on 2020-05-25 16:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0015_auto_20200525_1428'),
]
operations = [
migrations.AlterField(
model_name='pizza',
name='description',
field=models.TextField(max_length=1024),
),
migrations.AlterField(
model_name='product',
name='description',
field=models.TextField(max_length=1024),
),
migrations.AlterField(
model_name='sub',
name='description',
field=models.TextField(max_length=1024),
),
]
|
[
"tony.whittam@gmail.com"
] |
tony.whittam@gmail.com
|
a0366ed314803e1af96ec9306d27d26d291e8153
|
c0eee87cbd58162315b4f51a2d64e45bb76401d2
|
/locations/spiders/chargepoint.py
|
24a9e1ebeca6e4a05e272fc8f5e3c230f232de88
|
[
"MIT",
"CC0-1.0"
] |
permissive
|
gmavian/alltheplaces
|
bdcbcbb851c8891c76fabce349cb7e071c2f3768
|
0fdd61e084a764dedf483a7074265956a38a3303
|
refs/heads/master
| 2023-08-03T19:46:17.734124
| 2023-07-31T12:36:38
| 2023-07-31T12:36:38
| 162,633,873
| 0
| 0
|
NOASSERTION
| 2018-12-20T21:37:18
| 2018-12-20T21:37:17
| null |
UTF-8
|
Python
| false
| false
| 6,642
|
py
|
import json
import random
import urllib.parse
import scrapy
from locations.categories import Categories, apply_category
from locations.items import Feature
class ChargePointSpider(scrapy.Spider):
name = "chargepoint"
item_attributes = {"brand": "ChargePoint", "brand_wikidata": "Q5176149"}
def start_requests(self):
bounds = (-180.0, -90.0, 180.0, 90.0)
query = {
"map_data": {
"screen_width": 1024,
"screen_height": 1024,
"sw_lon": bounds[0],
"sw_lat": bounds[1],
"ne_lon": bounds[2],
"ne_lat": bounds[3],
"filter": {
"connector_l1": False,
"connector_l2": False,
"is_bmw_dc_program": False,
"is_nctc_program": False,
"connector_chademo": False,
"connector_combo": False,
"connector_tesla": False,
"price_free": False,
"status_available": False,
"network_chargepoint": True,
"network_blink": False,
"network_semacharge": False,
"network_evgo": False,
"connector_l2_nema_1450": False,
"connector_l2_tesla": False,
},
}
}
yield scrapy.http.JsonRequest(
url="https://mc.chargepoint.com/map-prod/get?" + urllib.parse.quote(json.dumps(query).encode("utf8")),
method="GET",
)
def parse(self, response):
response_data = response.json()["map_data"]
for summary in response_data.get("summaries"):
port_count = summary.get("port_count", {}).get("total", 0)
if port_count < 100:
# If there's a small-ish number of ports in this summary bounding box
# then request station list for the bbox
# If there's a single station here, the bounding box will have zero area
# around the point, and the API doesn't like that. So we make it a little
# bigger manually.
if summary["ne_lon"] - summary["sw_lon"] < 0.001:
summary["ne_lon"] += 0.01
summary["sw_lon"] -= 0.01
if summary["ne_lat"] - summary["sw_lat"] < 0.001:
summary["ne_lat"] += 0.01
summary["sw_lat"] -= 0.01
query = {
"station_list": {
"screen_width": 1024,
"screen_height": 1024,
"sw_lon": summary["sw_lon"],
"sw_lat": summary["sw_lat"],
"ne_lon": summary["ne_lon"],
"ne_lat": summary["ne_lat"],
"page_size": 100,
"page_offset": "",
"filter": {
"connector_l1": False,
"connector_l2": False,
"is_bmw_dc_program": False,
"is_nctc_program": False,
"connector_chademo": False,
"connector_combo": False,
"connector_tesla": False,
"price_free": False,
"status_available": False,
"network_chargepoint": True,
"network_blink": False,
"network_semacharge": False,
"network_evgo": False,
"connector_l2_nema_1450": False,
"connector_l2_tesla": False,
},
}
}
yield scrapy.http.JsonRequest(
url="https://mc.chargepoint.com/map-prod/get?"
+ urllib.parse.quote(json.dumps(query).encode("utf8")),
method="GET",
callback=self.parse_station_list,
)
else:
# Otherwise make another map data request for the summary bounding box, simulating zooming in
query = {
"map_data": {
"screen_width": 1024,
"screen_height": 1024,
"sw_lon": summary["sw_lon"],
"sw_lat": summary["sw_lat"],
"ne_lon": summary["ne_lon"],
"ne_lat": summary["ne_lat"],
"filter": {
"connector_l1": False,
"connector_l2": False,
"is_bmw_dc_program": False,
"is_nctc_program": False,
"connector_chademo": False,
"connector_combo": False,
"connector_tesla": False,
"price_free": False,
"status_available": False,
"network_chargepoint": True,
"network_blink": False,
"network_semacharge": False,
"network_evgo": False,
"connector_l2_nema_1450": False,
"connector_l2_tesla": False,
},
}
}
yield scrapy.http.JsonRequest(
url="https://mc.chargepoint.com/map-prod/get?"
+ urllib.parse.quote(json.dumps(query).encode("utf8")),
method="GET",
)
def parse_station_list(self, response):
station_list = response.json()["station_list"]
for summary in station_list.get("summaries"):
properties = {
"ref": summary["device_id"],
"lat": summary["lat"],
"lon": summary["lon"],
"name": " ".join(summary.get("station_name", [])) or None,
"city": summary.get("address", {}).get("city"),
"state": summary.get("address", {}).get("state_name"),
"street_address": summary.get("address", {}).get("address1"),
}
apply_category(Categories.CHARGING_STATION, properties)
properties["extras"]["capacity"] = summary["port_count"]["total"]
yield Feature(**properties)
|
[
"noreply@github.com"
] |
gmavian.noreply@github.com
|
475328963d8e76328bb904c7636e6d0593e4cc1b
|
38258a7dd9acbfb7adf72983015de68a948a4826
|
/CodeUp/C_6066.py
|
f3668e5168ba85e8a1894b1865365b3f8f4bb72f
|
[] |
no_license
|
kangsm0903/Algorithm
|
13a7fe5729039a1d0ce91a574c4755a8a92fb02b
|
7d713d1c9e2e4dc30141d4f409ac1430a357065b
|
refs/heads/master
| 2022-10-04T00:33:49.247977
| 2022-09-26T12:51:16
| 2022-09-26T12:51:16
| 219,265,010
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
a = list(map(int, input().split(" ")))
for i in range(0,3):
if a[i]%2 == 0:
a[i] = "even"
else:
a[i] = "odd"
for i in range(0,3):
print(a[i])
|
[
"kangsm0903@naver.com"
] |
kangsm0903@naver.com
|
ef822a1e255fb2074b792181339c6da6256c78c3
|
f29293ba53d454d3c436db317f6b4e3fa11cbfd0
|
/arrangør/views.py
|
3ddb31c41be4ae8b54b62d014ef8f525eae4478e
|
[] |
no_license
|
alfredronning/oldProject
|
375f3979da0aa8cdb5eaebd4ad268901eea3c0b9
|
9b099cdd3a5e4091242c7905588b8c5260970284
|
refs/heads/master
| 2021-01-22T07:32:06.735812
| 2017-02-13T12:29:06
| 2017-02-13T12:29:06
| 81,822,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,618
|
py
|
from django.shortcuts import render, redirect
from django.views import generic
from band_booking.models import Concert, Technical_needs
from bookingsjef.actions.concert_overview_term import get_current_term
# Create your views here.
class ConcertsView(generic.ListView):
"""
Generates a view for the query set of the current term
"""
template_name = 'arrangør/concert_overview.html'
context_object_name = 'concerts'
def get_queryset(self):
"""
:return: The concerts of the current term
"""
start_term, end_term = get_current_term()
concerts = Concert.objects.filter(date__range=[start_term, end_term])
return concerts
def overview_concert(request, id):
"""
:param request: The HTTP request
:param id: The id of the concert
:return: An overview page for the given concert, if the user has the required permissions. Else a redirect.
Returns an overview page for the concert of the given ID. If there is no concert with this ID or the user does not
have the necessary requirements to view the page the user will be redirected to the concert_overview page.
"""
def build_equipment(concert):
"""
Finds the amount of equipment needed for all the bands of the concert, especially it combines the requirements
with the same names such that it is easier for the user to read through the list
"""
equipment = {}
for band in concert.bands.all():
# Find equipment needed by the given band
band_equipment = Technical_needs.objects.filter(band=band)
for current_equipment in band_equipment:
# Combine the needs of similar equipment for different bands
if current_equipment.equipment_name in equipment:
equipment[current_equipment.equipment_name] += current_equipment.amount
else:
equipment[current_equipment.equipment_name] = current_equipment.amount
return equipment
# Try to find a concert with the given ID
try:
concert = Concert.objects.get(pk=id)
except Concert.DoesNotExist:
return redirect('arrangør:concerts')
# Check if user is allowed to view the given concert
if not request.user.is_superuser and not request.user.has_perm('band_booking.can_see_concert_information') and request.user != concert.organizer:
return redirect('arrangør:concerts')
# Render the page
return render(request, 'arrangør/concert.html', {'concert': concert, 'equipment': build_equipment(concert)})
|
[
"alfredronning@gmail.com"
] |
alfredronning@gmail.com
|
477aca3a366ff7509d6341c4a3179e7feb47d413
|
a15c69fdbf4a3aab6ab816bb99f3161676b7569b
|
/src/srm/598/ErasingCharacters.py
|
ab60cd3e72b08ac4eb98ce3a32dba4cf1b14367f
|
[] |
no_license
|
prstcsnpr/Algorithm
|
08bf5df5741af3e1b43830bf896e281a45a6ab94
|
4eeab0c3fdc3f488e7cf7b3554f82fb4c63899ad
|
refs/heads/master
| 2016-09-05T10:48:20.599794
| 2013-12-11T07:36:58
| 2013-12-11T07:36:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
import unittest
class ErasingCharacters(object):
def simulate(self, s):
while True:
result = s
for i in range(len(s) - 1):
if s[i] == s[i + 1]:
result = s[0:i] + s[i+2:]
if result == s:
return result
else:
s = result
class ErasingCharactersTestCase(unittest.TestCase):
def setUp(self):
self.ec = ErasingCharacters()
def test_0(self):
self.assertEqual(self.ec.simulate('cieeilll'), 'cl')
def test_1(self):
self.assertEqual(self.ec.simulate('topcoder'), 'topcoder')
def test_2(self):
self.assertEqual(self.ec.simulate('abcdefghijklmnopqrstuvwxyyxwvutsrqponmlkjihgfedcba'), '')
def test_3(self):
self.assertEqual(self.ec.simulate('bacaabaccbaaccabbcabbacabcbba'), 'bacbaca')
def test_4(self):
self.assertEqual(self.ec.simulate('eel'), 'l')
if __name__ == '__main__':
unittest.main()
|
[
"prstcsnpr@10.2.78.74"
] |
prstcsnpr@10.2.78.74
|
6dc284c465d8262adfc99feaf1ce573a82accab7
|
126901616c8339139f4f880b9b0101e3505977e1
|
/backend/quickmed/models.py
|
b1c1bb4d872b91f90dd43b7e0e77f96129cecd34
|
[] |
no_license
|
LordGhostX/QuickMed
|
51a8d27237b52a165161244620f5ea75eb28fa57
|
c5525a9804201f8c427d60caf27a040d9b225512
|
refs/heads/master
| 2023-05-25T00:04:25.998587
| 2022-12-02T17:31:54
| 2022-12-02T17:31:54
| 210,404,014
| 9
| 1
| null | 2023-05-22T23:15:22
| 2019-09-23T16:36:13
|
HTML
|
UTF-8
|
Python
| false
| false
| 836
|
py
|
from django.db import models
from datetime import datetime
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name="profile", on_delete=models.CASCADE)
hospital_name = models.TextField(max_length=100)
hospital_address = models.TextField(max_length=100)
hospital_phone = models.TextField(max_length=100)
def __str__(self):
return self.user.username
NULL_AND_BLANK = {'null': True, 'blank': True}
class Result(models.Model):
creator = models.ForeignKey(User, **NULL_AND_BLANK, on_delete=models.CASCADE)
test_type = models.CharField(max_length=100)
test_results = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
notes = models.TextField(max_length= 500)
|
[
"chukslord1@gmail.com"
] |
chukslord1@gmail.com
|
d30a87df7fb6fd525296fcf61ab27d40f79a5e80
|
3ac57e01e61e4216afeae6006c515eb02ecdcb76
|
/euler3.py
|
435da7947753f2e34db631f4386ae6371b982f0b
|
[] |
no_license
|
akaruihoshi/project-euler
|
6ca4e8ef888110bf89c504d5a35e331e2f8bf8cf
|
f89cedca8a148f5f8cd4ec161e50c28edf1a5678
|
refs/heads/master
| 2022-08-21T18:38:36.580433
| 2020-05-22T09:59:50
| 2020-05-22T09:59:50
| 266,076,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
a = 600851475143
x = 2
y = 2
while x <= a:
is_prime = True
    # to figure out whether it is prime or not
if x > 3:
x1 = x ** 0.5
if x1 % 1 > 0:
x1 = int(x1) + 1
while y <= x1 and is_prime:
z = x / y
if z % 1 == 0:
is_prime = False
y = y + 1
y = 2
    # to check whether a can be divided by that number
while is_prime:
# print(a)
if a % x == 0:
a = a / x
else:
is_prime = False
x = x + 1
print(x-1)
|
[
"akarui.hoshi.6282742@gmail.com"
] |
akarui.hoshi.6282742@gmail.com
|
ec24655e98ab05b42d1d54bedcb848c2fc90f07e
|
f2ec1298c00d813c7e973cac22184ea8f54eb60c
|
/MxShop/apps/trade/models.py
|
6b6549cbd73a281f4648faa11b75915f61274de1
|
[] |
no_license
|
llf-1996/mx_drf
|
fcfaa028630eeb02be91af5e30fb2a200037400c
|
f4878c0d9857e7af7277d10cc32da5d9c522de0c
|
refs/heads/master
| 2020-06-05T06:51:49.629343
| 2019-06-17T13:20:29
| 2019-06-17T13:20:29
| 192,350,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,760
|
py
|
from datetime import datetime
from django.db import models
from django.contrib.auth import get_user_model
from goods.models import Goods
User = get_user_model()
# Create your models here.
class ShoppingCart(models.Model):
"""
    Shopping cart
"""
user = models.ForeignKey(User, verbose_name=u"用户")
goods = models.ForeignKey(Goods, verbose_name=u"商品")
nums = models.IntegerField(default=0, verbose_name="购买数量")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = '购物车'
verbose_name_plural = verbose_name
unique_together = ("user", "goods")
def __str__(self):
return "%s(%d)".format(self.goods.name, self.nums)
class OrderInfo(models.Model):
"""
    Order
"""
ORDER_STATUS = (
("TRADE_SUCCESS", "成功"),
("TRADE_CLOSED", "超时关闭"),
("WAIT_BUYER_PAY", "交易创建"),
("TRADE_FINISHED", "交易结束"),
("paying", "待支付"),
)
user = models.ForeignKey(User, verbose_name="用户")
order_sn = models.CharField(max_length=30, null=True, blank=True, unique=True, verbose_name="订单号")
trade_no = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name=u"交易号")
pay_status = models.CharField(choices=ORDER_STATUS, default="paying", max_length=30, verbose_name="订单状态")
post_script = models.CharField(max_length=200, verbose_name="订单留言")
order_mount = models.FloatField(default=0.0, verbose_name="订单金额")
pay_time = models.DateTimeField(null=True, blank=True, verbose_name="支付时间")
    # user information
address = models.CharField(max_length=100, default="", verbose_name="收货地址")
signer_name = models.CharField(max_length=20, default="", verbose_name="签收人")
singer_mobile = models.CharField(max_length=11, verbose_name="联系电话")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = u"订单"
verbose_name_plural = verbose_name
def __str__(self):
return str(self.order_sn)
class OrderGoods(models.Model):
"""
    Goods details of an order
"""
order = models.ForeignKey(OrderInfo, verbose_name="订单信息", related_name="goods")
goods = models.ForeignKey(Goods, verbose_name="商品")
goods_num = models.IntegerField(default=0, verbose_name="商品数量")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "订单商品"
verbose_name_plural = verbose_name
def __str__(self):
return str(self.order.order_sn)
|
[
"2367746876@qq.com"
] |
2367746876@qq.com
|
100da862027b8d2642d223ead728c151f3d438ab
|
84864f862dec9171e958920f2c8e7c920fcef056
|
/Undergraduate Thesis/NumericalSimulation/nzg.py
|
c1af37dc8752932e7fda2248be4c291a108fc3e9
|
[] |
no_license
|
Orcuslc/Learning
|
7704950f8c09232dadbbde81ed82ddc0ca65172d
|
ffa856febd85235d17358178f1e288ffae7856cb
|
refs/heads/master
| 2020-03-26T11:59:54.093395
| 2018-05-29T04:19:36
| 2018-05-29T04:19:36
| 47,269,920
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,234
|
py
|
import random as rd
import math
import numpy as np
from matplotlib import pyplot as plt
def walk_astep(memory, q, step = 1):
u = rd.random()
if u <= q:
memory.append(step)
else:
memory.append(-1*step)
def random_choice(k, alpha):
u = rd.random()
# if alpha == 1.0:
# return math.ceil(alpha*k)
# if k <= 2:
# if alpha == 1.0:
# return math.ceil(u*k)
# return math.ceil(math.log((alpha**k-1)*u+1, alpha))
# else:
# if alpha == 1.0:
# return math.ceil(u*2)+(k-3)
# return math.ceil(math.log((alpha**2-1)*u+1, alpha))+(k-3)
# return math.ceil(u*k)
return math.ceil(u-alpha/(alpha+alpha**2))+(k-1)
def walk(n, p, q, alpha):
memory = [0]
for i in range(1, n+1):
if i == 1:
walk_astep(memory, q, 1)
elif i == 2:
walk_astep(memory, q, memory[1])
else:
mem = random_choice(i-1, alpha)
# print(mem)
walk_astep(memory, p, memory[mem])
return np.cumsum(memory)
if __name__ == '__main__':
	# memory = walk(1000000, 1/3, 2/3, 1)
num = 1000
n = 10000
count = 0
p = 2/3
q = 2/3
alpha = 2
# for p in [2/3]:
# for q in [2/3]:
# count += 1
# plt.figure(count)
# sub = 0
# # for alpha in [1, 1.25, 1.5, 1.75, 2]:
# for alpha in [1]:
# memory = np.zeros(n+1)
# sub += 1
# for i in range(num):
# # print(i)
# memory += walk(n, p, q, alpha)
# # print(len(memory))
# memory /= num
# # memory = memory[np.where(memory > 0)]
# # plt.subplot(510+sub)
# plt.plot(np.log(np.array(range(1, len(memory)))), np.log(memory[1:]))
# # for p in [1/3]:
# # for q in [1/3, 2/3]:
# # count += 1
# # plt.figure(count)
# # for alpha in [1, 1.25, 1.5, 1.75, 2]:
# # memory = walk(n, p, q, alpha)
# # plt.plot(np.array(range(1, n+1)), memory[1:])
memory = np.zeros(n+1)
for i in range(num):
# print(i)
memory += walk(n, 2/3, 2/3, alpha = 1)
# print(len(memory))
memory /= num
# fig = plt.figure()
plt.plot(np.log(range(1, n+1)), np.log(memory[1:]), 'b')
plt.xlabel('log(t)')
plt.ylabel(r'$log(\langle X_t \rangle)$')
# fig.suptitle(r'$\langle X_t \rangle$'+'和'+'t的关系', fontproperties='SimHei', fontsize = 40)
# # plt.plot(range(1, n+1), memory[1:])
plt.grid(True)
plt.show()
|
[
"orcuslc@hotmail.com"
] |
orcuslc@hotmail.com
|
b8f080d2a6d43c4add9608c655522e1b65e86fe4
|
7020f68875af3f94b309c12bfe45d6c3520a7b3a
|
/Week3.py
|
c09cc3a6e0d67b2696c31673d661e6b204fa0e0f
|
[] |
no_license
|
mgrddsj/MEMS-OpenCV-Heroku
|
a6326617ba373673dd82e5e0f09f7c2c157b6744
|
209d61bba568cb7d214c081d5f372f07c9efb655
|
refs/heads/main
| 2023-04-25T07:29:37.471350
| 2021-05-21T07:14:48
| 2021-05-21T07:14:48
| 359,997,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,516
|
py
|
import cv2
import streamlit as st
import numpy as np
import glob
def main():
st.markdown("**Note:** Images are compressed for web viewing.")
st.markdown("Display and process results may be different from original images.")
st.markdown("However, the images are still quite large, you may need a fast Internet to open this page.")
st.markdown("图片已针对网页浏览压缩,图片的显示和处理可能与原图有偏差。")
st.markdown("但图片仍然较大,加载可能会比较缓慢。如果加载不出来,请确保您可以访问国际互联网。")
st.sidebar.write("Variable control panel\n变量控制台")
st.markdown("***")
st.markdown("This is a web app that renders the images real-time, tweak the values in the variable control panel to see the changes.")
st.markdown("For phone users, you can see the control panel by clicking the arrow on the top left of the page.")
st.markdown("这是一个实时渲染图片网页应用,您可以更改变量控制台中的数值来查看变化。")
st.markdown("手机用户可以通过点击页面左上方的箭头按钮来打开变量控制台。")
st.markdown("***")
file_list = sorted(glob.glob("img/*"))
file_path = st.sidebar.selectbox("Image:", file_list, index=8)
original_img = cv2.imread(file_path)
original_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)
st.image(original_img, use_column_width=True, caption="Original image 原图")
image = cv2.imread(file_path, 0) # Read as grayscale image
st.image(image, use_column_width=True, caption="Grayscale image 灰度图")
    # Maximum filtering (dilation)
kernel = np.ones((3, 3), np.uint8)
dilate_iteration = st.sidebar.slider(
"Dilate iteration 最大滤波(膨涨)次数", min_value=1, max_value=50, value=1)
dilate = cv2.dilate(image, kernel, iterations=dilate_iteration)
st.image(dilate, use_column_width=True, clamp=True, caption="Dilated image 最大滤波处理")
    # Minimum filtering (erosion)
kernel = np.ones((3, 3), np.uint8)
    # The higher the iteration value, the stronger the blurring (erosion); it is positively correlated and must be an integer
erosion_iteration = st.sidebar.slider(
"Erosion iteration 最小滤波(腐蚀)次数", min_value=1, max_value=50, value=1)
erosion = cv2.erode(dilate, kernel, iterations=erosion_iteration)
st.image(erosion, use_column_width=True, clamp=True, caption="Eroded image 最小滤波处理")
    # Thresholding
threshhold_value = st.sidebar.slider(
"Threshold 阈值", min_value=50, max_value=255, value=100)
ret, thresh = cv2.threshold(erosion, threshhold_value, 255, cv2.THRESH_BINARY)
thresh = thresh.astype(np.float64)
st.image(thresh, use_column_width=True, clamp=True, caption="Threshold processed image 阈值处理")
    # Contours
thresh = thresh.astype(np.uint8)
img = cv2.Canny(thresh, 100, 200)
st.image(img, use_column_width=True, clamp=True, caption="Contour 轮廓")
    # Hough line transform
st.sidebar.write("Hough Line Transform (HLT) 霍夫变换")
blank2 = np.zeros(img.shape, np.uint8)
blank2 = cv2.cvtColor(blank2, cv2.COLOR_GRAY2BGR)
houghRho = st.sidebar.slider("HLT rho (step size) 霍夫变换 rho 值(搜索步长)", min_value=1, max_value=10, value=1)
houghThreshhold = st.sidebar.slider(
"HLT threshold 霍夫变换阈值", min_value=1, max_value=1000, value=100)
houghMinLineLength = st.sidebar.slider(
"HLT min. length 霍夫最短线段长度", min_value=1, max_value=500, value=10)
houghMaxLineGap = st.sidebar.slider("HLT max line gap 霍夫最长间隙", min_value=1, max_value=200, value=100)
lines = cv2.HoughLinesP(img, houghRho, np.pi/180, houghThreshhold,
minLineLength=houghMinLineLength, maxLineGap=houghMaxLineGap)
for line in lines:
x1, y1, x2, y2 = line[0]
cv2.line(blank2, (x1, y1), (x2, y2), (0, 255, 0), 2)
st.image(blank2, use_column_width=True, clamp=True, caption="Hough line transform (line detection) 霍夫变换(直线检测)")
st.text("lines detected: {}".format(len(lines)))
    # Harris corner detection
st.write("**Harris Corner Detection 角点检测**")
with st.echo():
corners = cv2.cornerHarris(img, blockSize=5, ksize=5, k=0.04)
harris_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
harris_img[corners > 0.1*corners.max()] = [0, 255, 0]
st.image(harris_img, use_column_width=True, clamp=True, caption="Harris Corner detection 角点检测")
st.text("Corners detected: {}".format(len(corners)))
st.write("The detected corners are drawn in green, you may need to zoom in to see them.")
st.write("检测到的角点已用绿色像素标出,您可能需要放大来看见它们。")
    # Shi-Tomasi corner detection
st.write("**Shi-Tomasi Corner Detection 角点检测**")
with st.echo():
corners_s = cv2.goodFeaturesToTrack(
img, maxCorners=100, qualityLevel=0.01, minDistance=0)
corners_s = np.int0(corners_s)
shitomasi_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for i in corners_s:
x, y = i.ravel()
cv2.circle(shitomasi_img, (x, y), 5, (0, 255, 0), -1)
st.image(shitomasi_img, use_column_width=True,
clamp=True, caption="Shi-Tomasi Corner detection 角点检测")
st.text("Corners detected: {}".format(len(corners_s)))
if __name__ == "__main__":
main()
|
[
"Jesse_Xu@live.com"
] |
Jesse_Xu@live.com
|
67d07065a2938033112c73776b10b9ebf83e424d
|
bdda1ad015be505352a197d4d1066b13826a7a32
|
/core/migrations/0001_initial.py
|
e5fcef9501fd67ee8dedd31b6300c2d330b9d49c
|
[] |
no_license
|
dowoncha/basa-test
|
8700428ea1ec14aa1e3c6f64beadda8b6ba4eab3
|
433477ccc7ab595f5db247792175cc5b88e368cf
|
refs/heads/master
| 2020-12-02T08:14:18.470664
| 2017-07-10T15:15:17
| 2017-07-10T15:15:17
| 96,791,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,618
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-07-10 11:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('city', models.CharField(max_length=200)),
('country', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='artist',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Location'),
),
migrations.AddField(
model_name='artist',
name='tags',
field=models.ManyToManyField(to='core.Tag'),
),
]
|
[
"dowoncha@live.unc.edu"
] |
dowoncha@live.unc.edu
|
b35488ea9f9a8ede3e0dd6a8dfbdd9d30d443b5f
|
19d58444437e2af0f143b77516702b5c2f5e3f61
|
/twin_pixels.py
|
5228cd496513b1632561da7192d46ddf57cd40f5
|
[] |
no_license
|
simozmp/jes_exercises
|
8a8bf6cbecd426720b54a6ed179d807335c02f23
|
c95901171283ed64dcbc76b31fa2233b8a049830
|
refs/heads/master
| 2020-06-14T19:01:27.754651
| 2016-04-15T13:22:47
| 2016-04-15T13:22:47
| 54,203,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,402
|
py
|
def areTwins(pix1, pix2) :
# @param pix1: Pixel;
# @param pix2: Pixel;
# @return boolean; Two pixels are twin when the sum of the pix1
# components is equal to the sum of the pix2 components
return getRed(pix1)+getGreen(pix1)+getBlue(pix1) == getRed(pix2)+getGreen(pix2)+getBlue(pix2)
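# Worked example (added, based on the definition above): makeColor(1,2,3) and
# makeColor(3,2,1) are twins since both component sums equal 6, while
# makeColor(1,2,3) and makeColor(1,2,4) are not (6 vs. 7).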
def haveTwinForEachPix(pic1, pic2) :
# @param pic1: Picture; First picture
# @param pic2: Picture; Second picture
# @return boolean; The function is intended to see if each
# of the pixel's colors in the first picture
# has a respective twin in the second one
for pix1 in getAllPixels(pic1) : # scans each pixel of the first picture
hasTwin = false # flag for pix1 (true when the pix1 pixel has a twin in the second picture)
for pix2 in getAllPixels(pic2) : # scans each pixel of the second picture
if areTwins(pix1,pix2) : # tests if the two colors are twins, in that case:
hasTwin = true # - keep track of the found twin
break # - break the pix2 cycle
if not hasTwin : # if pix1 doesn't have any twin in the second picture ...
return false # ... result is false
return true
## WITH DEBUG MESSAGES
## def haveTwinForEachPix(pic1, pic2) :
## # @param pic1: Picture;
## # @param pic2: Picture;
##
## for pix1 in getAllPixels(pic1) :
## # DEBUG # print "-----Pix1:" + String(getRed(pix1)+getGreen(pix1)+getBlue(pix1))
##
## check = false
##
## for pix2 in getAllPixels(pic2) :
## # DEBUG # print "--Pix2:" + String(getRed(pix2)+getGreen(pix2)+getBlue(pix2))
##
## if areTwins(pix1,pix2) :
## # DEBUG # print "-- twins!"
## check = true
## break
##
## if not check :
## # DEBUG # print "false!!!!"
## return false
##
## return true
#
# I used the two following functions to build test pictures
#
def makeFirstPic() :
pic = makeEmptyPicture(3,3,white)
setColor(getPixelAt(pic,0,0), makeColor(1,2,3)) # r+g+b = 6
setColor(getPixelAt(pic,1,0), makeColor(2,2,3)) # r+g+b = 7
setColor(getPixelAt(pic,2,0), makeColor(3,2,3)) # r+g+b = 8
setColor(getPixelAt(pic,0,1), makeColor(4,2,3)) # r+g+b = 9
setColor(getPixelAt(pic,1,1), makeColor(5,2,3)) # r+g+b = 10
setColor(getPixelAt(pic,2,1), makeColor(6,2,3)) # r+g+b = 11
setColor(getPixelAt(pic,0,2), makeColor(7,2,3)) # r+g+b = 12
setColor(getPixelAt(pic,1,2), makeColor(8,2,3)) # r+g+b = 13
setColor(getPixelAt(pic,2,2), makeColor(9,2,3)) # r+g+b = 14
return pic
def makeSecondPic() :
pic = makeEmptyPicture(3,3,white)
setColor(getPixelAt(pic,0,0), makeColor(1,2,3)) # r+g+b = 6
setColor(getPixelAt(pic,1,0), makeColor(2,2,3)) # r+g+b = 7
setColor(getPixelAt(pic,2,0), makeColor(0,2,3)) # r+g+b = 5
setColor(getPixelAt(pic,0,1), makeColor(4,2,3)) # r+g+b = 9
setColor(getPixelAt(pic,1,1), makeColor(5,2,3)) # r+g+b = 10
setColor(getPixelAt(pic,2,1), makeColor(6,2,3)) # r+g+b = 11
setColor(getPixelAt(pic,0,2), makeColor(7,2,3)) # r+g+b = 12
setColor(getPixelAt(pic,1,2), makeColor(8,2,3)) # r+g+b = 13
setColor(getPixelAt(pic,2,2), makeColor(9,2,3)) # r+g+b = 14
return pic
|
[
"simo.zmp96@gmail.com"
] |
simo.zmp96@gmail.com
|
138f1eaf70bab179317bf09a3c49884ad2530e08
|
772a82205af92d2f2d2b490ac6bc23fdb7456124
|
/algorithm/luogu/P1540.py
|
8486a00f271d8dde0f15acdb3de6cc5abafe6eae
|
[] |
no_license
|
atashi/LLL
|
4f777b3a06c6ed38eab4323d2072dbbec22eee92
|
857b8c7fccfe8216da59228c1cf3675444855673
|
refs/heads/master
| 2021-05-17T10:11:28.946779
| 2019-11-25T15:56:14
| 2019-11-25T15:56:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
m, n = [int(i) for i in input().split()]
nums = [int(i) for i in input().split()]
d = dict()
buffer = []
i = 0
count = 0
for num in nums:
if num not in d:
d[num] = True
count += 1
if len(buffer) < m:
buffer.append(num)
else:
i = i % m
tmp = buffer[i]
buffer[i] = num
del d[tmp]
i += 1
print(count)
|
[
"rebornwwp@gmail.com"
] |
rebornwwp@gmail.com
|
5a819e9002a72dec33dd8777b6a34b3a92f4947c
|
67e55f481808fa307add29e2ad348492deef78a1
|
/03_Code_Forces/05_WordCapitalization.py
|
4eafedc495f12c35499542165902895447547ef1
|
[] |
no_license
|
jpallavi23/Smart-Interviews
|
1dea8d28e6a7152205f2b680a2511824c74465df
|
91f0ff5414af02a49e1a75ef923cd5497e0e4bb8
|
refs/heads/master
| 2023-02-05T15:37:05.364713
| 2020-12-31T13:27:29
| 2020-12-31T13:27:29
| 310,283,727
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
'''
Capitalization is writing a word with its first letter as a capital letter. Your task is to capitalize the given word.
Note, that during capitalization all the letters except the first one remains unchanged.
Input
A single line contains a non-empty word. This word consists of lowercase and uppercase English letters. The length of the word will not exceed 103.
Output
Output the given word after capitalization.
Examples
Input
ApPLe
Output
ApPLe
Input
konjac
Output
Konjac
'''
word = input()
print(*[word[0].upper() + word[1:]])
|
[
"pallavijampala112722@gmail.com"
] |
pallavijampala112722@gmail.com
|
152639cc794a953f8943c70b86d1bfec8a78cb17
|
76d6f6f5e37521827b3d2a6a36bc137103075039
|
/module1-3/randomSearch.py
|
0a4b30d332245175d7dcb867492575b923c701e2
|
[] |
no_license
|
honghuCode/cs231n-2
|
380988b54b54308abd35579dfef5e2f7d7f35784
|
1d5d7a6300f844cb96d5b483474edfd876dea9b0
|
refs/heads/master
| 2021-01-20T06:11:56.053810
| 2017-01-17T13:02:01
| 2017-01-17T13:02:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
#coding=utf-8
import numpy as np
import loss
import load_full_CIFAR10 as load
X_train, Y_train, X_test, Y_test = load.load_CIFAR10('cifar-10-batches-py/')
X_train = X_train.T;
# append a row of ones so the bias term can be folded into the weight matrix W
bias = np.ones((1, X_train.shape[1]))
X_train = np.vstack((X_train, bias))
bestloss = float('inf')  # initialize to the largest possible value (positive infinity)
for num in xrange(1000):
W = np.random.randn(10, 3073)*0.000001
lossValue = loss.L(X_train, Y_train, W);
print bestloss
if lossValue < bestloss:
bestloss = lossValue;
bestW = W
#print 'in attempt %d the loss was %f, best %f' %(num, lossValue, bestloss)
|
[
"897241687@qq.com"
] |
897241687@qq.com
|
443d3474dda27cf501fe2763686bfdd2ee103c84
|
e15d63ccde04e7458bff5af1bdad63a5c699b489
|
/example/Transformer_vision/face/deep_vit/deepvit.py
|
02c77d650ad0aaa6b6f222c09be856f10291b8a1
|
[
"WTFPL"
] |
permissive
|
ddddwee1/TorchSUL
|
775b6a2b1e4ab7aac25a3f0411de83affc257af5
|
6c7cd41b14fc8b746983e8b981d1ba4d08370ca2
|
refs/heads/master
| 2023-08-21T15:21:24.131718
| 2023-08-18T09:37:56
| 2023-08-18T09:37:56
| 227,628,298
| 13
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,510
|
py
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.reattn_weights = nn.Parameter(torch.randn(heads, heads))
self.reattn_norm = nn.Sequential(
Rearrange('b h i j -> b i j h'),
nn.LayerNorm(heads),
Rearrange('b i j h -> b h i j')
)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
# attention
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = dots.softmax(dim=-1)
# re-attention
attn = einsum('b h i j, h g -> b g i j', attn, self.reattn_weights)
attn = self.reattn_norm(attn)
# aggregate and out
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Residual(PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout))),
Residual(PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout)))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x)
x = ff(x)
return x
class DeepViT(nn.Module):
def __init__(self, *, image_size, patch_size, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):
super().__init__()
assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.dropout = nn.Dropout(emb_dropout)
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
self.pool = pool
# self.to_latent = nn.Identity()
# self.mlp_head = nn.Sequential(
# nn.LayerNorm(dim),
# nn.Linear(dim, num_classes)
# )
def forward(self, img):
x = self.to_patch_embedding(img)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
x += self.pos_embedding[:, :(n + 1)]
x = self.dropout(x)
x = self.transformer(x)
x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
# x = self.to_latent(x)
# return self.mlp_head(x)
return x
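# Minimal usage sketch (hypothetical hyperparameters, not part of the original file).
# The forward pass returns the pooled token representation, since the MLP head above is commented out.
if __name__ == '__main__':
    model = DeepViT(image_size=224, patch_size=16, dim=512, depth=6, heads=8, mlp_dim=1024)
    img = torch.randn(1, 3, 224, 224)  # a single RGB image
    feats = model(img)                 # tensor of shape (1, 512)
    print(feats.shape)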
|
[
"cy960823@outlook.com"
] |
cy960823@outlook.com
|
384216efe4438e75898ed7a6bb898d34f478d199
|
01a58a01045687b0e01f4119eab42c3af364d62f
|
/node_modules/better-sqlite3/build/config.gypi
|
971e9379328fcfcd68e62991d2aba24464230e8f
|
[
"MIT"
] |
permissive
|
Flawani/zrobiehosting
|
ed280422ef55bc7a1ae788097fe44c92822b3c28
|
9613db4e2eeadb005e456d9c67a75bcf40dc6534
|
refs/heads/master
| 2020-12-04T20:00:05.859360
| 2020-01-05T08:15:44
| 2020-01-05T08:15:44
| 231,887,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,613
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "..\\..\\deps/icu-small\\source/data/in\\icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"nasm_version": "2.14",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.64",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\Jakub2\\.node-gyp\\10.16.3",
"standalone_static_library": 1,
"access": "",
"allow_same_version": "",
"also": "",
"always_auth": "",
"audit": "true",
"audit_level": "low",
"auth_type": "legacy",
"before": "",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\Jakub2\\AppData\\Roaming\\npm-cache",
"cache_lock_retries": "10",
"cache_lock_stale": "60000",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cert": "",
"cidr": "",
"color": "true",
"commit_hooks": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"dry_run": "",
"editor": "notepad.exe",
"engine_strict": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_maxtimeout": "60000",
"fetch_retry_mintimeout": "10000",
"force": "",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\Jakub2\\AppData\\Roaming\\npm\\etc\\npmrc",
"globalignorefile": "C:\\Users\\Jakub2\\AppData\\Roaming\\npm\\etc\\npmignore",
"global_style": "",
"group": "",
"ham_it_up": "",
"heading": "npm",
"https_proxy": "",
"if_present": "",
"ignore_prepublish": "",
"ignore_scripts": "",
"init_author_email": "",
"init_author_name": "",
"init_author_url": "",
"init_license": "ISC",
"init_module": "C:\\Users\\Jakub2\\.npm-init.js",
"init_version": "1.0.0",
"json": "",
"key": "",
"legacy_bundling": "",
"link": "",
"local_address": "",
"logs_max": "10",
"long": "",
"maxsockets": "50",
"message": "%s",
"metrics_registry": "https://registry.npmjs.org/",
"node_gyp": "C:\\Program Files\\nodejs\\node_modules\\npm\\node_modules\\node-gyp\\bin\\node-gyp.js",
"node_options": "",
"node_version": "10.16.3",
"noproxy": "",
"offline": "",
"onload_script": "",
"only": "",
"optional": "true",
"otp": "",
"package_lock": "true",
"package_lock_only": "",
"parseable": "",
"prefer_offline": "",
"prefer_online": "",
"prefix": "C:\\Users\\Jakub2\\AppData\\Roaming\\npm",
"preid": "",
"production": "",
"progress": "true",
"read_only": "",
"rebuild_bundle": "true",
"registry": "https://registry.npmjs.org/",
"rollback": "true",
"save": "true",
"save_bundle": "",
"save_dev": "",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"save_prod": "",
"scope": "",
"scripts_prepend_node_path": "warn-only",
"script_shell": "",
"searchexclude": "",
"searchlimit": "20",
"searchopts": "",
"searchstaleness": "900",
"send_metrics": "",
"shell": "C:\\WINDOWS\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_commit": "",
"sign_git_tag": "",
"sso_poll_frequency": "500",
"sso_type": "oauth",
"strict_ssl": "true",
"tag": "latest",
"tag_version_prefix": "v",
"timing": "",
"tmp": "C:\\Users\\Jakub2\\AppData\\Local\\Temp",
"umask": "0000",
"unicode": "",
"unsafe_perm": "true",
"update_notifier": "true",
"usage": "",
"user": "",
"userconfig": "C:\\Users\\Jakub2\\.npmrc",
"user_agent": "npm/6.9.0 node/v10.16.3 win32 x64",
"version": "",
"versions": "",
"viewer": "browser"
}
}
|
[
"46850608+Flawani@users.noreply.github.com"
] |
46850608+Flawani@users.noreply.github.com
|
cf9ef7169a33bdadee7fd2ebbce7fd0ea9facd71
|
fb17d7a0754af8037f93382ffa90fb5374ba467e
|
/maboss/webx/test01.py
|
dc178e4c1d744bb60a3e764743dd0519daab850a
|
[
"MIT"
] |
permissive
|
mabotech/maboss.py
|
becfd68c05c70ea214ce381d2d3b0e89fac8bb24
|
30c919d09c0a290772dde2f913c51843f0f88e26
|
refs/heads/master
| 2016-09-10T10:53:00.910707
| 2014-07-05T12:18:20
| 2014-07-05T12:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
# -*- coding: utf-8 -*-
import os
import logging
import logging.handlers
import logging.config
#import profile
import time
from time import strftime, localtime
from datetime import datetime
from Cheetah.Template import Template
from mabolab.core.base import Base
from flask.config import Config
settings = Config("" )
settings.from_pyfile( 'C:/MTP/mabotech/maboss1.2/maboss/configuration/central_config.py')
settings['APP_NAME'] = "mabozen"
base = Base( settings)
db = base.get_db("oracle")
from models.label.workstation_printer import WorkstationPrinter
def create():
workstation = 'workstation'
printer_name = 'printer_name'
ip_address = '192.168.100.106'
spool_address = 'spool_address'
template_path = 'template_path'
try:
wsp = WorkstationPrinter(workstation, printer_name, ip_address, spool_address, template_path, 'MT')
db.session.add(wsp)
db.session.commit()
status = wsp.id
except Exception, e:
db.session.rollback()
status = "error"
raise(Exception(e.message))
return status
def query(id):
wsp = db.session.query(WorkstationPrinter).filter_by(id=id).first()
wsp.ip_address='192.168.1.21'
wsp.lastupdateon = datetime.now()
db.session.add(wsp)
db.session.commit()
print dir(db.session)
return wsp
if __name__ == '__main__':
#s = create()
s = query(5)
print s
|
[
"aidear@163.com"
] |
aidear@163.com
|
9ce2baaeaa600af666164ded7fd55f0b40b30555
|
191863e86029b0e5866bf810ec7bf209fce88b6f
|
/sagemaker-lineage/visualizer.py
|
0412248090f8a4fc4a26fc394cf295e3d18a7fcf
|
[
"Apache-2.0"
] |
permissive
|
EmilyWebber/amazon-sagemaker-examples
|
237efa2211a5d26854ece7da2356d627d4f5f225
|
fb5a8ba5298bb7879ee9b8c10cb0842ac078b84b
|
refs/heads/main
| 2022-04-06T10:29:56.728806
| 2022-04-05T14:54:25
| 2022-04-05T14:54:25
| 331,713,808
| 3
| 3
|
Apache-2.0
| 2021-01-21T18:13:20
| 2021-01-21T18:13:19
| null |
UTF-8
|
Python
| false
| false
| 2,017
|
py
|
from pyvis.network import Network
import os
class Visualizer:
def __init__(self):
self.directory = "generated"
if not os.path.exists(self.directory):
os.makedirs(self.directory)
def render(self, query_lineage_response, scenario_name):
net = self.get_network()
for vertex in query_lineage_response["Vertices"]:
arn = vertex["Arn"]
if "Type" in vertex:
label = vertex["Type"]
else:
label = None
lineage_type = vertex["LineageType"]
name = self.get_name(arn)
title = self.get_title(arn, label, lineage_type)
net.add_node(vertex["Arn"], label=name, title=title, shape="ellipse")
for edge in query_lineage_response["Edges"]:
source = edge["SourceArn"]
dest = edge["DestinationArn"]
net.add_edge(dest, source)
return net.show(f"{self.directory}/{scenario_name}.html")
def get_title(self, arn, label, lineage_type):
return f"Arn: {arn}\nType: {label}\nLineage Type: {lineage_type}"
def get_name(self, arn):
name = arn.split("/")[1]
return name
def get_network(self):
net = Network(height="400px", width="800px", directed=True, notebook=True)
net.set_options(
"""
var options = {
"nodes": {
"borderWidth": 3,
"shadow": {
"enabled": true
},
"shapeProperties": {
"borderRadius": 3
},
"size": 11,
"shape": "circle"
},
"edges": {
"arrows": {
"to": {
"enabled": true
}
},
"color": {
"inherit": true
},
"smooth": false
},
"layout": {
"hierarchical": {
"enabled": true,
"direction": "LR",
"sortMethod": "directed"
}
},
"physics": {
"hierarchicalRepulsion": {
"centralGravity": 0
},
"minVelocity": 0.75,
"solver": "hierarchicalRepulsion"
}
}
"""
)
return net
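# Minimal usage sketch with a hand-built lineage response; the ARNs below are made up,
# and a real response would come from SageMaker's lineage query API.
if __name__ == "__main__":
    example_response = {
        "Vertices": [
            {"Arn": "arn:aws:sagemaker:us-east-1:123:artifact/dataset-1", "Type": "DataSet", "LineageType": "Artifact"},
            {"Arn": "arn:aws:sagemaker:us-east-1:123:artifact/model-1", "Type": "Model", "LineageType": "Artifact"},
        ],
        "Edges": [
            {
                "SourceArn": "arn:aws:sagemaker:us-east-1:123:artifact/dataset-1",
                "DestinationArn": "arn:aws:sagemaker:us-east-1:123:artifact/model-1",
            },
        ],
    }
    Visualizer().render(example_response, "example")  # writes generated/example.html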
|
[
"jeniya.tabassum@gmail.com"
] |
jeniya.tabassum@gmail.com
|
c00f4e57fb45e35989e16e53a973497b38a0758e
|
d264958440e5963b9794d60f1b4c6f68584d4b9d
|
/test_calculator.py
|
e1cc546fe9ae9c873ce49b40cc3d01da9cc74f23
|
[] |
no_license
|
rongzhen-chen/CalculatorLibrary
|
bc4780427dae2e579b99eb418587160ea568b12e
|
2ddaedbf41ce01bac50d7d4eb19f24bbf9e026ed
|
refs/heads/main
| 2023-01-22T22:00:46.467272
| 2020-11-23T10:45:52
| 2020-11-23T10:45:52
| 315,268,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
"""
Unit tests for the calculator library
"""
import calculator
class TestCalculator:
def test_addition(self):
assert 4 == calculator.add(2, 2)
def test_subtraction(self):
assert 2 == calculator.subtract(4, 2)
def test_multiplication(self):
assert 100 == calculator.multiply(10, 10)
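# A minimal calculator module that would make these tests pass (a sketch; the real
# library under test is not included in this file):
#
#   # calculator.py
#   def add(a, b):
#       return a + b
#
#   def subtract(a, b):
#       return a - b
#
#   def multiply(a, b):
#       return a * b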
|
[
"rongzhen.chen@ericsson.com"
] |
rongzhen.chen@ericsson.com
|
8950e2714021112a21c867cc6a8e8eef361a97e4
|
00cacf1a24b33e622c90ebd37d3f8e7ce291c7ba
|
/code/gen_fewrel_dataset_roberta_task_finetune_HEADandTAIL_updateRep_bottomANDtop_classifier_batch_AllLabel.py
|
f4e745f7c1d247f2935354111671dd5a0e391d76
|
[] |
no_license
|
riyadhctg/CSS-LM
|
a1e94001713e7014e9eeceea186e87ce18fa5f3e
|
fbe6b974847f54307ee921f897e8e6e7a650326e
|
refs/heads/main
| 2023-07-05T14:16:30.244804
| 2021-08-13T04:02:37
| 2021-08-13T04:02:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53,095
|
py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import json
import time
from torch.autograd import Variable
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import RobertaTokenizer, RobertaForMaskedLM, RobertaForSequenceClassification
#from transformers.modeling_roberta import RobertaForMaskedLMDomainTask
from transformers.modeling_roberta_updateRep import RobertaForMaskedLMDomainTask
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def return_Classifier(weight, bias, dim_in, dim_out):
classifier = torch.nn.Linear(dim_in, dim_out , bias=True)
###
classifier.weight.data = weight.to("cpu")
classifier.bias.data = bias.to("cpu")
'''
device="cuda"
classifier.weight.data = weight.to(device)
classifier.bias.data = bias.to(device)
'''
###
classifier.requires_grad=False
return classifier
def in_Domain_Task_Data_binary(data_dir_indomain, tokenizer, max_seq_length):
###Open
with open(data_dir_indomain+"train.json") as file:
data = json.load(file)
###Preprocess
num_label_list = list()
label_sentence_dict = dict()
for line in data:
#line["sentence"]
#line["aspect"]
#line["sentiment"]
num_label_list.append(line["label"])
try:
label_sentence_dict[line["label"]].append([line["sentence"]])
except:
label_sentence_dict[line["label"]] = [line["sentence"]]
num_label = sorted(list(set(num_label_list)))
label_map = {label : i for i , label in enumerate(num_label)}
    ### Create data: 1 chosen example along with the rest of the 7 class data
all_cur_tensors = list()
for line in data:
#line["sentence"]
#line["aspect"]
#line["sentiment"]
sentence = line["sentence"]
label = line["label"]
sentence_out = [(random.choice(label_sentence_dict[label_out])[0], label_out) for label_out in num_label if label_out!=label]
        all_sentence = [(sentence, label)] + sentence_out  # 1st sentence is the chosen one
        all_input_ids = list()
        all_input_ids_org = list()
all_input_mask = list()
all_segment_ids = list()
all_lm_labels_ids = list()
all_is_next = list()
all_tail_idxs = list()
all_sentence_labels = list()
for id, sentence_label in enumerate(all_sentence):
#tokens_a = tokenizer.tokenize(sentence_label[0])
tokens_a = tokenizer.tokenize(sentence_label[0])
'''
if "</s>" in tokens_a:
print("Have more than 1 </s>")
for i in range(len(tokens_a)):
if tokens_a[i] == "</s>":
tokens_a[i] = "s"
'''
# tokenize
cur_example = InputExample(guid=id, tokens_a=tokens_a, tokens_b=None, is_next=0)
# transform sample to features
cur_features = convert_example_to_features(cur_example, max_seq_length, tokenizer)
all_input_ids.append(torch.tensor(cur_features.input_ids))
all_input_ids_org.append(torch.tensor(cur_features.input_ids_org))
all_input_mask.append(torch.tensor(cur_features.input_mask))
all_segment_ids.append(torch.tensor(cur_features.segment_ids))
all_lm_labels_ids.append(torch.tensor(cur_features.lm_label_ids))
all_is_next.append(torch.tensor(0))
all_tail_idxs.append(torch.tensor(cur_features.tail_idxs))
all_sentence_labels.append(torch.tensor(label_map[sentence_label[1]]))
cur_tensors = (torch.stack(all_input_ids),
torch.stack(all_input_ids_org),
torch.stack(all_input_mask),
torch.stack(all_segment_ids),
torch.stack(all_lm_labels_ids),
torch.stack(all_is_next),
torch.stack(all_tail_idxs),
torch.stack(all_sentence_labels))
all_cur_tensors.append(cur_tensors)
return all_cur_tensors
def in_Domain_Task_Data_mutiple(data_dir_indomain, tokenizer, max_seq_length):
###Open
with open(data_dir_indomain+"train.json") as file:
data = json.load(file)
###Preprocess
num_label_list = list()
label_sentence_dict = dict()
num_sentiment_label_list = list()
sentiment_label_dict = dict()
for line in data:
#line["sentence"]
#line["aspect"]
#line["sentiment"]
num_sentiment_label_list.append(line["label"])
num_label_list.append(line["label"])
num_label = sorted(list(set(num_label_list)))
label_map = {label : i for i , label in enumerate(num_label)}
num_sentiment_label = sorted(list(set(num_sentiment_label_list)))
sentiment_label_map = {label : i for i , label in enumerate(num_sentiment_label)}
print("=======")
print("label_map:")
print(label_map)
print("=======")
print("=======")
print("sentiment_label_map:")
print(sentiment_label_map)
print("=======")
    ### Create data: 1 chosen example along with the rest of the 7 class data
cur_tensors_list = list()
#print(list(label_map.values()))
candidate_label_list = list(label_map.values())
candidate_sentiment_label_list = list(sentiment_label_map.values())
all_type_sentence = [0]*len(candidate_label_list)
all_type_sentiment_sentence = [0]*len(candidate_sentiment_label_list)
for line in data:
#line["sentence"]
#line["aspect"]
sentiment = line["label"]
sentence = line["tokens"]
label = line["label"]
tokens_a = tokenizer.tokenize(sentence)
#input_ids = tokenizer.encode(sentence, add_special_tokens=False)
# tokenize
cur_example = InputExample(guid=id, tokens_a=tokens_a, tokens_b=None, is_next=0)
# transform sample to features
cur_features = convert_example_to_features(cur_example, max_seq_length, tokenizer)
cur_tensors = (torch.tensor(cur_features.input_ids),
torch.tensor(cur_features.input_ids_org),
torch.tensor(cur_features.input_mask),
torch.tensor(cur_features.segment_ids),
torch.tensor(cur_features.lm_label_ids),
torch.tensor(0),
torch.tensor(cur_features.tail_idxs),
torch.tensor(label_map[label]),
torch.tensor(sentiment_label_map[sentiment]))
cur_tensors_list.append(cur_tensors)
###
if label_map[label] in candidate_label_list:
all_type_sentence[label_map[label]]=cur_tensors
candidate_label_list.remove(label_map[label])
if sentiment_label_map[sentiment] in candidate_sentiment_label_list:
all_type_sentiment_sentence[sentiment_label_map[sentiment]]=cur_tensors
candidate_sentiment_label_list.remove(sentiment_label_map[sentiment])
###
return all_type_sentiment_sentence, cur_tensors_list
def load_outdomain(data_dir_outdomain, tokenizer, max_seq_length):
###Open
doc_line = list()
with open(data_dir_outdomain+"train.txt") as file:
#data = json.load(file)
for i,line in enumerate(file):
doc_line.append(line)
###Preprocess
cur_tensors_list=list()
for i, line in enumerate(doc_line):
tokens_a = tokenizer.tokenize(line)
# tokenize
cur_example = InputExample(guid=id, tokens_a=tokens_a, tokens_b=None, is_next=0)
# transform sample to features
cur_features = convert_example_to_features(cur_example, max_seq_length, tokenizer)
cur_tensors = (torch.tensor(cur_features.input_ids),
torch.tensor(cur_features.input_ids_org),
torch.tensor(cur_features.input_mask),
torch.tensor(cur_features.segment_ids),
torch.tensor(cur_features.lm_label_ids),
torch.tensor(0),
torch.tensor(cur_features.tail_idxs),
torch.tensor(0),
torch.tensor(0))
cur_tensors_list.append(cur_tensors)
return cur_tensors_list
def in_Domain_Task_Data_binary(data_dir_indomain, tokenizer, max_seq_length):
###Open
with open(data_dir_indomain+"train.json") as file:
data = json.load(file)
###Preprocess
num_label_list = list()
label_sentence_dict = dict()
for line in data:
#line["sentence"]
#line["aspect"]
#line["sentiment"]
num_label_list.append(line["label"])
try:
label_sentence_dict[line["label"]].append([line["sentence"]])
except:
label_sentence_dict[line["label"]] = [line["sentence"]]
num_label = sorted(list(set(num_label_list)))
label_map = {label : i for i , label in enumerate(num_label)}
    ### Create data: 1 chosen example along with the rest of the 7 class data
all_cur_tensors = list()
for line in data:
#line["sentence"]
#line["aspect"]
#line["sentiment"]
sentence = line["sentence"]
label = line["aspect"]
sentence_out = [(random.choice(label_sentence_dict[label_out])[0], label_out) for label_out in num_label if label_out!=label]
        all_sentence = [(sentence, label)] + sentence_out  # 1st sentence is the chosen one
        all_input_ids = list()
        all_input_ids_org = list()
all_input_mask = list()
all_segment_ids = list()
all_lm_labels_ids = list()
all_is_next = list()
all_tail_idxs = list()
all_sentence_labels = list()
for id, sentence_label in enumerate(all_sentence):
#tokens_a = tokenizer.tokenize(sentence_label[0])
tokens_a = tokenizer.tokenize(sentence_label[0])
'''
if "</s>" in tokens_a:
print("Have more than 1 </s>")
for i in range(len(tokens_a)):
if tokens_a[i] == "</s>":
tokens_a[i] = "s"
'''
# tokenize
cur_example = InputExample(guid=id, tokens_a=tokens_a, tokens_b=None, is_next=0)
# transform sample to features
cur_features = convert_example_to_features(cur_example, max_seq_length, tokenizer)
all_input_ids.append(torch.tensor(cur_features.input_ids))
all_input_ids_org.append(torch.tensor(cur_features.input_ids_org))
all_input_mask.append(torch.tensor(cur_features.input_mask))
all_segment_ids.append(torch.tensor(cur_features.segment_ids))
all_lm_labels_ids.append(torch.tensor(cur_features.lm_label_ids))
all_is_next.append(torch.tensor(0))
all_tail_idxs.append(torch.tensor(cur_features.tail_idxs))
all_sentence_labels.append(torch.tensor(label_map[sentence_label[1]]))
cur_tensors = (torch.stack(all_input_ids),
torch.stack(all_input_ids_org),
torch.stack(all_input_mask),
torch.stack(all_segment_ids),
torch.stack(all_lm_labels_ids),
torch.stack(all_is_next),
torch.stack(all_tail_idxs),
torch.stack(all_sentence_labels))
all_cur_tensors.append(cur_tensors)
return all_cur_tensors
def AugmentationData_Domain(top_k, tokenizer, max_seq_length):
#top_k_shape = top_k.indices.shape
#ids = top_k.indices.reshape(top_k_shape[0]*top_k_shape[1]).tolist()
top_k_shape = top_k["indices"].shape
ids = top_k["indices"].reshape(top_k_shape[0]*top_k_shape[1]).tolist()
all_input_ids = list()
all_input_ids_org = list()
all_input_mask = list()
all_segment_ids = list()
all_lm_labels_ids = list()
all_is_next = list()
all_tail_idxs = list()
for id, i in enumerate(ids):
t1 = data[str(i)]['sentence']
#tokens_a = tokenizer.tokenize(t1)
tokens_a = tokenizer.tokenize(t1)
'''
if "</s>" in tokens_a:
print("Have more than 1 </s>")
#tokens_a[tokens_a.index("<s>")] = "s"
for i in range(len(tokens_a)):
if tokens_a[i] == "</s>":
tokens_a[i] = "s"
'''
# tokenize
cur_example = InputExample(guid=id, tokens_a=tokens_a, tokens_b=None, is_next=0)
# transform sample to features
cur_features = convert_example_to_features(cur_example, max_seq_length, tokenizer)
all_input_ids.append(torch.tensor(cur_features.input_ids))
all_input_ids_org.append(torch.tensor(cur_features.input_ids_org))
all_input_mask.append(torch.tensor(cur_features.input_mask))
all_segment_ids.append(torch.tensor(cur_features.segment_ids))
all_lm_labels_ids.append(torch.tensor(cur_features.lm_label_ids))
all_is_next.append(torch.tensor(0))
all_tail_idxs.append(torch.tensor(cur_features.tail_idxs))
cur_tensors = (torch.stack(all_input_ids),
torch.stack(all_input_ids_org),
torch.stack(all_input_mask),
torch.stack(all_segment_ids),
torch.stack(all_lm_labels_ids),
torch.stack(all_is_next),
torch.stack(all_tail_idxs))
return cur_tensors
def AugmentationData_Task(top_k, tokenizer, max_seq_length, add_org=None):
top_k_shape = top_k["indices"].shape
sentence_ids = top_k["indices"]
all_input_ids = list()
all_input_ids_org = list()
all_input_mask = list()
all_segment_ids = list()
all_lm_labels_ids = list()
all_is_next = list()
all_tail_idxs = list()
all_sentence_labels = list()
all_sentiment_labels = list()
add_org = tuple(t.to('cpu') for t in add_org)
#input_ids_, input_ids_org_, input_mask_, segment_ids_, lm_label_ids_, is_next_, tail_idxs_, sentence_label_ = add_org
input_ids_, input_ids_org_, input_mask_, segment_ids_, lm_label_ids_, is_next_, tail_idxs_, sentence_label_, sentiment_label_ = add_org
for id_1, sent in enumerate(sentence_ids):
for id_2, sent_id in enumerate(sent):
t1 = data[str(int(sent_id))]['sentence']
tokens_a = tokenizer.tokenize(t1)
# tokenize
cur_example = InputExample(guid=id, tokens_a=tokens_a, tokens_b=None, is_next=0)
# transform sample to features
cur_features = convert_example_to_features(cur_example, max_seq_length, tokenizer)
all_input_ids.append(torch.tensor(cur_features.input_ids))
all_input_ids_org.append(torch.tensor(cur_features.input_ids_org))
all_input_mask.append(torch.tensor(cur_features.input_mask))
all_segment_ids.append(torch.tensor(cur_features.segment_ids))
all_lm_labels_ids.append(torch.tensor(cur_features.lm_label_ids))
all_is_next.append(torch.tensor(0))
all_tail_idxs.append(torch.tensor(cur_features.tail_idxs))
all_sentence_labels.append(torch.tensor(sentence_label_[id_1]))
all_sentiment_labels.append(torch.tensor(sentiment_label_[id_1]))
all_input_ids.append(input_ids_[id_1])
all_input_ids_org.append(input_ids_org_[id_1])
all_input_mask.append(input_mask_[id_1])
all_segment_ids.append(segment_ids_[id_1])
all_lm_labels_ids.append(lm_label_ids_[id_1])
all_is_next.append(is_next_[id_1])
all_tail_idxs.append(tail_idxs_[id_1])
all_sentence_labels.append(sentence_label_[id_1])
all_sentiment_labels.append(sentiment_label_[id_1])
cur_tensors = (torch.stack(all_input_ids),
torch.stack(all_input_ids_org),
torch.stack(all_input_mask),
torch.stack(all_segment_ids),
torch.stack(all_lm_labels_ids),
torch.stack(all_is_next),
torch.stack(all_tail_idxs),
torch.stack(all_sentence_labels),
torch.stack(all_sentiment_labels)
)
return cur_tensors
def AugmentationData_Task_pos_and_neg(top_k=None, tokenizer=None, max_seq_length=None, add_org=None, in_task_rep=None):
input_ids_, input_ids_org_, input_mask_, segment_ids_, lm_label_ids_, is_next_, tail_idxs_, sentence_label_, sentiment_label_ = add_org
all_sentence_binary_label = list()
all_in_task_rep_comb = list()
for id_1, num in enumerate(sentence_label_):
sentence_label_int = (sentence_label_==num).to(torch.long)
in_task_rep_append = in_task_rep[id_1].unsqueeze(0).expand(in_task_rep.shape[0],-1)
in_task_rep_comb = torch.cat((in_task_rep_append,in_task_rep),-1)
all_sentence_binary_label.append(sentence_label_int)
all_in_task_rep_comb.append(in_task_rep_comb)
all_sentence_binary_label = torch.stack(all_sentence_binary_label)
all_in_task_rep_comb = torch.stack(all_in_task_rep_comb)
cur_tensors = (all_in_task_rep_comb, all_sentence_binary_label)
return cur_tensors
class Dataset_noNext(Dataset):
def __init__(self, corpus_path, tokenizer, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True):
self.vocab_size = tokenizer.vocab_size
self.tokenizer = tokenizer
self.seq_len = seq_len
self.on_memory = on_memory
self.corpus_lines = corpus_lines # number of non-empty lines in input corpus
self.corpus_path = corpus_path
self.encoding = encoding
self.current_doc = 0 # to avoid random sentence from same doc
# for loading samples directly from file
self.sample_counter = 0 # used to keep track of full epochs on file
self.line_buffer = None # keep second sentence of a pair in memory and use as first sentence in next pair
# for loading samples in memory
self.current_random_doc = 0
self.num_docs = 0
self.sample_to_doc = [] # map sample index to doc and line
# load samples into memory
if on_memory:
self.all_docs = []
doc = []
self.corpus_lines = 0
with open(corpus_path, "r", encoding=encoding) as f:
for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
line = line.strip()
if line == "":
self.all_docs.append(doc)
doc = []
#remove last added sample because there won't be a subsequent line anymore in the doc
self.sample_to_doc.pop()
else:
#store as one sample
sample = {"doc_id": len(self.all_docs),
"line": len(doc)}
self.sample_to_doc.append(sample)
doc.append(line)
self.corpus_lines = self.corpus_lines + 1
# if last row in file is not empty
if self.all_docs[-1] != doc:
self.all_docs.append(doc)
self.sample_to_doc.pop()
self.num_docs = len(self.all_docs)
# load samples later lazily from disk
else:
if self.corpus_lines is None:
with open(corpus_path, "r", encoding=encoding) as f:
self.corpus_lines = 0
for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
'''
if line.strip() == "":
self.num_docs += 1
else:
'''
if line.strip() == "":
continue
self.corpus_lines += 1
# if doc does not end with empty line
if line.strip() != "":
self.num_docs += 1
self.file = open(corpus_path, "r", encoding=encoding)
self.random_file = open(corpus_path, "r", encoding=encoding)
def __len__(self):
# last line of doc won't be used, because there's no "nextSentence". Additionally, we start counting at 0.
#print(self.corpus_lines)
#print(self.num_docs)
#return self.corpus_lines - self.num_docs - 1
return self.corpus_lines - self.num_docs
def __getitem__(self, item):
cur_id = self.sample_counter
self.sample_counter += 1
if not self.on_memory:
# after one epoch we start again from beginning of file
if cur_id != 0 and (cur_id % len(self) == 0):
self.file.close()
self.file = open(self.corpus_path, "r", encoding=self.encoding)
#t1, t2, is_next_label = self.random_sent(item)
t1, is_next_label = self.random_sent(item)
if is_next_label == None:
is_next_label = 0
#tokens_a = self.tokenizer.tokenize(t1)
tokens_a = self.tokenizer.tokenize(t1)
#tokens_b = self.tokenizer.tokenize(t2)
# tokenize
cur_example = InputExample(guid=cur_id, tokens_a=tokens_a, tokens_b=None, is_next=is_next_label)
# transform sample to features
cur_features = convert_example_to_features(cur_example, self.seq_len, self.tokenizer)
cur_tensors = (torch.tensor(cur_features.input_ids),
torch.tensor(cur_features.input_ids_org),
torch.tensor(cur_features.input_mask),
torch.tensor(cur_features.segment_ids),
torch.tensor(cur_features.lm_label_ids),
torch.tensor(0),
torch.tensor(cur_features.tail_idxs),
torch.tensor(0),
torch.tensor(0))
'''
cur_tensors = (torch.tensor(cur_features.input_ids),
torch.tensor(cur_features.input_ids_org),
torch.tensor(cur_features.input_mask),
torch.tensor(cur_features.segment_ids),
torch.tensor(cur_features.lm_label_ids),
torch.tensor(cur_features.is_next),
torch.tensor(cur_features.tail_idxs))
'''
return cur_tensors
def random_sent(self, index):
"""
        Get one sample (a single sentence) from the corpus. The original BERT recipe would pair it with a
        second sentence for the next-sentence task, but this no-next variant always returns None instead.
        :param index: int, index of sample.
        :return: (str, None), sentence 1 and a placeholder for the unused second sentence.
"""
t1, t2 = self.get_corpus_line(index)
return t1, None
def get_corpus_line(self, item):
"""
Get one sample from corpus consisting of a pair of two subsequent lines from the same doc.
:param item: int, index of sample.
:return: (str, str), two subsequent sentences from corpus
"""
t1 = ""
t2 = ""
assert item < self.corpus_lines
if self.on_memory:
sample = self.sample_to_doc[item]
t1 = self.all_docs[sample["doc_id"]][sample["line"]]
# used later to avoid random nextSentence from same doc
self.current_doc = sample["doc_id"]
return t1, t2
#return t1
else:
if self.line_buffer is None:
# read first non-empty line of file
while t1 == "" :
t1 = next(self.file).strip()
else:
# use t2 from previous iteration as new t1
t1 = self.line_buffer
# skip empty rows that are used for separating documents and keep track of current doc id
while t1 == "":
t1 = next(self.file).strip()
self.current_doc = self.current_doc+1
self.line_buffer = next(self.file).strip()
assert t1 != ""
return t1, t2
def get_random_line(self):
"""
Get random line from another document for nextSentence task.
:return: str, content of one line
"""
# Similar to original tf repo: This outer loop should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document we're processing.
for _ in range(10):
if self.on_memory:
rand_doc_idx = random.randint(0, len(self.all_docs)-1)
rand_doc = self.all_docs[rand_doc_idx]
line = rand_doc[random.randrange(len(rand_doc))]
else:
rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)
#pick random line
for _ in range(rand_index):
line = self.get_next_line()
#check if our picked random line is really from another doc like we want it to be
if self.current_random_doc != self.current_doc:
break
return line
def get_next_line(self):
""" Gets next line of random_file and starts over when reaching end of file"""
try:
line = next(self.random_file).strip()
#keep track of which document we are currently looking at to later avoid having the same doc as t1
if line == "":
self.current_random_doc = self.current_random_doc + 1
line = next(self.random_file).strip()
except StopIteration:
self.random_file.close()
self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
line = next(self.random_file).strip()
return line
class InputExample(object):
"""A single training/test example for the language model."""
def __init__(self, guid, tokens_a, tokens_b=None, is_next=None, lm_labels=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
tokens_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
tokens_b: (Optional) string. The untokenized text of the second sequence.
            Must be specified only for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.tokens_a = tokens_a
self.tokens_b = tokens_b
self.is_next = is_next # nextSentence
self.lm_labels = lm_labels # masked words for language model
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_ids_org, input_mask, segment_ids, is_next, lm_label_ids, tail_idxs):
self.input_ids = input_ids
self.input_ids_org = input_ids_org
self.input_mask = input_mask
self.segment_ids = segment_ids
self.is_next = is_next
self.lm_label_ids = lm_label_ids
self.tail_idxs = tail_idxs
def random_word(tokens, tokenizer):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
    :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
"""
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
prob /= 0.15
#candidate_id = random.randint(0,tokenizer.vocab_size)
#print(tokenizer.convert_ids_to_tokens(candidate_id))
# 80% randomly change token to mask token
if prob < 0.8:
#tokens[i] = "[MASK]"
tokens[i] = "<mask>"
# 10% randomly change token to random token
elif prob < 0.9:
#tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]
#tokens[i] = tokenizer.convert_ids_to_tokens(candidate_id)
candidate_id = random.randint(0,tokenizer.vocab_size)
w = tokenizer.convert_ids_to_tokens(candidate_id)
'''
if tokens[i] == None:
candidate_id = 100
w = tokenizer.convert_ids_to_tokens(candidate_id)
'''
tokens[i] = w
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
try:
#output_label.append(tokenizer.vocab[token])
w = tokenizer.convert_tokens_to_ids(token)
if w!= None:
output_label.append(w)
else:
print("Have no this tokens in ids")
exit()
except KeyError:
# For unknown words (should not occur with BPE vocab)
#output_label.append(tokenizer.vocab["<unk>"])
w = tokenizer.convert_tokens_to_ids("<unk>")
output_label.append(w)
logger.warning("Cannot find token '{}' in vocab. Using <unk> insetad".format(token))
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
return tokens, output_label
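# Per-token outcome probabilities implied by the branches above (a quick sanity check):
#   replaced with <mask>         : 0.15 * 0.8 = 12%
#   replaced with a random token : 0.15 * 0.1 = 1.5%
#   kept but still predicted     : 0.15 * 0.1 = 1.5%
#   left untouched (label -1)    : 85%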
def convert_example_to_features(example, max_seq_length, tokenizer):
#now tokens_a is input_ids
tokens_a = example.tokens_a
tokens_b = example.tokens_b
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
#_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 2)
#print(tokens_a)
tokens_a_org = tokens_a.copy()
tokens_a, t1_label = random_word(tokens_a, tokenizer)
lm_label_ids = ([-1] + t1_label + [-1])
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
tokens_org = []
segment_ids = []
tokens.append("<s>")
tokens_org.append("<s>")
segment_ids.append(0)
for i, token in enumerate(tokens_a):
if token!="</s>":
tokens.append(tokens_a[i])
tokens_org.append(tokens_a_org[i])
segment_ids.append(0)
else:
tokens.append("s")
tokens_org.append("s")
segment_ids.append(0)
tokens.append("</s>")
tokens_org.append("</s>")
segment_ids.append(0)
#tokens.append("[SEP]")
#segment_ids.append(1)
#input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = tokenizer.encode(tokens, add_special_tokens=False)
input_ids_org = tokenizer.encode(tokens_org, add_special_tokens=False)
tail_idxs = len(input_ids)+1
#print(input_ids)
input_ids = [w if w!=None else 0 for w in input_ids]
input_ids_org = [w if w!=None else 0 for w in input_ids_org]
#print(input_ids)
#exit()
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
pad_id = tokenizer.convert_tokens_to_ids("<pad>")
#<pad>:1
while len(input_ids) < max_seq_length:
input_ids.append(pad_id)
input_ids_org.append(pad_id)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
assert len(input_ids) == max_seq_length
assert len(input_ids_org) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(lm_label_ids) == max_seq_length
features = InputFeatures(input_ids=input_ids,
input_ids_org = input_ids_org,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
is_next=example.is_next,
tail_idxs=tail_idxs)
return features
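# Sketch of the resulting feature for a 3-token sentence with max_seq_length = 8
# (token texts are placeholders; the actual ids depend on the RoBERTa vocabulary):
#   tokens       : <s> tok1 tok2 tok3 </s> <pad> <pad> <pad>
#   input_mask   :  1    1    1    1    1    0     0     0
#   segment_ids  :  0    0    0    0    0    0     0     0
#   lm_label_ids : -1 everywhere except the positions that random_word() masked
#   tail_idxs    : length of the unpadded sequence plus one (6 here)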
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir_indomain",
default=None,
type=str,
required=True,
help="The input train corpus.(In Domain)")
parser.add_argument("--data_dir_outdomain",
default=None,
type=str,
required=True,
help="The input train corpus.(Out Domain)")
parser.add_argument("--pretrain_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
parser.add_argument("--augment_times",
default=None,
type=int,
required=True,
help="Default batch_size/augment_times to save model")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--on_memory",
action='store_true',
help="Whether to load train samples into memory or use disk")
parser.add_argument("--do_lower_case",
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type = float, default = 0,
help = "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
####
parser.add_argument("--num_labels_task",
default=None, type=int,
required=True,
help="num_labels_task")
parser.add_argument("--weight_decay",
default=0.0,
type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon",
default=1e-8,
type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm",
default=1.0,
type=float,
help="Max gradient norm.")
parser.add_argument('--fp16_opt_level',
type=str,
default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--task",
default=None,
type=int,
required=True,
help="Choose Task")
parser.add_argument("--K",
default=None,
type=int,
required=True,
help="K size")
####
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
#tokenizer = RobertaTokenizer.from_pretrained(args.pretrain_model, do_lower_case=args.do_lower_case)
tokenizer = RobertaTokenizer.from_pretrained(args.pretrain_model)
# Prepare model
model = RobertaForMaskedLMDomainTask.from_pretrained(args.pretrain_model, output_hidden_states=True, return_dict=True, num_labels=args.num_labels_task)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
###Generate Docs###
docs_head = list()
docs_tail = list()
if True:
print("Loading Train Dataset", args.data_dir_outdomain)
train_dataset = Dataset_noNext(args.data_dir_outdomain, tokenizer, seq_len=args.max_seq_length, corpus_lines=None, on_memory=args.on_memory)
#train_dataset = load_outdomain(args.data_dir_outdomain, tokenizer, args.max_seq_length)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Batch size = %d", args.train_batch_size)
#logger.info(" Num steps = %d", num_train_optimization_steps)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
train_sampler = DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
model.eval()
#result = 0
counter_domain = 0
counter_task = 0
id_doc = dict()
id_doc_tensor = list()
rest_head = list()
rest_tail = list()
rest_input = list()
for step, batch_ in enumerate(tqdm(train_dataloader, desc="Iteration")):
#print(step)
###Normal mode
batch_ = tuple(t.to(device) for t in batch_)
input_ids_, input_ids_org_, input_mask_, segment_ids_, lm_label_ids_, is_next_, tail_idxs_, sentence_label_, sentiment_label_ = batch_
#Generate query representation
with torch.no_grad():
in_domain_rep, in_task_rep = model(input_ids_org=input_ids_org_, tail_idxs=tail_idxs_, attention_mask=input_mask_, func="in_domain_task_rep")
###
in_domain_rep = in_domain_rep.to("cpu")
in_task_rep = in_domain_rep.to("cpu")
###
#####
#####
if step==0:
docs_tail.append(in_domain_rep)
docs_head.append(in_task_rep)
id_doc_tensor.append(input_ids_org_)
else:
if docs_tail[-1].shape[0] == in_domain_rep.shape[0]:
docs_tail.append(in_domain_rep)
docs_head.append(in_task_rep)
id_doc_tensor.append(input_ids_org_)
else:
rest_tail = in_domain_rep
rest_head = in_task_rep
rest_input = input_ids_org_
counter_domain += int(in_domain_rep.shape[0])
counter_task += int(in_task_rep.shape[0])
if counter_domain!=counter_task:
print("Error")
exit()
'''
docs_tail = torch.stack(docs_tail).unsqueeze(1)
docs_head = torch.stack(docs_head).unsqueeze(1)
'''
###
docs_tail = torch.stack(docs_tail)
docs_tail = docs_tail.reshape(docs_tail.shape[0]*docs_tail.shape[1],1,docs_tail.shape[-1])
try:
rest_tail = rest_tail.reshape(rest_tail.shape[0],1,rest_tail.shape[-1])
docs_tail = torch.cat([docs_tail,rest_tail],0)
except:
pass
docs_head = torch.stack(docs_head)
docs_head = docs_head.reshape(docs_head.shape[0]*docs_head.shape[1],1,docs_head.shape[-1])
try:
rest_head = rest_head.reshape(rest_head.shape[0],1,rest_head.shape[-1])
docs_head = torch.cat([docs_head,rest_head],0)
except:
pass
id_doc_tensor = torch.stack(id_doc_tensor)
id_doc_tensor = id_doc_tensor.reshape(id_doc_tensor.shape[0]*id_doc_tensor.shape[1],id_doc_tensor.shape[-1])
try:
rest_input = rest_input.reshape(rest_input.shape[0],rest_input.shape[-1])
id_doc_tensor = torch.cat([id_doc_tensor,rest_input],0)
except:
pass
###
print(docs_head.shape)
print(docs_tail.shape)
print(id_doc_tensor.shape)
#exit()
###
docs_tail = docs_tail.to("cpu")
docs_head = docs_head.to("cpu")
id_doc_tensor = id_doc_tensor.to("cpu")
'''
docs_tail = docs_tail.to(device)
docs_head = docs_head.to(device)
id_doc_tensor = id_doc_tensor.to(device)
'''
###
    ### Retrieve: calculate top_n and bottom_n ###
###
total_score = torch.zeros([args.train_batch_size,docs_head.shape[0]]).to("cpu")
'''
total_score = torch.zeros([args.train_batch_size,docs_head.shape[0]]).to(device)
'''
###
if True:
print("Loading Train Dataset", args.data_dir_indomain)
#train_dataset = load_outdomain(args.data_dir_indomain, tokenizer, args.max_seq_length)
all_type_sentence, train_dataset = in_Domain_Task_Data_mutiple(args.data_dir_indomain, tokenizer, args.max_seq_length)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Batch size = %d", args.train_batch_size)
#logger.info(" Num steps = %d", num_train_optimization_steps)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
train_sampler = DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
model.eval()
if int(args.K) > int(docs_head.shape[0]):
k = int(docs_head.shape[0])
else:
k = int(int(args.K)/2)
#########
#####load_classifier
#########
with torch.no_grad():
task_binary_classifier_weight, task_binary_classifier_bias = model(func="return_task_binary_classifier")
task_binary_classifier_weight = task_binary_classifier_weight[:int(task_binary_classifier_weight.shape[0]/n_gpu)][:]
task_binary_classifier_bias = task_binary_classifier_bias[:int(task_binary_classifier_bias.shape[0]/n_gpu)][:]
task_binary_classifier = return_Classifier(task_binary_classifier_weight, task_binary_classifier_bias, 768*2, 2)
domain_binary_classifier_weight, domain_binary_classifier_bias = model(func="return_domain_binary_classifier")
domain_binary_classifier_weight = domain_binary_classifier_weight[:int(domain_binary_classifier_weight.shape[0]/n_gpu)][:]
domain_binary_classifier_bias = domain_binary_classifier_bias[:int(domain_binary_classifier_bias.shape[0]/n_gpu)][:]
domain_binary_classifier = return_Classifier(domain_binary_classifier_weight, domain_binary_classifier_bias, 768, 2)
########
###load_alltype_sentence
########
for step, batch_ in enumerate(tqdm(train_dataloader, desc="Iteration")):
###Normal mode
batch_ = tuple(t.to(device) for t in batch_)
input_ids_, input_ids_org_, input_mask_, segment_ids_, lm_label_ids_, is_next_, tail_idxs_, sentence_label_, sentiment_label_ = batch_
with torch.no_grad():
#Generate query representation
in_domain_rep, in_task_rep = model(input_ids_org=input_ids_org_, tail_idxs=tail_idxs_, attention_mask=input_mask_, func="in_domain_task_rep")
##Load classifier weight
# Search id from Docs and ranking via (Domain/Task)
###
query_domain = in_domain_rep.float().to("cpu")
query_domain = query_domain.unsqueeze(1)
query_task = in_task_rep.float().to("cpu")
query_task = query_task.unsqueeze(1)
'''
query_domain = in_domain_rep.float().to(device)
query_domain = query_domain.unsqueeze(1)
query_task = in_task_rep.float().to(device)
query_task = query_task.unsqueeze(1)
'''
###
query_domain = query_domain.expand(-1, docs_tail.shape[0], -1)
query_task = query_task.expand(-1, docs_head.shape[0], -1)
#################
#################
LeakyReLU = torch.nn.LeakyReLU()
domain_binary_logit = LeakyReLU(domain_binary_classifier(docs_tail))
domain_binary_logit = domain_binary_logit[:,:,1] - domain_binary_logit[:,:,0]
domain_binary_logit = domain_binary_logit.squeeze(1).unsqueeze(0).expand(sentiment_label_.shape[0], -1)
task_binary_logit = LeakyReLU(task_binary_classifier(torch.cat([query_task, docs_head[:,0,:].unsqueeze(0).expand(sentiment_label_.shape[0], -1, -1)], dim=2)))
task_binary_logit = task_binary_logit[:,:,1] - task_binary_logit[:,:,0]
results = domain_binary_logit + task_binary_logit
if total_score.shape[0] == results.shape[0]:
total_score += results
else:
total_score = torch.cat([total_score,results],0)
print(total_score.shape)
########
########
#sum all batch tensor
total_score = total_score.sum(dim=0)
#print(total_score.shape)
#Ranking
bottom_k = torch.topk(total_score, k, dim=0, largest=False, sorted=False)
bottom_k = {"values":bottom_k.values, "indices":bottom_k.indices}
top_k = torch.topk(total_score, k, dim=0, largest=True, sorted=False)
top_k = {"values":top_k.values, "indices":top_k.indices}
#print(bottom_k["indices"].shape)
#print(top_k["indices"].shape)
choosed_docs = torch.cat([top_k["indices"],bottom_k["indices"]],0)
#print(choosed_docs.shape)
        # select 1. tensor and 2. text from docs_head, docs_tail
all_data_dict = dict()
head_hidd_list = list()
tail_hidd_list = list()
#print(len(id_doc))
for id, index in enumerate(choosed_docs):
#all_data_dict[id] = {"sentence":id_doc[int(index)]}
all_data_dict[id] = {"sentence":tokenizer.decode(id_doc_tensor[index]).replace("<pad>","").replace("<s>","").replace("</s>","")}
head_hidd_list.append(docs_head[index])
tail_hidd_list.append(docs_tail[index])
############################################
############################################
with open(args.output_dir+'train.json', 'w') as outfile:
json.dump(all_data_dict, outfile)
head_hidd_tensor = torch.stack(head_hidd_list)
tail_hidd_tensor = torch.stack(tail_hidd_list)
torch.save(head_hidd_tensor, args.output_dir+'train_head.pt')
torch.save(tail_hidd_tensor, args.output_dir+'train_tail.pt')
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
#total_length = len(tokens_a) + len(tokens_b)
total_length = len(tokens_a)
if total_length <= max_length:
break
else:
tokens_a.pop()
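# Illustrative sketch (added example; the _demo_* names are hypothetical and not part
# of the original training flow): with the modified condition above only tokens_a is
# trimmed from the right until it fits max_length, while tokens_b is left untouched.
_demo_a, _demo_b = ["tok1", "tok2", "tok3", "tok4"], ["tok5"]
_truncate_seq_pair(_demo_a, _demo_b, max_length=2)
assert _demo_a == ["tok1", "tok2"] and _demo_b == ["tok5"]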
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
if __name__ == "__main__":
main()
|
[
"ky200120000@gmail.com"
] |
ky200120000@gmail.com
|
3b6213786afb95e6f2aa8ff988d8ea7dc07ba8f8
|
31a766fcae3779b05796534c354286083502f74a
|
/python/onshape_client/models/bt_list_response_bt_friend_info.py
|
e0809604caadf4e9e7624dfd6fe9b747d952e44d
|
[] |
no_license
|
nychang/onshape-clients
|
5ea21e73a05948f5e232d4851eb8ae8a6b8c75c8
|
9c97baae57f80e3922726443584e4cc50b99623f
|
refs/heads/master
| 2020-05-06T20:35:28.212953
| 2019-04-05T20:38:19
| 2019-04-05T20:38:19
| 180,243,972
| 0
| 0
| null | 2019-04-08T22:43:59
| 2019-04-08T22:43:59
| null |
UTF-8
|
Python
| false
| false
| 5,031
|
py
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
OpenAPI spec version: 1.96
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class BTListResponseBTFriendInfo(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'href': 'str',
'next': 'str',
'items': 'list[BTFriendInfo]',
'previous': 'str'
}
attribute_map = {
'href': 'href',
'next': 'next',
'items': 'items',
'previous': 'previous'
}
def __init__(self, href=None, next=None, items=None, previous=None): # noqa: E501
"""BTListResponseBTFriendInfo - a model defined in OpenAPI""" # noqa: E501
self._href = None
self._next = None
self._items = None
self._previous = None
self.discriminator = None
if href is not None:
self.href = href
if next is not None:
self.next = next
if items is not None:
self.items = items
if previous is not None:
self.previous = previous
@property
def href(self):
"""Gets the href of this BTListResponseBTFriendInfo. # noqa: E501
:return: The href of this BTListResponseBTFriendInfo. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this BTListResponseBTFriendInfo.
:param href: The href of this BTListResponseBTFriendInfo. # noqa: E501
:type: str
"""
self._href = href
@property
def next(self):
"""Gets the next of this BTListResponseBTFriendInfo. # noqa: E501
:return: The next of this BTListResponseBTFriendInfo. # noqa: E501
:rtype: str
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this BTListResponseBTFriendInfo.
:param next: The next of this BTListResponseBTFriendInfo. # noqa: E501
:type: str
"""
self._next = next
@property
def items(self):
"""Gets the items of this BTListResponseBTFriendInfo. # noqa: E501
:return: The items of this BTListResponseBTFriendInfo. # noqa: E501
:rtype: list[BTFriendInfo]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this BTListResponseBTFriendInfo.
:param items: The items of this BTListResponseBTFriendInfo. # noqa: E501
:type: list[BTFriendInfo]
"""
self._items = items
@property
def previous(self):
"""Gets the previous of this BTListResponseBTFriendInfo. # noqa: E501
:return: The previous of this BTListResponseBTFriendInfo. # noqa: E501
:rtype: str
"""
return self._previous
@previous.setter
def previous(self, previous):
"""Sets the previous of this BTListResponseBTFriendInfo.
:param previous: The previous of this BTListResponseBTFriendInfo. # noqa: E501
:type: str
"""
self._previous = previous
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BTListResponseBTFriendInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"ethan.keller@gmail.com"
] |
ethan.keller@gmail.com
|
e80f2bcf6fc43eef822956011db151363d24a1e0
|
e749e94163a0e20c551875583baef4e02e72de5e
|
/StackOverflow/APIM-4/38243194-buggy/feeding.py
|
f3aea1f0d550f61c41001e815987fd3d72de076e
|
[] |
no_license
|
tritims/TensorFlow-Program-Bugs
|
3445200179f4b7f5cc4ac1c6f076468ec19e51bb
|
158ba0a23e0cb74e73dbab08571b05fc36848f2a
|
refs/heads/master
| 2022-07-08T16:33:38.511696
| 2020-05-20T14:20:47
| 2020-05-20T14:20:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,214
|
py
|
import tensorflow as tf
import numpy as np
import time
assert tf.__version__ == "1.8.0"
np.random.seed(20180130)
tf.set_random_seed(20180130)
batch_size = 32
max_steps = 1000
def inference(x):
x_ = tf.layers.flatten(x)
W = tf.Variable(tf.truncated_normal([33 * 33, 21 * 21]))
tf.summary.histogram("W", W)
B = tf.Variable(tf.truncated_normal([21 * 21]))
tf.summary.histogram("B", B)
logits = tf.matmul(x_, W) + B
tf.summary.histogram("logits", logits)
return logits
def train(losses, global_step):
return tf.train.AdamOptimizer().minimize(loss=losses, global_step=global_step)
def loss(logits, _labels):
labels = tf.layers.flatten(_labels)
losses = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name="losses")
tf.summary.histogram("losses", losses)
return tf.reduce_mean(losses)
with tf.Graph().as_default():
global_step = tf.Variable(0, trainable=False)
images = tf.placeholder(tf.float32, shape=[batch_size, 33, 33, 1])
labels = tf.placeholder(tf.float32, shape=[batch_size, 21, 21, 1])
logits = inference(images)
losses = loss(logits, labels)
train_op = train(losses, global_step)
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.summary.merge_all()
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
summary_writer = tf.summary.FileWriter("./train", sess.graph)
for step in range(max_steps):
start_time = time.time()
data_batch, label_batch = np.random.normal(0, 1, [batch_size, 33, 33, 1]), np.random.uniform(0, 1,
[batch_size, 21,
21,
1])
_, loss_value = sess.run([train_op, losses],
feed_dict={images: data_batch, labels: label_batch})
print("step %d: " % step, loss_value)
summary_str = sess.run(summary_op)
duration = time.time() - start_time
|
[
"zyhzyhzyh@pku.edu.cn"
] |
zyhzyhzyh@pku.edu.cn
|
1782d05bf163995315dd1e189c464c57d5604142
|
eb2668b93899637f04e4c93e01063d0c8175ccde
|
/Irises_classification/iris_Random_Forest_GridSearch.py
|
76b1663e6c8020409fbc240a39e89b3110bcb459
|
[] |
no_license
|
D-Katt/AI-Machine-Learning
|
aad1fe1c8f3f901cb7829919d1b69a106f0ddfab
|
1868c92366dccabf8c86c559eee640645b51bb51
|
refs/heads/master
| 2021-12-19T21:59:04.403188
| 2021-12-07T13:07:46
| 2021-12-07T13:07:46
| 235,104,866
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,227
|
py
|
# Example of classifying 'iris dataset' objects with a Random Forest model.
# The GridSearchCV tool is used to tune a hyperparameter - the number of trees.
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
data = load_iris()  # Load the dataset
X = data.data  # Extract the input data (measurements)
y = data.target  # Extract the target values (species names)
# Split the data into training and test sets:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Create the Random Forest model:
rf = RandomForestClassifier()
# Define the list of values for tuning the "number of trees" parameter:
param_grid = {'n_estimators': [20, 30, 40, 50, 60, 70]}
# Pass GridSearchCV the model to evaluate, the list of parameter values
# to pick the best one from, and the scoring criterion:
grid = GridSearchCV(rf, param_grid, scoring='accuracy')
# Fit on the training data:
grid.fit(X_train, y_train)
print('Maximum prediction accuracy on the test set:', grid.best_score_)
print('\nParameters of the best model:', grid.best_estimator_)
y_pred = grid.predict(X_test)  # Prediction on the test set
# 'grid' stores the parameters of the best model.
# List for decoding the numeric species labels:
iris_types = ['Iris-Setosa', 'Iris-Versicolour', 'Iris-Virginica']
print('\nSample predictions:')
for i in range(len(y_pred)):
    print(f'\tPredicted: {iris_types[y_pred[i]]}. Actual: {iris_types[y_test[i]]}')
|
[
"noreply@github.com"
] |
D-Katt.noreply@github.com
|
3fae8319caaf596170050255b0cbf87a2d51e369
|
2094596506d1a930fa736946fb8959b60273872c
|
/rango/migrations/0005_userprofile.py
|
fc6137ad733293947e459305fcb5c277c2506b19
|
[] |
no_license
|
KaijiaDong/rango
|
b93af24a1c8cc8ec8797b84de63c706ba0f54eb7
|
1ed77612fe10f883dc879319768563cfcf8d287d
|
refs/heads/master
| 2021-01-11T16:08:32.852551
| 2017-02-13T22:26:46
| 2017-02-13T22:26:46
| 80,016,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2017-02-12 22:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('rango', '0004_auto_20170126_1545'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('website', models.URLField(blank=True)),
('picture', models.ImageField(blank=True, upload_to='profile_images')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"Kevin@glaroam2-130-133.wireless.gla.ac.uk"
] |
Kevin@glaroam2-130-133.wireless.gla.ac.uk
|
71afaf605a6f9dd78f457fb4df7c3d357a80ad76
|
7cbdd8c4268ad6168ce4cd838c3fd96a269f2e5d
|
/pong_no_walls_CPSC386.py
|
d916b240cc33d6f46695f613fc7980b1b02e707a
|
[] |
no_license
|
jakobpatino/CPSC386_pong_no_walls
|
5b20a8c8e2b128c3255c7a554464b1bb57edb3cc
|
75d5c36eddfd90d9bbedd3d54867a2e6f61e1bc1
|
refs/heads/master
| 2022-02-21T16:11:06.134964
| 2019-09-17T19:46:44
| 2019-09-17T19:46:44
| 208,929,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,210
|
py
|
import pygame
import sys
import random
from pygame.locals import*
pygame.init()
mainClock = pygame.time.Clock()
# set up window
WINDOWWIDTH = 800
WINDOWHEIGHT = 550
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
pygame.display.set_caption('Pong No Walls')
# set colors and text font
WHITE = (225, 225, 225)
BLACK = (0, 0, 0)
GREEN = (0, 128, 0)
basicFont = pygame.font.SysFont(None, 60)
# add sound files
ball_bounce = pygame.mixer.Sound('audio/ball_bounce.wav')
round_win = pygame.mixer.Sound('audio/round_win.wav')
round_lose = pygame.mixer.Sound('audio/round_lose.wav')
match_win = pygame.mixer.Sound('audio/match_win.wav')
match_lose = pygame.mixer.Sound('audio/match_lose.wav')
win = pygame.mixer.Sound('audio/victory.wav')
loss = pygame.mixer.Sound('audio/loss.wav')
# add player paddles and their images
player_right = pygame.Rect(750, 200, 38, 120)
player_paddle_right = pygame.image.load('images/paddle_player.jpg')
player_paddle_right_fit = pygame.transform.scale(player_paddle_right, (38, 120))
player_top = pygame.Rect(600, 12, 120, 38)
player_paddle_top = pygame.image.load('images/paddle_player_top.jpg')
player_paddle_top_fit = pygame.transform.scale(player_paddle_top, (120, 38))
player_bottom = pygame.Rect(600, 500, 120, 38)
player_paddle_bottom = pygame.image.load('images/paddle_player_bottom.jpg')
player_paddle_bottom_fit = pygame.transform.scale(player_paddle_bottom, (120, 38))
# add ai paddles and their images
ai_left = pygame.Rect(12, 200, 38, 120)
ai_paddle_left = pygame.image.load('images/paddle_ai.jpg')
ai_paddle_left_fit = pygame.transform.scale(ai_paddle_left, (38, 120))
ai_top = pygame.Rect(80, 12, 120, 38)
ai_paddle_top = pygame.image.load('images/paddle_ai_top.jpg')
ai_paddle_top_fit = pygame.transform.scale(ai_paddle_top, (120, 38))
ai_bottom = pygame.Rect(80, 500, 120, 38)
ai_paddle_bottom = pygame.image.load('images/paddle_ai_bottom.jpg')
ai_paddle_bottom_fit = pygame.transform.scale(ai_paddle_bottom, (120, 38))
# set up player movement variables
moveLeft = False
moveRight = False
moveUp = False
moveDown = False
# set paddle speeds
PLAYERSPEED = 7
AISPEED = 3
# set up initial ball stats
ball_dir = ['up_right', 'up_left', 'down_right', 'down_left']
curr_dir = ball_dir[random.randrange(0, 4)]
curr_speed = random.randrange(4, 9)
curr_y = random.randrange(100, 450)
curr_x = 400
# function that changes the direction of ball movement when it bounces on a left or right surface
def bounce_side(direction):
if direction == 'up_right':
return 'up_left'
elif direction == 'up_left':
return 'up_right'
elif direction == 'down_right':
return 'down_left'
elif direction == 'down_left':
return 'down_right'
else:
return direction
# function that changes the direction of ball movement when it bounces on a bottom surface
def bounce_bottom(direction):
if direction == 'up_right':
return 'down_right'
elif direction == 'up_left':
return 'down_left'
else:
return direction
# function that changes the direction of ball movement when it bounces on a top surface
def bounce_top(direction):
if direction == 'down_right':
return 'up_right'
elif direction == 'down_left':
return 'up_left'
else:
return direction
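# Quick illustrative checks of the bounce helpers above (example directions only,
# not part of the game loop): a ball moving 'up_right' that hits the underside of a
# paddle keeps moving right but starts moving down, and so on.
assert bounce_bottom('up_right') == 'down_right'
assert bounce_top('down_left') == 'up_left'
assert bounce_side('up_left') == 'up_right'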
# set initial scores
round_score_ai = 0
round_score_player = 0
match_score_ai = 0
match_score_player = 0
victory = False
ai_win = False
player_win = False
# run game loop
while not victory:
# set up keyboard actions
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
# change the keyboard variables
if event.key == K_LEFT:
moveRight = False
moveLeft = True
if event.key == K_RIGHT:
moveLeft = False
moveRight = True
if event.key == K_UP:
moveDown = False
moveUp = True
if event.key == K_DOWN:
moveUp = False
moveDown = True
if event.type == KEYUP:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == K_LEFT:
moveLeft = False
if event.key == K_RIGHT:
moveRight = False
if event.key == K_UP:
moveUp = False
if event.key == K_DOWN:
moveDown = False
# paint background
windowSurface.fill(BLACK)
# set the text to read the current score
round_score_ai_text = basicFont.render(str(round_score_ai), True, WHITE)
round_score_player_text = basicFont.render(str(round_score_player), True, WHITE)
# place the score on the screen
if round_score_ai < 10:
windowSurface.blit(round_score_ai_text, (350, 50))
else:
windowSurface.blit(round_score_ai_text, (335, 50))
if round_score_player < 10:
windowSurface.blit(round_score_player_text, (430, 50))
else:
windowSurface.blit(round_score_player_text, (415, 50))
# paint match points on the screen; fill in circles when point scored
if match_score_ai < 1:
pygame.draw.circle(windowSurface, WHITE, (380, 100), 5, 1)
else:
pygame.draw.circle(windowSurface, WHITE, (380, 100), 5)
if match_score_ai < 2:
pygame.draw.circle(windowSurface, WHITE, (360, 100), 5, 1)
else:
pygame.draw.circle(windowSurface, WHITE, (360, 100), 5)
if match_score_ai < 3:
pygame.draw.circle(windowSurface, WHITE, (340, 100), 5, 1)
else:
pygame.draw.circle(windowSurface, WHITE, (340, 100), 5)
if match_score_player < 1:
pygame.draw.circle(windowSurface, WHITE, (420, 100), 5, 1)
else:
pygame.draw.circle(windowSurface, WHITE, (420, 100), 5)
if match_score_player < 2:
pygame.draw.circle(windowSurface, WHITE, (440, 100), 5, 1)
else:
pygame.draw.circle(windowSurface, WHITE, (440, 100), 5)
if match_score_player < 3:
pygame.draw.circle(windowSurface, WHITE, (460, 100), 5, 1)
else:
pygame.draw.circle(windowSurface, WHITE, (460, 100), 5)
# put the paddles on the screen
windowSurface.blit(player_paddle_right_fit, player_right)
windowSurface.blit(player_paddle_top_fit, player_top)
windowSurface.blit(player_paddle_bottom_fit, player_bottom)
windowSurface.blit(ai_paddle_left_fit, ai_left)
windowSurface.blit(ai_paddle_top_fit, ai_top)
windowSurface.blit(ai_paddle_bottom_fit, ai_bottom)
# paint the net on screen
for i in range(27):
pygame.draw.rect(windowSurface, WHITE, (397, (20 * i) + 10, 6, 10))
# gives a match point and plays win/lose sound; creates new ball stats
if round_score_ai > 11 and round_score_ai >= round_score_player + 2:
if match_score_ai == 0:
match_score_ai = 1
round_score_ai = 0
round_score_player = 0
match_lose.play()
elif match_score_ai == 1:
match_score_ai = 2
round_score_ai = 0
round_score_player = 0
match_lose.play()
elif match_score_ai == 2:
ai_win = True
victory = True
loss.play()
curr_dir = ball_dir[random.randrange(0, 4)]
curr_speed = random.randrange(4, 9)
curr_y = random.randrange(100, 450)
curr_x = 400
if round_score_player > 11 and round_score_player >= round_score_ai + 2:
if match_score_player == 0:
match_score_player = 1
round_score_ai = 0
round_score_player = 0
match_win.play()
elif match_score_player == 1:
match_score_player = 2
round_score_ai = 0
round_score_player = 0
match_win.play()
elif match_score_player == 2:
player_win = True
victory = True
win.play()
curr_dir = ball_dir[random.randrange(0, 4)]
curr_speed = random.randrange(4, 9)
curr_y = random.randrange(100, 450)
curr_x = 400
# gives round points and plays round win/loss sounds
if (curr_x > 400 and curr_y > 558) or curr_x > 808 or (curr_x > 400 and curr_y < -8):
round_score_ai += 1
curr_dir = ball_dir[random.randrange(0, 4)]
curr_speed = random.randrange(4, 9)
curr_y = random.randrange(100, 450)
curr_x = 400
if not (round_score_ai > 11 and round_score_ai >= round_score_player + 2):
round_lose.play()
if (curr_x <= 400 and curr_y > 558) or curr_x < -8 or (curr_x <= 400 and curr_y < -8):
round_score_player += 1
curr_dir = ball_dir[random.randrange(0, 4)]
curr_speed = random.randrange(4, 9)
curr_y = random.randrange(100, 450)
curr_x = 400
if not (round_score_player > 11 and round_score_player >= round_score_ai + 2):
round_win.play()
# moves ball based on current direction
if curr_dir == 'up_right':
curr_y -= curr_speed
curr_x += curr_speed
if curr_dir == 'up_left':
curr_y -= curr_speed
curr_x -= curr_speed
if curr_dir == 'down_right':
curr_y += curr_speed
curr_x += curr_speed
if curr_dir == 'down_left':
curr_y += curr_speed
curr_x -= curr_speed
# draws ball at current position
ball = pygame.draw.circle(windowSurface, WHITE, (curr_x, curr_y), 8)
# bounces ball if it collides with paddles and plays bounce sound
if curr_dir == 'up_right' or curr_dir == 'down_right':
if (ball.colliderect(player_right) and curr_x <= player_right.left) or \
(ball.colliderect(player_top) and curr_x <= player_top.left) or \
(ball.colliderect(player_bottom) and curr_x <= player_bottom.left) or \
(ball.colliderect(ai_top) and curr_x <= ai_top.left) or \
(ball.colliderect(ai_bottom) and curr_x <= ai_bottom.left):
curr_dir = bounce_side(curr_dir)
ball_bounce.play()
if curr_dir == 'up_left' or curr_dir == 'down_left':
if (ball.colliderect(ai_left) and curr_x >= ai_left.right) or \
(ball.colliderect(ai_top) and curr_x >= ai_top.right) or \
(ball.colliderect(ai_bottom) and curr_x >= ai_bottom.right) or \
(ball.colliderect(player_top) and curr_x >= player_top.right) or \
(ball.colliderect(player_bottom) and curr_x >= player_bottom.right):
curr_dir = bounce_side(curr_dir)
ball_bounce.play()
if curr_dir == 'up_left' or curr_dir == 'up_right':
if (ball.colliderect(player_top) and curr_y >= player_top.bottom) or \
(ball.colliderect(player_right) and curr_y >= player_right.bottom) or \
(ball.colliderect(ai_top) and curr_y >= ai_top.bottom) or \
(ball.colliderect(ai_left) and curr_y >= ai_left.bottom):
curr_dir = bounce_bottom(curr_dir)
ball_bounce.play()
if curr_dir == 'down_left' or curr_dir == 'down_right':
if (ball.colliderect(player_bottom) and curr_y <= player_bottom.top) or \
(ball.colliderect(player_right) and curr_y <= player_right.top) or \
(ball.colliderect(ai_bottom) and curr_y <= ai_bottom.top) or \
(ball.colliderect(ai_left) and curr_y <= ai_left.top):
curr_dir = bounce_top(curr_dir)
ball_bounce.play()
# sets up player movement
if moveUp and player_right.top > 13\
and not player_right.colliderect(player_top):
player_right.top -= PLAYERSPEED
if moveDown and player_right.bottom < 537\
and not player_right.colliderect(player_bottom):
player_right.bottom += PLAYERSPEED
if moveLeft and player_top.left > 400 and player_bottom.left > 400 \
and not player_top.colliderect(ai_top) and not player_bottom.colliderect(ai_bottom):
player_top.left -= PLAYERSPEED
player_bottom.left -= PLAYERSPEED
if moveRight and player_top.right < WINDOWWIDTH and player_bottom.right < (WINDOWWIDTH - 13)\
and not player_top.colliderect(player_right) and not player_bottom.colliderect(player_right):
player_top.right += PLAYERSPEED
player_bottom.right += PLAYERSPEED
# sets up ai movement that tracks ball
if curr_y > (ai_left.top + 60) and ai_left.bottom < (WINDOWHEIGHT - 13) \
and not ai_left.colliderect(ai_bottom):
ai_left.top += AISPEED
if curr_y < (ai_left.top + 60) and ai_left.top > 12 \
and not ai_left.colliderect(ai_top):
ai_left.top -= AISPEED
if curr_x < (ai_top.right - 60) and ai_top.left > 0 and ai_bottom.left > 12 \
and not ai_top.colliderect(ai_left) and not ai_bottom.colliderect(ai_left):
ai_top.left -= AISPEED
ai_bottom.left -= AISPEED
if curr_x > (ai_top.right - 60) and ai_top.right < 400 and ai_bottom.right < 400 \
and not ai_top.colliderect(player_top) and not ai_bottom.colliderect(player_bottom):
ai_top.right += AISPEED
ai_bottom.right += AISPEED
# draw window onto screen
pygame.display.update()
mainClock.tick(40)
# sets up play again
while victory:
# set background color
windowSurface.fill(BLACK)
# reset defaults
moveLeft = False
moveRight = False
moveUp = False
moveDown = False
player_right.top = 200
player_right.left = 750
player_top.top = 12
player_top.left = 600
player_bottom.top = 500
player_bottom.left = 600
ai_left.top = 200
    ai_left.left = 12
ai_top.top = 12
ai_top.left = 80
ai_bottom.top = 500
ai_bottom.left = 80
curr_dir = ball_dir[random.randrange(0, 4)]
curr_speed = random.randrange(4, 9)
curr_y = random.randrange(100, 450)
curr_x = 400
round_score_ai = 0
round_score_player = 0
match_score_ai = 0
match_score_player = 0
# display 'play again' prompt and winner
if ai_win:
play_again = basicFont.render('AI Wins! Play Again? (y/n)', True, WHITE)
windowSurface.blit(play_again, (140, 225))
if player_win:
play_again2 = basicFont.render('Player Wins! Play Again? (y/n)', True, WHITE)
windowSurface.blit(play_again2, (95, 225))
# sets up keyboard functions
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYUP:
if event.key == K_ESCAPE or event.key == K_n:
pygame.quit()
sys.exit()
if event.key == K_y:
ai_win = False
player_win = False
victory = False
# draw window onto screen
pygame.display.update()
mainClock.tick(40)
|
[
"jakobpatino@csu.fullerton.edu"
] |
jakobpatino@csu.fullerton.edu
|
907b8197e25d556bb14e30c9fdde457f9cc9d5a0
|
83abb7f62d8e071bd6c6ad59dad26ccb8912fafe
|
/final_project/machinetranslation/tests.py
|
9f7a62e4681335efb98143d48bcd62b8852b04b7
|
[
"Apache-2.0"
] |
permissive
|
Miczu80/xzceb-flask_eng_fr
|
9e7a393716d4ebc97994baa14892e5917afda829
|
f7052e9c3c4b3c30e79afd6649cf335640dff3a1
|
refs/heads/master
| 2023-08-18T07:38:19.466379
| 2021-10-13T13:33:41
| 2021-10-13T13:33:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
import unittest
from translator import english_to_french, french_to_english
class TestE2F(unittest.TestCase):
def test1(self):
self.assertEqual(english_to_french('Hello'),'Bonjour')
self.assertEqual(english_to_french('Foot'),'Pied')
#self.assertEqual(english_to_french(''),'')
#self.assertEqual(english_to_french(0),0)
class TestF2E(unittest.TestCase):
def test2(self):
self.assertEqual(french_to_english('Bonjour'),'Hello')
self.assertEqual(french_to_english('Pied'),'Foot')
#self.assertEqual(french_to_english(''),'')
#self.assertEqual(french_to_english(0),0)
unittest.main()
|
[
"zupa.cebulowa@gmail.com"
] |
zupa.cebulowa@gmail.com
|
840a00c6cac4c9bf3dbc754935420be3c726f029
|
7a88b6a2fde31076a7fb9d61cb0f5b1837478d3e
|
/core/views.py
|
b5aa0511106b7aa99bb84238cc636612fb622afa
|
[] |
no_license
|
0xmostafaa/portfolio
|
ebd3a65d3fe40ee60448b0ec5a2c490166ee6d57
|
30f09829a5df3e32955253fdd7b36b794d8db499
|
refs/heads/main
| 2023-03-20T16:49:43.380548
| 2021-03-17T16:04:13
| 2021-03-17T16:04:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from django.shortcuts import render
# Create your views here.
def home(request):
return render(request, 'core/base.html')
|
[
"67633637+mostafa218@users.noreply.github.com"
] |
67633637+mostafa218@users.noreply.github.com
|
1a4c466e2dff4100a4f385c1e970f92e9d7b95c4
|
72f4cd2c48b2007e6ceb4fde5781a9f641d4f624
|
/delete_oparation_list.py
|
1c8ec5b8b698e411e0ba3d904cfc6e1a946c77d3
|
[] |
no_license
|
haidarontor/python
|
8710391d03d5b449da21ab0b7c1976644994a5cc
|
2fe55a372b8712adf367278b69f45f79f8151b88
|
refs/heads/master
| 2021-08-08T21:41:07.772959
| 2017-11-11T09:56:44
| 2017-11-11T09:56:44
| 110,284,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
list_1=['physics','Chemistry','biology',1990,2000,2001]
name=['DAHAKA','COMILLA','RAJSHAHI']
name1=['barisal']
print list_1
del list_1[4]
print "after deleting value at index 1:"
print list_1
print name + name1
print name1*4
|
[
"haidarontor@gmail.com"
] |
haidarontor@gmail.com
|
874c5926d6d5481c64f223a05795ff06ed9e3ea5
|
1059bb7b11f424ebeaaf2ee3e8cddf99133b1989
|
/ssbn_neuron_model/test_ssbn.py
|
e0a41825d2a21c0778f2e029dd66fa1edaefd772
|
[] |
no_license
|
jyotikab/stn_gpe_ssbn
|
2357b11d70e972030df656b0ff0a13b46a3155e0
|
1324ce2538e8042f6770b2353fdff1db60df0f6a
|
refs/heads/master
| 2021-07-17T13:29:14.364571
| 2020-11-02T14:35:40
| 2020-11-02T14:35:40
| 223,620,440
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,181
|
py
|
#To test the SSBN neuron model
import numpy as np
import nest
import pylab as pl
labelsize = 14.
ticksize = 14.
tsim = 500000.
# Custom chosen colors
col_lis = [np.array([63.,25.,255.])/255., np.array([204.,99.,20.])/255.,np.array([178.,120.,76.])/255., np.array([200.,0.,0.])/255.,np.array([153.,88.,61.])/255., np.array([204.,187.,20.])/255.]
# Range of input for the FI curve
amp_arr = np.arange(5000.,10000.,1000.)
sup_fr_lis =[]
fig = pl.figure(1,(8,5))
ax1 = fig.add_subplot(111)
# Range of burst lengths (1 to 5)
for kk,jj in enumerate(np.arange(5.)):
fr_lis = []
seed = np.random.randint(0,9999999,1)
print seed
for ii in amp_arr:
nest.ResetKernel()
nest.SetKernelStatus({'resolution':0.1,'grng_seed':seed[0]}) # Setting a new random seed each time
# Poisson generator with rate as amp_arr
dc_gen = nest.Create("poisson_generator",params = {'rate':ii})
# Create ssbn neuron
aa = nest.Create("ssbn",params = {'spb':jj+1})
sd = nest.Create('spike_detector')
# Connect ssbn to spike detector
nest.Connect(aa,sd)
# Poisson generator to ssbn
nest.Connect(dc_gen,aa)
# Simulate
nest.Simulate(tsim)
# Read spikes
spikes = nest.GetStatus(sd,'events')[0]
num_spikes = len(spikes['senders'])
print num_spikes
# Calculate rate
f_rate = (num_spikes*1000)/tsim
fr_lis.append(f_rate)
# Plot FI
ax1.plot(amp_arr, fr_lis,lw = 5., alpha = 0.7, color = col_lis[kk], label = str(jj+1))
pl.legend(loc = 'best', prop = {'size':10.})
#pl.xlim(300,500)
ax1.set_ylim(0,40)
ax1.set_ylabel("Firing rate (Hz)",size = labelsize)
ax1.set_xticks(amp_arr[::2])
ax1.set_yticks(np.arange(0,40,10))
for tl in ax1.get_xticklabels():
tl.set_fontsize(ticksize)
ax1.xaxis.get_major_formatter().set_powerlimits((0,1))
ax1.set_xlabel("Poisson rate (Hz)",fontsize = labelsize)
ax1.text(6.2e4,40,'B',fontsize = labelsize + 3, style = 'normal')
for tl in ax1.get_yticklabels():
tl.set_fontsize(ticksize)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.tick_params(axis = 'both', which = 'both',top = 'off', right = 'off')
pl.savefig("FIs_psdb.png")
pl.show()
|
[
"jyotika.bahuguna@gmail.com"
] |
jyotika.bahuguna@gmail.com
|
89a605e60342463909ff57250928f34f0fa44704
|
9f7a1399e3577f402034eed4a88cf03af2a85445
|
/student/views/student.py
|
9cf3f48275acb6fd0a389e88174a87692f95eea4
|
[] |
no_license
|
mohamedellakany/profs-studs
|
192784263e6dcadfb32942fa6678fae9dec74e18
|
25259c7fdba4be90a37f24b325b24cc67c4daa8a
|
refs/heads/main
| 2023-06-17T21:57:03.992413
| 2021-07-21T03:38:27
| 2021-07-21T03:38:27
| 387,981,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
from django.shortcuts import redirect, render
from django.views.generic import TemplateView
from django.db.models import Count
from student.models import Quiz
class SignUpView(TemplateView):
template_name = 'registration/signup.html'
def home(request):
if request.user.is_authenticated:
if request.user.is_teacher:
return redirect('teachers:quiz_change_list')
elif request.user.is_student:
return redirect('students:quiz_list')
else:
return redirect('admin:index')
return render(request,'student/quiz_list.html',{
'quizzes':Quiz.objects.annotate(questions_count=Count('questions')) \
.filter(questions_count__gt=0)})
class AboutView(TemplateView):
template_name = 'student/about.html'
|
[
"mohamed.ellaqany@gmail.com"
] |
mohamed.ellaqany@gmail.com
|
945492db9a3d84385065953cc931392b9f7472df
|
8a27b48ebabb381f6e495cc7a1c35634136e10ad
|
/RC4.py
|
5acbb6ff18f9b9f1387dbff8931f1b4752834051
|
[] |
no_license
|
camilacarwalho/cryptography-data-securityClass
|
131db63b9cfd2d5e18c06b3662dbf33c9dc734eb
|
786555fc84979654a3bf4e99f42718e8a40f1820
|
refs/heads/master
| 2020-05-25T05:38:22.797552
| 2019-05-27T02:14:37
| 2019-05-27T02:14:37
| 187,652,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,364
|
py
|
import numpy as np  # Python library for array computations
def KSA(key):  # function that builds the list S with values from 0 to 255
tam_key = len(key)
S = list(range(256))
j = 0
for i in range(256):
j = (j + S[i] + key[
i % tam_key]) % 256
S[i], S[j] = S[j], S[i]
return S
def PRGA(S, n):  # function that generates the keystream
i = 0
j = 0
key = []
while n > 0:
n -= 1 #
i = (i + 1) % 256
j = (j + S[i]) % 256
S[i], S[j] = S[j], S[i]
K = S[(S[i] + S[j]) % 256]
key.append(K)
return key
def criandoArray(s):  # function that turns s into an array
return [ord(c) for c in s]
def deArrayParaString(array):  # function that returns the array as a string
text = ""
for c in range(len(array)):
text += array[c]
return text
def cifrar(message, keystream):  # encryption function
message = np.array([ord(c) for c in message])
cipher = keystream ^ message
print("\nResult: ", cipher)
return cipher
def iniciarKeystream(key, message):  # function that uses the keystream generated by PRGA and converts it into an array
S = KSA(key)
keystream = np.array(PRGA(S, len(message)))
print("\nKEYSTREAM: ", keystream)
return keystream
def decrypt(cipher, keystream):  # function that takes the generated keystream and decrypts back to the original text
decrypterUni = keystream ^ cipher
decrypter = [chr(c) for c in decrypterUni]
return deArrayParaString(decrypter)
def rc4(key, plaintext):  # function that acts as the controller for the rc4 algorithm
keyArray = criandoArray(key)
keystream = iniciarKeystream(keyArray, plaintext)
cipher = cifrar(plaintext, keystream)
cipherHex = (cipher.astype(np.uint8).data.hex())
print("\nHEX: ", cipherHex)
decifrar = ""
while decifrar != key:
option = input("\nDeseja descriptografar? [s/n]")
if option == 's':
decifrar = input("\nChave:")
print("\nResultado:", decrypt(cipher, keystream))
exit()
elif option == 'n':
print('------------FIM------------')
exit()
else: print("Inválido!")
def main():
while True:
print('------------RC4------------')
key = input("Chave:")
plaintext = input("Texto:")
rc4(key, plaintext)
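# Illustrative round-trip sketch (added example; the _demo_* names and the "secret"/"hello"
# values are hypothetical): XORing with the same RC4 keystream twice recovers the text.
_demo_stream = np.array(PRGA(KSA(criandoArray("secret")), len("hello")))
_demo_cipher = _demo_stream ^ np.array([ord(c) for c in "hello"])
assert decrypt(_demo_cipher, _demo_stream) == "hello"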
main()
|
[
"camilacarwalho3@gmail.com"
] |
camilacarwalho3@gmail.com
|
baffe740816552364ec2133b592c44a87646c925
|
bd45db89d88c6e78f9c5a1c36b3b0a05da437adf
|
/testrun.py
|
629d30b6bed0ffc1cb7b925b6732ee808d0632f8
|
[] |
no_license
|
williamqin123/mandelbrot
|
90d8991b7c8d0bce2c3524352da204dde6d10b69
|
4a46b09b1fbee9d9b61f80adf5c0053fd80430ce
|
refs/heads/master
| 2022-12-21T21:00:24.839964
| 2020-09-10T23:41:30
| 2020-09-10T23:41:30
| 294,546,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
import timeit, random
def factored():
x = random.random()
y = random.random()
return (pow(x, 1 / 3) + (x - 1)**3 + 1) / 2
def expanded():
x = random.random()
y = random.random()
return (pow(x, 1 / 3) + x**3 - 3 * x**2 + 3 * x) / 2
print(timeit.timeit(factored, number=1000000))
print(timeit.timeit(expanded, number=1000000))
|
[
"williamqin@Williams-MacBook-Pro.local"
] |
williamqin@Williams-MacBook-Pro.local
|
576bf9ca98955f15a9691b7a2aa05f7ea2cdbe8a
|
b212decbc0c5850cc462bf0cd6695c33863de521
|
/mainApp/models.py
|
aa3e1d9fc6ddb6b231e03ece0c24500921216fb7
|
[] |
no_license
|
YusakuNo1/RumorBoard-Django
|
17bd7337db0a83d71ec05fcc149c9ce4fdb7e7df
|
0cdbd2a4ef1b3defa511d9bc8e7d1288a4a112bb
|
refs/heads/master
| 2021-01-25T12:02:19.064347
| 2014-03-16T14:00:01
| 2014-03-16T14:00:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,477
|
py
|
from django.db import models
from django.contrib.auth.models import User, AbstractBaseUser, PermissionsMixin, BaseUserManager
from django_boto.s3.storage import S3Storage
import config
import rumorboard.settings as settings
s3 = S3Storage(bucket_name='babylyricsus')
class UserProfileManager(BaseUserManager):
def create_user(self, email, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email = UserProfileManager.normalize_email(email),
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email,
password=password,
)
user.is_admin = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin, models.Model):
email = models.EmailField(
verbose_name='email address',
max_length=config.EMAIL_FIELD_LENGTH,
unique=True,
db_index=True,
)
first_name = models.CharField(max_length=config.NAME_FIELD_LENGTH, blank=True)
last_name = models.CharField(max_length=config.NAME_FIELD_LENGTH, blank=True)
profileImage = models.ImageField(storage=s3, upload_to = settings.AWS_S3_FOLDER + '/profile/', default=None, null=True, blank=True)
description = models.CharField(max_length=config.USER_DESCRIPTION_LENGTH, default='', blank=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def get_full_name(self):
# The user is identified by their email address
return self.email
def get_short_name(self):
# The user is identified by their email address
return self.email
def __unicode__(self):
return self.email
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
@property
def is_staff(self):
"Is the user a member of staff?"
# Simplest possible answer: All admins are staff
return self.is_admin
class Rumor(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
title = models.CharField(max_length=config.TITLE_LENGTH, null=True, blank=True)
content = models.CharField(max_length=config.CONTENT_LENGTH)
contentImage = models.ImageField(storage=s3, upload_to = settings.AWS_S3_FOLDER + '/rumor/', default=None, null=True, blank=True)
anonymous = models.BooleanField(default=True)
thumbsUpUser = models.ManyToManyField(UserProfile, related_name='thumbsUpUser', blank=True)
thumbsDownUser = models.ManyToManyField(UserProfile, related_name='thumbsDownUser', blank=True)
def __unicode__(self):
return self.title if (self.title is not None and len(self.title) > 0) else self.content
class RumorComment(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
rumor = models.ForeignKey(Rumor)
content = models.CharField(max_length=config.CONTENT_LENGTH, null=True, blank=True)
#rating = models.IntegerField(default=config.RumorCommentRating.NoRating)
def __unicode__(self):
return self.content
#class RumorPoll(models.Model):
# rumor = models.OneToOneField(Rumor)
#
# def __unicode__(self):
# return self.rumor.title if (self.rumor.title is not None and len(self.rumor.title) > 0) else self.rumor.content
class RumorPollColumn(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
rumor = models.ForeignKey(Rumor)
name = models.CharField(max_length=config.TITLE_LENGTH)
columnIndex = models.IntegerField()
rumorPollUser = models.ManyToManyField(UserProfile, related_name='rumorPollUser', blank=True)
def __unicode__(self):
return self.rumor.title + ': ' + self.name
|
[
"YusakuNo1@hotmail.com"
] |
YusakuNo1@hotmail.com
|
5e997385319a3c9eb565450d2b53be851a281dbf
|
570d3e7d3fd90e7173bc82ae6daf59a144d8c5e0
|
/Exe10.py
|
bbffffcf637d3e4273b922df37f03bb4755e1e5a
|
[] |
no_license
|
mateusascacibas/Python-Collections
|
97a61cae737ec8e37603e879c09ce41db4b85aa8
|
d88cc66f7d16bb9f32d3578f76ef14fca4c8bf05
|
refs/heads/main
| 2023-03-02T05:37:59.179868
| 2021-02-10T17:05:36
| 2021-02-10T17:05:36
| 337,793,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
vet = list(range(15))
cont = 0
while cont < 15:
vet[cont] = float(input("Digite um valor: "))
cont = 1 + cont
media = (sum(vet)) / 15
print(media)
|
[
"noreply@github.com"
] |
mateusascacibas.noreply@github.com
|
f54902e671cb0a84b84727de899d0b0cac79c611
|
1916d9c93e9c656a2d7f085001c8de46a7fc6f7b
|
/.svn/pristine/47/47b27eb4c4965e0c8076ec4b1ebcc2f47c3a76ae.svn-base
|
5d62d53cc0741e620ca2cbcae2e15d78bca06498
|
[
"Apache-2.0"
] |
permissive
|
starryhwj/MobilePhone
|
ad09de82fd5037c9a9c01bfa054805b4e2e41e84
|
211c1367e193c4e2b4833afae56367fdf9ac1a53
|
refs/heads/master
| 2022-11-30T03:21:22.288819
| 2020-01-10T10:23:01
| 2020-01-10T10:23:01
| 233,017,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
# Generated by Django 2.1.14 on 2019-11-16 12:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Web', '0027_auto_20191116_1107'),
]
operations = [
migrations.CreateModel(
name='MaintenanceNumberMissionKeyword',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Name', models.TextField(max_length=150, verbose_name='Video category name')),
                ('Owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Owner')),
],
),
migrations.RemoveField(
model_name='maintenancenumbermission',
name='VideoClassifications',
),
migrations.AddField(
model_name='maintenancenumbermission',
name='KeywordClassifications',
field=models.ManyToManyField(to='Web.MaintenanceNumberMissionKeyword'),
),
]
|
[
"241964143@qq.com"
] |
241964143@qq.com
|
|
6408af203bb56ea22931d42dab44069ede4d8c10
|
e88410fa2ab8415598eda8779895f5568bd0ea84
|
/function2.py
|
d662cae12e1a3f4e855f569d5a8181f3865a28c0
|
[] |
no_license
|
songjingmin/No2.test
|
2369e6718a33d422faf7ff75782869652e049bda
|
5b957aeaa765ed16b116f0a3c7e9521def12ce09
|
refs/heads/master
| 2020-03-18T15:56:49.965938
| 2018-06-04T17:28:57
| 2018-06-04T17:28:57
| 134,938,874
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
def printMax(a, b):
    if a > b:
        print(a, 'is maximum')
    else:
        print(b, 'is maximum')
printMax(3, 4)
x = 5
y = 7
printMax(x, y)
def func(x):
    print('x is', x)
    x = 2
    print('Changed local x to', x)
x = 50
func(x)
print('x is still', x)
# using the global statement
def func():
    global x
    print('x is', x)
    x = 2
    print('Changed local x to', x)
x = 50
func()
print('Value of x is', x)
|
[
"475949283@qq.com"
] |
475949283@qq.com
|
bbcae70c304dfb85edd4bb595009b81eca55269c
|
1580b76258d67c3343f218e4c1c2bcc8dfa1954f
|
/application/register_form.py
|
339b96e0e1837fcd76e3255283ace176ab419564
|
[] |
no_license
|
MeresaG/Learning-Management-System
|
6d41dfe8a489af7ad0e6c10366a22927af982622
|
d228afac3b4082a77366b20bfd77937a7c30d409
|
refs/heads/master
| 2021-05-23T14:55:46.223365
| 2020-04-05T22:00:38
| 2020-04-05T22:00:38
| 253,348,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, HiddenField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from application.user_model import User
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField(
'Repeat Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Please use a different email address.')
|
[
"mera@wenam.fritz.box"
] |
mera@wenam.fritz.box
|
805cf0dffaf8c4f48b31a1c03c15ac893f37b03a
|
97f026d0508b9f3e9cf82e4e4f5bb4c94222517e
|
/saleor/graphql/channel/tests/test_channel_availability.py
|
dd7d3ec4c686ceb51e28dd6de468d26bd85dea36
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] |
permissive
|
ibutiti/saleor
|
caa8e1ae61ce053dd166ebc11b43ec32e0115518
|
fffe9a54c01aa07131102474dcb1519e0b59da74
|
refs/heads/master
| 2022-01-06T14:37:14.912046
| 2021-05-18T10:24:01
| 2021-05-18T10:24:01
| 111,393,664
| 0
| 0
|
BSD-3-Clause
| 2019-07-05T20:24:06
| 2017-11-20T10:09:41
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 3,459
|
py
|
import graphene
from ....channel.error_codes import ChannelErrorCode
from ...tests.utils import get_graphql_content
CHANNEL_ACTIVATE_MUTATION = """
mutation ActivateChannel($id: ID!) {
channelActivate(id: $id){
channel {
id
name
isActive
}
errors{
field
code
message
}
}
}
"""
def test_channel_activate_mutation(
permission_manage_channels, staff_api_client, channel_USD
):
# given
channel_USD.is_active = False
channel_USD.save()
channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
variables = {"id": channel_id}
# when
response = staff_api_client.post_graphql(
CHANNEL_ACTIVATE_MUTATION,
variables=variables,
permissions=(permission_manage_channels,),
)
content = get_graphql_content(response)
# then
data = content["data"]["channelActivate"]
assert not data["errors"]
assert data["channel"]["name"] == channel_USD.name
assert data["channel"]["isActive"] is True
def test_channel_activate_mutation_on_activated_channel(
permission_manage_channels, staff_api_client, channel_USD
):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
variables = {"id": channel_id}
# when
response = staff_api_client.post_graphql(
CHANNEL_ACTIVATE_MUTATION,
variables=variables,
permissions=(permission_manage_channels,),
)
content = get_graphql_content(response)
# then
data = content["data"]["channelActivate"]
assert data["errors"][0]["field"] == "id"
assert data["errors"][0]["code"] == ChannelErrorCode.INVALID.name
CHANNEL_DEACTIVATE_MUTATION = """
mutation DeactivateChannel($id: ID!) {
channelDeactivate(id: $id){
channel {
id
name
isActive
}
errors{
field
code
message
}
}
}
"""
def test_channel_deactivate_mutation(
permission_manage_channels, staff_api_client, channel_USD
):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
variables = {"id": channel_id}
# when
response = staff_api_client.post_graphql(
CHANNEL_DEACTIVATE_MUTATION,
variables=variables,
permissions=(permission_manage_channels,),
)
content = get_graphql_content(response)
# then
data = content["data"]["channelDeactivate"]
assert not data["errors"]
assert data["channel"]["name"] == channel_USD.name
assert data["channel"]["isActive"] is False
def test_channel_deactivate_mutation_on_deactivated_channel(
permission_manage_channels, staff_api_client, channel_USD
):
# given
channel_USD.is_active = False
channel_USD.save()
channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
variables = {"id": channel_id}
# when
response = staff_api_client.post_graphql(
CHANNEL_DEACTIVATE_MUTATION,
variables=variables,
permissions=(permission_manage_channels,),
)
content = get_graphql_content(response)
# then
data = content["data"]["channelDeactivate"]
assert data["errors"][0]["field"] == "id"
assert data["errors"][0]["code"] == ChannelErrorCode.INVALID.name
|
[
"noreply@github.com"
] |
ibutiti.noreply@github.com
|
78d1fa5c5462ddf22271eca10fa662e2300d17e3
|
8c9a69e4fcf4b4e3706ae74fa018f2c91c763328
|
/web scrapping projects/flipkart.py
|
7af13de29cd42bfadcb0634921194a60c3336a57
|
[] |
no_license
|
scottwedge/python-programs
|
cf77c65b3e7f326d9a8f56d07873006355e45556
|
105369d554e449b3dbad0a720590175f78137f13
|
refs/heads/master
| 2022-04-26T03:57:34.770147
| 2020-04-22T17:55:02
| 2020-04-22T17:55:02
| 257,976,440
| 0
| 0
| null | 2020-04-22T17:45:57
| 2020-04-22T17:45:57
| null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
import bs4
import requests
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
page_url="https://www.flipkart.com/google-pixel-3a-just-black-64-gb/product-reviews/itmfgk4jfgstaack?pid=MOBFFGFP7UHHJUZU&page="
analyzer=SentimentIntensityAnalyzer()
pros=0
cons=0
for i in range(1,2):
page= requests.get(page_url+str(i))
# print(page_url+str(i))
soup = bs4.BeautifulSoup(page.content,'lxml')
scor = soup.select(".bhgxx2 ._390CkK .qwjRop div div")
for review in scor:
# print(review.getText())
senti=analyzer.polarity_scores(review.getText())
if(senti["neg"]>senti["pos"]):
cons+=1
elif(senti["neg"]<senti["pos"]):
pros+=1
print("total positive comments are {} and negative comments are{}".format(pros,cons))
|
[
"davesarath@gmail.com"
] |
davesarath@gmail.com
|
5dcdcc372d61a6b94c3766d3bfd238c0d5d1be3f
|
7df24fb0c87c85afc3e524cd7330f12cc25c1e49
|
/Python HackerRank Problem Solution/Math/TriangleQuest2.py
|
e70b1704e377590cc993c0c65130222ff59f5f5c
|
[] |
no_license
|
Jay28497/Problem-solution-for-Python
|
94d3a87a2643b4fce8ce62fe44a8f3fed920ca55
|
71930c0f78e0f9b2b5bf4b00c54505167703f11a
|
refs/heads/main
| 2023-02-05T04:17:09.188785
| 2020-12-22T09:17:35
| 2020-12-22T09:17:35
| 308,985,811
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68
|
py
|
for i in range(1, int(input()) + 1):
print((10 ** i // 9) ** 2)
|
[
"jaykanjariya28@gmail.com"
] |
jaykanjariya28@gmail.com
|
b954dadb11467f6ea3ca8136aa3763c885548741
|
921def078f93fe46b880b44a3fb3fcf4890e5d92
|
/odin/libs/regions_of_interest/shapes_manager.py
|
2f0d186e848a581a23fca58fb31265f55f523a98
|
[
"MIT"
] |
permissive
|
lucalianas/odin
|
6979aab06658c75bd0b98d8148da3e8695838bd2
|
bdd598801b8d3dec94e621d914c812e6c466221e
|
refs/heads/master
| 2021-06-03T13:11:03.513907
| 2020-09-17T09:13:50
| 2020-09-17T09:13:50
| 98,536,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,712
|
py
|
try:
import simplejson as json
except ImportError:
import json
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from errors import InvalidPolygonError
from random import randint
from requests import codes as rc
from shapely.geometry import Polygon, Point, MultiPolygon
from shapely.affinity import scale
import numpy as np
import cv2
class Shape(object):
def __init__(self, segments):
self.polygon = Polygon(segments)
def get_bounds(self):
bounds = self.polygon.bounds
try:
return {
'x_min': bounds[0],
'y_min': bounds[1],
'x_max': bounds[2],
'y_max': bounds[3]
}
except IndexError:
raise InvalidPolygonError()
def get_coordinates(self, scale_level=0):
if scale_level != 0:
polygon = self._rescale_polygon(scale_level)
else:
polygon = self.polygon
return list(polygon.exterior.coords)
def get_area(self, scale_level=0):
if scale_level != 0:
polygon = self._rescale_polygon(scale_level)
else:
polygon = self.polygon
return polygon.area
def get_length(self, scale_level=0):
if scale_level != 0:
polygon = self._rescale_polygon(scale_level)
else:
polygon = self.polygon
polygon_path = np.array(polygon.exterior.coords[:])
_, radius = cv2.minEnclosingCircle(polygon_path.astype(int))
return radius*2
def get_bounding_box(self, x_min=None, y_min=None, x_max=None, y_max=None):
p1, p2, p3, p4 = self.get_bounding_box_points(x_min, y_min, x_max, y_max)
return self._box_to_polygon({
'up_left': p1,
'up_right': p2,
'down_right': p3,
'down_left': p4
})
def get_bounding_box_points(self, x_min=None, y_min=None, x_max=None, y_max=None):
bounds = self.get_bounds()
xm = x_min if not x_min is None else bounds['x_min']
xM = x_max if not x_max is None else bounds['x_max']
ym = y_min if not y_min is None else bounds['y_min']
yM = y_max if not y_max is None else bounds['y_max']
return [(xm, ym), (xM, ym), (xM, yM), (xm, yM)]
def get_random_point(self):
bounds = self.get_bounds()
point = Point(
randint(int(bounds['x_min']), int(bounds['x_max'])),
randint(int(bounds['y_min']), int(bounds['y_max']))
)
while not self.polygon.contains(point):
point = Point(
randint(int(bounds['x_min']), int(bounds['x_max'])),
randint(int(bounds['y_min']), int(bounds['y_max']))
)
return point
def get_random_points(self, points_count):
points = [self.get_random_point() for _ in xrange(points_count)]
return points
def _box_to_polygon(self, box):
return Polygon([box['down_left'], box['down_right'], box['up_right'], box['up_left']])
def _rescale_polygon(self, scale_level):
scaling = pow(2, scale_level)
return scale(self.polygon, xfact=scaling, yfact=scaling, origin=(0, 0))
def get_intersection_mask(self, box, scale_level=0, tolerance=0):
if scale_level != 0:
polygon = self._rescale_polygon(scale_level)
else:
polygon = self.polygon
if tolerance > 0:
polygon = polygon.simplify(tolerance, preserve_topology=False)
box_polygon = self._box_to_polygon(box)
box_height = int(box['down_left'][1] - box['up_left'][1])
box_width = int(box['down_right'][0] - box['down_left'][0])
if not polygon.intersects(box_polygon):
return np.zeros((box_width, box_height), dtype=np.uint8)
else:
if polygon.contains(box_polygon):
return np.ones((box_width, box_height), dtype=np.uint8)
else:
mask = np.zeros((box_width, box_height), dtype=np.uint8)
intersection = polygon.intersection(box_polygon)
if type(intersection) is MultiPolygon:
intersection_paths = list(intersection)
else:
intersection_paths = [intersection]
for path in intersection_paths:
ipath = path.exterior.coords[:]
ipath = [(int(x - box['up_left'][0]), int(y - box['up_left'][1])) for x, y in ipath]
cv2.fillPoly(mask, np.array([ipath,]), 1)
return mask
def get_full_mask(self, scale_level=0, tolerance=0):
if scale_level != 0:
polygon = self._rescale_polygon(scale_level)
scale_factor = pow(2, scale_level)
else:
polygon = self.polygon
scale_factor = 1
if tolerance > 0:
polygon = polygon.simplify(tolerance, preserve_topology=False)
bounds = self.get_bounds()
box_height = int((bounds['y_max']-bounds['y_min'])*scale_factor)
box_width = int((bounds['x_max']-bounds['x_min'])*scale_factor)
mask = np.zeros((box_height, box_width), dtype=np.uint8)
polygon_path = polygon.exterior.coords[:]
polygon_path = [(int(x - bounds['x_min']*scale_factor),
int(y - bounds['y_min']*scale_factor)) for x, y in polygon_path]
cv2.fillPoly(mask, np.array([polygon_path, ]), 1)
return mask
def get_difference_mask(self, box, scale_level=0, tolerance=0):
return 1 - self.get_intersection_mask(box, scale_level, tolerance)
class ShapesManager(object):
def __init__(self, promort_client):
self.promort_client = promort_client
def _get_roi(self, slide_id, roi_type, roi_id):
# the second 's' character related to the 'roi_type' parameter is needed because the URL required
# the plural form of the ROI type (slices, cores, focus_regions)
url = 'api/odin/rois/%s/%ss/%s/' % (slide_id, roi_type, roi_id)
response = self.promort_client.get(url)
if response.status_code == rc.OK:
roi_segments = json.loads(response.json()['roi_json'])['segments']
return Shape([(seg['point']['x'], seg['point']['y']) for seg in roi_segments])
else:
return None
def get_slice(self, slide_id, roi_id):
return self._get_roi(slide_id, 'slice', roi_id)
def get_core(self, slide_id, roi_id):
return self._get_roi(slide_id, 'core', roi_id)
def get_focus_region(self, slide_id, roi_id):
return self._get_roi(slide_id, 'focus_region', roi_id)
|
[
"luca.lianas@crs4.it"
] |
luca.lianas@crs4.it
|
6610c928d2235ba827735a632582cc6693081bcc
|
2d3a0ae30adfccabc2eddefe217ab4fca5541f91
|
/training/loss/losses.py
|
f540c693e87b2116ec853c516f3e91e3e4857361
|
[] |
no_license
|
zabulskyy/PropaGANda
|
11083b3e0d92fccf70d871bed6d9769cdea968b5
|
d8c6398640207f911ef41e7a9591af6d21f2ef88
|
refs/heads/master
| 2020-04-04T12:18:34.178513
| 2019-05-20T10:45:28
| 2019-05-20T10:45:28
| 155,921,937
| 1
| 0
| null | 2019-04-28T06:23:48
| 2018-11-02T21:10:57
|
Python
|
UTF-8
|
Python
| false
| false
| 253
|
py
|
import torch.nn as nn
def get_loss(loss_config):
loss_name = loss_config["name"]
if loss_name == "cross_entropy":
loss = nn.CrossEntropyLoss()
else:
raise ValueError("Loss [%s] not recognized." % loss_name)
return loss
# =============================================================================
# File: /app/src/domain/color/repository.py   (repo: oksanaperekipska/staff-planning)
# =============================================================================
from typing import Optional
from config.database import db
from config.logs import get_module_logger
from domain.color.model import Color
log = get_module_logger(__name__)
class ColorRepository:
@staticmethod
def find_one_by_id(id: int) -> Optional[Color]:
log.debug("Looking for color with id=%s", id)
row = db.one("SELECT * FROM color WHERE id=%(id)s", id=id)
return Color.construct(**row._asdict()) if row else None
@staticmethod
def find_all(offset: int = 0, limit: int = 20) -> list[Color]:
log.debug("Looking for all color, limit=%s", limit)
rows = db.all("SELECT * FROM color ORDER BY id OFFSET %(offset)s LIMIT %(limit)s",
offset=offset,
limit=limit)
return [Color.construct(**row._asdict()) for row in rows]
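# Minimal usage sketch (illustrative; it assumes the application's database
# connection in config.database is configured and the color table has data):
#   color = ColorRepository.find_one_by_id(1)
#   first_page = ColorRepository.find_all(offset=0, limit=5)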
# =============================================================================
# File: /Practical1/program3.py   (repo: k08puntambekar/IMCC_Python)
# =============================================================================
def remove_duplicate(duplicate):
new_list = []
for num in duplicate:
if num not in new_list:
new_list.append(num)
return new_list
MyList = [10, 10, 30, 30, 20, 60, 70, 70, 40, 55, 45, 45]
print(remove_duplicate(MyList))
# =============================================================================
# File: /12_Yandex_Final_Tasks/A/A.py   (repo: KorsakovPV/yandex_contest)
# =============================================================================
"""
A. Калькулятор
Задание связано с обратной польской нотацией. Она используется для парсинга
арифметических выражений. По сравнению с другим приемом, применяемым для данной
задачи — использованием дерева операций, она является более компактной, так как
в ней не используются скобки. Еще её иногда называют обратной польской записью
или постфиксной нотацией.
В постфиксной нотации операнды расположены перед знаками операций.
Пример 1:
3 4 +
будет равно 7, и означает 3 + 4
Пример 2:
10 2 4 * -
будет равно 2, и означает 10 - 2 * 4
Разберем последний пример подробнее:
Знак * стоит сразу после чисел 2 и 4, значит к ним нужно применить операцию,
которую этот знак обозначает, то есть перемножить эти два числа. В результате
получим 8
После этого выражение приобретет вид:
10 8 -
Операцию минус нужно применить к двум идущим перед ней числам, то есть 10 и 8.
В итоге получаем 2.
Рассмотрим алгоритм более подробно. Для его реализации будем использовать стек.
Для вычисления значения выражения, записанного в обратной польской нотации,
нужно считывать выражение слева направо и придерживаться следующих шагов:
1. Обработка входного символа: - Если на вход подан операнд, он помещается на
вершину стека. - Если на вход подан знак операции, она выполняется над
требуемым количеством значений из стека, взятых в порядке добавления.
Результат выполненной операции помещается на вершину стека.
2. Если входной набор символов обработан не полностью, перейти к шагу 1.
3. После полной обработки входного набора символов результат вычисления
выражения находится в вершине стека.
Формат ввода
В единственной строке дано выражение, записанное в обратной польской нотации.
Числа и арифметические операции отделены друг от друга пробелами.
На вход могут подаваться операции: +, -, *, / и числа, по модулю не
превосходящие 10000.
Гарантируется, что значение промежуточных выражений в тестовых данных по модулю
не больше 50000.
Формат вывода
Единственное число - значение выражения.
Пример 1
Ввод Вывод
2 1 + 3 *
9
Пример 2
Ввод Вывод
7 2 + 4 * 2 +
38
Примечания
Операция деления целочисленная. То есть, например, 12 5 / будет 2.
Решение должно быть реализовано с использованием структуры данных стек.
"""
class Stack:
def __init__(self):
self.operands = []
self.d = {'+': self.addition,
'-': self.subtraction,
'*': self.multiplication,
'/': self.division}
def is_empty(self):
return self.operands == []
def push(self, operand):
self.operands.append(operand)
def pop(self):
if not self.is_empty():
return self.operands.pop()
def addition(self, operand1, operand2):
return operand2 + operand1
def subtraction(self, operand1, operand2):
return operand2 - operand1
def multiplication(self, operand1, operand2):
return operand2 * operand1
def division(self, operand1, operand2):
return operand2 // operand1
def calc(input_file):
stack = Stack()
for i in input_file:
if i in stack.d.keys():
operand1 = stack.pop()
operand2 = stack.pop()
stack.push(stack.d[i](operand1, operand2))
else:
stack.push(int(i))
return stack.pop()
def main(input_file):
f = open(input_file)
input_file = f.read().rstrip().split()
f.close()
return str(calc(input_file)) + '\n'
if __name__ == '__main__':
print(main('input.txt') + '\n')
f = open('output.txt', 'w')
f.write(main('input.txt'))
f.close()
assert main('input1.txt') == '9' + '\n', "input1.txt error"
assert main('input2.txt') == '38' + '\n', "input2.txt error"
# =============================================================================
# File: /finder.py   (repo: jcooper-korg/talon_user, license: 0BSD)
# =============================================================================
# Talon voice commands for interacting with the Finder
# John S. Cooper jcooper@korgrd.com
from talon.voice import Key, Context
ctx = Context('Finder', bundle='com.apple.finder')
ctx.keymap({
'duplicate': Key('cmd-d'),
'collapse': Key('cmd-left'),
'expand': Key('cmd-right'),
'open': Key('cmd-down'),
'trash it': Key('cmd-backspace'),
'show package contents': Key('cmd-alt-o'),
})
# =============================================================================
# File: /keyboardAgents.py   (repo: ttaallll/PacmanAI)
# =============================================================================
# keyboardAgents.py
# -----------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from game import Agent
from game import Directions
import random
class KeyboardAgent(Agent):
"""
An agent controlled by the keyboard.
"""
# NOTE: Arrow keys also work.
WEST_KEY = 'a'
EAST_KEY = 'd'
NORTH_KEY = 'w'
SOUTH_KEY = 's'
STOP_KEY = 'q'
def __init__( self, index = 0 ):
self.lastMove = Directions.STOP
self.index = index
self.keys = []
def getAction( self, state):
from graphicsUtils import keys_waiting
from graphicsUtils import keys_pressed
keys = list(keys_waiting()) + list(keys_pressed())
if keys != []:
self.keys = keys
legal = state.getLegalActions(self.index)
move = self.getMove(legal)
if move == Directions.STOP:
# Try to move in the same direction as before
if self.lastMove in legal:
move = self.lastMove
if (self.STOP_KEY in self.keys) and Directions.STOP in legal: move = Directions.STOP
if move not in legal:
move = random.choice(legal)
self.lastMove = move
return move
def getMove(self, legal):
move = Directions.STOP
if (self.WEST_KEY in self.keys or 'Left' in self.keys) and Directions.WEST in legal: move = Directions.WEST
if (self.EAST_KEY in self.keys or 'Right' in self.keys) and Directions.EAST in legal: move = Directions.EAST
if (self.NORTH_KEY in self.keys or 'Up' in self.keys) and Directions.NORTH in legal: move = Directions.NORTH
if (self.SOUTH_KEY in self.keys or 'Down' in self.keys) and Directions.SOUTH in legal: move = Directions.SOUTH
return move
class KeyboardAgent2(KeyboardAgent):
"""
A second agent controlled by the keyboard.
"""
# NOTE: Arrow keys also work.
WEST_KEY = 'j'
EAST_KEY = "l"
NORTH_KEY = 'i'
SOUTH_KEY = 'k'
STOP_KEY = 'u'
def getMove(self, legal):
move = Directions.STOP
if (self.WEST_KEY in self.keys) and Directions.WEST in legal: move = Directions.WEST
if (self.EAST_KEY in self.keys) and Directions.EAST in legal: move = Directions.EAST
if (self.NORTH_KEY in self.keys) and Directions.NORTH in legal: move = Directions.NORTH
if (self.SOUTH_KEY in self.keys) and Directions.SOUTH in legal: move = Directions.SOUTH
return move
# =============================================================================
# File: /app.py   (repo: anupam1525/movies-rest-api)
# =============================================================================
#Import necessary packages
from flask import Flask
from flask_restful import Resource, reqparse, Api
#Instantiate a flask object
app = Flask(__name__)
#Instantiate Api object
api = Api(app)
#Setting the location for the sqlite database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///base.db'
#Adding the configurations for the database
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['PROPAGATE_EXCEPTIONS'] = True
#Import necessary classes from base.py
from base import Movies, db
#Link the app object to the Movies database
db.init_app(app)
app.app_context().push()
#Create the databases
db.create_all()
#Creating a class to create get, post, put & delete methods
class Movies_List(Resource):
#Instantiating a parser object to hold data from message payload
parser = reqparse.RequestParser()
parser.add_argument('director', type=str, required=False, help='Director of the movie')
parser.add_argument('genre', type=str, required=False, help='Genre of the movie')
parser.add_argument('collection', type=int, required=True, help='Gross collection of the movie')
#Creating the get method
def get(self, movie):
item = Movies.find_by_title(movie)
if item:
return item.json()
return {'Message': 'Movie is not found'}
#Creating the post method
def post(self, movie):
if Movies.find_by_title(movie):
            return {'Message': 'Movie with the title {} already exists'.format(movie)}
args = Movies_List.parser.parse_args()
item = Movies(movie, args['director'], args['genre'], args['collection'])
item.save_to()
return item.json()
#Creating the put method
def put(self, movie):
args = Movies_List.parser.parse_args()
item = Movies.find_by_title(movie)
if item:
item.collection = args['collection']
item.save_to()
return {'Movie': item.json()}
item = Movies(movie, args['director'], args['genre'], args['collection'])
item.save_to()
return item.json()
#Creating the delete method
def delete(self, movie):
item = Movies.find_by_title(movie)
if item:
item.delete_()
return {'Message': '{} has been deleted from records'.format(movie)}
        return {'Message': '{} is already not on the list'.format(movie)}
#Creating a class to get all the movies from the database.
class All_Movies(Resource):
#Defining the get method
def get(self):
return {'Movies': list(map(lambda x: x.json(), Movies.query.all()))}
#Adding the URIs to the api
api.add_resource(All_Movies, '/')
api.add_resource(Movies_List, '/<string:movie>')
if __name__=='__main__':
#Run the applications
app.run()
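# Example interaction (illustrative; assumes the default Flask development
# server on http://localhost:5000 and the routes registered above):
#   curl -X POST http://localhost:5000/Inception \
#        -d "director=Nolan" -d "genre=SciFi" -d "collection=829"
#   curl http://localhost:5000/                  # list all movies
#   curl -X PUT http://localhost:5000/Inception -d "collection=900"
#   curl -X DELETE http://localhost:5000/Inception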
# =============================================================================
# File: /falcon/Queueing.py   (repo: yutapok/webapi-falcon)
# =============================================================================
import redis
def set_redis_cli(host='localhost', port=6379):
return redis.Redis(host, port)
def enqueue(self, key, value):
#key_enq = key if key else self.key
pass
def dequeue(redis_cli):
if not isinstance(redis_cli, redis.Redis):
        raise TypeError('dequeue() requires a redis.Redis client')
return redis_cli.lpop
# =============================================================================
# File: /055-napa-county/final-results/parse_napa.py
# (repo: mjtonetti/california-2016-election-precinct-maps, license: MIT)
# =============================================================================
import sys
import os
import re
import csv, time
# import pandas as pd
import numpy as np
from subprocess import call
fips = '055'
# file and header
outfile = open(sys.argv[1][:-4]+'.csv','w')
outfile.write('pct16,candidate,total\n')
# loop through docs
for prop in range(53,68):
call(['pdftotext','-layout',str(prop)+'.pdf'])
pages = []
currentPage = ''
pageNo = 0
with open(str(prop)+'.pdf'.replace('pdf','txt')) as file:
# extract every other page (because of write in candidates)
raw = file.read()
        pages = raw.split('\f')  # pdftotext separates pages with form feeds; the character was lost in the original source
# loop through pages
prop = ''
for page in pages:
# # look for candidate versus write in page
# pagetype = ''
# if 'HILLARY CLINTON' in page:
# # print 'MAIN CANDIDATE PAGE'
# pagetype = 'main'
# elif 'MCMULLIN' in page:
# # print 'write-in page'
# pagetype = 'writein'
# elif 'KAMALA' in page:
# pagetype = 'senate'
# elif 'Yes' in page:
# pagetype = 'prop'
# else:
# pagetype = "null"
lines = page.split('\n')
for line in lines:
if 'PROPOSITION' in line:
pindex = line.index('PROPOSITION')
propnum = line[pindex+12:pindex+14]
if line[:6].isdigit():
precinct = line[:6]
pct = precinct
line = line.replace(',','')
numbers = [int(s) for s in line.split() if s.isdigit()]
                print(numbers)
yesvotes = str(numbers[-2])
novotes = str(numbers[-1])
outfile.write(fips+'-'+pct+',prop'+propnum+'_yes,'+yesvotes+'\n')
outfile.write(fips+'-'+pct+',prop'+propnum+'_no,'+novotes+'\n')
# # look for proposition
# if 'PROPOSITION' in line:
# prop = 'prop' + line[12:14]
# # look for precinct number, which is five
# precinct = line[:7].strip()
# if precinct.isdigit():
# # for when that first bit is a number
# print precinct
# lineindex = lines.index(line)
# totalline = lines[lineindex+4].replace(',','')
# numbers = [int(s) for s in totalline.split() if s.isdigit()]
# print numbers
# if pagetype == 'main':
# outfile.write('029-'+precinct+',pres_clinton,'+str(numbers[1])+'\n')
# outfile.write('029-'+precinct+',pres_trump,'+str(numbers[3])+'\n')
# outfile.write('029-'+precinct+',pres_johnson,'+str(numbers[4])+'\n')
# outfile.write('029-'+precinct+',pres_stein,'+str(numbers[0])+'\n')
# outfile.write('029-'+precinct+',pres_lariva,'+str(numbers[2])+'\n')
# elif pagetype == 'writein':
# outfile.write('029-'+precinct+',pres_other,'+str(numbers[0])+'\n')
# elif pagetype == 'senate':
# outfile.write('029-'+precinct+',ussenate_harris,'+str(numbers[1])+'\n')
# outfile.write('029-'+precinct+',ussenate_sanchez,'+str(numbers[0])+'\n')
# elif pagetype == 'prop':
# outfile.write('029-'+precinct+','+prop+'_yes,'+str(numbers[0])+'\n')
# outfile.write('029-'+precinct+','+prop+'_no,'+str(numbers[1])+'\n')
# # print lines[lineindex+4]
outfile.close()
# =============================================================================
# File: /coaevnmt/modules/inference.py   (repo: Rgerritse/AEVNMT-Co-training)
# =============================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import rnn_creation_fn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class InferenceModel(nn.Module):
def __init__(self, config):
super(InferenceModel, self).__init__()
rnn_fn = rnn_creation_fn(config["rnn_type"])
self.rnn = rnn_fn(config["hidden_size"], config["hidden_size"], batch_first=True, bidirectional=True)
self.config = config
if config["z_inference_mode"] == "max_hid":
self.aff_u_hid = nn.Linear(4 * config["hidden_size"], config["hidden_size"])
self.aff_u_out = nn.Linear(config["hidden_size"], config["latent_size"])
self.aff_s_hid = nn.Linear(4 * config["hidden_size"], config["hidden_size"])
self.aff_s_out = nn.Linear(config["hidden_size"], config["latent_size"])
else:
self.aff_u_hid = nn.Linear(2 * config["hidden_size"], config["hidden_size"])
self.aff_u_out = nn.Linear(config["hidden_size"], config["latent_size"])
self.aff_s_hid = nn.Linear(2 * config["hidden_size"], config["hidden_size"])
self.aff_s_out = nn.Linear(config["hidden_size"], config["latent_size"])
def forward(self, x, x_mask, x_len):
packed_seq = pack_padded_sequence(x, x_len, batch_first=True, enforce_sorted=False)
output, hidden = self.rnn(packed_seq)
output, _ = pad_packed_sequence(output, batch_first=True)
if self.config["z_inference_mode"] == "max_hid":
if self.config["rnn_type"] == "lstm":
hidden = hidden[0]
layers = [hidden[layer_num] for layer_num in range(hidden.size(0))]
hidden_combined = torch.cat(layers, dim=-1)
masked_output = output * x_mask.squeeze(1).unsqueeze(-1).type_as(output)
max_output = torch.max(masked_output, 1)[0]
final_output = torch.cat([max_output, hidden_combined], 1)
else:
final_output = torch.sum(output * x_mask.squeeze(1).unsqueeze(-1).type_as(output), 1)
loc = self.aff_u_out(F.relu(self.aff_u_hid(final_output)))
scale = F.softplus(self.aff_s_out(F.relu(self.aff_s_hid(final_output))))
return loc, scale
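# Minimal smoke test (illustrative; run as a module, e.g.
# `python -m coaevnmt.modules.inference`, so the relative imports resolve).
# The config keys mirror the ones read above; the shapes are assumptions.
if __name__ == "__main__":
    cfg = {"rnn_type": "lstm", "hidden_size": 32, "latent_size": 8,
           "z_inference_mode": "max_hid"}
    model = InferenceModel(cfg)
    x = torch.randn(2, 5, 32)                        # [batch, time, hidden]
    x_mask = torch.ones(2, 1, 5, dtype=torch.bool)   # [batch, 1, time]
    x_len = torch.tensor([5, 3])
    loc, scale = model(x, x_mask, x_len)
    print(loc.shape, scale.shape)                    # both [2, latent_size]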
# =============================================================================
# File: /pageSpeedTest/testWebsite.py   (repo: appleseed-web/pyStagingTests, license: Apache-2.0)
# =============================================================================
import sys
import json
import urllib.parse
import requests
valid = True
url=sys.argv[1]
apiKey="AIzaSyA-ZkVp0wt8eBgLFTU22uKxfgmFCHsfx5A"
vurl='https://www.googleapis.com/pagespeedonline/v2/runPagespeed?url='+ urllib.parse.quote_plus(url) +'&filter_third_party_resources=true&locale=it_IT&screenshot=false&key=' + apiKey
response = requests.get(vurl)
x=json.loads(response.text)
print()
print()
print()
print('--------------------------------------')
print('--------------------------------------')
print(' |||' + url + '|||')
print('--------------------------------------')
print('--------------------------------------')
print()
print(x['ruleGroups'])
print()
print()
print()
sys.exit("pagespeedtest url " + url)
# =============================================================================
# File: /create_data.py   (repo: Andryyyha/SSU-BigData-ETL)
# =============================================================================
import argparse
import csv
import os
from oauth2client import client
from google.cloud import pubsub_v1
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('--project_id', help='project name', dest='project_id', default='ssu-bigdata-etl')
parser.add_argument('--path_to_data', help='path to csv file', dest='path_to_data', default='./stations.csv')
    parser.add_argument('--repeats', help='how many times to publish the same file', dest='repeats', default=3, type=int)
parser.add_argument('--topic_name', help='pub/sub topic name', dest='topic_name', default='stations')
parser.add_argument('--path_to_creds', help='path to JSON with credentials', dest='creds', default='/Users/andryyyha/SSU-BigData-ETL-3b758a590d47.json')
return parser.parse_args()
def create_client():
return pubsub_v1.PublisherClient()
def publish_data(path_to_data, topic_name, repeats, client):
    for i in range(1, repeats + 1):  # publish the file 'repeats' times (1-indexed for logging)
firstline = True
with open(path_to_data) as data:
print("Publishing data iteration {}".format(i))
reader = csv.reader(data)
for line in reader:
if firstline:
firstline = False
continue
line[1] = '"' + line[1] + '"'
line[2] = '"' + line[2] + '"'
# print(len(line))
encoded_line = ','.join(line).encode('utf-8')
print(encoded_line, '\n')
future = client.publish(topic_name, encoded_line)
print(future.result())
def main():
args = parse()
print(args)
project_id = args.project_id
path_to_data = args.path_to_data
repeats = args.repeats
name = args.topic_name
topic_name = 'projects/{}/topics/{}'.format(project_id, name)
creds = args.creds
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = creds
client = create_client()
publish_data(path_to_data, topic_name, repeats, client)
if __name__ == "__main__":
main()
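# Example invocation (illustrative; the values shown are simply the defaults
# wired into parse() above):
#   python create_data.py --project_id ssu-bigdata-etl --topic_name stations \
#          --path_to_data ./stations.csv --repeats 3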
# =============================================================================
# File: /hw14.py   (repo: LevBaranov/pylearn)
# =============================================================================
class Integer(int):
    'A modified int so that 2 + 2 == 5'
def __add__(self, arg):
return super().__add__(arg) + 1
class List(list):
    'A modified list that cannot hold more than 10 elements'
def __init__(self, arg):
if len(arg) <= 10:
return super().__init__(arg)
def append(self, arg):
if (self.__len__() + 1) <= 10:
return super().append(arg)
def extend(self, arg):
if (self.__len__() + len(arg)) <= 10:
return super().extend(arg)
def insert(self, position, arg):
if (self.__len__() + 1) <= 10:
return super().insert(position, arg)
class UniqueList(list):
    'A list with unique elements, so it behaves like a set'
def __init__(self, arg):
for a in arg:
            if self.count(a) == 0:
self.append(a)
def append(self, arg):
if self.count(arg) == 0:
return super().append(arg)
def extend(self, arg):
for a in arg:
print(self.count(a))
if self.count(a) == 0:
self.append(a)
return self
def insert(self, position, arg):
if self.count(arg) == 0:
return super().insert(position, arg)
if __name__ == '__main__':
l = UniqueList([3, 4, 3, 5])
print(l)
l.append(4)
print(l)
l.extend([14, 35, 5])
print(l)
l.insert(4, 4)
print(l)
# =============================================================================
# File: /SCIE2100_Practical5/sequence.py   (repo: danielzhangau/Bioinformatics)
# =============================================================================
"""
Module *** sequence ***
This module depends on the following modules
sym -- defines an alphabet
prob -- defines structures to hold probabilities (prob also depends on sym)
This module incorporates classes for
Sequence -- names and defines a sequence of symbols; computes various transformations and pairwise alignments
Alignment -- defines a multiple sequence alignment; computes stats for use in substitution matrices
SubstMatrix -- substitution matrix class to support alignment methods
Regexp -- defines patterns as regular expressions for textual pattern matching in sequences
PWM -- defines a weight matrix that can score any site in actual sequences
Incorporates methods for loading and saving files relevant to the above (e.g. FASTA, ALN, substitution matrices)
and methods for retrieving relevant data from web services
This code has been adapted to Python 3.5 in 2017
This code has gone through many updates and has benefited from kind contributions of course participants.
Please keep suggestions coming!
Email: m.boden@uq.edu.au
"""
import string, sys, re, math, os, array
import numpy
#from webservice import *
from sym import *
from prob import *
# Sequence ------------------****
class Sequence(object):
""" A biological sequence. Stores the sequence itself (as a compact array),
the alphabet (i.e., type of sequence it is), and optionally a name and further
information. """
sequence = None # The array of symbols that make up the sequence
alphabet = None # The alphabet from which symbols come
name = None # The name (identifier) of a sequence
info = None # Other information (free text; e.g. annotations)
length = None # The number of symbols that the sequence is composed of
gappy = None # True if the sequence has "gaps", i.e. positions that represent deletions relative another sequence
def __init__(self, sequence, alphabet = None, name = '', info = '', gappy = False):
""" Create a sequence with the sequence data. Specifying the alphabet,
name and other information about the sequence are all optional.
The sequence data is immutable (stored as a string).
Example:
>>> myseq = Sequence('MVSAKKVPAIAMSFGVSF')
will create a sequence with no name, and assign one of the predefined
alphabets on the basis of what symbols were used.
>>> myseq.alphabet.symbols
will output the standard protein alphabet:
['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q',
'R', 'S', 'T', 'V', 'W', 'Y'] """
self.sequence = sequence
# Assign an alphabet
# If no alphabet is provided, attempts to identify the alphabet from sequence
self.alphabet = None
if not alphabet is None:
for sym in self.sequence:
if not sym in alphabet and (sym != '-' or not gappy): # error check: bail out
raise RuntimeError('Invalid symbol: %c in sequence %s' % (sym, name))
self.alphabet = alphabet
else:
for alphaName in preferredOrder:
alpha = predefAlphabets[alphaName]
valid = True
for sym in self.sequence:
if not sym in alpha and (sym != '-' or not gappy):
valid = False
break
if valid:
self.alphabet = alpha
break
if self.alphabet is None:
raise RuntimeError('Could not identify alphabet from sequence: %s' % name)
# Store other information
self.name = name
self.info = info
self.length = len(self.sequence)
self.gappy = gappy
def __len__(self):
""" Defines what the "len" operator returns for an instance of Sequence, e.g.
>>> seq = Sequence('ACGGTAGGA', DNA_Alphabet)
>>> print (len(seq))
9
"""
return len(self.sequence)
def __str__(self):
""" Defines what should be printed when the print statement is used on a Sequence instance """
str = self.name + ': '
for sym in self:
str += sym
return str
def __iter__(self):
""" Defines how a Sequence should be "iterated", i.e. what its elements are, e.g.
>>> seq = Sequence('AGGAT', DNA_Alphabet)
>>> for sym in seq:
print (sym)
will print A, G, G, A, T (each on a separate row)
"""
tsyms = tuple(self.sequence)
return tsyms.__iter__()
def __contains__(self, item):
""" Defines what is returned when the "in" operator is used on a Sequence, e.g.
>>> seq = Sequence('ACGGTAGGA', DNA_Alphabet)
>>> print ('T' in seq)
True
which is equivalent to
>>> print (seq.__contains__('T'))
True
>>> print ('X' in seq)
False
"""
for sym in self.sequence:
if sym == item:
return True
return False
def __getitem__(self, ndx):
""" Retrieve a specified index (or a "slice" of indices) of the sequence data.
Calling self.__getitem__(3) is equivalent to self[3]
"""
if type(ndx) is slice:
return ''.join(self.sequence[ndx])
else:
return self.sequence[ndx]
def writeFasta(self):
""" Write one sequence in FASTA format to a string and return it. """
fasta = '>' + self.name + ' ' + self.info + '\n'
data = ''.join(self.sequence)
nlines = int(math.ceil((len(self.sequence) - 1) / 60 + 1))
for i in range(nlines):
lineofseq = ''.join(data[i*60 : (i+1)*60]) + '\n'
fasta += lineofseq
return fasta
def count(self, findme = None):
""" Get the number of occurrences of specified symbol findme OR
if findme = None, return a dictionary of counts of all symbols in alphabet """
if findme != None:
cnt = 0
for sym in self.sequence:
if findme == sym:
cnt = cnt + 1
return cnt
else:
symbolCounts = {}
for symbol in self.alphabet:
symbolCounts[symbol] = self.count(symbol)
return symbolCounts
def find(self, findme):
""" Find the position of the specified symbol or sub-sequence """
return ''.join(self.sequence).find(findme)
"""
Below are some useful methods for loading data from strings and files.
Recognize the FASTA format (nothing fancy).
"""
def readFasta(string, alphabet = None, ignore = False, gappy = False):
""" Read the given string as FASTA formatted data and return the list of
sequences contained within it.
If alphabet is specified, use it, if None (default) then guess it.
If ignore is False, errors cause the method to fail.
If ignore is True, errors will disregard sequence.
If gappy is False (default), sequence cannot contain gaps,
if True gaps are accepted and included in the resulting sequences."""
seqlist = [] # list of sequences contained in the string
seqname = None # name of *current* sequence
seqinfo = None
seqdata = [] # sequence data for *current* sequence
for line in string.splitlines(): # read every line
if len(line) == 0: # ignore empty lines
continue
if line[0] == '>': # start of new sequence
if seqname: # check if we've got one current
try:
current = Sequence(seqdata, alphabet, seqname, seqinfo, gappy)
seqlist.append(current)
except RuntimeError as errmsg:
if not ignore:
raise RuntimeError(errmsg)
# now collect data about the new sequence
seqinfo = line[1:].split() # skip first char
if len(seqinfo) > 0:
try:
parsed = parseDefline(seqinfo[0])
seqname = parsed[0]
seqinfo = line[1:]
except IndexError as errmsg:
if not ignore:
raise RuntimeError(errmsg)
else:
seqname = ''
seqinfo = ''
seqdata = []
else: # we assume this is (more) data for current
cleanline = line.split()
for thisline in cleanline:
seqdata.extend(tuple(thisline.strip('*')))
# we're done reading the file, but the last sequence remains
if seqname:
try:
lastseq = Sequence(seqdata, alphabet, seqname, seqinfo, gappy)
seqlist.append(lastseq)
except RuntimeError as errmsg:
if not ignore:
raise RuntimeError(errmsg)
return seqlist
def parseDefline(string):
""" Parse the FASTA defline (see http://en.wikipedia.org/wiki/FASTA_format)
GenBank, EMBL, etc gi|gi-number|gb|accession|locus
SWISS-PROT, TrEMBL sp|accession|name
...
Return a tuple with
[0] primary search key, e.g. UniProt accession, Genbank GI
[1] secondary search key, e.g. UniProt name, Genbank accession
[2] source, e.g. 'sp' (SwissProt/UniProt), 'tr' (TrEMBL), 'gb' (Genbank)
"""
if len(string) == 0: return ('', '', '', '')
s = string.split()[0]
if re.match("^sp\|[A-Z][A-Z0-9]{5}\|\S+", s): arg = s.split('|'); return (arg[1], arg[2], arg[0], '')
elif re.match("^tr\|[A-Z][A-Z0-9]{5}\|\S+", s): arg = s.split('|'); return (arg[1], arg[2], arg[0], '')
elif re.match("^gi\|[0-9]*\|\S+\|\S+", s): arg = s.split('|'); return (arg[1], arg[3], arg[0], arg[2])
elif re.match("gb\|\S+\|\S+", s): arg = s.split('|'); return (arg[1], arg[2], arg[0], '')
elif re.match("emb\|\S+\|\S+", s): arg = s.split('|'); return (arg[1], arg[2], arg[0], '')
elif re.match("^refseq\|\S+\|\S+", s): arg = s.split('|'); return (arg[1], arg[2], arg[0], '')
else: return (s, '', '', '')
def readFastaFile(filename, alphabet = None, ignore = False, gappy = False):
""" Read the given FASTA formatted file and return the list of sequences
contained within it. Note that if alphabet is NOT specified, it will take a
separate guess for each sequence.
If ignore is False, errors cause the method to fail.
If ignore is True, errors will disregard sequence.
If gappy is False (default), sequence cannot contain gaps,
if True gaps are accepted and included in the resulting sequences."""
fh = open(filename)
seqlist = []
batch = '' # a batch of rows including one or more complete FASTA entries
rowcnt = 0
for row in fh:
row = row.strip()
if len(row) > 0:
if row.startswith('>') and rowcnt > 0:
more = readFasta(batch, alphabet, ignore, gappy)
if len(more) > 0:
seqlist.extend(more)
batch = ''
rowcnt = 0
batch += row + '\n'
rowcnt += 1
if len(batch) > 0:
more = readFasta(batch, alphabet, ignore, gappy)
if len(more) > 0:
seqlist.extend(more)
fh.close()
return seqlist
def writeFastaFile(filename, seqs):
""" Write the specified sequences to a FASTA file. """
fh = open(filename, 'w')
for seq in seqs:
fh.write(seq.writeFasta())
fh.close()
def getMarkov(seqs, order = 0):
""" Retrieve the Markov stats for a set of sequences. """
myseqs = seqs
if seqs is Sequence:
myseqs = list([seqs])
myalpha = None
for seq in myseqs:
if myalpha == None:
myalpha = seq.alphabet
else:
if seq.alphabet != myalpha:
raise RuntimeError('Sequence ' + seq.name + ' uses an invalid alphabet ')
jp = Joint([myalpha for _ in range(order + 1)])
for seq in myseqs:
for i in range(len(seq) - order):
sub = seq[i:i + order + 1]
jp.observe(sub)
return jp
def getCount(seqs, findme = None):
if findme != None:
cnt = 0
for seq in seqs:
cnt += seq.count(findme)
return cnt
else:
if len(seqs) > 0:
alpha = seqs[0].alphabet
patcnt = {}
for a in alpha:
patcnt[a] = getCount(seqs, a)
return patcnt
# Alignment ------------------
class Alignment():
""" A sequence alignment class. Stores two or more sequences of equal length where
one symbol is gap '-'
Example usage:
>>> seqs = [Sequence('THIS-LI-NE-', Protein_Alphabet, gappy = True), Sequence('--ISALIGNED', Protein_Alphabet, gappy = True)]
>>> print (Alignment(seqs))
THIS-LI-NE-
--ISALIGNED """
alignlen = None
seqs = None
alphabet = None
def __init__(self, seqs):
self.alignlen = -1
self.seqs = seqs
self.alphabet = None
for s in seqs:
if self.alignlen == -1:
self.alignlen = len(s)
elif self.alignlen != len(s):
raise RuntimeError("Alignment invalid: different lengths")
if self.alphabet != None and self.alphabet != s.alphabet:
raise RuntimeError("Alignment invalid: different alphabets")
self.alphabet = s.alphabet
def getnamelen(self):
namelen = 0
for seq in self.seqs:
namelen = max(len(seq.name), namelen)
return namelen
def __len__(self):
""" Defines what the "len" operator returns for an instance of Alignment, e.g.
>>> seqs = [Sequence('THIS-LI-NE', Protein_Alphabet, gappy = True), Sequence('--ISALIGNED', Protein_Alphabet, gappy = True)]
>>> aln = Alignment(seqs)
>>> print(len(aln))
2
"""
return len(self.seqs)
def getSize(self):
""" Returns the size of an alignment in terms of number of columns """
return self.alignlen
def __str__(self):
string = ''
namelen = self.getnamelen()
for seq in self.seqs:
string += seq.name.ljust(namelen+1)
for sym in seq:
string += sym
string += '\n'
return string
def __getitem__(self, ndx):
return self.seqs[ndx]
def writeClustal(self, filename = None):
""" Write the alignment to a string or file using the Clustal file format. """
symbolsPerLine = 60
maxNameLength = self.getnamelen() + 1
string = ''
        wholeRows = self.alignlen // symbolsPerLine  # integer division so range() below gets an int
for i in range(wholeRows):
for j in range(len(self.seqs)):
string += self.seqs[j].name.ljust(maxNameLength) + ' '
string += self.seqs[j][i*symbolsPerLine:(i+1)*symbolsPerLine] + '\n'
string += '\n'
# Possible last row
lastRowLength = self.alignlen - wholeRows*symbolsPerLine
if lastRowLength > 0:
for j in range(len(self.seqs)):
if maxNameLength > 0:
string += self.seqs[j].name.ljust(maxNameLength) + ' '
string += self.seqs[j][-lastRowLength:] + '\n'
if filename != None:
fh = open(filename, 'w')
fh.write('CLUSTAL W (1.83) multiple sequence alignment\n\n\n') # fake header so that clustal believes it
fh.write(string)
fh.close()
return
return string
def getProfile(self, pseudo = 0.0, countGaps = True):
""" Determine the probability matrix from the alignment, assuming
that each position is independent of all others. """
p = IndepJoint([self.alphabet for _ in range(self.alignlen)], pseudo)
for seq in self.seqs:
p.observe(seq, 1, countGaps = countGaps)
return p
def getConsensus(self):
""" Construct a consensus sequence. """
syms = []
for col in range(self.alignlen):
d = Distrib(self.alphabet)
for seq in self.seqs:
if seq[col] in self.alphabet:
d.observe(seq[col])
syms.append(d.getmax())
return Sequence(syms)
def getConsensusForColumn(self, colidx):
symcnt = {}
for seq in self.seqs:
mysym = seq[colidx]
try:
symcnt[mysym] += 1
except:
symcnt[mysym] = 1
consensus = None
maxcnt = 0
for mysym in symcnt:
if symcnt[mysym] > maxcnt:
maxcnt = symcnt[mysym]
consensus = mysym
return consensus
def displayConsensus(self, theta1 = 0.2, theta2 = 0.05, lowercase = True):
""" Display a table with rows for each alignment column, showing
column index, entropy, number of gaps, and symbols in order of decreasing probability.
theta1 is the threshold for displaying symbols in upper case,
theta2 is the threshold for showing symbols at all, and in lower case. """
print(("Alignment of %d sequences, with %d columns" % (len(self.seqs), self.alignlen)))
print(("Column\tEntropy\tGaps\tProb\tConserv\tSymbols (Up>=%.2f;Low>=%.2f)\n" % (theta1, theta2)))
for col in range(self.alignlen):
d = Distrib(self.alphabet)
gaps = 0
for seq in self.seqs:
if seq[col] in self.alphabet:
d.observe(seq[col])
else:
gaps += 1
            # end=' ' restores the original Python 2 trailing-comma behaviour (no newline)
            print(col + 1, "\t%5.3f" % d.entropy(), "\t%4d\t" % gaps, end=' ')
            symprobs = d.getProbsort()
            (_, maxprob) = symprobs[0]
            if maxprob >= theta1:
                print("%d\tTRUE\t" % int(maxprob * 100), end=' ')
            else:
                print("%d\t\t" % int(maxprob * 100), end=' ')
            for (sym, prob) in symprobs:
                if prob >= theta1:
                    print(sym, "%d%%" % int(prob * 100), end=' ')
                elif prob >= theta2 and lowercase:
                    print(sym.lower(), "%d%%" % int(prob * 100), end=' ')
                elif prob >= theta2:
                    print(sym, "%d%%" % int(prob * 100), end=' ')
            print()
def saveConsensus(self, myseq, filename, theta1 = 0.2, theta2 = 0.05, lowercase = True, compact = False):
""" Display a table with rows for each alignment column, showing
column index, entropy, number of gaps, and symbols in order of decreasing probability.
theta1 is the threshold for displaying symbols in upper case,
theta2 is the threshold for showing symbols at all, and in lower case. """
filename = ''.join(e for e in filename if e.isalnum() or e == '_' or e == '.')
f = open(filename, 'w')
f.write("Alignment of %d sequences, with %d columns\n" % (len(self.seqs), self.alignlen))
if compact:
f.write("Column\tConserv\tVariab\tAll (Up>=%.2f;Low>=%.2f)\n" % (theta1, theta2))
else:
f.write("Column\tProb\tConserv\tSymbols (Up>=%.2f;Low>=%.2f)\n" % (theta1, theta2))
countrow = 0
for col in range(self.alignlen):
countrow += 1
if myseq[col] == '-':
continue
alist = list(self.alphabet)
alist.append('-')
gapalphabet = Alphabet(alist)
d_gap = Distrib(gapalphabet)
d_nogap = Distrib(self.alphabet)
for seq in self.seqs:
if seq[col] in gapalphabet:
d_gap.observe(seq[col])
if seq[col] in self.alphabet:
d_nogap.observe(seq[col])
f.write("%d\t" % (col + 1))
symprobs_nogap = d_nogap.getProbsort()
symprobs_gap = d_gap.getProbsort()
(maxsym, maxprob) = symprobs_nogap[0]
if compact:
if maxprob >= theta1:
f.write("%c\t" % maxsym)
else:
f.write("\t")
for (sym, prob) in symprobs_gap:
if prob >= theta2 and lowercase:
f.write("%c" % sym.lower())
elif prob >= theta2:
f.write("%c" % sym)
f.write("\t")
else:
if maxprob >= theta1:
f.write("%d\t" % int(maxprob * 100))
else:
f.write("%d\t\t" % int(maxprob * 100))
for (sym, prob) in symprobs_gap:
if prob >= theta1:
f.write("%c %d%% " % (sym, int(prob * 100)))
elif prob >= theta2 and lowercase:
f.write("%c %d%% " % (sym.lower(), int(prob * 100)))
elif prob >= theta2:
f.write("%c %d%% " % (sym, int(prob * 100)))
f.write('\n')
f.close()
def calcBackground(self):
""" Count the proportion of each amino acid's occurrence in the
alignment, and return as a probability distribution. """
p = Distrib(self.alphabet)
for seq in self.seqs:
for sym in seq:
if sym in self.alphabet: # ignore "gaps"
p.observe(sym)
return p
def calcSubstMatrix(self, background = None):
""" Return a substitutionMatrix whose fg are based on this un-gapped
multiple sequence alignment. Scores are given in half-bits. """
# Get a list of the amino acids
aminoAcids = self.alphabet.symbols
columns = self.alignlen # Length of sequences in alignment
numSeqs = len(self.seqs) # Number of sequences in alignment
seqPairs = (numSeqs* (numSeqs - 1) ) / 2 # Number of pairs of sequences in ungapped alignment
aaPairs = seqPairs * columns # Number of pairs of amino acids in ungapped alignment
# For each pair of amino acids, calculate the proportion of all aligned
# amino acids in this alignment which are made up of that pair
# (i.e., q[ab] = fab / aaPairs, where fab is the number of times
# a and b are aligned in this alignment)
# See page 122 in Understanding Bioinformatics.
q = {}
for i in range( len(aminoAcids) ):
a = aminoAcids[i]
for j in range(i, len(aminoAcids)):
b = aminoAcids[j]
# Count the number of times each pair of amino acids is aligned
fab = 0
for column in range(columns):
# Count number of each amino acid in each column
col = [seq[column] for seq in self.seqs]
if a == b:
# Number of ways of pairing up n occurrences of amino
# acid a is n*(n-1)/2
cnt = col.count(a)
fab += cnt * (cnt-1)/2
else:
# Number of ways of pairing up n & m occurrences of
# amino acids a & b is n*m
fab += col.count(a)*col.count(b)
# Calculate proportion of all aligned pairs of amino acids
q[a+b] = q[b+a] = float(fab) / aaPairs
if q[a+b] == 0: # This is so we don't end up doing log(0)
q[a+b] = q[b+a] = 0.001
# Background frequency calculation if required
p = background or self.calcBackground()
# Calculate log-odds ratio for each pair of amino acids
s = SubstMatrix(self.alphabet)
for a in aminoAcids:
for b in aminoAcids:
# Calculate random chance probabilitity (eab)
if a == b:
eab = p[a]**2
else:
eab = 2*p[a]*p[b]
if eab == 0:
eab = 0.001
# Calculate final score to be set in the substitution matrix
odds = q[a+b] / eab
sab = math.log(odds, 2) # log_2 transform
sab = sab * 2 # units in half bits
s.set(a, b, int(round(sab)))
return s
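    # Minimal usage sketch (illustrative; DNA_Alphabet comes from the sym module
    # imported above, and the toy ungapped alignment is made up):
    #   aln = Alignment([Sequence('ACGT', DNA_Alphabet), Sequence('ACGA', DNA_Alphabet)])
    #   sm = aln.calcSubstMatrix()
    #   print(sm.get('A', 'C'))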
def calcDistances(self, measure, a=1.0):
""" Calculate the evolutionary distance between all pairs of sequences
in this alignment, using the given measure. Measure can be one of
'fractional', 'poisson', 'gamma', 'jc' or 'k2p'. If 'gamma' or 'k2p' is
given, then the parameter a must also be specified (or else it will use
the default value of 1.0).
Definitions of each distance metric are found in Zvelebil and Baum p268-276.
These are mostly intended for DNA, but adapted for protein (as below).
Note however that there are alternative distance matrices for proteins (p276).
"""
measure = measure.lower()
if not measure in ['fractional', 'poisson', 'gamma', 'jc', 'k2p']:
raise RuntimeError('Unsupported evolutionary distance measure: %s' % measure)
a = float(a)
if len(self.alphabet) == 4:
oneless = 3
alphalen = 4
elif len(self.alphabet) == 20:
oneless = 19
alphalen = 20
else:
raise RuntimeError('Invalid sequence alphabet: %s' % str(self.alphabet))
distmat = numpy.zeros((len(self.seqs), len(self.seqs)))
# Loop through each pair of sequences
for i in range(len(self.seqs)):
for j in range(i + 1, len(self.seqs)):
seqA = self.seqs[i]
seqB = self.seqs[j]
# Calculate the fractional distance (p) first
# The two sequences of interest are in seqA and seqB
L = 0
D = 0
for k in range(self.alignlen):
# For every non-gapped column, put to L
# For every non-gapped column where the sequences are
# different, put to D
if seqA[k] != '-' and seqB[k] != '-':
L += 1
if seqA[k] != seqB[k]:
D += 1
p = float(D)/L
# Now calculate the specified measure based on p
if measure == 'fractional':
dist = p
elif measure == 'poisson':
dist = -math.log(1-p)
elif measure == 'jc':
dist = -(float(oneless)/alphalen)*math.log(1 - (float(alphalen)/oneless)*p)
elif measure == 'k2p':
dist = (float(oneless)/alphalen)*a*((1 - (float(alphalen)/oneless)*p)**(-1/a) - 1)
else: # measure == 'gamma'
dist = a*((1-p)**(-1/a) - 1)
distmat[i, j] = distmat[j, i] = dist
return distmat
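    # Example (illustrative): aln.calcDistances('jc') returns a symmetric numpy
    # array with the Jukes-Cantor distance between every pair of sequences;
    # 'gamma' and 'k2p' additionally take the parameter a, e.g.
    # aln.calcDistances('gamma', a=2.0).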
def writeHTML(self, filename = None):
""" Generate HTML that displays the alignment in color.
Requires that the alphabet is annotated with the label 'html-color' (see Sequence.annotateSym)
and that each symbol maps to a text string naming the color, e.g. 'blue'
"""
html = '''<html><head><meta content="text/html; charset=ISO-8859-1" http-equiv="Content-Type">\n<title>Sequence Alignment</title>\n</head><body><pre>\n'''
maxNameLength = self.getnamelen()
html += ''.ljust(maxNameLength) + ' '
for i in range(self.alignlen - 1):
if (i+1) % 10 == 0:
html += str(i/10+1)[0]
else:
html += ' '
html += '%s\n' % (self.alignlen)
if self.alignlen > 10:
html += ''.ljust(maxNameLength) + ' '
for i in range(self.alignlen - 1):
if (i+1) % 10 == 0:
index = len(str(i/10 + 1).split('.')[0])
html += str(i / 10 + 1).split('.')[0][(index * -1) + 1 ] if (len(str(i / 10 + 1).split('.')[0]) > 1) else '0'
else:
html += ' '
html += '\n'
if self.alignlen > 100:
html += ''.ljust(maxNameLength) + ' '
for i in range(self.alignlen - 1):
if (i+1) % 10 == 0 and i >= 99:
index = len(str(i/10 + 1).split('.')[0])
html += str(i / 10 + 1).split('.')[0][-1] if (len(str(i / 10 + 1).split('.')[0]) >2) else '0'
else:
html += ' '
html += '\n'
if self.alignlen > 1000:
html += ''.ljust(maxNameLength) + ' '
for i in range(self.alignlen - 1):
if (i+1) % 10 == 0:
html += '0' if (len(str(i / 10 + 1).split('.')[0]) > 2) else ' '
else:
html += ' '
html += '\n'
for seq in self.seqs:
html += seq.name.ljust(maxNameLength) + ' '
for sym in seq:
color = self.alphabet.getAnnotation('html-color', sym)
if not color:
color = 'white'
html += '<font style="BACKGROUND-COLOR: %s">%s</font>' % (color, sym)
html += '\n'
html += '</pre></body></html>'
if filename:
fh = open(filename, 'w')
fh.write(html)
fh.close()
return html
def saveConsensus(aln, theta1 = 0.99, theta2 = 0.01, countgaps = False, consensus = True, filename = None):
""" Display a table with rows for each alignment column, showing
column index, entropy, number of gaps, and symbols in order of decreasing probability.
theta1 is the percent threshold for consensus (when achieved, all other symbols are ignored)
theta2 is the percent threshold for inclusion (symbols below are ignored).
countgaps, if true, count gaps (default false).
consensus, if true, always print the consensus symbol.
filename is name of file to save the output to (default stdout)."""
if filename == None:
f = sys.stdout
else:
filename = ''.join(e for e in filename if e.isalnum() or e == '_' or e == '.')
f = open(filename, 'w')
if consensus:
f.write("Alignment of %d sequences, with %d columns\n" % (len(aln.seqs), aln.alignlen))
f.write("Consensus>=%.2f;Inclusion>=%.2f)\n" % (theta1, theta2))
for col in range(aln.alignlen):
# collect probabilities for column, with or without gap
myalpha = aln.alphabet
if countgaps:
alist = list(aln.alphabet)
alist.append('-')
myalpha = Alphabet(alist)
d = Distrib(myalpha)
for seq in aln.seqs:
if seq[col] in myalpha:
d.observe(seq[col])
symprobs = d.getProbsort() # the symbols sorted by probability
ninclusions = 0
for (s, p) in symprobs:
if p >= theta2:
ninclusions += 1
else:
break
if consensus or ninclusions > 1:
f.write("%d " % (col + 1))
(maxs, maxp) = symprobs[0]
# if maxp >= theta1 or consensus:
# f.write("%c" % maxs)
for (s, p) in symprobs[1:]:
if p >= theta2:
f.write("%c" % s)
f.write("; ")
f.write('\n')
    if filename is not None:  # don't close sys.stdout
        f.close()
def alignGlobal(seqA, seqB, substMatrix, gap = -1):
""" Align seqA with seqB using the Needleman-Wunsch
(global) algorithm. subsMatrix is the substitution matrix to use and
gap is the linear gap penalty to use. """
lenA, lenB = len(seqA), len(seqB)
# Create the scoring matrix (S)
S = numpy.zeros((lenA + 1, lenB + 1))
# Fill the first row and column of S with multiples of the gap penalty
for i in range(lenA + 1):
S[i, 0] = i * gap
for j in range(lenB + 1):
S[0, j] = j * gap
# Calculate the optimum score at each location in the matrix S
# (where the score represents the best possible score for an alignment
# that ends at sequence indices i and j, for A and B, resp.)
for i in range(1, lenA + 1):
for j in range(1, lenB + 1):
match = S[i-1, j-1] + substMatrix.get(seqA[i-1], seqB[j-1])
delete = S[i-1, j ] + gap
insert = S[i , j-1] + gap
S[i, j] = max([match, delete, insert])
# Traceback the optimal alignment
alignA = '' # a string for sequence A when aligned (e.g. 'THIS-LI-NE-', initially empty).
alignB = '' # a string for sequence B when aligned (e.g. '--ISALIGNED', initially empty).
# Start at the end (bottom-right corner of S)
i = lenA
j = lenB
# Stop when we hit the beginning of at least one sequence
while i > 0 and j > 0:
if S[i, j] == S[i-1, j] + gap:
# Got here by a gap in sequence B (go up)
alignA = seqA[i-1] + alignA
alignB = '-' + alignB
i -= 1
elif S[i, j] == S[i, j-1] + gap:
# Got here by a gap in sequence A (go left)
alignA = '-' + alignA
alignB = seqB[j-1] + alignB
j -= 1
else:
# Got here by aligning the bases (go diagonally)
alignA = seqA[i-1] + alignA
alignB = seqB[j-1] + alignB
i -= 1
j -= 1
# Fill in the rest of the alignment if it begins with gaps
# (i.e., traceback all the way to S[0, 0])
while i > 0:
# Go up
alignA = seqA[i-1] + alignA
alignB = '-' + alignB
i -= 1
while j > 0:
# Go left
alignA = '-' + alignA
alignB = seqB[j-1] + alignB
j -= 1
return Alignment([Sequence(alignA, seqA.alphabet, seqA.name, gappy = True), Sequence(alignB, seqB.alphabet, seqB.name, gappy = True)])
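# Minimal usage sketch (illustrative; assumes a substitution matrix loaded as in
# the commented-out readSubstMatrix example further below):
#   sm = readSubstMatrix('blosum62.matrix', Protein_Alphabet)
#   aln = alignGlobal(Sequence('HEAGAWGHEE'), Sequence('PAWHEAE'), sm, gap=-8)
#   print(aln)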
def alignLocal(seqA, seqB, substMatrix, gap = -1):
""" Align seqA with seqB using the Smith-Waterman
(local) algorithm. subsMatrix is the substitution matrix to use and
gap is the linear gap penalty to use. """
lenA, lenB = len(seqA), len(seqB)
# Create the scoring matrix (S)
S = numpy.zeros((lenA + 1, lenB + 1))
# Fill the first row and column of S with multiples of the gap penalty
for i in range(lenA + 1):
S[i, 0] = 0 # Local: init 0
for j in range(lenB + 1):
S[0, j] = 0 # Local: init 0
# Calculate the optimum score at each location in the matrix S
# (where the score represents the best possible score for an alignment
# that ends at sequence indices i and j, for A and B, resp.)
for i in range(1, lenA + 1):
for j in range(1, lenB + 1):
match = S[i-1, j-1] + substMatrix.get(seqA[i-1], seqB[j-1])
delete = S[i-1, j ] + gap
insert = S[i , j-1] + gap
S[i, j] = max([match, delete, insert, 0]) # Local: add option that we re-start alignment from "0"
# Trace back the optimal alignment
alignA = ''
alignB = ''
# Local: start at the cell which has the highest score; find it
i = 0
j = 0
for ii in range(1, lenA + 1):
for jj in range(1, lenB + 1):
if S[ii, jj] > S[i, j]:
i = ii
j = jj
# Stop when we hit the end of a sequence
# Local: also stop when we hit a score 0
while i > 0 and j > 0 and S[i, j] > 0:
if S[i, j] == S[i-1, j] + gap:
# Got here by a gap in sequence B (go up)
alignA = seqA[i-1] + alignA
alignB = '-' + alignB
i -= 1
elif S[i, j] == S[i, j-1] + gap:
# Got here by a gap in sequence A (go left)
alignA = "-" + alignA
alignB = seqB[j-1] + alignB
j -= 1
else:
# Got here by aligning the bases (go diagonally)
alignA = seqA[i-1] + alignA
alignB = seqB[j-1] + alignB
i -= 1
j -= 1
return Alignment([Sequence(alignA, seqA.alphabet, seqA.name, gappy = True), Sequence(alignB, seqB.alphabet, seqB.name, gappy = True)])
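# alignLocal is called exactly like alignGlobal above, but only the
# highest-scoring local segment pair is reported (illustrative):
#   print(alignLocal(Sequence('HEAGAWGHEE'), Sequence('PAWHEAE'), sm, gap=-8))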
def tripletAlignGlobal(seqA, seqB, seqC, subsMatrix, gap = -1):
""" Triplet-wise align this sequence with sequences seqB and seqC,
using the Needleman-Wunsch (global) algorithm. subsMatrix is the
substitution matrix to use and gap is the linear gap penalty to use. """
lenA, lenB, lenC = [s.length for s in [seqA, seqB, seqC]]
# Create the 3D scoring matrix
traceback = numpy.zeros((lenA+1, lenB+1, lenC+1))
# Fill the first row (in each dimension) with multiples of the gap penalty
S = numpy.zeros((lenA+1, lenB+1, lenC+1))
for i in range(lenA+1):
S[i,0,0] = i * gap
for j in range(lenB+1):
S[0,j,0] = j * gap
for k in range(lenC+1):
S[0,0,k] = k * gap
# Calculate the optimum __getitem__ at each location in the matrix
for i in range(1, lenA+1):
for j in range(1, lenB+1):
for k in range(1, lenC+1):
# Scored using sum-of-pairs
matchABC = S[i-1, j-1, k-1] + subsMatrix.get(seqA[i-1], seqB[j-1]) \
+ subsMatrix.get(seqA[i-1], seqC[k-1]) \
+ subsMatrix.get(seqB[j-1], seqC[k-1])
matchAB = S[i-1, j-1, k] + 2*gap + subsMatrix.get(seqA[i-1], seqB[j-1])
matchBC = S[i, j-1, k-1] + 2*gap + subsMatrix.get(seqB[j-1], seqC[k-1])
matchAC = S[i-1, j, k-1] + 2*gap + subsMatrix.get(seqA[i-1], seqC[k-1])
gapAB = S[i, j, k-1] + 3*gap
gapBC = S[i-1, j, k] + 3*gap
gapAC = S[i, j-1, k] + 3*gap
# Use maximum of the 7 options for this location
S[i, j, k] = max([matchABC, matchAB, matchBC, matchAC, gapAB, gapBC, gapAC])
# Remember which one was max., for the traceback
if S[i, j, k] == matchABC:
traceback[i, j, k] = 0 #"matchABC"
elif S[i, j, k] == matchBC:
traceback[i, j, k] = 1 #"matchBC"
elif S[i, j, k] == matchAC:
traceback[i, j, k] = 2 #"matchAC"
elif S[i, j, k] == matchAB:
traceback[i, j, k] = 3 #"matchAB"
elif S[i, j, k] == gapAB:
traceback[i, j, k] = 4 #"gapAB"
elif S[i, j, k] == gapBC:
traceback[i, j, k] = 5 #"gapBC"
elif S[i, j, k] == gapAC:
traceback[i, j, k] = 6 #"gapAC"
# Traceback the optimal alignment
alignA = ""
alignB = ""
alignC = ""
# Start at the end
i = lenA
j = lenB
k = lenC
# Stop when we hit the end of all but one sequence
while (i>0 and j>0) or (j>0 and k>0) or (i>0 and k>0):
if traceback[i, j, k] == 0: #"matchABC":
alignA = seqA[i-1] + alignA
alignB = seqB[j-1] + alignB
alignC = seqC[k-1] + alignC
i -= 1
j -= 1
k -= 1
elif traceback[i, j, k] == 3: #"matchAB":
alignA = seqA[i-1] + alignA
alignB = seqB[j-1] + alignB
alignC = "-" + alignC
i -= 1
j -= 1
elif traceback[i, j, k] == 2: #"matchAC":
alignA = seqA[i-1] + alignA
alignB = "-" + alignB
alignC = seqC[k-1] + alignC
i -= 1
k -= 1
elif traceback[i, j, k] == 1: #"matchBC":
alignA = "-" + alignA
alignB = seqB[j-1] + alignB
alignC = seqC[k-1] + alignC
j -= 1
k -= 1
elif traceback[i, j, k] == 4: #"gapAB":
alignA = "-" + alignA
alignB = "-" + alignB
alignC = seqC[k-1] + alignC
k -= 1
elif traceback[i, j, k] == 6: #"gapAC":
alignA = "-" + alignA
alignB = seqB[j-1] + alignB
alignC = "-" + alignC
j -= 1
elif traceback[i, j, k] == 5: #"gapBC":
alignA = seqA[i-1] + alignA
alignB = "-" + alignB
alignC = "-" + alignC
i -= 1
# Fill in the rest of the alignment if it begins with gaps
# (i.e., traceback all the way to S[0, 0, 0])
while i > 0:
alignA = seqA[i-1] + alignA
alignB = "-" + alignB
alignC = "-" + alignC
i -= 1
while j > 0:
alignA = "-" + alignA
alignB = seqB[j-1] + alignB
alignC = "-" + alignC
j -= 1
while k > 0:
alignA = "-" + alignA
alignB = "-" + alignB
alignC = seqC[k-1] + alignC
k -= 1
return Alignment([Sequence(alignA, seqA.alphabet, seqA.name, gappy = True),
Sequence(alignB, seqB.alphabet, seqB.name, gappy = True),
Sequence(alignC, seqC.alphabet, seqC.name, gappy = True)])
def readClustal(string, alphabet):
""" Read a ClustalW2 alignment in the given string and return as an
Alignment object. """
seqs = {} # sequence data
for line in string.splitlines():
if line.startswith('CLUSTAL') or line.startswith('STOCKHOLM') \
or line.startswith('#'):
continue
if len(line.strip()) == 0:
continue
if line[0] == ' ' or '*' in line or ':' in line:
continue
sections = line.split()
name, seqstr = sections[0:2]
index = name.find('/')
if index >= 0:
name = name[0:index]
if name in seqs:
seqs[name] += seqstr
else:
seqs[name] = seqstr
sequences = []
for name, seqstr in list(seqs.items()):
sequences.append(Sequence(seqstr, alphabet, name, gappy = True))
return Alignment(sequences)
def readClustalFile(filename, alphabet):
""" Read a ClustalW2 alignment file and return an Alignment object
containing the alignment. """
fh = open(filename)
data = fh.read()
fh.close()
aln = readClustal(data, alphabet)
return aln
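# Illustrative, commented-out usage of the Clustal readers above. The filename is a
# placeholder; Protein_Alphabet is assumed to be defined earlier in this module.
# aln = readClustalFile('example.aln', Protein_Alphabet)
# print(aln)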
# Substitution Matrix ------------------
class SubstMatrix():
scoremat = None
alphabet = None
def __init__(self, alphabet):
self.alphabet = alphabet
self.scoremat = {}
def setScores(self, scoremat):
""" Set all scores in one go.
scoremat is a (sym1, sym2)-keyed dictionary of scores. """
self.scoremat = scoremat
def _getkey(self, sym1, sym2):
""" Construct canonical (unordered) key for two symbols """
if sym1 <= sym2:
return tuple([sym1, sym2])
else:
return tuple([sym2, sym1])
def set(self, sym1, sym2, score):
""" Add a score to the substitution matrix """
self.scoremat[self._getkey(sym1, sym2)] = score
def get(self, sym1, sym2):
return self.scoremat[self._getkey(sym1, sym2)]
def __str__(self):
symbols = self.alphabet.symbols # what symbols are in the alphabet
i = len(symbols)
string = ''
for a in symbols:
string += a + ' '
for b in symbols[:len(symbols)-i+1]:
score = self.scoremat[self._getkey(a, b)]
if score != None:
string += str(score).rjust(3) + ' '
else:
string += "?".rjust(3) + ' '
string += '\n'
i -= 1
string += ' ' + ' '.join(self.alphabet.symbols)
return string
def writeFile(self, filename):
""" Write this substitution matrix to the given file. """
fh = open(filename, 'w')
file = ''
for key in self.scoremat:
file += ''.join(key) + ': ' + str(self.scoremat[key]) + '\n'
fh.write(file)
fh.close()
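# Illustrative, commented-out sketch of building a SubstMatrix by hand. The alphabet
# below is a placeholder; any Alphabet instance defined in this module (for example
# the one behind predefAlphabets['DNA']) could be used instead.
# dna = predefAlphabets['DNA']
# sm = SubstMatrix(dna)
# for a in dna.symbols:
#     for b in dna.symbols:
#         sm.set(a, b, 2 if a == b else -1)   # match +2, mismatch -1
# print(sm.get('A', 'C'))                     # -1; key order does not matter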
def readSubstMatrix(filename, alphabet):
""" Read in the substitution matrix stored in the given file. """
mat = SubstMatrix(alphabet)
fh = open(filename, 'r')
data = fh.read()
fh.close()
lines = data.splitlines()
for line in lines:
if len(line.strip()) == 0:
continue
symbols, score = line.split(':')
score = int(score)
mat.set(symbols[0], symbols[1], score)
return mat
#import os
#os.chdir('/Users/mikael/workspace/binf/data/') # set to the directory where you keep your files
#BLOSUM62 = readSubstMatrix('blosum62.matrix', Protein_Alphabet)
# Motifs -------------------
class Regexp(object):
""" A class that defines a sequence pattern in terms of a
given regular expression, with . indicating any symbol and square brackets
indicating a selection. See standard regexp definitions for more. """
def __init__(self, pattern):
""" Create a new consensus sequence with the given pattern. """
try:
self.pattern = pattern
self.regex = re.compile(pattern)
except:
raise RuntimeError('invalid consensus sequence given: %s' % pattern)
def __str__(self):
return self.pattern
def search(self, sequence):
""" Find matches to the motif in the specified sequence. Returns a list
of triples, of the form (position, matched string, score). Note that
the score is always 1.0 because a consensus sequence either matches
or doesn't. """
if not type(sequence) is Sequence:
sequence = Sequence(sequence)
sequenceString = sequence[:]
results = []
for match in self.regex.finditer(sequenceString):
results.append((match.start(), match.group(), 1.0))
return results
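# Illustrative, commented-out usage of Regexp. The pattern and sequence below are
# placeholders only.
# motif = Regexp('RGD')                     # matches the fixed tripeptide RGD
# seq = Sequence('MKRGDLLAG', Protein_Alphabet, 'demo')
# print(motif.search(seq))                  # e.g. [(2, 'RGD', 1.0)]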
class PWM(object):
""" A position weight matrix. """
def __init__(self, foreground, background = None, start = 0, end = None, pseudo = 0.0):
""" Create a new PWM from the given probability matrix/ces.
foreground: can be either an Alignment, a list of Distrib's or an instance of IndepJoint.
background: must be a Distrib instance or None (in which case a uniform background will be used)
Specify only a section of the matrix to use with start and end. """
if isinstance(foreground, Alignment): #If the foreground variable is type Alignment
foreground = foreground.getProfile(pseudo = pseudo) #Returns IndepJoint
if isinstance(foreground, IndepJoint): #If the foreground variable is type IndepJoint
foreground = foreground.store
#Otherwise, foreground must be list of Distrib's
self.start = start
self.end = end or len(foreground)
self.length = self.end - self.start
self.alphabet = foreground[self.start].alpha
if False in [ col.alpha == self.alphabet for col in foreground[self.start + 1 : self.end] ]:
raise RuntimeError("All positions need to be based on the same alphabet")
self.symbols = self.alphabet.symbols
# Set foreground probabilities from given alignment
self.m = numpy.zeros((len(self.symbols), self.length)) #Matrix is populated in loops below (see additional comment)
self.fg = foreground[self.start:self.end]
self.bg = background or Distrib(self.alphabet, 1.0) # specified background or uniform
if not self.alphabet == self.bg.alpha:
raise RuntimeError("Background needs to use the same alphabet as the foreground")
p = self.bg.prob()
for i in range(self.length):
q = self.fg[i].prob()
for j in range(len(self.alphabet)):
self.m[j][i] = self.logme(q[j], p[j]) #Exercise 1b - this is where the key calculation occurs
def __len__(self):
return self.length
def getRC(self, swap = [('A', 'T'), ('C', 'G')] ):
""" Get the reverse complement of the current PWM.
Use for DNA sequences with default params.
"""
new_fg = self.fg[::-1] # backwards
for s in swap:
new_fg = [d.swapxcopy(s[0], s[1]) for d in new_fg]
return PWM(new_fg, self.bg)
MIN_VALUE = 0.00000000001
def logme(self, fg, bg):
if fg > self.MIN_VALUE and bg > self.MIN_VALUE:
ratio = fg / bg
return math.log(ratio)
# if not, one of fg and bg is practically zero
if fg > self.MIN_VALUE: # bg is zero
return math.log(fg / self.MIN_VALUE)
else: # fg is zero
return math.log(self.MIN_VALUE)
def getMatrix(self):
return self.m
def __str__(self):
str = ''
for j in range(len(self.alphabet)):
str += "%s\t%s\n" % (self.alphabet[j], ' '.join("%+6.2f" % (y) for y in self.m[j]))
return str
def display(self, format = 'COLUMN'):
if format == 'COLUMN':
print((" \t%s" % (' '.join(" %5d" % (i + 1) for i in range(self.length)))))
for j in range(len(self.alphabet)):
print(("%s\t%s" % (self.alphabet[j], ' '.join("%+6.2f" % (y) for y in self.m[j]))))
elif format == 'JASPAR':
for j in range(len(self.alphabet)):
print(("%s\t[%s]" % (self.alphabet[j], ' '.join("%+6.2f" % (y) for y in self.m[j]))))
def search(self, sequence, lowerBound=0):
""" Find matches to the motif in a specified sequence. Returns a list
of results as triples: (position, matched string, score).
The optional argument lowerBound specifies a lower bound on reported
scores. """
results = []
for i in range(len(sequence)-self.length+1):
subseq = sequence[i:i + self.length]
ndxseq = [ self.alphabet.index(sym) for sym in subseq ]
score = 0.0
for w in range(len(ndxseq)):
score += self.m[ ndxseq[w] ][ w ]
if score > lowerBound:
results.append((i, subseq, score))
return results
def maxscore(self, sequence):
""" Find matches to the motif in a specified sequence.
Returns the maximum score found in the sequence and its index as a tuple:
(maxscore, maxindex) """
maxscore = None
maxindex = None
for i in range(len(sequence)-self.length+1):
subseq = sequence[i:i + self.length]
ndxseq = [ self.alphabet.index(sym) for sym in subseq ]
score = 0.0
for w in range(len(ndxseq)):
score += self.m[ ndxseq[w] ][ w ]
if maxscore == None:
maxscore = score
maxindex = i
elif maxscore < score:
maxscore = score
maxindex = i
return (maxscore, maxindex)
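# Illustrative, commented-out sketch: build a PWM from an alignment profile and scan
# a sequence with it. The filename and alphabet are placeholders.
# aln = readClustalFile('motif.aln', predefAlphabets['DNA'])
# pwm = PWM(aln, pseudo = 1.0)              # uniform background by default
# print(pwm)                                # log-odds matrix, one row per symbol
# target = Sequence('ACGTGACGTTAAGCT', predefAlphabets['DNA'], 'target')
# print(pwm.search(target, lowerBound = 0)) # (position, window, score) triples
# print(pwm.maxscore(target))               # (best score, its position)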
# Web Service Functions -------------------
def getSequence(id, database = 'uniprotkb', start=None, end=None):
""" Get the sequence identified by the given ID from the given database
(e.g. 'uniprotkb', 'refseqn' or 'refseqp'), and return it as a Sequence
object. An error is caused if the sequence ID is not found. If start and
end are given, then only that section of the sequence is returned.
Note: more flexible search options are supported by using webservice.fetch
directly."""
MAX_TRY = 5
for i in range(MAX_TRY):
try:
fastaData = fetch(id, database)
seq = readFasta(fastaData)[0]
break
except:
from time import sleep
            print('Failed on {i}th try for id {id}'.format(i=i, id=id))
sleep(0.1)
try:
return Sequence(seq[start:end], seq.alphabet, seq.name, seq.info)
except:
raise RuntimeError('An error occurred while retrieving the specified sequence: %s (maybe the ID doesn\'t exist)' % id)
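# Illustrative, commented-out usage of getSequence. The accession is a placeholder
# and the call requires network access to the EBI fetch service.
# seq = getSequence('P12345', 'uniprotkb')
# print(seq.name, len(seq))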
def searchSequences(query, database='uniprot'):
""" Search for sequences matching the given query in the given database
(must be 'uniprot'), and return a list of sequence IDs. """
ids = search(query, limit = None)
return ids
def runClustal(sequences, method='slow'):
""" Run a ClustalOmega alignment of the given list of Sequence objects.
Return an Alignment object. Method should be one of 'fast' or 'slow'. """
alpha = None
for seq in sequences:
if alpha == None:
alpha = seq.alphabet
elif alpha != seq.alphabet:
raise RuntimeError("Invalid alphabet: " + str(seq.alphabet) + ". Not compatible with " + str(alpha))
serviceName = 'clustalo'
resultType = 'aln-clustal'
fastaSeqs = ''.join([seq.writeFasta() for seq in sequences])
params = {'alignment': method.lower(), 'sequence': fastaSeqs}
service = EBI(serviceName)
result = service.submit(params, resultType)
alignment = readClustal(result, alpha)
return alignment
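# Illustrative, commented-out usage of runClustal. Accessions are placeholders; an
# EBI job is submitted, so this requires network access.
# seqs = [getSequence(acc, 'uniprotkb') for acc in ('P12345', 'Q67890')]
# aln = runClustal(seqs, method = 'slow')
# print(aln)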
def createTree(alignment, type):
""" Run a ClustalW 2 phylogeny tree creation of either a 'Neighbour-joining'
or 'UPGMA' type tree from the given multiple sequence Alignment object. """
if not type in ['Neighbour-joining', 'UPGMA']:
raise RuntimeError('type must be either \'Neighbour-joining\' or \'UPGMA\'.')
serviceName = 'clustalw2_phylogeny'
resultType = 'tree'
output = 'dist'
clustalAln = alignment.writeClustal()
params = {'tree': output, 'sequence': clustalAln, 'clustering': type, 'tossgaps': 'true'}
service = EBI(serviceName)
tree = service.submit(params, resultType)
return tree
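# Illustrative, commented-out usage of createTree, reusing an Alignment such as the
# one produced by runClustal above (network access required).
# tree = createTree(aln, 'UPGMA')
# print(tree)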
def runBLAST(sequence, program='blastp', database='uniprotkb', exp='1e-1'):
""" Run a BLAST search of nucleotide mouse databases using the given
sequence as a query. Return a list of matched sequence IDs, in descending
order of similarity to query sequence.
program: either blastn (nucleotide) or blastp (protein)
database: many available, e.g. uniprotkb, pdb (protein); em_rel, nrnl1 (EMBL nucleotide, non-redundant resp)
(for protein see http://www.ebi.ac.uk/Tools/sss/ncbiblast/help/index-protein.html#database)
(for nucleotide see http://www.ebi.ac.uk/Tools/sss/ncbiblast/help/index-nucleotide.html#database)
exp: E-value threshold (select only hits that have a better E-value than this)
"""
if sequence.alphabet == predefAlphabets['DNA']:
stype = 'dna'
elif sequence.alphabet == predefAlphabets['RNA']:
stype = 'rna'
else:
stype = 'protein'
serviceName = 'ncbiblast'
resultTypes = ['ids', 'out'] # request
fastaSeq = sequence.writeFasta()
databases = [database]
params = {'program': program, 'database': databases, 'sequence': fastaSeq,
'stype': stype, 'exp': exp}
service = EBI(serviceName)
idsData, output = service.submit(params, resultTypes)
ids=[]
for id in idsData.splitlines():
if len(id) > 0:
ids.append(id.split(':')[1])
return ids
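# Illustrative, commented-out usage of runBLAST. The accession is a placeholder; the
# call submits a job to the EBI NCBI-BLAST service, so network access is required.
# query = getSequence('P12345', 'uniprotkb')
# hits = runBLAST(query, program = 'blastp', database = 'uniprotkb', exp = '1e-5')
# print(hits[:10])   # ten best-matching sequence IDs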
authors: ["740807262@qq.com"]
author_id: 740807262@qq.com

blob_id: 7c4ab82bd2bfc79be5fa29cf52330c7658613239
directory_id: ef077253f652a7b2863a8f57a55b27ef9b630510
path: /optimize2points.py
content_id: 520df8395d0062ce4f1a3eb091725c246eeb2a70
detected_licenses: []
license_type: no_license
repo_name: PatrickC05/cmimc-2021
snapshot_id: 2386ef84fa20bee006966fe7f80f586b97a1a262
revision_id: e8cc7c5e22f1885e123390f9f90a7a0d885ff167
branch_name: refs/heads/main
visit_date: 2023-03-04T02:30:19.835142
revision_date: 2021-02-07T22:23:08
committer_date: 2021-02-07T22:23:08
github_id: 334438443
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2278
extension: py
content:
from ast import literal_eval
import numpy as np
INCR = 1
print(INCR)
# edit to the name of the input file
f = open('circlecovers4.txt', 'r')
n = int(f.readline())
points = [f.readline() for _ in range(n)]
m = int(f.readline())
radii = [int(f.readline()) for _ in range(m)]
centers = []
# replace from here to line 18 with your own logic
# points is a list of tuples of the points, and radii is a list of radii
points = [i.strip().split(' ') for i in points]
points = [(int(i[0]), int(i[1])) for i in points]
radii = np.array([int(i) for i in radii])
radii_indices = np.argsort(radii)[::-1]
radii = radii[radii_indices]
inverse = np.empty_like(radii_indices)
inverse[radii_indices] = np.arange(radii_indices.size)
def inCircle(center,coords,r):
return np.linalg.norm(np.array(coords)-np.array(center)) <= r
active_points = points.copy()
def getCenters(point,r):
r2 = r**2
yield point
for i in np.arange(1,r+INCR,INCR):
yield i+point[0],point[1]
yield point[0]-i,point[1]
yield point[0],point[1]+i
yield point[0],point[1]-i
for x in np.arange(1,r+INCR,INCR):
for y in np.arange(x,r+INCR,INCR):
if x**2 + y**2 < r2:
yield x+point[0],y+point[1]
yield point[0]-x,y+point[1]
yield point[0]+x,point[1]-y
yield point[0]-x,point[1]-y
if x!=y:
yield y+point[0],x+point[1]
yield point[0]-y,x+point[1]
yield point[0]+y,point[1]-x
yield point[0]-y,point[1]-x
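# Quick, commented-out sanity check of getCenters: with INCR = 1, a radius-2 circle
# around the origin yields the origin itself, the axis points out to distance 2, and
# the four diagonal lattice points (1,1), (-1,1), (1,-1), (-1,-1) -- i.e. every
# candidate centre on the INCR grid that keeps the point (0, 0) covered.
# print(set(getCenters((0, 0), 2)))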
# Greedy placement: take the circles largest-radius first, and put each circle at
# the candidate centre that covers the most of the still-uncovered points.
for r in radii:
    c_points = {}
    for p in active_points:
        for c in getCenters(p,r):
            # count, for every candidate centre, how many active points it would cover
            try:
                c_points[c] +=1
            except KeyError:
                c_points[c] = 1
    best_center = max(c_points, key=lambda k: c_points[k])  # centre covering the most points
    # print(c_points)
    new_points = [p for p in active_points if not inCircle(best_center,p,r)]
    active_points = new_points.copy()
    centers.append(best_center)
    print(best_center)
print(n-len(active_points))
# change to whatever you want your output file to be called
out = open('output24.txt', 'w')
for t in np.array(centers)[inverse]:
out.write(str(t[0]) + ' '+ str(t[1]))
out.write("\n")
out.close()
authors: ["53089989+PatrickC05@users.noreply.github.com"]
author_id: 53089989+PatrickC05@users.noreply.github.com

blob_id: 09bfcb8ce09e40bfd8fbf0812963f0eb4959c722
directory_id: 35f9ba79f017ad056235a7a33d6220c6ab5c88b5
path: /pylytics/conf/project_template/dimension/store/transform.py
content_id: bee025b370bf694c62b5f05c3c68df55146b3754
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: onefinestay/pylytics
snapshot_id: 1672f79bebe66919548c9f2c3eb447ff7a24107e
revision_id: b6e77e5d9931244efa6120409a4b97cc73efa4c9
branch_name: refs/heads/master
visit_date: 2021-06-08T21:25:06.120510
revision_date: 2016-07-14T16:13:50
committer_date: 2016-07-14T16:13:50
github_id: 9165356
star_events_count: 5
fork_events_count: 4
gha_license_id: NOASSERTION
gha_event_created_at: 2021-03-19T21:47:17
gha_created_at: 2013-04-02T08:04:43
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 163
extension: py
content:
# Define your expansions here.
def convert_str_to_int(data):
""" An example expansion.
"""
data['employees'] = int(data['employees'])
return data
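# Illustrative, commented-out example of the expansion above:
# convert_str_to_int({'employees': '42'})  # -> {'employees': 42}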
authors: ["townsend51@gmail.com"]
author_id: townsend51@gmail.com