| blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 2–616) | content_id (string, length 40) | detected_licenses (list, length 0–69) | license_type (string, 2 classes) | repo_name (string, length 5–118) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, length 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, with nulls) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, length 2–10.3M) | authors (list, length 1) | author_id (string, length 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
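Rows in this shape are easier to handle programmatically than to read flattened. As a rough sketch only (the file name `data.parquet` and the pandas-based workflow are illustrative assumptions, not something this dump specifies), filtering rows by the metadata columns and pulling out the `content` field could look like this:

```python
# Minimal sketch: load one exported shard of this table and inspect the
# Python sources. "data.parquet" is a placeholder file name (assumption).
import pandas as pd

df = pd.read_parquet("data.parquet")  # columns match the header row above

# Keep only permissively licensed, non-vendored, non-generated files.
mask = (df["license_type"] == "permissive") & ~df["is_vendor"] & ~df["is_generated"]
subset = df.loc[mask, ["repo_name", "path", "length_bytes", "content"]]

for _, row in subset.head(3).iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the source file
```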
5396130afb19fb7565ea155641dd2078b0d838e5
|
7431f68767f71ff763e01170f03cb72f22912d9a
|
/ImgProcess/ImgProcess/opencv_src/opencv-1.0.0/tests/python/highgui/cvQueryFrame.py
|
9f4656598dfecf0eb96ef7addaf2bc9521707a66
|
[
"BSD-3-Clause"
] |
permissive
|
panyong198801/omr_C_plus_plus
|
115d7c137510fb76e89e1daa38297493d932d6aa
|
27316625eaa7004b9414348d43f2f7a0167bae75
|
refs/heads/master
| 2021-07-07T03:00:28.147954
| 2017-09-25T05:16:00
| 2017-09-25T05:16:00
| 104,538,415
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,159
|
py
|
#! /usr/bin/env python
"""
This script will test highgui's cvQueryFrame() function
"""
# name of this test and its requirements
TESTNAME = "cvQueryFrame"
REQUIRED = ["cvGrabFrame","cvRetrieveFrame"]
# needed for sys.exit(int) and .works file handling
import sys
import works
# check requirements and delete old flag file, if it exists
if not works.check_files(REQUIRED,TESTNAME):
    sys.exit(77)
# import the necessary things for OpenCV
import opencv
from opencv.highgui import *
from opencv.cv import *
# create a video reader using the tiny video 'vd_uncompressed.avi'
video = cvCaptureFromFile("/home/asbach/Data/video_test/vd_uncompressed.avi")
# call cvQueryFrame for 30 frames and check if the returned image is ok
for k in range(0,30):
    image = cvQueryFrame( video )
    if not isinstance(image, opencv.cv.IplImagePtr):
        # returned image is not a correct IplImage (pointer),
        # so return an error code
        sys.exit(77)
# ATTENTION: We do not release the video reader, window or any image.
# This is bad manners, but Python and OpenCV don't care...
# create flag file for following tests
works.set_file(TESTNAME)
# return 0 ('PASS')
sys.exit(0)
|
[
"py@mteach.cn"
] |
py@mteach.cn
|
f77dfc01a456c527b711da06d60acf67218c7d24
|
1fe46d2df2e4ad13ec92643c715fb8c2aa8b9e60
|
/Bayesian_networks/bn.py
|
8161d9e73dbe57fa9802b2ee2239c427a49a3814
|
[] |
no_license
|
ninikolov/MLAP
|
f05f5c39816362cdee7c398e233a2cc0172e5de8
|
b70f4412fc21c8f2e918192136767142c96fcc00
|
refs/heads/master
| 2020-12-28T20:09:14.721553
| 2015-06-01T07:17:58
| 2015-06-01T07:17:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,355
|
py
|
'''
Created on 25 Feb 2014
Code for MLAP Open Assessment, Part 2.
Bayesian networks.
@author: Y6189686
'''
from collections import OrderedDict
import csv
import itertools
import random
import time
import numpy as np
def read_data_file(input_file):
"""Read a csv data file and produce a numpy ndarray.
0s and 1s are expected.
"""
with open(input_file, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
rows = list(reader)
# Generate empty ndarray, forcing the type to be int
data = np.zeros((len(rows), len(rows[0])), dtype=np.int)
for i in range(0, len(rows)):
data[i] = rows[i]
return data
def read_conditions(structure, index):
"""Compose a list of conditions for a variable,
from a given structure of a BN."""
conditions = []
for item in range(structure.shape[1]):
if structure[item][index] == 1:
conditions.append(item)
return conditions
def print_probability(conditions, probability, var_index):
"""Format variable and probability for display."""
if not conditions: # If variable has no conditions
print "Probability(", var_index, " = 0 ) =", 1 - probability
print "Probability(", var_index, " = 1 ) =", probability
return
condition_str = ""
for condition, bool_value in conditions:
condition_str = condition_str + str(condition) + " = " + str(bool_value) + "; "
condition_str = condition_str[:-2]
print "Probability(", var_index, " = ", 1, "|", condition_str, ") =", probability
print "Probability(", var_index, " = ", 0, "|", condition_str, ") =", 1 - probability
def calculate_conditional_prob(var_index, cond_list, data, alpha=1., beta=1.):
"""Calculate conditional probabilities for a variable with a Bayesian approach.
"""
rows = data.shape[0]
# Output is an ordered dictionary, containing probability for every
# condition combination. OrderedDict can be sorted, which is useful.
output = OrderedDict()
# Generate all combinations for values of conditions
var_combinations = list(itertools.product([0, 1], repeat=len(cond_list)))
for combination in var_combinations:
# Keep track of 0 and 1 occurrences, relevant to the conditional
# probability that's examined.
count_1_occurances = 0
count_0_occurances = 0
# Possible values for combinations.
# Those are tuples with the variable index as first value, and the boolean value as the second.
values = zip(cond_list, combination)
for row in range(rows):
success = True
# Loop over conditions and their boolean value
for condition, bool_value in values:
# If value not as expected, break
if data[row][condition] != bool_value:
success = False
break
if success: # If we've found value that should be counted update our counts.
if data[row][var_index] == 1:
count_1_occurances += 1
else:
count_0_occurances += 1
# Calculate probability from counts
probability = (alpha + float(count_1_occurances)) / (alpha + beta + float(count_1_occurances + count_0_occurances))
output[tuple(values)] = probability
print_probability(values, probability, var_index)
return output
def estimate_parameter(structure, data, index, alpha=1., beta=1.):
"""Estimate parameter at index.
Uses a bayesian approach. Implemented as described in section
9.4 of BRML book (page 199).
    Uses a uniform prior - alpha and beta are 1.
"""
rows = data.shape[0]
conditions = read_conditions(structure, index)
if not conditions: # if target variable has no conditions calculate directly
total = np.sum(data[:, index]) # count 1s at index
prob = (alpha + float(total)) / (alpha + beta + float(rows))
print_probability([], prob, index)
return [prob]
else:
return calculate_conditional_prob(index, conditions, data, alpha, beta)
def sample(prob):
"""Draw a random number and compare it to prob. """
rand = random.random()
if rand < prob:
return 1
return 0
def conditional_sample(var_index, probability_dict, current_dict):
"""Sample conditional probability.
var_index - index of variable we're looking at
probability_dict - OrderedDict containing the estimated probabilities of the variable
current_dict - dictionary of already estimated values for variables"""
output = current_dict
remaining = [] # list monitoring the variables, which haven't been sampled yet
for condition_tuples, probability in probability_dict.items():
found = True
for condition in condition_tuples:
if condition[0] in output: # If we've already estimated the value for this variable
# If the estimated value isn't for the current condition's value
if output[condition[0]] != condition[1]:
found = False
break
else: # If condition hasn't been estimated yet
if not var_index in remaining: # If not in remaining
remaining.append(var_index)
found = False
break
if found: # If we've found the right condition, and it has a value
output[var_index] = sample(probability) # Sample with its probability and add to output
return output, remaining
def ancestral_sampling(network, predefined=None):
"""Implements ancestral sampling as described in slide 8 of the MLAP
Markov Chain Monte Carlo lecture.
predefined variable is used to specify already sampled values of the BN to use"""
# Sort by length
network = dict(sorted(network.iteritems(), key=lambda x: len(x[1])))
if predefined:
output = predefined
else:
output = {}
remaining = []
for key, item in network.items(): # for each pair of variable and probability representation of the BN
if isinstance(item, list): # not a conditional probability
output[key] = sample(item[0])
elif isinstance(item, OrderedDict): # conditional probability
output, missing_conditions = conditional_sample(key, item, output)
remaining = remaining + missing_conditions
if remaining: # Any values not determined? Go over network again.
# Use recursion to estimate values for any remaining variables.
output = dict(output.items() + ancestral_sampling(network, output).items())
return output
def bnbayesfit(structure_file_name, data_file_name):
"""Estimate parameters of bayesian network, defined in structure_file_name
by using data from data_file_name.
data_file_name can also be a numpy ndarray."""
# Structure of BN
structure = read_data_file(structure_file_name)
if not isinstance(data_file_name, np.ndarray):
data = read_data_file(data_file_name)
else: # Data can also be an ndarray, rather than a file.
data = data_file_name
rows, cols = data.shape
print "Data is", rows, "rows", cols, "cols."
# fittedbn is dictionary of structures
fittedbn = {}
for i in range(cols):
fittedbn[i] = estimate_parameter(structure, data, i)
print "Fitted BN:", fittedbn
return fittedbn
def bnsample(fittedbn, nsamples):
"""Generates samples for a fitted bayesian network fittedbn.
Uses ancestral sampling.
nsamples - number of samples to generate"""
output = np.empty((nsamples, len(fittedbn)), dtype=np.int) # generate empty matrix
for i in range(nsamples):
sample_dict = ancestral_sampling(fittedbn)
# Replace row with generated sample
output[i] = np.array([sample_dict[key] for key in sorted(sample_dict.keys())])
print "Generated sample:\n", output
return output
if __name__ == '__main__':
start_time = time.time()
fittedbn = bnbayesfit("bnstruct.csv", "bndata.csv")
bnsample(fittedbn, 10)
elapsed_time = time.time() - start_time
print "Total execution time:", elapsed_time, "seconds."
# EOF
|
[
"Nikola Nikolov"
] |
Nikola Nikolov
|
941be4b8438792abfd124adeb719020767cd20a6
|
3b9d763180410bf0abf5b9c37391a64319efe839
|
/otp/otpbase/OTPLocalizer.py
|
3222f25761caa91c3b918961d47e157f00ac9608
|
[] |
no_license
|
qphoton/Reverse_Engineering_Project_ToonTown
|
442f15d484324be749f6f0e5e4e74fc6436e4e30
|
11468ab449060169191366bc14ff8113ee3beffb
|
refs/heads/master
| 2021-05-08T00:07:09.720166
| 2017-10-21T02:37:22
| 2017-10-21T02:37:22
| 107,617,661
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
# File: O (Python 2.4)
from panda3d.core import *
import string
import types
try:
language = getConfigExpress().GetString('language', 'english')
checkLanguage = getConfigExpress().GetBool('check-language', 0)
except:
language = simbase.config.GetString('language', 'english')
checkLanguage = simbase.config.GetBool('check-language', 0)
def getLanguage():
return language
print 'OTPLocalizer: Running in language: %s' % language
if language == 'english':
_languageModule = 'otp.otpbase.OTPLocalizer' + string.capitalize(language)
else:
checkLanguage = 1
_languageModule = 'otp.otpbase.OTPLocalizer_' + language
print 'from ' + _languageModule + ' import *'
from otp.otpbase.OTPLocalizerEnglish import *
if checkLanguage:
l = { }
g = { }
englishModule = __import__('otp.otpbase.OTPLocalizerEnglish', g, l)
foreignModule = __import__(_languageModule, g, l)
for (key, val) in englishModule.__dict__.items():
if not foreignModule.__dict__.has_key(key):
print 'WARNING: Foreign module: %s missing key: %s' % (_languageModule, key)
locals()[key] = val
continue
if isinstance(val, types.DictType):
fval = foreignModule.__dict__.get(key)
for (dkey, dval) in val.items():
if not fval.has_key(dkey):
print 'WARNING: Foreign module: %s missing key: %s.%s' % (_languageModule, key, dkey)
fval[dkey] = dval
continue
for dkey in fval.keys():
if not val.has_key(dkey):
print 'WARNING: Foreign module: %s extra key: %s.%s' % (_languageModule, key, dkey)
continue
for key in foreignModule.__dict__.keys():
if not englishModule.__dict__.has_key(key):
print 'WARNING: Foreign module: %s extra key: %s' % (_languageModule, key)
continue
|
[
"Infinitywilee@rocketmail.com"
] |
Infinitywilee@rocketmail.com
|
92d6d409cc61eabd9d9d8c101f3019820125f502
|
7da4c8fc694ea88eb2b37bb903e4a694c197eca7
|
/Box_Sync/lib/python3.5/copy.py
|
489e65f88ce3bd88ff8b7591fbb5e19f26c64115
|
[] |
no_license
|
Michael-hsiu/Paper-Planes
|
313f0cd2f57f96a3ff41e1048746389c53a30d58
|
f19eae1984e493d88d81bebfae24d13ba751a3ab
|
refs/heads/master
| 2020-04-05T10:14:38.354357
| 2017-09-01T06:43:45
| 2017-09-01T06:43:45
| 81,372,204
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
/Users/michaelhsiu/anaconda/lib/python3.5/copy.py
|
[
"michael.hsiu@berkeley.edu"
] |
michael.hsiu@berkeley.edu
|
4d37cb255f781336c1466d1edc90cebb7ef0e768
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_249/ch85_2019_06_06_22_44_10_896318.py
|
540d8e6df78c3bf871b8d7fc86bdf1bce34efcbb
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
with open('macacos-me-mordam.txt','r') as arquivo:
    a=arquivo.read()
    b=a.lower()
    c=b.split()
x=0
for i in c:
    if i== 'banana':
        x+=1
|
[
"you@example.com"
] |
you@example.com
|
a8c5acce8b25dcec81f3fe249e37e41278bda7a7
|
8a70f42d3ce1138761bbff76f336bf5ffd338730
|
/array-1/3052.py
|
ea7ad73ec5cb54ceadb147bb06f2c7b08503ee47
|
[] |
no_license
|
sohye-lee/algorithm_study
|
2d72d352b13e464809487ff5ce269676c8ff3e43
|
79648a3f1ee30d77ee7f584c210eedc5f1c17b7b
|
refs/heads/main
| 2023-05-31T01:22:01.835138
| 2021-07-11T01:08:31
| 2021-07-11T01:08:31
| 378,082,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
nums = []
for i in range(10):
    nums.append(int(input()))
def leftover(num):
    return num%42
nums = map(leftover, nums)
print(len(set(nums)))
|
[
"61291530+sohye-lee@users.noreply.github.com"
] |
61291530+sohye-lee@users.noreply.github.com
|
0aac9721680fc55f72df8b2676889cf637aebadd
|
441f9702776b4073a8458108a50386294d8848da
|
/实验心理学/阈限测定实验/python数据处理/恒定刺激法/test2.py
|
499df301a9d9f3b55c3e70bdfddebedd91bcdae5
|
[] |
no_license
|
Lemonononon/Psychology-Learning-In-ZJU
|
17a5c6c7a217f36cfe16f7d8a8848a0913be687f
|
a3373a9d40c0a6c825cd880d4efd81e31bc59c80
|
refs/heads/master
| 2023-05-08T12:34:27.065469
| 2021-06-03T08:32:17
| 2021-06-03T08:32:17
| 232,965,773
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
import numpy as np  # scientific computing library
import scipy as sp  # algorithm library built on top of numpy
import matplotlib.pyplot as plt  # plotting library
from scipy.optimize import leastsq  # least-squares fitting routine
'''
Set up the sample data; real data would be processed here.
'''
# Sample data (Xi, Yi); must be converted to array (list) form
Xi=np.array([6.19,2.51,7.29,7.01,5.7,2.66,3.98,2.5,9.1,4.2])
Yi=np.array([5.25,2.83,6.41,6.71,5.1,4.23,5.05,1.98,10.5,6.3])
'''
Define the fitting function and the residual function.
How the shape of the function is chosen:
1. Plot the sample data first.
2. Pick a functional form from the rough shape of the samples (line, parabola, sine/cosine, etc.).
'''
# Function to fit (func): specifies the shape of the model
def func(p,x):
    k,b=p
    return k*x+b
# Residual function: x and y are lists that correspond one-to-one with Xi and Yi above
def error(p,x,y):
    return func(p,x)-y
'''
Main part, with some notes:
1. leastsq returns a tuple; the first element is the solution, the second is the cost value of the solution (my understanding).
2. The documentation says of the second value: "Value of the cost function at the solution".
3. Example: Para => (array([ 0.61349535, 1.79409255]), 3)
4. The number of entries in the first element of the tuple matches the number of parameters being solved for.
'''
# Initial values of k, b; can be set arbitrarily. A few trials show that p0 affects the cost value Para[1].
p0=[1,20]
# Pack every argument of error() other than p0 into args (required by leastsq)
Para=leastsq(error,p0,args=(Xi,Yi))
# Read out the result
k,b=Para[0]
print("k=",k,"b=",b)
print("cost:"+str(Para[1]))
print("求解的拟合直线为:")
print("y="+str(round(k,2))+"x+"+str(round(b,2)))
'''
Plot to check the fit.
matplotlib does not support Chinese labels by default; using Chinese in label= needs extra setup.
If it raises an error, just switch the labels to English.
'''
# Plot the sample points
plt.figure(figsize=(8,6))  # figure size ratio 8:6
plt.scatter(Xi,Yi,color="green",label="样本数据",linewidth=2)
# Plot the fitted line
x=np.linspace(0,12,100)  # 100 evenly spaced points over 0-12
y=k*x+b  # the fitted function
plt.plot(x,y,color="red",label="拟合直线",linewidth=2)
plt.legend(loc='lower right')  # draw the legend
plt.show()
|
[
"lemonon@zju.edu.cn"
] |
lemonon@zju.edu.cn
|
09dba8b2532852da052af347d2511e9e7e8db3c2
|
ecd2b21b7bb6a47ffc45757021c6916ece3f7f93
|
/K-State/CIS301/Assign-2/q1A.py
|
5d00347e5ec2fb5f69a6268f060e9094f8d529ca
|
[] |
no_license
|
amgregoi/School
|
312bc9449f5584713a475f0e8a64af0805169425
|
effb6b6d0667a19437efd15d15a43cf95fda6ac2
|
refs/heads/master
| 2021-01-17T06:50:39.438326
| 2016-06-23T05:03:40
| 2016-06-23T05:03:40
| 47,589,088
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
# Q1.py
x = readInt()
y = readInt()
if y > x :
    ans = y - x
    """{1.OK y > x premise
    2.OK ans == y-x premise
    3.OK ans > 0 algebra 2 1
    }"""
else :
    """{1.OK not(y>x) premise}"""
    ans = x - y
    """{1.OK not(y>x) premise
    2.OK ans == x-y premise
    3.OK ans >= 0 algebra 2 1
    }"""
# prove ans >= 0
|
[
"amgregoi@ksu-wireless-236-202.campus.ksu.edu"
] |
amgregoi@ksu-wireless-236-202.campus.ksu.edu
|
1c683d90daeadfafe005fbb19d7277ae13143a25
|
79f0c69251edd20f7927a4450d1be8902777f56a
|
/visualize.py
|
dbbaae45b39189e514ebc8c7ebafa854796a956e
|
[] |
no_license
|
Dai7Igarashi/DCGAN
|
887a63211ce71f0e5559e99bbd3d4e7f21b07015
|
502239cb462cd83f3cf2894497aaa6b71bb4615c
|
refs/heads/master
| 2020-03-20T08:37:52.835501
| 2018-06-14T07:24:07
| 2018-06-14T07:24:07
| 137,308,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
# coding: UTF-8
import os
import numpy as np
from PIL import Image
import chainer
def out_generated_image(gen, rows, cols, seed, iteration, xp):
    np.random.seed(seed)
    n_images = rows * cols
    z = chainer.Variable(xp.asarray(gen.make_hidden(n_images)))
    with chainer.using_config('train', False):
        x = gen(z)
    x = chainer.cuda.to_cpu(x.data)
    np.random.seed()
    x = np.asarray(np.clip(x * 255, 0.0, 255.0), dtype=np.uint8)
    _, _, H, W = x.shape
    x = x.reshape((rows, cols, 1, H, W))
    x = x.transpose(0, 3, 1, 4, 2)
    x = x.reshape((rows * H, cols * W))
    preview_dir = 'images/train'
    preview_path = preview_dir + '/image_iteration_{:0>8}.png'.format(iteration)
    if not os.path.exists(preview_dir):
        os.makedirs(preview_dir)
    Image.fromarray(x).save(preview_path)
|
[
"40178733+Dai7Igarashi@users.noreply.github.com"
] |
40178733+Dai7Igarashi@users.noreply.github.com
|
08e80c4ac2d82bc38c4733a226748b5b31e8685b
|
42c25d423438afd39fa4ee79a69877dee6f6e7ce
|
/models/gannet/common.py
|
39bbe425d61d2c0d45e40b01d5bb80de437443f3
|
[
"MIT"
] |
permissive
|
LiuShaohan/AnimeGAN-1
|
0d93bc67444fe635cc276f385a7935d384a3f795
|
1c050777e2f361e98cd53cd4a6373e89a86ffd35
|
refs/heads/master
| 2022-04-23T02:03:40.290782
| 2020-04-26T07:09:36
| 2020-04-26T07:09:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,407
|
py
|
import tensorflow as tf
k = tf.keras
K = tf.keras.backend
kl = tf.keras.layers
from tensorflow.python.ops import nn
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.keras.utils import conv_utils, tf_utils
from tensorflow.python.ops import state_ops
from tensorflow.python.framework import ops
from tensorflow.python.keras.layers.convolutional import Conv
class InstanceNormalization(kl.Layer):
"""Instance Normalization Layer (https://arxiv.org/abs/1607.08022)."""
def __init__(self, epsilon=1e-5):
super(InstanceNormalization, self).__init__()
self.epsilon = epsilon
def build(self, input_shape):
self.scale = self.add_weight(
name='scale',
shape=input_shape[-1:],
initializer=tf.random_normal_initializer(1., 0.02),
trainable=True)
self.offset = self.add_weight(
name='offset',
shape=input_shape[-1:],
initializer='zeros',
trainable=True)
def call(self, x):
mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
inv = tf.math.rsqrt(variance + self.epsilon)
normalized = (x-mean) * inv
return self.scale * normalized + self.offset
def get_config(self):
config = {
'epsilon': self.epsilon,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConvSpectralNormal(Conv):
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
kernel_shape = self.kernel_size + (input_channel, self.filters)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
try:
# Disable variable partitioning when creating the variable
if hasattr(self, '_scope') and self._scope:
partitioner = self._scope.partitioner
self._scope.set_partitioner(None)
else:
partitioner = None
self.u = self.add_weight(
name='sn_u',
shape=(1, tf.reduce_prod(kernel_shape[:-1])),
dtype=self.dtype,
initializer=tf.keras.initializers.ones,
synchronization=tf.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf.VariableAggregation.MEAN)
finally:
if partitioner:
self._scope.set_partitioner(partitioner)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
channel_axis = self._get_channel_axis()
self.input_spec = InputSpec(
ndim=self.rank + 2, axes={channel_axis: input_channel})
self._build_conv_op_input_shape = input_shape
self._build_input_channel = input_channel
self._padding_op = self._get_padding_op()
self._conv_op_data_format = conv_utils.convert_data_format(
self.data_format, self.rank + 2)
self._convolution_op = nn_ops.Convolution(
input_shape,
filter_shape=self.kernel.shape,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self._padding_op,
data_format=self._conv_op_data_format)
self.built = True
def call(self, inputs, training=None):
# Check if the input_shape in call() is different from that in build().
# If they are different, recreate the _convolution_op to avoid the stateful
# behavior.
if training is None:
training = K.learning_phase()
call_input_shape = inputs.get_shape()
recreate_conv_op = (
call_input_shape[1:] != self._build_conv_op_input_shape[1:])
if recreate_conv_op:
self._convolution_op = nn_ops.Convolution(
call_input_shape,
filter_shape=self.kernel.shape,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self._padding_op,
data_format=self._conv_op_data_format)
# Apply causal padding to inputs for Conv1D.
if self.padding == 'causal' and self.__class__.__name__ == 'Conv1D':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
# Update SpectralNormalization variable
u, v, w = self.calc_u(self.kernel)
def u_update():
# TODO u_update maybe need `training control`
return tf_utils.smart_cond(training, lambda: self._assign_new_value(
self.u, u), lambda: array_ops.identity(u))
# NOTE add update must in call function scope
self.add_update(u_update)
sigma = self.calc_sigma(u, v, w)
new_kernel = tf_utils.smart_cond(
training, lambda: self.kernel / sigma, lambda: self.kernel)
outputs = self._convolution_op(inputs, new_kernel)
if self.use_bias:
if self.data_format == 'channels_first':
if self.rank == 1:
# nn.bias_add does not accept a 1D input tensor.
bias = array_ops.reshape(self.bias, (1, self.filters, 1))
outputs += bias
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
if self.activation is not None:
return self.activation(outputs)
return outputs
def calc_u(self, w):
w = K.reshape(w, (-1, w.shape[-1]))
v = K.l2_normalize(K.dot(self.u, w))
u = K.l2_normalize(K.dot(v, K.transpose(w)))
return u, v, w
def calc_sigma(self, u, v, w):
return K.sum(K.dot(K.dot(u, w), K.transpose(v)))
def _assign_new_value(self, variable, value):
with K.name_scope('AssignNewValue') as scope:
with ops.colocate_with(variable):
return state_ops.assign(variable, value, name=scope)
class Conv2DSpectralNormal(ConvSpectralNormal):
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super().__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=tf.keras.activations.get(activation),
use_bias=use_bias,
kernel_initializer=tf.keras.initializers.get(kernel_initializer),
bias_initializer=tf.keras.initializers.get(bias_initializer),
kernel_regularizer=tf.keras.regularizers.get(kernel_regularizer),
bias_regularizer=tf.keras.regularizers.get(bias_regularizer),
activity_regularizer=tf.keras.regularizers.get(activity_regularizer),
kernel_constraint=tf.keras.constraints.get(kernel_constraint),
bias_constraint=tf.keras.constraints.get(bias_constraint),
**kwargs)
class ReflectionPadding2D(kl.ZeroPadding2D):
@staticmethod
def reflect_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = k.backend.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return tf.pad(x, pattern, mode='REFLECT')
def call(self, inputs):
return self.reflect_2d_padding(
inputs, padding=self.padding, data_format=self.data_format)
|
[
"597323109@qq.com"
] |
597323109@qq.com
|
b1f024ff452af5ca6cf657236e16ffd2f19832a1
|
56ca9cbd29bf0bbd545b5857530fbbe8c6ffff95
|
/training_api/gluoncv/auto/estimators/image_classification/image_classification.py
|
3fbb08ba36958ba4a66598e61eb190433995b610
|
[
"Apache-2.0"
] |
permissive
|
hadikoub/BMW-Semantic-Segmentation-Training-GUI
|
b34bf819942dbe20a3a6df2bc44b6435ca3e6754
|
902f35a7e367e635898f687b16a830db892fbaa5
|
refs/heads/main
| 2023-06-13T05:00:55.631277
| 2021-06-30T15:34:26
| 2021-06-30T15:34:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,262
|
py
|
"""Classification Estimator"""
# pylint: disable=unused-variable,bad-whitespace,missing-function-docstring,logging-format-interpolation,arguments-differ,logging-not-lazy
import time
import os
import math
import copy
from PIL import Image
import pandas as pd
import numpy as np
import mxnet as mx
from mxnet import gluon, nd
from mxnet.optimizer import Optimizer
from mxnet import autograd as ag
from mxnet.gluon.data.vision import transforms
from ....data.transforms.presets.imagenet import transform_eval
from ....model_zoo import get_model
from ....utils import LRSequential, LRScheduler
from .... import nn
from .... import loss
from ..base_estimator import BaseEstimator, set_default
from .utils import get_data_loader, get_data_rec, smooth
from .default import ImageClassificationCfg
from ...data.dataset import ImageClassificationDataset
from ..conf import _BEST_CHECKPOINT_FILE
from ..utils import EarlyStopperOnPlateau
__all__ = ['ImageClassificationEstimator']
@set_default(ImageClassificationCfg())
class ImageClassificationEstimator(BaseEstimator):
"""Estimator implementation for Image Classification.
Parameters
----------
config : dict
Config in nested dict.
logger : logging.Logger
Optional logger for this estimator, can be `None` when default setting is used.
reporter : callable
The reporter for metric checkpointing.
net : mx.gluon.Block
The custom network. If defined, the model name in config will be ignored so your
custom network will be used for training rather than pulling it from model zoo.
"""
Dataset = ImageClassificationDataset
def __init__(self, config, logger=None, reporter=None, net=None, optimizer=None):
super(ImageClassificationEstimator, self).__init__(config, logger, reporter=reporter, name=None)
self.last_train = None
self.input_size = self._cfg.train.crop_size
self._feature_net = None
if net is not None:
assert isinstance(net, gluon.Block), f"given custom network {type(net)}, `gluon.Block` expected"
try:
# to avoid cuda initialization error, we keep network copies in cpu
net.collect_params().reset_ctx(mx.cpu())
except ValueError:
pass
self._custom_net = net
if optimizer is not None:
if isinstance(optimizer, str):
pass
else:
assert isinstance(optimizer, Optimizer)
self._optimizer = optimizer
def _fit(self, train_data, val_data, time_limit=math.inf):
tic = time.time()
self._best_acc = 0
self.epoch = 0
self._time_elapsed = 0
if max(self._cfg.train.start_epoch, self.epoch) >= self._cfg.train.epochs:
return {'time': self._time_elapsed}
if not isinstance(train_data, pd.DataFrame):
self.last_train = len(train_data)
else:
self.last_train = train_data
self._init_trainer()
self._time_elapsed += time.time() - tic
return self._resume_fit(train_data, val_data, time_limit=time_limit)
def _resume_fit(self, train_data, val_data, time_limit=math.inf):
tic = time.time()
if max(self._cfg.train.start_epoch, self.epoch) >= self._cfg.train.epochs:
return {'time': self._time_elapsed}
if not self.classes or not self.num_class:
raise ValueError('Unable to determine classes of dataset')
num_workers = self._cfg.train.num_workers
if self._cfg.train.use_rec:
self._logger.info(f'Loading data from rec files: {self._cfg.train.rec_train}/{self._cfg.train.rec_val}')
train_loader, val_loader, self.batch_fn = get_data_rec(self._cfg.train.rec_train,
self._cfg.train.rec_train_idx,
self._cfg.train.rec_val,
self._cfg.train.rec_val_idx,
self.batch_size, num_workers,
self.input_size,
self._cfg.train.crop_ratio)
else:
train_dataset = train_data.to_mxnet()
val_dataset = val_data.to_mxnet()
train_loader, val_loader, self.batch_fn = get_data_loader(self._cfg.train.data_dir,
self.batch_size, num_workers,
self.input_size,
self._cfg.train.crop_ratio,
train_dataset=train_dataset,
val_dataset=val_dataset)
self._time_elapsed += time.time() - tic
return self._train_loop(train_loader, val_loader, time_limit=time_limit)
def _train_loop(self, train_data, val_data, time_limit=math.inf):
start_tic = time.time()
if self._cfg.train.no_wd:
for k, v in self.net.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
if self._cfg.train.label_smoothing or self._cfg.train.mixup:
sparse_label_loss = False
else:
sparse_label_loss = True
if self.distillation:
L = loss.DistillationSoftmaxCrossEntropyLoss(temperature=self._cfg.train.temperature,
hard_weight=self._cfg.train.hard_weight,
sparse_label=sparse_label_loss)
else:
L = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=sparse_label_loss)
if self._cfg.train.mixup:
train_metric = mx.metric.RMSE()
else:
train_metric = mx.metric.Accuracy()
if self._cfg.train.mode == 'hybrid':
self.net.hybridize(static_alloc=True, static_shape=True)
if self.distillation:
self.teacher.hybridize(static_alloc=True, static_shape=True)
self._logger.info('Start training from [Epoch %d]', max(self._cfg.train.start_epoch, self.epoch))
early_stopper = EarlyStopperOnPlateau(
patience=self._cfg.train.early_stop_patience,
min_delta=self._cfg.train.early_stop_min_delta,
baseline_value=self._cfg.train.early_stop_baseline,
max_value=self._cfg.train.early_stop_max_value)
train_metric_score = -1
cp_name = ''
self._time_elapsed += time.time() - start_tic
for self.epoch in range(max(self._cfg.train.start_epoch, self.epoch), self._cfg.train.epochs):
epoch = self.epoch
if self._best_acc >= 1.0:
self._logger.info('[Epoch {}] Early stopping as acc is reaching 1.0'.format(epoch))
break
should_stop, stop_message = early_stopper.get_early_stop_advice()
if should_stop:
self._logger.info('[Epoch {}] '.format(epoch) + stop_message)
break
tic = time.time()
last_tic = time.time()
mx.nd.waitall()
if self._cfg.train.use_rec:
train_data.reset()
train_metric.reset()
# pylint: disable=undefined-loop-variable
for i, batch in enumerate(train_data):
btic = time.time()
if self._time_elapsed > time_limit:
self._logger.warning(f'`time_limit={time_limit}` reached, exit early...')
return {'train_acc': train_metric_score, 'valid_acc': self._best_acc,
'time': self._time_elapsed, 'checkpoint': cp_name}
data, label = self.batch_fn(batch, self.ctx)
if self._cfg.train.mixup:
lam = np.random.beta(self._cfg.train.mixup_alpha,
self._cfg.train.mixup_alpha)
if epoch >= self._cfg.train.epochs - self._cfg.train.mixup_off_epoch:
lam = 1
data = [lam*X + (1-lam)*X[::-1] for X in data]
if self._cfg.train.label_smoothing:
eta = 0.1
else:
eta = 0.0
label = mixup_transform(label, classes, lam, eta)
elif self._cfg.train.label_smoothing:
hard_label = label
label = smooth(label, self.num_class)
if self.distillation:
teacher_prob = [nd.softmax(self.teacher(X.astype(self._cfg.train.dtype, copy=False)) \
/ self._cfg.train.temperature) for X in data]
with ag.record():
outputs = [self.net(X.astype(self._cfg.train.dtype, copy=False)) for X in data]
if self.distillation:
losses = [L(yhat.astype('float32', copy=False),
y.astype('float32', copy=False),
p.astype('float32', copy=False)) \
for yhat, y, p in zip(outputs, label, teacher_prob)]
else:
losses = [L(yhat,
y.astype(self._cfg.train.dtype, copy=False)) for yhat, y in zip(outputs, label)]
for l in losses:
l.backward()
self.trainer.step(self.batch_size)
if self._cfg.train.mixup:
output_softmax = [nd.SoftmaxActivation(out.astype('float32', copy=False)) \
for out in outputs]
train_metric.update(label, output_softmax)
else:
if self._cfg.train.label_smoothing:
train_metric.update(hard_label, outputs)
else:
train_metric.update(label, outputs)
if self._cfg.train.log_interval and not (i+1)%self._cfg.train.log_interval:
train_metric_name, train_metric_score = train_metric.get()
self._logger.info('Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\t%s=%f\tlr=%f',
epoch, i,
self._cfg.train.batch_size*self._cfg.train.log_interval/(time.time()-last_tic),
train_metric_name, train_metric_score, self.trainer.learning_rate)
last_tic = time.time()
self._time_elapsed += time.time() - btic
post_tic = time.time()
train_metric_name, train_metric_score = train_metric.get()
throughput = int(self.batch_size * i /(time.time() - tic))
top1_val, top5_val = self._evaluate(val_data)
early_stopper.update(top1_val)
self._logger.info('[Epoch %d] training: %s=%f', epoch, train_metric_name, train_metric_score)
self._logger.info('[Epoch %d] speed: %d samples/sec\ttime cost: %f', epoch, throughput, time.time()-tic)
self._logger.info('[Epoch %d] validation: top1=%f top5=%f', epoch, top1_val, top5_val)
if top1_val > self._best_acc:
cp_name = os.path.join(self._logdir, _BEST_CHECKPOINT_FILE)
self._logger.info('[Epoch %d] Current best top-1: %f vs previous %f, saved to %s',
self.epoch, top1_val, self._best_acc, cp_name)
self.save(cp_name)
self._best_acc = top1_val
if self._reporter:
self._reporter(epoch=epoch, acc_reward=top1_val)
self._time_elapsed += time.time() - post_tic
return {'train_acc': train_metric_score, 'valid_acc': self._best_acc,
'time': self._time_elapsed, 'checkpoint': cp_name}
def _init_network(self):
if not self.num_class:
raise ValueError('Unable to create network when `num_class` is unknown. \
It should be inferred from dataset or resumed from saved states.')
assert len(self.classes) == self.num_class
# ctx
valid_gpus = []
if self._cfg.gpus:
valid_gpus = self._validate_gpus(self._cfg.gpus)
if not valid_gpus:
self._logger.warning(
'No gpu detected, fallback to cpu. You can ignore this warning if this is intended.')
elif len(valid_gpus) != len(self._cfg.gpus):
self._logger.warning(
f'Loaded on gpu({valid_gpus}), different from gpu({self._cfg.gpus}).')
self.ctx = [mx.gpu(int(i)) for i in valid_gpus]
self.ctx = self.ctx if self.ctx else [mx.cpu()]
# network
if self._custom_net is None:
model_name = self._cfg.img_cls.model.lower()
input_size = self.input_size
if 'inception' in model_name or 'googlenet' in model_name:
self.input_size = 299
elif 'resnest101' in model_name:
self.input_size = 256
elif 'resnest200' in model_name:
self.input_size = 320
elif 'resnest269' in model_name:
self.input_size = 416
elif 'cifar' in model_name:
self.input_size = 28
else:
self._logger.debug('Custom network specified, ignore the model name in config...')
self.net = copy.deepcopy(self._custom_net)
model_name = ''
self.input_size = input_size = self._cfg.train.crop_size
if input_size != self.input_size:
self._logger.info(f'Change input size to {self.input_size}, given model type: {model_name}')
if self._cfg.img_cls.use_pretrained:
kwargs = {'ctx': self.ctx, 'pretrained': True, 'classes': 1000 if 'cifar' not in model_name else 10}
else:
kwargs = {'ctx': self.ctx, 'pretrained': False, 'classes': self.num_class}
if self._cfg.img_cls.use_gn:
kwargs['norm_layer'] = nn.GroupNorm
if model_name.startswith('vgg'):
kwargs['batch_norm'] = self._cfg.img_cls.batch_norm
elif model_name.startswith('resnext'):
kwargs['use_se'] = self._cfg.img_cls.use_se
if self._cfg.img_cls.last_gamma:
kwargs['last_gamma'] = True
if model_name:
self.net = get_model(model_name, **kwargs)
if model_name and self._cfg.img_cls.use_pretrained:
# reset last fully connected layer
fc_layer_found = False
for fc_name in ('output', 'fc'):
fc_layer = getattr(self.net, fc_name, None)
if fc_layer is not None:
fc_layer_found = True
break
if fc_layer_found:
in_channels = list(fc_layer.collect_params().values())[0].shape[1]
if isinstance(fc_layer, gluon.nn.Dense):
new_fc_layer = gluon.nn.Dense(self.num_class, in_units=in_channels)
elif isinstance(fc_layer, gluon.nn.Conv2D):
new_fc_layer = gluon.nn.Conv2D(self.num_class, in_channels=in_channels, kernel_size=1)
elif isinstance(fc_layer, gluon.nn.HybridSequential):
new_fc_layer = gluon.nn.HybridSequential(prefix='output_')
with new_fc_layer.name_scope():
new_fc_layer.add(gluon.nn.Conv2D(self.num_class, in_channels=in_channels, kernel_size=1))
new_fc_layer.add(gluon.nn.Flatten())
else:
raise TypeError(f'Invalid FC layer type {type(fc_layer)} found, expected (Conv2D, Dense)...')
new_fc_layer.initialize(mx.init.MSRAPrelu(), ctx=self.ctx)
self.net.collect_params().setattr('lr_mult', self._cfg.train.transfer_lr_mult)
new_fc_layer.collect_params().setattr('lr_mult', self._cfg.train.output_lr_mult)
self._logger.debug(f'Reduce network lr multiplier to {self._cfg.train.transfer_lr_mult}, while keep ' +
f'last FC layer lr_mult to {self._cfg.train.output_lr_mult}')
setattr(self.net, fc_name, new_fc_layer)
else:
raise RuntimeError('Unable to modify the last fc layer in network, (output, fc) expected...')
else:
self.net.initialize(mx.init.MSRAPrelu(), ctx=self.ctx)
self.net.cast(self._cfg.train.dtype)
# teacher model for distillation training
if self._cfg.train.teacher is not None and self._cfg.train.hard_weight < 1.0 and self.num_class == 1000:
teacher_name = self._cfg.train.teacher
self.teacher = get_model(teacher_name, pretrained=True, classes=self.num_class, ctx=self.ctx)
self.teacher.cast(self._cfg.train.dtype)
self.teacher.collect_params().reset_ctx(self.ctx)
self.distillation = True
else:
self.distillation = False
self.net.collect_params().reset_ctx(self.ctx)
def _init_trainer(self):
if self.last_train is None:
raise RuntimeError('Cannot init trainer without knowing the size of training data')
if isinstance(self.last_train, pd.DataFrame):
train_size = len(self.last_train)
elif isinstance(self.last_train, int):
train_size = self.last_train
else:
raise ValueError("Unknown type of self.last_train: {}".format(type(self.last_train)))
num_gpus = len(self.ctx)
batch_size = self._cfg.train.batch_size
self.batch_size = batch_size
lr_decay = self._cfg.train.lr_decay
lr_decay_period = self._cfg.train.lr_decay_period
if self._cfg.train.lr_decay_period > 0:
lr_decay_epoch = list(range(lr_decay_period, self._cfg.train.epochs, lr_decay_period))
else:
lr_decay_epoch = [int(i) for i in self._cfg.train.lr_decay_epoch.split(',')]
lr_decay_epoch = [e - self._cfg.train.warmup_epochs for e in lr_decay_epoch]
num_batches = train_size // batch_size
lr_scheduler = LRSequential([
LRScheduler('linear', base_lr=0, target_lr=self._cfg.train.lr,
nepochs=self._cfg.train.warmup_epochs, iters_per_epoch=num_batches),
LRScheduler(self._cfg.train.lr_mode, base_lr=self._cfg.train.lr, target_lr=0,
nepochs=self._cfg.train.epochs - self._cfg.train.warmup_epochs,
iters_per_epoch=num_batches,
step_epoch=lr_decay_epoch,
step_factor=lr_decay, power=2)
])
if self._optimizer is None:
optimizer = 'nag'
optimizer_params = {'wd': self._cfg.train.wd,
'momentum': self._cfg.train.momentum,
'lr_scheduler': lr_scheduler}
if self._cfg.train.dtype != 'float32':
optimizer_params['multi_precision'] = True
self.trainer = gluon.Trainer(self.net.collect_params(), optimizer, optimizer_params)
else:
optimizer = self._optimizer
if isinstance(optimizer, str):
try:
optimizer = mx.optimizer.create(optimizer, learning_rate=self._cfg.train.lr)
except TypeError:
optimizer = mx.optimizer.create(optimizer)
self.trainer = gluon.Trainer(self.net.collect_params(), optimizer)
def _evaluate(self, val_data):
"""Test on validation dataset."""
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(min(5, self.num_class))
if not isinstance(val_data, (gluon.data.DataLoader, mx.io.MXDataIter)):
if hasattr(val_data, 'to_mxnet'):
val_data = val_data.to_mxnet()
resize = int(math.ceil(self.input_size / self._cfg.train.crop_ratio))
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transform_test = transforms.Compose([
transforms.Resize(resize, keep_ratio=True),
transforms.CenterCrop(self.input_size),
transforms.ToTensor(),
normalize
])
val_data = gluon.data.DataLoader(
val_data.transform_first(transform_test),
batch_size=self._cfg.valid.batch_size, shuffle=False, last_batch='keep',
num_workers=self._cfg.valid.num_workers)
for _, batch in enumerate(val_data):
data, label = self.batch_fn(batch, self.ctx)
outputs = [self.net(X.astype(self._cfg.train.dtype, copy=False)) for X in data]
acc_top1.update(label, outputs)
acc_top5.update(label, outputs)
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
return top1, top5
def _predict_preprocess(self, x):
resize = int(math.ceil(self.input_size / self._cfg.train.crop_ratio))
if isinstance(x, str):
x = self._predict_preprocess(transform_eval(
mx.image.imread(x), resize_short=resize, crop_size=self.input_size))
elif isinstance(x, Image.Image):
x = self._predict_preprocess(np.array(x))
elif isinstance(x, np.ndarray):
x = self._predict_preprocess(mx.nd.array(x))
elif isinstance(x, mx.nd.NDArray):
if len(x.shape) == 3 and x.shape[-1] == 3:
x = transform_eval(x, resize_short=resize, crop_size=self.input_size)
elif len(x.shape) == 4 and x.shape[1] == 3:
expected = (self.input_size, self.input_size)
assert x.shape[2:] == expected, "Expected: {}, given {}".format(expected, x.shape[2:])
elif x.shape[1] == 1:
# gray image to rgb
x = mx.nd.concat([x] * 3, dim=1)
else:
raise ValueError('array input with shape (h, w, 3) or (n, 3, h, w) is required for predict')
return x
def _predict(self, x, ctx_id=0, with_proba=False):
if with_proba:
return self._predict_proba(x, ctx_id=ctx_id)
x = self._predict_preprocess(x)
if isinstance(x, pd.DataFrame):
assert 'image' in x.columns, "Expect column `image` for input images"
df = self._predict(tuple(x['image']))
return df.reset_index(drop=True)
elif isinstance(x, (list, tuple)):
bs = self._cfg.valid.batch_size
self.net.hybridize()
results = []
topK = min(5, self.num_class)
loader = mx.gluon.data.DataLoader(
ImageListDataset(x, self._predict_preprocess), batch_size=bs, last_batch='keep')
idx = 0
for batch in loader:
batch = mx.gluon.utils.split_and_load(batch, ctx_list=self.ctx, even_split=False)
pred = [self.net(input) for input in batch]
for p in pred:
for ii in range(p.shape[0]):
ind = nd.topk(p[ii], k=topK).astype('int').asnumpy().flatten()
probs = mx.nd.softmax(p[ii]).asnumpy().flatten()
for k in range(topK):
results.append({'class': self.classes[ind[k]],
'score': probs[ind[k]], 'id': ind[k], 'image': x[idx]})
idx += 1
return pd.DataFrame(results)
elif not isinstance(x, mx.nd.NDArray):
raise ValueError('Input is not supported: {}'.format(type(x)))
assert len(x.shape) == 4 and x.shape[1] == 3, "Expect input to be (n, 3, h, w), given {}".format(x.shape)
x = x.as_in_context(self.ctx[ctx_id])
pred = self.net(x)
topK = min(5, self.num_class)
ind = nd.topk(pred, k=topK)[0].astype('int').asnumpy().flatten()
probs = mx.nd.softmax(pred)[0].asnumpy().flatten()
df = pd.DataFrame([{'class': self.classes[ind[i]], 'score': probs[ind[i]], 'id': ind[i]} for i in range(topK)])
return df
def _get_feature_net(self):
"""Get the network slice for feature extraction only"""
if hasattr(self, '_feature_net') and self._feature_net is not None:
return self._feature_net
self._feature_net = copy.copy(self.net)
fc_layer_found = False
for fc_name in ('output', 'fc'):
fc_layer = getattr(self._feature_net, fc_name, None)
if fc_layer is not None:
fc_layer_found = True
break
if fc_layer_found:
self._feature_net.register_child(nn.Identity(), fc_name)
super(gluon.Block, self._feature_net).__setattr__(fc_name, nn.Identity())
self.net.__setattr__(fc_name, fc_layer)
else:
raise RuntimeError('Unable to modify the last fc layer in network, (output, fc) expected...')
return self._feature_net
def _predict_feature(self, x, ctx_id=0):
x = self._predict_preprocess(x)
if isinstance(x, pd.DataFrame):
assert 'image' in x.columns, "Expect column `image` for input images"
df = self._predict_feature(tuple(x['image']))
df = df.set_index(x.index)
df['image'] = x['image']
return df
elif isinstance(x, (list, tuple)):
assert isinstance(x[0], str), "expect image paths in list/tuple input"
bs = self._cfg.valid.batch_size
feat_net = self._get_feature_net()
feat_net.hybridize()
results = []
loader = mx.gluon.data.DataLoader(
ImageListDataset(x, self._predict_preprocess), batch_size=bs, last_batch='keep')
for batch in loader:
batch = mx.gluon.utils.split_and_load(batch, ctx_list=self.ctx, even_split=False)
feats = [feat_net(input) for input in batch]
for p in feats:
for ii in range(p.shape[0]):
feat = p[ii].asnumpy().flatten()
results.append({'image_feature': feat})
df = pd.DataFrame(results)
df['image'] = x
return df
elif not isinstance(x, mx.nd.NDArray):
raise ValueError('Input is not supported: {}'.format(type(x)))
assert len(x.shape) == 4 and x.shape[1] == 3, "Expect input to be (n, 3, h, w), given {}".format(x.shape)
x = x.as_in_context(self.ctx[ctx_id])
feat_net = self._get_feature_net()
results = []
for ii in range(x.shape[0]):
feat = feat_net(x)[ii].asnumpy().flatten()
results.append({'image_feature': feat})
df = pd.DataFrame(results)
return df
def _predict_proba(self, x, ctx_id=0):
x = self._predict_preprocess(x)
if isinstance(x, pd.DataFrame):
assert 'image' in x.columns, "Expect column `image` for input images"
df = self._predict_proba(tuple(x['image']))
return df.reset_index(drop=True)
elif isinstance(x, (list, tuple)):
bs = self._cfg.valid.batch_size
self.net.hybridize()
results = []
loader = mx.gluon.data.DataLoader(
ImageListDataset(x, self._predict_preprocess), batch_size=bs, last_batch='keep')
idx = 0
for batch in loader:
batch = mx.gluon.utils.split_and_load(batch, ctx_list=self.ctx, even_split=False)
pred = [self.net(input) for input in batch]
for p in pred:
probs = mx.nd.softmax(p, axis=-1)
for ii in range(p.shape[0]):
prob = probs[ii]
results.append({'image_proba': prob.asnumpy().flatten().tolist(), 'image': x[idx]})
idx += 1
return pd.DataFrame(results)
elif not isinstance(x, mx.nd.NDArray):
raise ValueError('Input is not supported: {}'.format(type(x)))
assert len(x.shape) == 4 and x.shape[1] == 3, "Expect input to be (n, 3, h, w), given {}".format(x.shape)
x = x.as_in_context(self.ctx[ctx_id])
pred = self.net(x)
probs = mx.nd.softmax(pred)[0].asnumpy().flatten().tolist()
df = pd.DataFrame([{'image_proba': probs}])
return df
class ImageListDataset(mx.gluon.data.Dataset):
"""An internal image list dataset for batch predict"""
def __init__(self, imlist, fn):
self._imlist = imlist
self._fn = fn
def __getitem__(self, idx):
img = self._fn(self._imlist[idx])[0]
return img
def __len__(self):
return len(self._imlist)
|
[
"123.hadikoubeissy@gmail.com"
] |
123.hadikoubeissy@gmail.com
|
76485426bb9446a75974abee66c0f5c0901297b7
|
41abc6daadc12dc0f28ece3a7ff885c3fa6bb1d6
|
/total_crescente.py
|
fbde3ba973d2f9ef163fda35c1a070eb078d7443
|
[
"MIT"
] |
permissive
|
fabiobarretopro/Aprendendo-Python
|
fab0f6c08a03b6fda08dcb4df7419c1f1bdd7bb4
|
a47acf6b9fdfdad55853e620db451a6a2e61bc6f
|
refs/heads/main
| 2023-03-01T04:09:05.382774
| 2021-01-29T22:22:36
| 2021-01-29T22:22:36
| 321,541,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
resp = "S"
lista = []
totnum = 0
while resp == "S":
    num = int(input("Digite um valor: "))
    totnum = totnum + 1
    lista.append(num)
    resp = str(input("Quer continuar? [S/N] ")).upper().strip()
    if resp == "N":
        break
ordem = sorted(lista)
desc = ordem[::-1]
print(f"Foram digitados {totnum} números.\nA lista na ordem decrescente: {desc}")
if 5 in lista: print(f"O valor 5 está na lista.")
else: print("O valor 5 não está na lista.")
|
[
"romaobarreto@hotmail.com"
] |
romaobarreto@hotmail.com
|
6bb7779dafd0960ae37c5e18da495cc118513082
|
f5c9a7881d5816af16dc8165958af3ddaaf6e54d
|
/keyboard.py
|
f33c2def1265b9cad0dc5f69230af03b2e3dc184
|
[] |
no_license
|
kishkoltz/Pygame-projects
|
cd039a74cb30d4629a24a31edc9c5d80a648e971
|
4f45bcb50ebdb9c6787e3d61b9e2f3a627a07cad
|
refs/heads/master
| 2020-03-07T07:49:47.083219
| 2018-04-06T23:28:42
| 2018-04-06T23:28:42
| 127,359,788
| 0
| 0
| null | 2018-04-05T02:16:08
| 2018-03-30T00:04:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,025
|
py
|
# E:\ppython\app\python.exe keyboard.py
import pygame, sys, random
from pygame.locals import *
pygame.init()
mainClock = pygame.time.Clock()
WINDOWWIDTH = 400
WINDOWHEIGHT = 400
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
pygame.display.set_caption('Input')
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
WHITE = (255, 255, 255)
foodCounter = 0
NEWFOOD = 40
FOODSIZE = 20
player = pygame.Rect(300, 100, 50, 50)
foods = []
for i in range(20):
foods.append(pygame.Rect(random.randint(0, WINDOWWIDTH - FOODSIZE),
random.randint(0, WINDOWHEIGHT - FOODSIZE), FOODSIZE, FOODSIZE))
moveLeft = False
moveRight = False
moveUp = False
moveDown = False
MOVESPEED = 6
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_LEFT or event.key == ord('a'):
moveRight = False
moveLeft = True
if event.key == K_RIGHT or event.key == ord('d'):
moveLeft = False
moveRight = True
if event.key == K_UP or event.key == ord('w'):
moveDown = False
moveUp = True
if event.key == K_DOWN or event.key == ord('s'):
moveUp = False
moveDown = True
if event.type == KEYUP:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == K_LEFT or event.key == ord('a'):
moveLeft = False
if event.key == K_RIGHT or event.key == ord('d'):
moveRight = False
if event.key == K_UP or event.key == ord('w'):
moveUp = False
if event.key == K_DOWN or event.key == ord('s'):
moveDown = False
if event.key == ord('x'):
player.top = random.randint(0, WINDOWHEIGHT - player.height)
player.left = random.randint(0, WINDOWWIDTH - player.width)
if event.type == MOUSEBUTTONUP:
foods.append(pygame.Rect(event.pos[0], event.pos[1], FOODSIZE, FOODSIZE))
foodCounter += 1
if foodCounter >= NEWFOOD:
foodCounter = 0
foods.append(pygame.Rect(random.randint(0, WINDOWWIDTH - FOODSIZE),
random.randint(0, WINDOWHEIGHT - FOODSIZE), FOODSIZE, FOODSIZE))
windowSurface.fill(BLACK)
if moveDown and player.bottom < WINDOWHEIGHT:
player.top += MOVESPEED
if moveUp and player.top > 0:
player.top -= MOVESPEED
if moveLeft and player.left > 0:
player.left -= MOVESPEED
if moveRight and player.right < WINDOWWIDTH:
player.right += MOVESPEED
pygame.draw.rect(windowSurface, WHITE, player)
for food in foods[:]:
if player.colliderect(food):
foods.remove(food)
for i in range(len(foods)):
pygame.draw.rect(windowSurface, GREEN, foods[i])
pygame.display.update()
mainClock.tick(40)
|
[
"AB64420@savvis.ad.savvis.net"
] |
AB64420@savvis.ad.savvis.net
|
d4cf6e769295f8c41c5952664bf2190929cee3ed
|
d4c47276c8fbd15240aa228eda04ee8e338caf02
|
/Python/Python Lesson/First/Lesson10/Sample7.py
|
4e62f97f74dd3eab8feef9e103efde9cdc40f7f6
|
[] |
no_license
|
developer579/Practice
|
a745384450172fb327913c130303ab76492096f1
|
54084468af83afcc44530e757800c8c3678147c1
|
refs/heads/main
| 2023-05-06T01:36:06.222554
| 2021-06-02T07:04:03
| 2021-06-02T07:04:03
| 324,312,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
import os
import os.path
curdir = os.listdir(".")
for name in curdir:
    print(os.path.abspath(name),end="")
    if(os.path.isfile(name)):
        print("ファイルです。")
    else:
        print("ディレクトリです。")
    print()
|
[
"69954570+developer579@users.noreply.github.com"
] |
69954570+developer579@users.noreply.github.com
|
c5f6abc35b9a7866479d761fd1cbbe9b175d886c
|
2db9dc919c1018fd968920a230dd17731aada1f6
|
/setting.py
|
bf552986bb988d24f68f216d0f8a488e5fd5e002
|
[] |
no_license
|
Helo250/ops-audit
|
d6efc0cbcfbad4b8e434ce20575afa2762622117
|
71a6d0f49e2bbd42e26aede9f18ecb51d9be702d
|
refs/heads/master
| 2022-12-09T12:14:01.811694
| 2019-11-07T06:25:45
| 2019-11-07T06:25:45
| 220,165,686
| 0
| 0
| null | 2022-12-08T06:56:20
| 2019-11-07T06:20:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,218
|
py
|
# -*- coding: utf-8 -*-
# Filename: setting
# Author: brayton
# Datetime: 2019-Oct-12 10:51 AM
import os
import yaml
import dill
import datetime
PROJECT_ROOT = os.path.dirname(__file__)
SERVICE_NAME = 'cmdb-audit-service'
SERVICE_ID = 'cmdb-audit-service01'
SERVICE_HOST = os.environ.get('SERVER_HOST', 'localhost')
SERVICE_PORT = os.environ.get('SERVER_PORT', 8050)
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
SECRET_KEY = 'XXXXXXXXXXXXXXXX'
DB_CONFIG = {
'host': os.environ.get('POSTGRES_SERVICE_HOST', 'localhost'),
'user': os.environ.get('POSTGRES_SERVICE_USER', 'brayton'),
'password': os.environ.get('POSTGRES_SERVICE_PASSWORD', 'wang1234'),
'port': os.environ.get('POSTGRES_SERVICE_PORT', 5432),
'database': os.environ.get('POSTGRES_SERVICE_DB_NAME', 'cmdb_audit')
}
SWAGGER = {
'version': '1.0.0',
'title': 'CMDB AUDIT API',
'description': 'CMDB AUDIT API',
'terms_of_service': 'Use with caution!',
'termsOfService': ['application/json'],
'contact_email': 'shenwei@huored.cn'
}
JWT_AUTH = {
'SECRET_KEY': 'y_3$q&g8h=(v@w@2dyu33z%xa2^e%)^h314z47_fvw8ii)6coo',
'GET_USER_SECRET_KEY': None,
'PUBLIC_KEY': None,
'PRIVATE_KEY': None,
'ALGORITHM': 'HS256',
'VERIFY': True,
'VERIFY_EXPIRATION': True,
'LEEWAY': 0,
'EXPIRATION_DELTA': datetime.timedelta(seconds=30000),
'AUDIENCE': None,
'ISSUER': None,
'ALLOW_REFRESH': True,
'REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7),
'AUTH_HEADER_PREFIX': 'JWT',
'AUTH_COOKIE': None,
}
with open(os.path.join(os.path.dirname(__file__), 'logging.yml'), 'r') as logging:
    LOGGING_CONFIG = yaml.safe_load(logging)
ZIPKIN_SERVER = os.environ.get('ZIPKIN_SERVER', None)
ACCESS_CONTROL_ALLOW_ORIGIN = os.environ.get("ACCESS_CONTROL_ALLOW_ORIGIN", "")
ACCESS_CONTROL_ALLOW_HEADERS = os.environ.get("ACCESS_CONTROL_ALLOW_HEADERS", "")
ACCESS_CONTROL_ALLOW_METHODS = os.environ.get("ACCESS_CONTROL_ALLOW_METHODS", "")
CONSUL_ENABLED = False
CONSUL_AGENT_HOST = os.environ.get('CONSUL_AGENT_HOST', '127.0.0.1')
CONSUL_AGENT_PORT = os.environ.get('CONSUL_AGENT_PORT', 8500)
SERVICE_WATCH_INTERVAL = 60
KAFKA_SERIALIZER = dill.dumps
KAFKA_DESERIALIZER = dill.loads
|
[
"shenwei@huored.com"
] |
shenwei@huored.com
|
874a33c9d182ae552ade2e38239d1c022226c5c7
|
675cac8e63c6427e96cca90e5a9406df5a5b59d1
|
/ex18.py
|
4d7b192ec5f61928716557ede1d07bb41108c7eb
|
[] |
no_license
|
agranado2k/learn_python_hard_way
|
a0b3f9ac46cd9af66d14b3c8efc78ad7ef17e689
|
771ed2aea8f25a77b36e69b0d6323436cec944ad
|
refs/heads/master
| 2020-04-05T16:09:49.235040
| 2018-11-14T23:04:27
| 2018-11-14T23:04:27
| 156,999,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
# this one is like your scripts with argv
def print_two(*args):
    arg1, arg2 = args
    print "arg1: %r, arg2: %r" % (arg1, arg2)
# ok, that *args is actually pointless, we can just do this
def print_two_again(arg1, arg2):
    print "arg1: %r, arg2: %r" % (arg1, arg2)
# this just takes one argument
def print_one(arg1):
    print "arg1: %r" % arg1
# this one takes no arguments
def print_none():
    print "I got nothin'."
print_two('Zed', 'Shaw')
print_two_again('Zed', 'Shaw')
print_one('First!')
print_none()
|
[
"agranado2k@gmail.com"
] |
agranado2k@gmail.com
|
1c86d27e38edb36ba9212a3f592e493248fee0d9
|
be61a9f30274514857ea34297719157f1e5b8447
|
/fhir/resources/DSTU2/bodysite.py
|
8d3a68655c6a3d01d5742057013718e73ddc7690
|
[
"BSD-3-Clause"
] |
permissive
|
jwygoda/fhir.resources
|
ceff3a620100d2e875136b86d3e82816c0e60a33
|
5053565570d1ca992d9971d20db813c53fd350b9
|
refs/heads/master
| 2021-02-05T02:59:17.436485
| 2019-07-18T10:57:33
| 2019-07-18T10:57:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,582
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/BodySite) on 2019-05-14.
# 2019, SMART Health IT.
from . import domainresource
class BodySite(domainresource.DomainResource):
""" Specific and identified anatomical location.
Record details about the anatomical location of a specimen or body part.
This resource may be used when a coded concept does not provide the
necessary detail needed for the use case.
"""
resource_name = "BodySite"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" Named anatomical location.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.description = None
""" The Description of anatomical location.
Type `str`. """
self.identifier = None
""" Bodysite identifier.
List of `Identifier` items (represented as `dict` in JSON). """
self.image = None
""" Attached images.
List of `Attachment` items (represented as `dict` in JSON). """
self.modifier = None
""" Modification to location code.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.patient = None
""" Patient.
Type `FHIRReference` referencing `Patient` (represented as `dict` in JSON). """
super(BodySite, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(BodySite, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("description", "description", str, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("image", "image", attachment.Attachment, True, None, False),
("modifier", "modifier", codeableconcept.CodeableConcept, True, None, False),
("patient", "patient", fhirreference.FHIRReference, False, None, True),
])
return js
from . import attachment
from . import codeableconcept
from . import fhirreference
from . import identifier
|
[
"connect2nazrul@gmail.com"
] |
connect2nazrul@gmail.com
|
6a6fb7d10791e87785ef4b04d292d754cac96e74
|
12a012ace19a14fc2c4ce2daec7a9df94cd5d925
|
/숫자 짝꿍.py
|
55b4dcb72a2af3bc5d96acd7b0e4367906372cfb
|
[] |
no_license
|
newfull5/Programmers
|
a0a25fd72c0a8a7932122cb72e65b28ecd29ff71
|
b880a8043427f6aa7dc72caa3e46b1f6584a8962
|
refs/heads/master
| 2022-12-28T13:46:52.215347
| 2022-12-12T13:50:53
| 2022-12-12T13:50:53
| 211,209,943
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
from collections import Counter
def solution(X, Y):
answer = ''
X = Counter(X)
Y = Counter(Y)
y_keys = Y.keys()
for k, v in X.items():
if k in y_keys:
answer += k*min(X[k], Y[k])
if not answer:
return '-1'
if list(set(answer)) == ['0']:
return '0'
return ''.join(sorted(answer, reverse=True))
|
[
"noreply@github.com"
] |
newfull5.noreply@github.com
|
69f4d59b29de8bc418e9078ce321a4b3680987dc
|
0642e86f639f3a14ccf0029cc07ce07ae43ed14e
|
/dashboard/migrations/0003_logbydate.py
|
0f414444b3ffa2a1aa2c314da94e1ec9f14f2b36
|
[] |
no_license
|
towfiq001/mylogmonitor
|
ff88ff1a72717ceeeb9b3cba2cc0411aa9d9a1c5
|
bd5409d69e80ab28face488e18a0baaeb03ad69e
|
refs/heads/main
| 2023-07-17T14:58:58.768491
| 2021-08-29T09:48:07
| 2021-08-29T09:48:07
| 400,973,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 715
|
py
|
# Generated by Django 3.2.4 on 2021-08-28 19:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0002_loghistory'),
]
operations = [
migrations.CreateModel(
name='LogByDate',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('category', models.CharField(max_length=100)),
('message', models.CharField(max_length=1000)),
],
options={
'ordering': ['date', 'category'],
},
),
]
|
[
"trtonoy@yahoo.com"
] |
trtonoy@yahoo.com
|
c58765992933303158855e4c04170fd68415b82d
|
37b889b51e96e234289d5f585841ae02eb256d58
|
/tictactoe.py
|
25454430043f0d9aa00112a1b886e9ca3ca1f803
|
[] |
no_license
|
aambrioso1/Generalized-Tictactoe
|
7306a9342ba82889eead71e64ef0389ff8dcd6e4
|
e2aa605763893b68ec5dbdc0bccaef332001afb6
|
refs/heads/master
| 2020-05-18T08:45:04.211587
| 2020-03-25T02:41:51
| 2020-03-25T02:41:51
| 184,304,117
| 2
| 0
| null | 2019-05-05T17:49:59
| 2019-04-30T17:28:28
|
Python
|
UTF-8
|
Python
| false
| false
| 4,368
|
py
|
"""
Tic-Tac-Toe Program based on a program found here:
https://www.geeksforgeeks.org/python-implementation-automatic-tic-tac-toe-game-using-random-number/
The size of the board is arbitrary and controlled by the size variable.
The user can input moves based on the move dictionary: { 1: (0,0), 2: (0, 1), ...}.
Players are:
1 for X (moves first)
2 for O (moves second)
Outcomes are indicated as follows:
Winner is: 1
Winner is: 2
Winner is: -1 if it's a draw (cat's game)
Has nice input checking in the make_a_move function using the try statement and exception handling.
If the function make_a_move(board, player) is commented out and random_place(board, player) is put back in, the CPU will play both sides with random moves.
"""
# importing all necessary libraries
import numpy as np
import random
from time import sleep # I commented the use of time.sleep out. May need it for CPU play.
# Determines the size of the board (size x size)
size = 15
# Creates an empty board of zeros with dimensions size x size
def create_board():
return(np.zeros((size,size), dtype=np.int))
"""
return(np.array([[0, 0, 0,0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]))
"""
# Check for empty places on board
def possibilities(board):
l = []
for i in range(len(board)):
for j in range(len(board)):
if board[i][j] == 0:
l.append((i, j))
return(l)
# Select a random place for the player. The program does not currently use this code.
#
def random_place(board, player):
selection = possibilities(board)
current_loc = random.choice(selection)
board[current_loc] = player
return(board)
# Create a dictionary that associates the integers from 1 to size of the board with a position, (row, col), on the board.
# { 1: (0,0), 2: (0, 1), ...}
tuple_list= [(i,j) for i in range(size) for j in range(size)]
move_dict = dict(zip(range(1,size ** 2 + 1), tuple_list))
def make_a_move(board, player):
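# Prompt the player until they enter a valid cell number that maps to an empty square, then place their mark.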
run = True
while run:
move = input(f"Select number from 1 to {size ** 2}: ")
try:
move = int(move)
if 1 <= move <= (size ** 2):
current_loc = move_dict[move]
if current_loc in possibilities(board):
run = False
board[current_loc] = player
return(board)
else:
print("Sorry this space is occupied.")
else:
print(f"Please input a number within the range 1 to {size ** 2}: ")
except:
print("Please type a number: ")
# Checks whether the player has filled
# an entire horizontal row with their marks
def row_win(board, player):
for x in range(len(board)):
win = True
for y in range(len(board)):
if board[x, y] != player:
win = False
continue
if win == True:
return(win)
return(win)
# Checks whether the player has filled
# an entire vertical column with their marks
def col_win(board, player):
for x in range(len(board)):
win = True
for y in range(len(board)):
if board[y][x] != player:
win = False
continue
if win == True:
return(win)
return(win)
# Checks whether the player has filled
# the main diagonal with their marks
def diag_win(board, player):
win = True
for x in range(len(board)):
if board[x, x] != player:
win = False
return(win)
# Evaluates whether there is
# a winner or a tie
def evaluate(board):
winner = 0
for player in [1, 2]:
if (row_win(board, player) or
col_win(board,player) or
diag_win(board,player)):
winner = player
if np.all(board != 0) and winner == 0:
winner = -1
return winner
# Main function to start the game
def play_game():
board, winner, counter = create_board(), 0, 1
print(board)
# sleep(2)
while winner == 0:
for player in [1, 2]:
# board = random_place(board, player) # Put this line in, and comment out the next one, to have the CPU play itself with random moves.
board = make_a_move(board, player) # Comment this line out to have CPU play itself.
print("Board after " + str(counter) + " move")
print(board)
# sleep(2) # May be needed for CPU play.
counter += 1
winner = evaluate(board)
if winner != 0:
break
return(winner)
# Driver Code
print("Winner is: " + str(play_game()))
|
[
"noreply@github.com"
] |
aambrioso1.noreply@github.com
|
7c6675cfd79cfd95ce08113e603d7e0fa6f4c6be
|
7569bb6b8d64b6d526bdc9f89927fda21845876e
|
/demo/normalize_intensity.py
|
61db34106f11de2171c420b266a007b282103707
|
[] |
no_license
|
ActiveBrainAtlas/MouseBrainAtlas
|
e46077be7c0819daa7c95797422ea288263dcf78
|
b6ca6afcfc88f6772b073b9ad2bf5d0b3bf840bd
|
refs/heads/master
| 2021-07-03T09:01:23.073135
| 2019-04-19T23:24:19
| 2019-04-19T23:24:19
| 140,217,476
| 12
| 6
| null | 2018-07-09T16:58:47
| 2018-07-09T01:44:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,461
|
py
|
#! /usr/bin/env python
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Linearly normalize intensity to between 0 and 255')
parser.add_argument("input_spec", type=str, help="Input specification")
parser.add_argument("out_version", type=str, help="Output image version")
args = parser.parse_args()
import sys
import os
sys.path.append(os.environ['REPO_DIR'] + '/utilities')
from utilities2015 import *
from data_manager import *
from metadata import *
from distributed_utilities import *
out_version = args.out_version
input_spec = load_ini(args.input_spec)
image_name_list = input_spec['image_name_list']
stack = input_spec['stack']
prep_id = input_spec['prep_id']
if prep_id == 'None':
prep_id = None
resol = input_spec['resol']
version = input_spec['version']
if version == 'None':
version = None
for img_name in image_name_list:
t = time.time()
in_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=prep_id, resol=resol, version=version, fn=img_name)
out_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=prep_id, resol=resol, version=out_version, fn=img_name)
create_parent_dir_if_not_exists(out_fp)
cmd = """convert "%(in_fp)s" -normalize -depth 8 "%(out_fp)s" """ % {'in_fp': in_fp, 'out_fp': out_fp}
execute_command(cmd)
sys.stderr.write("Intensity normalize: %.2f seconds.\n" % (time.time() - t))
|
[
"cyc3700@gmail.com"
] |
cyc3700@gmail.com
|
203ac1a069916f9294e948a6a187b71841823c70
|
f88dca5043cfd0ea9c41867af32f6590bac63eda
|
/index.py
|
1c3ef24389ff686d7c43864a16f5ce3b2f8f6de3
|
[] |
no_license
|
Jimmyopot/imran-ahmed-ds-algorithms
|
dd4e1856b4a7686938b3be89709a773bcbec21d7
|
7fcdd10ad6436fd56e7bc82e7d56077127ec0e1f
|
refs/heads/master
| 2023-07-27T04:12:36.905540
| 2021-09-14T03:40:12
| 2021-09-14T03:40:12
| 405,817,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
import pandas as pd
import numpy as np
df = pd.DataFrame([
['1', 'Fares', 32, True],
['2', 'Elena', 23, False],
['3', 'Steven', 40, True]
])
df.columns = ['id', 'name', 'age', 'decision']
print(df)
print(df.name)
myMatrix = np.array([[11, 12, 13], [21, 22, 23], [31, 32, 33]])
print(myMatrix)
print(type(myMatrix))
|
[
"jimmyopot@gmail.com"
] |
jimmyopot@gmail.com
|
1d480f4c67e3a9f992beed61a28c0a72eaffb130
|
d69c727d66c3f77e54382376540e6406ba015463
|
/posts/migrations/0002_auto_20161215_2239.py
|
b6f4aafa371945d48959a60557127f75918b6c75
|
[] |
no_license
|
4dd4m/LearnDjango
|
a97a848fc8c9ba974dce82d474fec2770fbfb4e0
|
2e1af10d7c49056b3979e9d7fd9c24d9a24b206f
|
refs/heads/master
| 2021-06-12T13:20:57.020884
| 2017-01-16T14:53:25
| 2017-01-16T14:53:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-15 22:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='categories',
name='sub1',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='categories',
name='sub2',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='categories',
name='sub3',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='categories',
name='sub4',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='categories',
name='sub5',
field=models.IntegerField(),
),
]
|
[
"87.adamtorok@gmail.com"
] |
87.adamtorok@gmail.com
|
aea6c45cb7c1ce7db49246097c0bd8fda3eb7386
|
2b2ae404b63d362582779e6faff987db2b8f4927
|
/manage.py
|
eb43a14d9eea76cab575c036009eb8404bd48617
|
[] |
no_license
|
kateapault/dog-watch-back
|
8e57b2bdf493b27f7200c7a28756d6050efc8e06
|
f7bad9bdc84f6197b5b069e3f8ea6ab4cdc653ce
|
refs/heads/master
| 2022-09-19T07:49:46.185539
| 2020-05-28T21:22:43
| 2020-05-28T21:22:43
| 264,987,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dbackend_dog_watch.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"kateapault@gmail.com"
] |
kateapault@gmail.com
|
68e68cb798e8a69d6eddc3ace5d719af706c343a
|
001babfcf8c4394cea56b670e523111804279e2d
|
/docs/conf.py
|
c716d12b23b3f96bec5abb2dd584e54ec9312247
|
[
"MIT"
] |
permissive
|
olayinkakings/py-gofiliate-client
|
d14e35ee78326f05f4eeae48a6f8c04d9bdeec50
|
ad4e5489f2dd918e160050280d1e2f33e0c3f2bb
|
refs/heads/master
| 2021-09-02T12:55:10.917814
| 2018-01-02T20:34:18
| 2018-01-02T20:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,177
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Gofilliate documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 29 00:27:50 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from pprint import pprint
sys.path.insert(0, os.path.abspath('../'))
pprint(sys.path.insert(0, os.path.abspath('../')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Gofilliate'
copyright = '2017, MGM'
author = 'MGM'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gofilliatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Gofilliate.tex', 'Gofilliate Documentation',
'MGM', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gofilliate', 'Gofilliate Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Gofilliate', 'Gofilliate Documentation',
author, 'Gofilliate', 'One line description of project.',
'Miscellaneous'),
]
|
[
"mgm@igpte.ch"
] |
mgm@igpte.ch
|
2866d78e6b817c873c49d49a86680e26f9606523
|
0080dd4d26c05a31d3957e9755d928411617e6a1
|
/python/make_sp_v3.py
|
fe7731df4404bf061b650893729bd622ce0de82d
|
[] |
no_license
|
coreywan/devnet-ucse
|
3b7a1f67cd3b52138533ac0a5ad31fe45bfa982b
|
f778364317e0ecaa5cdc0c4b035f4c258ffa65e2
|
refs/heads/master
| 2022-12-27T08:08:05.859607
| 2020-10-08T17:36:34
| 2020-10-08T17:36:34
| 264,577,287
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
## Same as v2, but uses a named Template, instead of auto generating... This is more idempotent
from ucsmsdk.ucshandle import UcsHandle
from ucsmsdk.ucsmethodfactory import ls_instantiate_n_named_template
from ucsmsdk.ucsmethodfactory import ls_instantiate_n_template
from ucsmsdk.ucsbasetype import DnSet, Dn
NUM_SP = 3
def main():
handle = UcsHandle("192.168.254.200","ucspe","ucspe", secure=False)
handle.login()
# Setup Variable for SP Templates to be deployed
dn_set = DnSet()
for i in range(1, NUM_SP+1):
dn = Dn()
sp_name = "SP{}".format(str(i))
dn.attr_set("value", sp_name)
dn_set.child_add(dn)
# Build XML Object to submit to the API
templates = ls_instantiate_n_named_template(
cookie=handle.cookie,
dn="org-root/ls-globotemplate",
in_target_org="org-root",
in_error_on_existing="false",
in_name_set=dn_set
)
# Send XML Object to xml process handler
mo_list = handle.process_xml_elem(templates)
for mo in mo_list:
print(mo.dn)
if __name__ == "__main__":
main()
|
[
"coreywanless@gmail.com"
] |
coreywanless@gmail.com
|
d4034b82a70372a5050cc12b1e5b89ef9be48fab
|
933f51a11f04caba7a14b3cbde2b1fa27d0b73c1
|
/apps/authors/views.py
|
e00e3a741ffa45635d43b1565a394a082c1294b0
|
[] |
no_license
|
tonythanuvelil/djangraphql
|
302705584d7cee0fcadf9b84d6febeabbe9a06e4
|
e87f5fda962a37caeee96b2b75c2ac5d50be2328
|
refs/heads/main
| 2023-02-17T11:16:41.342367
| 2021-01-04T11:49:53
| 2021-01-04T11:49:53
| 326,666,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
from rest_framework import serializers
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Author
class AuthorSerializer(serializers.ModelSerializer):
books = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = Author
fields = '__all__'
class AuthorView(APIView):
def get(self, request):
authors = Author.objects.all()
serializer = AuthorSerializer(authors, many=True)
return Response(serializer.data)
|
[
"tonythanuvelil@gmail.com"
] |
tonythanuvelil@gmail.com
|
23dbf980599ba328d73a87f1b015dd7d013cc36c
|
630a342d40aaf6e4ae1d0cfe72325a9ca7cfae1d
|
/player.py
|
b9677896f7246437d78c64ed0fab450b4dc2ab54
|
[
"MIT"
] |
permissive
|
arthurpreis/pytruco
|
cc39abdfb10f65d50871cec7c5db7b9887ea1e36
|
948de00a70615135469087b2f6ea6a88cba0bf92
|
refs/heads/master
| 2021-01-01T17:18:54.790517
| 2017-08-14T20:54:17
| 2017-08-14T20:54:17
| 98,044,852
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,542
|
py
|
from stack import Stack
class Player():
def __init__(self, name = ''):
self.hand = Stack()
self.name = name
self.score = 0
self.round_pts = 0
self.turn = False
self.is_winning = False
self.won_first = False
self.won_second = False
self.won_third = False
self.has_accepted = False
def play_card(self, index, mesa):
self.hand.move_card(mesa, index)
def draw_cards(self, target, number):
self.hand.draw_cards(target,number)
def reset_win_flag(self):
self.is_winning = False
self.won_first = False
self.won_second = False
self.won_third = False
def won_game(self):
if self.score >= 12:
return True
else:
return False
def print_hand(self):
for card in self.hand:
print(str(self.hand.index(card) + 1) + ': ' +
str(card))
def accept_truco(self):
s = str(input(self.name + ', aceita Truco? \n'))
if (s in ['s', 'S', 'y', 'Y']) :
return True
else:
return False
def trucar(self, other_players):
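# Reset the opponents' acceptance flags, mark the caller as accepting, then ask each opponent whether they accept the truco call.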
for player in other_players:
player.has_accepted = False
self.has_accepted = True
for player in other_players:
if player.accept_truco():
player.has_accepted = True
print(player.name + ' aceitou \n')
else:
print(player.name + ' não aceitou \n')
|
[
"arthur_p_reis@yahoo.com.br"
] |
arthur_p_reis@yahoo.com.br
|
75cc3315080017e013a5cb194ab7ca2bc7ffb60b
|
0dbf6a52e119cc79d07fdb4722d0581b777fc04d
|
/gridworld/natural_net.py
|
e25d4ddc32772bafa24e908da4a465ebd4341be9
|
[] |
no_license
|
EYcab/natural-q-learning
|
5313a21b197dcb145e3f2882425490cbc3d46ad7
|
a30ea79a3eda4ac9867e68dc512c91c0fd105f18
|
refs/heads/master
| 2020-03-25T12:08:32.039333
| 2016-12-13T20:16:30
| 2016-12-13T20:16:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,980
|
py
|
import tensorflow as tf
import numpy as np
import sys
slim = tf.contrib.slim
def _batch_outer_product(A, B):
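# Per-example outer product: (batch, n) and (batch, m) inputs give a (batch, n, m) result.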
return tf.batch_matmul(tf.expand_dims(A, 2), tf.expand_dims(B, 1))
def _one_sided_batch_matmul(A, B):
A = tf.tile(tf.expand_dims(A, 0), [int(B.get_shape()[0]),1,1])
output = tf.batch_matmul(A, B)
return output
def _identity_init():
"""Identity matrix initializer"""
def _identity_initializer(shape, **kwargs):
out = np.identity(shape[0])
return out
return _identity_initializer
def _conv_identity_init():
"""Identity matrix initializer"""
def _identity_initializer(shape, **kwargs):
out = np.identity(shape[2])
out = np.expand_dims(out, 0)
out = np.expand_dims(out, 0)
return out
return _identity_initializer
def whitened_fully_connected(h, output_size, activation=tf.nn.relu):
with tf.variable_scope('whitened/fully_connected'):
layer_index = len(tf.get_collection('WHITENED_HIDDEN_STATES')) + 1
tf.add_to_collection('WHITENED_HIDDEN_STATES', h)
input_size = h.get_shape()[-1]
V = tf.get_variable('V_' + str(layer_index), (input_size, output_size))
d = tf.get_variable('d_' + str(layer_index), (output_size, ))
# whitening params
U = tf.get_variable('U_' + str(layer_index - 1), (input_size, input_size),
initializer=_identity_init(), trainable=False)
c = tf.get_variable('c_' + str(layer_index - 1), (input_size, ),
initializer=tf.constant_initializer(), trainable=False)
# whitened layer
h = tf.matmul(h - c, tf.matmul(U, V)) + d
if activation:
h = activation(h)
# store params in collection for later reuse
tf.add_to_collection('WHITENED_PARAMS', [V, U, d, c])
return h
def whitened_conv2d(h, num_outputs, kernel_size,
stride=1, padding='SAME', activation=tf.nn.relu):
with tf.variable_scope('whitened/Conv'):
layer_index = len(tf.get_collection('WHITENED_HIDDEN_STATES')) + 1
tf.add_to_collection('WHITENED_HIDDEN_STATES', h)
input_size = h.get_shape()[-1]
V = tf.get_variable('V_' + str(layer_index),
(kernel_size, kernel_size, input_size, num_outputs))
U = tf.get_variable('U_' + str(layer_index - 1),
(1, 1, input_size, input_size), trainable=False,
initializer=_conv_identity_init())
prev_h = h
# whitening 1x1 conv
h = tf.nn.conv2d(h, U, [1, 1, 1, 1], padding)
# normal conv
h = tf.nn.conv2d(h, V, [1, stride, stride, 1], padding)
if activation:
h = activation(h)
# store params in collection for later reuse in reparametrization
tf.add_to_collection('WHITENED_PARAMS', [V, U])
return h
def reparam_op(epsilon=0.1):
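# Re-estimate the whitening statistics (mu, sigma) from the collected hidden states and update U, c, V, d so the overall layer function stays unchanged.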
# perform inference on samples to later estimate mu and sigma
out = []
hidden_states = tf.get_collection('WHITENED_HIDDEN_STATES')
with tf.variable_scope('natural/net', reuse=True):
for i, var_list in enumerate(tf.get_collection('WHITENED_PARAMS')):
# decompose var list
V = var_list[0]
U = var_list[1]
if len(var_list) > 2:
d = var_list[2]
c = var_list[3]
conv = True if len(V.get_shape()) > 2 else False
# compute canonical parameters
if conv:
V_t = tf.reshape(V, [-1, int(V.get_shape()[2]), int(V.get_shape()[3])])
U_t = tf.squeeze(U)
W = _one_sided_batch_matmul(U_t, V_t)
else:
W = tf.matmul(U, V)
b = d - tf.matmul(tf.expand_dims(c, 0), W)
# treat spatial dimensions of hidden states as part of the batch
if conv:
hidden_states[i] = tf.reshape(hidden_states[i],
[-1, int(hidden_states[i].get_shape()[-1])])
mu = tf.reduce_mean(hidden_states[i], 0)
# estimate mu and sigma with samples from D
sigma = tf.reduce_mean(_batch_outer_product(hidden_states[i], hidden_states[i]), 0)
# update c and U from new mu and sigma
new_c = mu
# sigma must be self adjoint as it is composed of matrices of the form u*u'
sigma = tf.cast(sigma, tf.float64)
eig_vals, eig_vecs = tf.self_adjoint_eig(sigma)
eig_vals, eig_vecs = tf.cast(eig_vals, tf.float32), tf.cast(eig_vecs, tf.float32)
diagonal = tf.diag(tf.rsqrt(eig_vals + epsilon))
# make sure reciprocal/root of eig vals isn't nan
diagonal = tf.select(tf.is_nan(diagonal), tf.ones_like(diagonal) * 1000, diagonal)
new_U = tf.matmul(tf.transpose(eig_vecs), diagonal)
new_U_inverse = tf.matrix_inverse(new_U)
if conv:
# transform U
new_U_t = tf.expand_dims(tf.expand_dims(new_U, 0), 0)
#c = tf.assign(c, new_c)
U = tf.assign(U, new_U_t)
# update V
new_V = _one_sided_batch_matmul(new_U_inverse, W)
new_V = tf.reshape(new_V, V.get_shape())
else:
c = tf.assign(c, new_c)
U = tf.assign(U, new_U)
# update V and d
new_V = tf.matmul(new_U_inverse, W)
new_d = b + tf.matmul(tf.expand_dims(c, 0), tf.matmul(U, new_V))
new_d = tf.squeeze(new_d, [0])
d = tf.assign(d, new_d)
V = tf.assign(V, new_V)
tensors = [tf.reshape((U), [-1]), tf.reshape((V), [-1])]
if not conv:
tensors += [c, d]
out = [tf.concat(0, out + tensors)]
return out[0] # only exists to provide op for TF to run (there's probably a nicer way of doing this)
|
[
"admb@stanford.edu"
] |
admb@stanford.edu
|
2088e8732b43b14bf4d5e0aa2157f4eb78054235
|
c63d36071ca02405a0942fa3d814ac70a0665e4e
|
/xgboost_ray/data_sources/numpy.py
|
bfd632c575eb7d399791b097c579c0de338068d7
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ryantd/xgboost_ray
|
6ad4b2c09bb579e13a9b3a30cb22b2769ac81e1b
|
86a72d209e2a95e4962fe0b044544bc7e9e322ce
|
refs/heads/master
| 2023-09-03T18:44:52.070689
| 2021-10-14T09:22:41
| 2021-10-14T09:22:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
from typing import Any, Optional, Sequence, List, TYPE_CHECKING
import numpy as np
import pandas as pd
from xgboost_ray.data_sources.data_source import DataSource, RayFileType
from xgboost_ray.data_sources.pandas import Pandas
if TYPE_CHECKING:
from xgboost_ray.xgb import xgboost as xgb
class Numpy(DataSource):
"""Read from numpy arrays."""
@staticmethod
def is_data_type(data: Any,
filetype: Optional[RayFileType] = None) -> bool:
return isinstance(data, np.ndarray)
@staticmethod
def update_feature_names(matrix: "xgb.DMatrix",
feature_names: Optional[List[str]]):
# Potentially unset feature names
matrix.feature_names = feature_names
@staticmethod
def load_data(data: np.ndarray,
ignore: Optional[Sequence[str]] = None,
indices: Optional[Sequence[int]] = None,
**kwargs) -> pd.DataFrame:
local_df = pd.DataFrame(
data, columns=[f"f{i}" for i in range(data.shape[1])])
return Pandas.load_data(local_df, ignore=ignore, indices=indices)
|
[
"noreply@github.com"
] |
ryantd.noreply@github.com
|
844b785434e84611d0889a15664403f6b830d1c2
|
1788a78ca05cb3e1785e71c882455674613b5ce2
|
/detect time/pattern of group/map_pattern_generate.py
|
bd9399c1d26e702f583b379938441ea1cca27fe1
|
[] |
no_license
|
yenhao/Bipolar-Disorder-Detection
|
9dd2e8ccdfad3e195298c74a33547524b4d3dd8c
|
1a815ec84f53bc509298c3345149bc0321ea36f0
|
refs/heads/master
| 2022-01-10T18:16:05.993288
| 2019-04-29T07:36:50
| 2019-04-29T07:36:50
| 72,731,683
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
# -*- coding: utf8 -*-
from nltk.tokenize import TweetTokenizer
from collections import defaultdict
import re
import sys, gzip, re, os, fileinput
def slideWindows(token_list, size = 3):
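# Slide a 3-token window over the token list, advancing two tokens at a time, and collect the wildcard patterns for each window.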
if len(token_list) > size:
return getPatternCombination(token_list[:3]) + slideWindows(token_list[2:], size)
else:
return []
def getPatternCombination(pattern_word_list):
pattern_list = []
for i in range(3):
temp_pattern_word_list = list(pattern_word_list)
temp_pattern_word_list[i] = '<.>'
pattern_list.append(' '.join(temp_pattern_word_list))
return pattern_list
# function to delete url
def del_url(line):
return re.sub(r'(\S*(\.com).*)|(https?:\/\/.*)', "", line)
# replace tag
def checktag(line):
return re.sub(r'\@\S*', "@", line)
def checkhashtag(line):
return re.sub(r'\#\S*', "#", line)
# Some special character
def checkSpecial(line):
return line.replace('♡', 'love ').replace('\"','').replace('“','').replace('”','').replace('…','...').replace('—','-')
def checkline(line):
return del_url(checkhashtag(checktag(checkSpecial(line))))
if __name__ == '__main__':
pattern_dict = defaultdict(lambda : 0)
tknzr = TweetTokenizer()
for line in fileinput.input():
split_line = line.split('\t')
if len(split_line) != 2: continue
uid, text = split_line
token_list = tknzr.tokenize(checkline(text).lower())
for pattern in slideWindows(token_list):
pattern_dict[pattern] += 1
for pattern, count in pattern_dict.items():
print('{}\t{}'.format(pattern.encode('utf-8'), count))
|
[
"yenhao0218@gmail.com"
] |
yenhao0218@gmail.com
|
2fe284082ab03918b59f2d77b288ec6dfc5693fb
|
61061d1c05230fe66f9eee6e2066dc393103c039
|
/classesandinheritance/unexpectedidentifiersusedict/c_dictofsubjgradebecomessub_gradeweight.py
|
c8c8d4ddbf661f651e6b674bd2fbe61630e8121e
|
[] |
no_license
|
syeed-ode/python_scratch_work
|
3b100028fa549d9904cdf5cb6c990d01e9f77c94
|
a406d74ce0eff9a69970a54af717176661c1aaf8
|
refs/heads/master
| 2021-01-24T12:47:44.012107
| 2018-07-15T11:29:57
| 2018-07-15T11:29:57
| 123,149,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,184
|
py
|
"""
This class builds off of overextendingaddinggradesbysubject
module's BySubjectGradebook. In BySubjectGradebook a dictionary
consisting of name to another dictionary is use. The inner dictionary
is a subject (the key) to grade (the value).
This class now needlessly converts the grade value to a tuple
consisting of (grade, weight).
This class demonstrates how nesting dictionaries/tuples too deeply
can increase complexity.
#
# for an unexpected number of identifiers ('dynamic' bookkeeping) -- utilize
# dictionary instead of fields.
#
# Avoid more than one layer of nesting.
# break out the inner dictionary into classes.
#
"""
class WeightedGradebook(object):
def __init__(self):
self.__grades = {}
def add_student(self, name):
self.__grades[name] = {}
def report_grade(self, name, subject, score, weight) -> None:
"""
Replaced the inner dictionary with a tuple of (grade, weight)
instead of just utilizing grade.
Python's built-in dictionaries and tuple types make it easy to
keep going, adding layer after layer.
:param name:
:param subject:
:param score:
:param weight:
:return:
"""
by_subject = self.__grades[name]
grade_list = by_subject.setdefault(subject, [])
grade_list.append((score, weight))
def average_grade(self, name) -> 'FloatingPoint':
"""
This method should avoid a dictionary containing a tuple. It
is too many levels deep and makes the code brittle.
It should be broken out into classes. This lets you provide
well defined interfaces that better encapsulate your data.
Calculating the total grade by computing the weight score is
managed by a tuple.
Refactoring should start at the bottom level. So, converting
the grade tuple into a class would be the best place to start.
:param name:
:return:
"""
by_subject = self.__grades[name]
score_sum, score_count = 0, 0
for subject, scores in by_subject.items():
subject_avg, total_weight = 0, 0
for score, weight in scores:
subject_avg += score * weight
print('This is subject_avg:')
print(subject_avg)
total_weight += weight
print('This is total_weight:')
print(total_weight, end='\n\n')
score_sum += subject_avg / total_weight
print('This is score_sum:')
print(score_sum, end='\n\n')
score_count += 1
print('This is score_count:')
print(score_count, end='\n\n')
return score_sum / score_count
book = WeightedGradebook()
book.add_student('Albert Einstein')
book.report_grade('Albert Einstein', 'Math', 80, .10)
book.report_grade('Albert Einstein', 'Math', 90, .40)
# book.report_grade('Albert Einstein', 'Math', 90, .10)
print("Total grade average: %d" % book.average_grade('Albert Einstein'))
|
[
"sy_ode@yahoo.com"
] |
sy_ode@yahoo.com
|
db59de691e48bfdddb5a99fd5a9502f38286acfa
|
5d48aba44824ff9b9ae7e3616df10aad323c260e
|
/string/800.similar_RGB_color.py
|
8f2df1e3f4ff4d578ff40098592e39750beb2beb
|
[] |
no_license
|
eric496/leetcode.py
|
37eab98a68d6d3417780230f4b5a840f6d4bd2a6
|
32a76cf4ced6ed5f89b5fc98af4695b8a81b9f17
|
refs/heads/master
| 2021-07-25T11:08:36.776720
| 2021-07-01T15:49:31
| 2021-07-01T15:49:31
| 139,770,188
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,577
|
py
|
"""
In the following, every capital letter represents some hexadecimal digit from 0 to f.
The red-green-blue color "#AABBCC" can be written as "#ABC" in shorthand. For example, "#15c" is shorthand for the color "#1155cc".
Now, say the similarity between two colors "#ABCDEF" and "#UVWXYZ" is -(AB - UV)^2 - (CD - WX)^2 - (EF - YZ)^2.
Given the color "#ABCDEF", return a 7 character color that is most similar to #ABCDEF, and has a shorthand (that is, it can be represented as some "#XYZ").
Example 1:
Input: color = "#09f166"
Output: "#11ee66"
Explanation:
The similarity is -(0x09 - 0x11)^2 -(0xf1 - 0xee)^2 - (0x66 - 0x66)^2 = -64 -9 -0 = -73.
This is the highest among any shorthand color.
Note:
color is a string of length 7.
color is a valid RGB color: for i > 0, color[i] is a hexadecimal digit from 0 to f
Any answer which has the same (highest) similarity as the best answer will be accepted.
All inputs and outputs should use lowercase letters, and the output is 7 characters.
"""
class Solution:
def similarRGB(self, color: str) -> str:
res = [self.calc_closest(color[i : i + 2]) for i in range(1, 7, 2)]
return "#" + "".join(res)
def calc_closest(self, s: str) -> str:
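# Candidate two-character channels are the 16 doubled hex digits: "00", "11", ..., "99", "aa", ..., "ff".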
candidates = [str(i) * 2 for i in range(10)] + [ch * 2 for ch in "abcdef"]
min_dist = float("inf")
min_candidate = None
hex_s = int(s, 16)
for c in candidates:
if abs(hex_s - int(c, 16)) < min_dist:
min_dist = abs(hex_s - int(c, 16))
min_candidate = c
return min_candidate
|
[
"eric.mlengineer@gmail.com"
] |
eric.mlengineer@gmail.com
|
493a6c08c2521ae5e90e427cd910cb8d03318ee6
|
1721e09c3b5d7f531765f831539ade6d84efe069
|
/Test_Case/test_Main.py
|
c68125aae57a3fff4569dc21826787cfa90584df
|
[] |
no_license
|
zhouzhenquan/API_test
|
b9589c4081d47fe293f046dfe6561aaa203a3a7b
|
301a23121bbdf6f03f0120a37365e404253a0ed1
|
refs/heads/master
| 2021-03-04T09:57:56.603267
| 2020-08-04T13:08:10
| 2020-08-04T13:08:10
| 246,025,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,865
|
py
|
"""
-- coding: utf-8 --
@Time : 2020/3/11 10:50
@Author : 周振全
@Site :
@File : test_invest.py
@Software: PyCharm
"""
import random
import jsonpath
import unittest
import os
from Library.ddt import ddt, data
from Common.Excel import operation_excel
from Common.Path import DATADIR
from Common.config import conf
from Common.request_1 import SendRequests
from Common.Login import log
from Common.Colour import color
from Common.Connect_DB import DB
from Common.handle_data import replace_data,Case_Data
case_file = os.path.join(DATADIR, 'apicases.xlsx')
"""登录"""
@ddt
class Test_Main(unittest.TestCase):
excel = operation_excel(case_file, 'main_stream')
cases = excel.read_excel()
request = SendRequests()
@data(*cases)
def test_main(self, case):
# Prepare the test case data
url = conf.get("env", "url") + replace_data(case["url"])
method = case['method']
if case["interface"] == "register":
Case_Data.mobilephone = self.random_phone()
data = eval(replace_data(case["data"]))
headers = eval(conf.get('env', 'headers'))
# If the current interface is neither register nor login, add the token to the headers
if case["interface"] != "login" and case["interface"] != "register":
headers["Authorization"] = getattr(Case_Data,"token_value")
# Expected result
expected = eval(case['expected'])
row = case['case_id'] + 1
# Send the request and get the result
response = self.request.send_requests_sc(url=url, method=method, json=data, headers=headers)
res = response.json()
# After sending the request, check whether this is the login interface and, if so, extract the token
if case["interface"].lower() == "login":
Case_Data.member_id = str(jsonpath.jsonpath(res, '$..id')[0])
token = jsonpath.jsonpath(res, '$..token')[0]
token_type = jsonpath.jsonpath(res, '$..token_type')[0]
# Extract the token and save it as a class attribute
Case_Data.token_value = token_type + " " + token
if case["interface"] == "add":
Case_Data.loan_id = str(jsonpath.jsonpath(res,"$..id")[0])
# Assert: compare the expected result with the actual result
try:
self.assertEqual(expected['code'], res['code'])
self.assertIn(expected["msg"],res["msg"])
except AssertionError as e:
self.excel.write_excel(row=row, column=8, value='未通过')
log.error('用例:{},执行'.format(case['title']) + color.white_red('未通过'))
raise e
else:
self.excel.write_excel(row=row, column=8, value='通过')
log.info('用例:{},执行'.format(case['title']) + color.white_green('通过'))
def random_phone(self):
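# Build an 11-digit mobile number: the "138" prefix followed by 8 random digits.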
phone = "138"
N = random.randint(100000000,999999999)
phone += str(N)[1:]
return phone
|
[
"17764509133@163.com"
] |
17764509133@163.com
|
76cb29aedcd43a74cf7ea49d8d5f047c7aa64f37
|
34364898be2a85f3e6aa5c8769c5991be45b4a3d
|
/app/whatson/settings/__init__.py
|
5477e8c0d8a233d156857c7624cc6f1db8c49fa8
|
[] |
no_license
|
MattSegal/whats-on-melbourne
|
e2e285f22dce57738f49accf840809df88c94dc4
|
169cbd72e7afd89411cf6e2076bddf2fa8367f6e
|
refs/heads/master
| 2023-01-09T12:58:41.247657
| 2019-12-24T10:43:16
| 2019-12-24T10:43:16
| 131,386,234
| 5
| 1
| null | 2023-01-04T02:45:11
| 2018-04-28T07:43:13
|
Python
|
UTF-8
|
Python
| false
| false
| 3,470
|
py
|
# https://github.com/django/django/blob/master/django/conf/global_settings.py
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
GEOCODING_API_KEY = os.environ.get("GEOCODING_API_KEY")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
"django_extensions",
"corsheaders",
"rest_framework",
"scrapers",
"whatson",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "whatson.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "whatson", "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "whatson.wsgi.application"
# Database
REDIS_HOST = os.environ.get("REDIS_HOST")
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": os.environ.get("PGDATABASE"),
"USER": os.environ.get("PGUSER"),
"PASSWORD": os.environ.get("PGPASSWORD"),
"HOST": os.environ.get("PGHOST"),
"PORT": os.environ.get("PGPORT"),
}
}
# Authentication
LOGIN_URL = "home"
LOGIN_REDIRECT_URL = "home"
AUTH_PASSWORD_VALIDATORS = [
{"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Australia/Melbourne"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = "/static/"
STATIC_ROOT = "/static/"
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"root": {"level": "INFO", "handlers": ["console"]},
"handlers": {"console": {"level": "INFO", "class": "logging.StreamHandler"}},
"loggers": {
"django": {"handlers": ["console"], "level": "INFO", "propagate": True},
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
"raven": {"level": "DEBUG", "handlers": ["console"], "propagate": False},
"sentry.errors": {"level": "DEBUG", "handlers": ["console"], "propagate": False},
},
}
SHELL_PLUS = "ipython"
|
[
"mattdsegal@gmail.com"
] |
mattdsegal@gmail.com
|
4a3d1286000f3ba9b2941d5ec3888fea244f28e7
|
3eae9c14c119ee2d6a7d02ef1ba5d61420959e3c
|
/modules/core/rwvx/rwlog/test/logtesttasklet-python.py
|
036ec2e9deada682ce16d039342bdf1082038713
|
[
"Apache-2.0"
] |
permissive
|
RIFTIO/RIFT.ware
|
94d3a34836a04546ea02ec0576dae78d566dabb3
|
4ade66a5bccbeb4c5ed5b56fed8841e46e2639b0
|
refs/heads/RIFT.ware-4.4.1
| 2020-05-21T14:07:31.092287
| 2017-06-05T16:02:48
| 2017-06-05T16:02:48
| 52,545,688
| 9
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,114
|
py
|
#!/usr/bin/env python3
import asyncio
import gi
import logging
import time
gi.require_version("RwDts", "1.0")
from gi.repository import (
RwDts as rwdts,
RwDtsYang,
)
import rift.tasklets
import rift.test.dts
import rwlogger
@asyncio.coroutine
def fake_library_log_call(loop):
logger = logging.getLogger("library")
logger.setLevel(logging.DEBUG)
for _ in range(5):
# Use two separate lines to bypass duplicate detection in rwlog
logger.debug("library_debug")
logger.debug("library_debug")
@asyncio.coroutine
def fake_threaded_library_log_calls(rwlog, loop):
""" Simulate a library logging messages while running in a seperate thread """
def thread_logs():
with rwlogger.rwlog_root_handler(rwlog):
logger = logging.getLogger("library")
for _ in range(4):
# Use two separate lines to bypass duplicate detection in rwlog
logger.debug("threaded_library")
logger.debug("threaded_library")
with rwlogger.rwlog_root_handler(rwlog) as rwlog2:
rwlog2.set_category("rw-generic-log")
logger.debug("threaded_nested_library")
# Give the thread a chance to swap out and potentially
# conflict with the test's rwmain logger
time.sleep(.05)
logger.debug("threaded_library")
logger.debug("threaded_library")
yield from loop.run_in_executor(None, thread_logs)
class RwLogTestTasklet(rift.tasklets.Tasklet):
""" A tasklet to test Python rwlog interactions """
def __init__(self, *args, **kwargs):
super(RwLogTestTasklet, self).__init__(*args, **kwargs)
self._dts = None
self.rwlog.set_category("rw-logtest-log")
def start(self):
""" The task start callback """
super(RwLogTestTasklet, self).start()
self._dts = rift.tasklets.DTS(self.tasklet_info,
RwDtsYang.get_schema(),
self.loop,
self.on_dts_state_change)
@asyncio.coroutine
def init(self):
self.log.debug("tasklet_debug")
yield from fake_library_log_call(self.loop)
yield from fake_threaded_library_log_calls(self.rwlog, self.loop)
@asyncio.coroutine
def run(self):
pass
@asyncio.coroutine
def on_dts_state_change(self, state):
switch = {
rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
rwdts.State.CONFIG: rwdts.State.RUN,
}
handlers = {
rwdts.State.INIT: self.init,
rwdts.State.RUN: self.run,
}
# Transition application to next state
handler = handlers.get(state, None)
if handler is not None:
yield from handler()
# Transition dts to next state
next_state = switch.get(state, None)
if next_state is not None:
self.log.debug("Changing state to %s", next_state)
self._dts.handle.set_state(next_state)
|
[
"Leslie.Giles@riftio.com"
] |
Leslie.Giles@riftio.com
|
a637bb7b4a43c4ca0ea61ae7522fab6686801b8d
|
c3084ff4a1e8ec48647ef8f94d608c9ffa18e7b1
|
/codigos/base.py
|
e90fc97cf4c2abcd27b358a91c08d7848445ae23
|
[
"MIT"
] |
permissive
|
fda-tome/n-body-simulations
|
159d0c82646c0d9e80f828b4bc0fdfb67a7f23ff
|
74a0d8bf64bc9bf819fd29d0223c0f3c986bce48
|
refs/heads/main
| 2023-06-27T10:41:44.914303
| 2021-07-28T23:12:33
| 2021-07-28T23:12:33
| 379,115,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,124
|
py
|
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import time
start=time.time()
fig=plt.figure()
fig2=plt.figure()
ax=fig.add_subplot(projection='3d')
ax2=fig2.add_subplot()
def gera_part(sis,v):
x=np.random.rand()/((7/4)**(1/3))
y=np.random.rand()/((7/4)**(1/3))
z=np.random.rand()/((7/4)**(1/3))
vx=np.random.rand()*0
vy=np.random.rand()*0
vz=np.random.rand()*0
part=[x,y,z]
vpart=[vx,vy,vz]
sis.append(part)
v.append(vpart)
sis=[]
d=[]
aux=[]
q=[]
n=4000
nprint=n
mass=1.6605*1e-19
massb=1.6605*1e-27
charge=1.6*1e-19
m=[]
cont_el=0
t=0.0005
nb=10
j=0
v=[]  # question whether it would be necessary to generate random velocities for the particles
for i in range(n):
gera_part(sis,v)
if(cont_el!=nb):
q.append(charge)
cont_el+=1
m.append(massb)
else:
m.append(mass)
q.append(0)
v=np.array(v)
m=np.reshape(np.array(m),(n,1))
q=np.reshape(np.array(q),(n,1))
v-=np.mean(v*m,0)/np.mean(m)
auxplot=np.array(sis)
ax.scatter(np.reshape(auxplot[:,:-2],n),np.reshape(auxplot[:,1:-1],n),np.reshape(auxplot[:,2:],n),c='b',marker='D',s=m)
ael=[]
G=6*1e-11
c=9*1e9
controle=0
tsum=0
tico=0
teste=[]
tf=[]
start = time.time()
while(controle==0 and j<1):
if(j):
print(t)
anorm=np.sqrt(np.sum(a**2,axis=1))
controle=np.argwhere(anorm<=5e-30).shape[0]
t=0.005/anorm[np.argmax(anorm)]
tsum+=t
tf.append(tsum)
teste.append(anorm[100])
sis=sis.tolist()
print(tico)
tico+=1
aux=[]
aux2=[]
aux3=[]
d=[]
for i in range(n):
aux=np.reshape(n*sis[i],(n,3))
aux=np.array(sis)-aux
d.append(aux)
d=np.array(d)
dnorm=d**2
for i in range(n):
aux2.append(np.sqrt(np.sum(dnorm[i],axis=1)))
dnorm=np.array(aux2)
for i in range(n):
dnorm[i][i]=1
args_coup=np.argwhere(dnorm<=1e-3)
filt=args_coup.shape[0]
for i in range(math.ceil(filt/2)):
args_coup=np.delete(args_coup,np.argwhere(args_coup==[args_coup[i][1],args_coup[i][0]])[0][0],0)
if(filt!=0):
print(args_coup)
for i in range(math.ceil(filt/2)):  # in this section the particles are coupled, but they could interact in another way; ask Vitor.
auxd=[]
indic=args_coup[i][0]
flag_m=2
flag_d=2
if(i<args_coup.shape[0]-1):
while(args_coup[i+1][0]==indic):
if(m[args_coup[i+1][1]][0]>m[args_coup[i][1]][0]):
flag_m=1
elif(m[args_coup[i+1][1]][0]<m[args_coup[i][1]][0]):
flag_m=0
if(dnorm[indic][args_coup[i][1]]>dnorm[indic][args_coup[i+1][1]]):
flag_d=1
elif(dnorm[indic][args_coup[i+1][1]]>dnorm[indic][args_coup[i][1]]):
flag_d=0
if (flag_d==1 and flag_m==1):
args_coup=np.delete(args_coup,i,0)
if (flag_d==0 and flag_m==0):
args_coup=np.delete(args_coup,i+1,0)
if (flag_d==0 and flag_m==1):
if(m[args_coup[i+1][1]][0]>dnorm[indic][args_coup[i][1]]**(-2)):
args_coup=np.delete(args_coup,i,0)
else:
args_coup=np.delete(args_coup,i+1,0)
if (flag_d==1 and flag_m==0):
if(m[args_coup[i][1]][0]>dnorm[indic][args_coup[i+1][1]]**(-2)):
args_coup=np.delete(args_coup,i+1,0)
else:
args_coup=np.delete(args_coup,i,0)
print(args_coup)
for l in range(3):  # clean up the leftover entries
v[args_coup[i][1]][l]=(m[args_coup[i][1]]*(v[args_coup[i][1]][l]+a[args_coup[i][1]][l]*t)+m[indic]*(v[indic][l]+a[indic][l]*t))/(m[args_coup[i][1]]+m[indic])
v=np.delete(v,indic,0)
sis=np.delete(sis,indic,0).tolist()
m[args_coup[i][1]]+=m[indic]
q[args_coup[i][1]]+=q[indic]
m=np.delete(m,indic,0)
q=np.delete(q,indic,0)
d=np.delete(d,indic,0)
dnorm=np.delete(dnorm,indic,0)
dnorm=np.delete(dnorm,indic,1)
for k in range(n-1):
auxd.append(np.delete(d[k],indic,0))
d=np.array(auxd)
n-=1
args_coup-=1
d=np.reshape(d,(n**2,3))
dnorm=np.reshape(dnorm,(n**2))
for i in range(3):
aux3.append(np.reshape(d[ : ,i:1+i],(n**2))/dnorm**3)
aux3=np.array(aux3)
d=np.reshape(aux3.transpose(),(n,n,3))
a=[]
for i in range(n):
a.append(np.reshape(d[i].transpose()@(G*m),3)-np.reshape(d[i].transpose()@(c*q*q[i][0]/m[i][0]),3))
a=np.array(a)
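# Velocity-Verlet style update: half velocity kick, full position drift, then the second half kick.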
v+=a*t/2
sis=np.array(sis)+v*t
v+=a*t/2
j+=1
print(time.time() - start)
ax.scatter(np.reshape(sis[:,:-2],n),np.reshape(sis[:,1:-1],n),np.reshape(sis[:,2:],n),c='r',marker='o')
ax2.plot(tf,teste)
plt.show()
|
[
"tomefelipe0@usp.br"
] |
tomefelipe0@usp.br
|
6b371c7be1bc69ec9aa6c2668b074f453b427a01
|
5a3bd18734cc9f8e65f9e90f77bfc67eb0b05921
|
/util_print.py
|
ee68844de827c39fdb97cb865ad7270c9eb922bb
|
[] |
no_license
|
byshen/pycode
|
e027a965b2837147527712a44ea6e49934bd7714
|
3396b198f943e3817d432c9e8dc27ea786879057
|
refs/heads/master
| 2021-01-10T18:44:23.273696
| 2016-10-20T13:14:43
| 2016-10-20T13:14:43
| 71,464,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,575
|
py
|
#!/usr/bin/env python
"""
Use DPKT to read in a pcap file and print out the contents of the packets
This example is focused on the fields in the Ethernet Frame and IP packet
"""
import dpkt
import datetime
import socket
def mac_addr(address):
"""Convert a MAC address to a readable/printable string
Args:
address (str): a MAC address in hex form (e.g. '\x01\x02\x03\x04\x05\x06')
Returns:
str: Printable/readable MAC address
"""
return ':'.join('%02x' % ord(b) for b in address)
def inet_to_str(inet):
"""Convert inet object to a string
Args:
inet (inet struct): inet network address
Returns:
str: Printable/readable IP address
"""
# First try ipv4 and then ipv6
try:
return socket.inet_ntop(socket.AF_INET, inet)
except ValueError:
return socket.inet_ntop(socket.AF_INET6, inet)
def print_packets(pcap):
"""Print out information about each packet in a pcap
Args:
pcap: dpkt pcap reader object (dpkt.pcap.Reader)
"""
# For each packet in the pcap process the contents
for timestamp, buf in pcap:
# Print out the timestamp in UTC
print 'Timestamp: ', str(datetime.datetime.utcfromtimestamp(timestamp))
# Unpack the Ethernet frame (mac src/dst, ethertype)
eth = dpkt.ethernet.Ethernet(buf)
print 'Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type
# Make sure the Ethernet data contains an IP packet
if not isinstance(eth.data, dpkt.ip.IP):
print 'Non IP Packet type not supported %s\n' % eth.data.__class__.__name__
continue
# Now unpack the data within the Ethernet frame (the IP packet)
# Pulling out src, dst, length, fragment info, TTL, and Protocol
ip = eth.data
# Pull out fragment information (flags and offset all packed into off field, so use bitmasks)
do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)
more_fragments = bool(ip.off & dpkt.ip.IP_MF)
fragment_offset = ip.off & dpkt.ip.IP_OFFMASK
# Print out the info
print 'IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\n' % \
(inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)
def test():
"""Open up a test pcap file and print out the packets"""
with open('data/http.pcap', 'rb') as f:
pcap = dpkt.pcap.Reader(f)
print_packets(pcap)
if __name__ == '__main__':
test()
|
[
"ahshenbingyu@163.com"
] |
ahshenbingyu@163.com
|
0e439f661afa48a26d8170c83e3e3f6d261bc440
|
70a08180ee6f60715107860e0c555ba56b74af94
|
/covod/blueprints/web_app.py
|
4f524f56e85db93c3420da1f79af0eb3983e3203
|
[] |
no_license
|
cau-covod/covod-backend
|
3a765edbc8389a9d5e63204a0ad47783fe6b3bba
|
e0cf0436e8f080d5e6e254948ebc93917ec73c99
|
refs/heads/master
| 2021-07-17T02:04:25.543127
| 2020-12-15T19:16:22
| 2020-12-15T19:16:22
| 249,190,515
| 1
| 0
| null | 2021-06-02T01:15:59
| 2020-03-22T13:35:27
|
Python
|
UTF-8
|
Python
| false
| false
| 388
|
py
|
import os
from flask import Blueprint, send_from_directory
bp = Blueprint("web-app", __name__, url_prefix="/")
@bp.route("/", defaults={"path": ""})
@bp.route("/<path:path>")
def serve(path):
if path != "": # and os.path.exists("../web-app/" + path):
return send_from_directory("../web-app", path)
else:
return send_from_directory("../web-app", "index.html")
|
[
"stu210876@mail.uni-kiel.de"
] |
stu210876@mail.uni-kiel.de
|
3ead27a4df00dc38f399d472b56eafd4d2af78bc
|
02cf2e6a4063cd5a584dd98d89304330963c74b9
|
/utils/prepare_vaildOnly.py
|
6bff9cae70810fbe39dfbaa34ae3813d0fd29b6c
|
[] |
no_license
|
uw-cmg/MedicalImgAnalysis-BleedingSitesDetection
|
79c678067c7348233e4e41422715e2d18b937a32
|
79f72281d76a7accbad353e70ffb6d18569ff497
|
refs/heads/master
| 2023-05-05T06:06:00.760846
| 2021-05-24T21:40:05
| 2021-05-24T21:40:05
| 153,353,721
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,722
|
py
|
#!/usr/bin/env python
"""
The preparation script that translates all the labeling results into the input for defect detection.
In this script, only images with bbox are kept!
"""
"""
Project Information modify if needed
"""
__author__ = "Mingren Shen"
__copyright__ = "Copyright 2019, The medical image analysis Project"
__credits__ = [""]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Mingren Shen"
__email__ = "mshen32@wisc.edu"
__status__ = "Development"
"""
End of Project information
"""
# import libraries
import os
import errno
from shutil import copy
from skimage import io
import csv
import numpy as np
#import matplotlib.pyplot as plt
# global path and prefix or suffix
"""
Metadata for the running of Project
Modify before using
"""
# Directory created
datDir = "data"
imgDir = "IMG"
csvDir = "CSV"
txtDir = "TXT"
"""
Functions
"""
def loopAllImg(imgDir,csvDir,txtDir):
"""
For every JPG image, generate a TXT from its CSV if the CSV exists; otherwise generate a blank TXT
Parameters
----------
imgDir : the directory to store the images
csvDir : the directory to store the csv files
txtDir : the directory to store all the generate txt bbox information
Returns
-------
None
"""
for f in os.listdir(imgDir):
fs = f.split('.')
csv_file = csvDir + "/" + fs[0] + ".csv"
txt_file = fs[0] + ".txt"
if (os.path.exists(csv_file)):
generateTXT(csv_file,txt_file)
else:
with open(txt_file, 'a'): # Create the file if it does not exist
pass
copy(txt_file, txtDir)
def generateTXT(csvFile,txtFile):
"""
generate TXT from CSV and
Parameters
----------
csvFile : the CSV File that needs to be processed
txtFile : the TXT File that stores the bounding box information
Returns
-------
None
"""
with open(txtFile,'w') as txtfile:
with open(csvFile) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
bbox = calculateBoundingBoxes(0,float(row['X']), float(row['Y']),float(row['Width']),float(row['Height']))
for i in range(0,len(bbox)):
if i == len(bbox) - 1:
txtfile.write("%s \n" % bbox[i])
else:
txtfile.write("%s " % bbox[i])
def calculateBoundingBoxes(label, x, y, w, h):
"""
calculate bounding box information form the center and length, width
Parameters
----------
label : the label of current bbox
x : the x coordinate of center of bbox
y : the y coordinate of center of bbox
w : width of bbox
h : height of bbox
Returns
-------
list contains the [label,Y1,X1,Y2,X2]
Where (X1,Y1) is the top left point of bbox
(X2,Y2) is the bottom right point of bbox
"""
X1 = x - (w / 2)
Y1 = y - (h / 2)
X2 = x + (w / 2)
Y2 = y + (h / 2)
return [label, round(Y1, 2), round(X1, 2), round(Y2, 2), round(X2, 2)]
def splitFiles_withBBox(datDir,imgDir,csvDir):
"""
pre-processing the files and prepare all needed files
Parameters
----------
datDir : the directory that you store all the data
imgDir : the directory to store the images
csvDir : the directory to store the csv files
Returns
-------
None
"""
for f in os.listdir(datDir):
fs = f.split('.')
if fs[1] == "csv":
copy(datDir+'/'+f, csvDir)
covertTIF2JPG(datDir + '/' + fs[0] + '.tif', fs[0])
copy(fs[0] + '.jpg', imgDir)
def covertTIF2JPG(imgsource,imgName):
"""
Convert source TIF image to JPG image
Parameters
----------
imgsource : the TIF source image
imgName : the target image name
Returns
-------
None
"""
img = io.imread(imgsource)
io.imsave(imgName + ".jpg", img, quality=100)
def createFolder(folderName):
"""
Safely create folder when needed
Parameters
----------
folderName : the directory that you want to safely create
Returns
-------
None
"""
if not os.path.exists(folderName):
try:
os.makedirs(folderName)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
if __name__ == '__main__':
print("start the pre-processing scripts")
print("Initialization")
createFolder(imgDir)
createFolder(csvDir)
createFolder(txtDir)
print("move files to separated files")
splitFiles_withBBox(datDir, imgDir, csvDir)
print("generate bbox from CSV \n and pair every JPG with TXT")
loopAllImg(imgDir, csvDir, txtDir)
|
[
"mshen32@wisc.edu"
] |
mshen32@wisc.edu
|
8da02b46c15c892baa4bb3c427ce8bae3a5481f9
|
2a4de474b5dbfaa81a4374660f43395405d4e44a
|
/motors2.py
|
1b4010719a2017cdbd87a56ab74b27eca996cfc9
|
[] |
no_license
|
amaclean2/robots
|
dfb9e990aa42a925297bda78df964b44ffdae17d
|
77144138a0ddd3d8db85c39f908d6660083dde7e
|
refs/heads/master
| 2023-06-20T14:00:04.353324
| 2021-07-22T22:02:24
| 2021-07-22T22:02:24
| 362,583,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
import RPi.GPIO as GPIO
import pigpio
import argparse
from time import sleep
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
pi = pigpio.pi()
ESC_GPIO = 13
parser = argparse.ArgumentParser()
parser.add_argument("--calibrate", help="set the calibration mode of the esc")
args = parser.parse_args()
print("calibrate ", args.calibrate)
if args.calibrate == "c" :
# calibrating the ESC
pi.set_servo_pulsewidth(ESC_GPIO, 2000)
print("calibrate_hi")
sleep(2)
pi.set_servo_pulsewidth(ESC_GPIO, 1000)
print("calibrate_lo")
sleep(2)
elif args.calibrate == "t" :
for speed in range(6) :
pi.set_servo_pulsewidth(ESC_GPIO, speed * 1000 / 7 + 1000)
print("running speed: ", speed * 1000 / 7 + 1000)
sleep(2)
pi.set_servo_pulsewidth(ESC_GPIO, 0)
sleep(1)
else :
speed = 3.0
pi.set_servo_pulsewidth(ESC_GPIO, speed * 1000 / 7 + 1000)
print("testing speed: ", speed)
sleep(2)
pi.set_servo_pulsewidth(ESC_GPIO, 0)
sleep(2)
pi.set_servo_pulsewidth(ESC_GPIO, speed * 1000 / 7 + 1000)
print("testing speed: ", speed)
sleep(2)
# shutting everything down
pi.set_servo_pulsewidth(ESC_GPIO, 0)
pi.stop()
print("done")
|
[
"andrew.n.maclean@gmail.com"
] |
andrew.n.maclean@gmail.com
|
be8b62d5ae26e8ae7d0be239c62bcceeb0b2bcda
|
ca580577c35d70758afcd44671fedc709dccab59
|
/tools/matplot.py
|
2269d65d63dc3e96282755e74fc88691b315e291
|
[] |
no_license
|
Mittttttto/data_mining
|
d4b09645d6369f5983be2db8418ce3c7f3263834
|
5ced845a4015de115002c28b76a7c4113fd69ed1
|
refs/heads/master
| 2021-08-12T00:49:10.417422
| 2017-11-14T07:34:07
| 2017-11-14T07:34:07
| 109,941,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
#coding=utf-8
'''
Created on 6 November 2017
@author: Wentao Mao
'''
import matplotlib.pyplot as plt
import random
def drawlines(xs,ys,labels=[],xlabel_name="",ylable_name=""):
if len(labels) != 0:
for i in range(0,len(xs)):
color="#"+str(hex(random.randint(1048576,16777215)))[2:]
plt.plot(xs[i],ys[i],color=color,label=labels[i])
else :
for i in range(0,len(xs)):
color="#"+str(hex(random.randint(1048576,16777215)))[2:]
plt.plot(xs[i],ys[i],color=color)
plt.legend(loc='upper right')
if xlabel_name:
plt.xlabel(xlabel_name)
if ylable_name:
plt.ylabel(ylable_name)
# auxiliary grid lines
plt.grid(True)
# display the figure
plt.show()
if __name__ == '__main__':
xs=[[1,2,3,4],[1,2,3,4],[1,2,3,4]]
ys=[[1,1,1,1],[1,2,3,4],[3,3,2,3]]
labels=["s1","s2","s3","s4"]
drawlines(xs,ys,labels,xlabel_name="x",ylable_name="y")
import numpy as np
x=np.arange(1,10,0.05)
xs=[]
xs.append(x)
ys=[]
ys.append(x**2)
drawlines(xs,ys,labels,xlabel_name="x",ylable_name="y")
|
[
"376901333@qq.com"
] |
376901333@qq.com
|
08bf084f5b32dcea69b751fca859a611b861dedb
|
f85b02c5d0e7bcbeb6b67025457f876fbf0bf4d0
|
/websiteInformationGathering.py
|
087a3467758043e4b76cebfbcbba3aadca64eb03
|
[] |
no_license
|
MrZello/basic-web-scraper
|
e88160ff4652f657f2be86fb37a6416586d7fdc7
|
7ef959e9060f08c2271201106ab8733e80b7b223
|
refs/heads/main
| 2023-08-25T11:26:56.720426
| 2021-10-28T00:13:45
| 2021-10-28T00:13:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
import re
import requests
##url = 'https://www.secdaemons.org/'
url = 'https://www.cdw.com/content/cdw/en/locations.html'
infile = open('WebPath.txt')
txt = infile.read()
txt = txt.split()
infile.close()
sitePath = [] # in the case for multiple paths found, I want em saved
for each in txt:
resp = requests.get(url + each)
if (resp.status_code == 200):
print(f'Successful link:', url+each)
sitePath.append(url+each)
email = '\w+\@[\w+\.]+\w{3}'
phone = '\(?\d{3}\)?\-?\ ?\.?\d{3}\.?\-?\ ?\d{4}'
for i in sitePath:
site = requests.get(i)
html = site.text
print(re.findall(phone, html))
print(re.findall(email, html))
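# Illustrative examples (added, not from the original script): the patterns above would
# match strings such as 'info@cdw.com' for the email pattern and '(800) 800-4239' or
# '800-800-4239' for the phone pattern.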
|
[
"pateltejendra23@gmail.com"
] |
pateltejendra23@gmail.com
|
bd091c89b075facb127eaf0ab26726accd824fb3
|
90116e2643e44b5b4d209af247f2f748f444b1fd
|
/dthm4kaiako/general/management/commands/sample_data.py
|
2626d99949a63c7e2f215491077809fe35c56f9f
|
[
"MIT",
"CC-BY-4.0",
"CC-BY-2.0"
] |
permissive
|
uccser/dthm4kaiako
|
671451b52eaf7f8a7c06e55c8f46256822e6324b
|
e777619e168530fdf86e363b0139615e9d4f624c
|
refs/heads/develop
| 2023-08-23T18:11:18.687381
| 2023-08-23T01:52:58
| 2023-08-23T01:52:58
| 94,200,639
| 5
| 4
|
NOASSERTION
| 2023-09-14T01:29:13
| 2017-06-13T10:25:10
|
Python
|
UTF-8
|
Python
| false
| false
| 7,393
|
py
|
"""Module for the custom Django sample_data command."""
import csv
import random
from django.core import management
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.gis.geos import Point
from allauth.account.models import EmailAddress
from tests.users.factories import EntityFactory
from resources.models import (
Language,
TechnologicalArea,
ProgressOutcome,
YearLevel,
CurriculumLearningArea,
)
from tests.resources.factories import (
ResourceFactory,
NZQAStandardFactory,
)
# Events
from events.models import (
Location,
Series,
)
from tests.events.factories import (
EventFactory,
)
# DTTA
from tests.dtta.factories import (
NewsArticleFactory,
PageFactory,
ProjectFactory,
RelatedLinkFactory,
)
# POET
from tests.poet.factories import (
POETFormResourceFactory,
POETFormSubmissionFactory,
POETFormProgressOutcomeGroupFactory,
)
class Command(management.base.BaseCommand):
"""Required command class for the custom Django sample_data command."""
help = "Add sample data to database."
def handle(self, *args, **options):
"""Automatically called when the sample_data command is given."""
if settings.PRODUCTION_ENVIRONMENT:
raise management.base.CommandError(
'This command can only be executed on non-production website or local development.'
)
# Clear all data
management.call_command('flush', interactive=False)
print('Database wiped.')
User = get_user_model()
# Create admin account
admin = User.objects.create_superuser(
'admin',
'admin@dthm4kaiako.ac.nz',
password=settings.SAMPLE_DATA_ADMIN_PASSWORD,
first_name='Admin',
last_name='Account'
)
EmailAddress.objects.create(
user=admin,
email=admin.email,
primary=True,
verified=True
)
print('Admin created.')
# Create user account
user = User.objects.create_user(
'user',
'user@dthm4kaiako.ac.nz',
password=settings.SAMPLE_DATA_USER_PASSWORD,
first_name='Alex',
last_name='Doe'
)
EmailAddress.objects.create(
user=user,
email=user.email,
primary=True,
verified=True
)
print('User created.')
# Create entities
EntityFactory.create_batch(size=10)
print('Entities created.')
# Resources
Language.objects.create(name='English', css_class='language-en')
Language.objects.create(name='Māori', css_class='language-mi')
print('Languages created.')
curriculum_learning_areas = {
'English': 'english',
'Arts': 'arts',
'Health and physical education': 'health-pe',
'Learning languages': 'languages',
'Mathematics and statistics': 'mathematics',
'Science': 'science',
'Social sciences': 'social-sciences',
'Technology': 'technology',
}
for area_name, area_css_class in curriculum_learning_areas.items():
CurriculumLearningArea.objects.create(
name=area_name,
css_class=area_css_class,
)
print('Curriculum learning areas created.')
ta_ct = TechnologicalArea.objects.create(
name='Computational thinking',
abbreviation='CT',
css_class='ta-ct',
)
for i in range(1, 9):
ProgressOutcome.objects.create(
name='Computational thinking - Progress outcome {}'.format(i),
abbreviation='CT PO{}'.format(i),
technological_area=ta_ct,
css_class='po-ct',
)
ta_dddo = TechnologicalArea.objects.create(
name='Designing and developing digital outcomes',
abbreviation='DDDO',
css_class='ta-dddo',
)
for i in range(1, 7):
ProgressOutcome.objects.create(
name='Designing and developing digital outcomes - Progress outcome {}'.format(i),
abbreviation='DDDO PO{}'.format(i),
technological_area=ta_dddo,
css_class='po-dddo',
)
print('Technological areas created.')
print('Progress outcomes created.')
NZQAStandardFactory.create_batch(size=20)
for i in range(0, 14):
YearLevel.objects.create(
level=i
)
print('NZQA standards created.')
ResourceFactory.create_batch(size=20)
print('Resources created.')
# Events
event_series = {
(
'Computer Science for High Schools',
'CS4HS',
),
(
'Computer Science for Primary Schools',
'CS4PS',
),
(
'Computer Science for Professional Development',
'CS4PD',
),
(
'Code Club for Teachers',
'CC4T',
),
}
for (name, abbreviation) in event_series:
Series.objects.create(
name=name,
abbreviation=abbreviation,
)
print('Event series created.')
region_codes = dict()
region_suffix = ' region'
for (code, name) in Location.REGION_CHOICES:
if name.endswith(region_suffix):
name = name[:-len(region_suffix)]
region_codes[name] = code
with open('general/management/commands/sample-data/nz-schools.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in random.sample(list(reader), 100):
if row['Longitude'] and row['Latitude'] and row['Region']:
Location.objects.create(
room='Room A',
name=row['Name'],
street_address=row['Street'],
suburb=row['Suburb'],
city=row['City'],
region=region_codes[row['Region']],
coords=Point(
float(row['Longitude']),
float(row['Latitude'])
),
)
print('Event locations created.')
EventFactory.create_batch(size=50)
print('Events created.')
# DTTA
NewsArticleFactory.create_batch(size=20)
print('DTTA news articles created.')
PageFactory.create_batch(size=5)
print('DTTA pages created.')
ProjectFactory.create_batch(size=5)
print('DTTA projects created.')
RelatedLinkFactory.create_batch(size=10)
print('DTTA related links created.')
# POET
management.call_command('load_poet_data')
POETFormResourceFactory.create_batch(size=20)
print('POET resources created.')
POETFormProgressOutcomeGroupFactory.create_batch(size=6)
print('POET progress outcome groups created.')
POETFormSubmissionFactory.create_batch(size=800)
print('POET submissions created.')
|
[
"jackmorgannz@gmail.com"
] |
jackmorgannz@gmail.com
|
8b108f840fa436c4c01ce3a4081400c6b630b4ef
|
615471d97f8db246bb67a8a4302438e5ba8c11e9
|
/vectorhub/encoders/text/vectorai/vi_encoder.py
|
7e4514bc6135f34b8ea086c7546cce1f2283782c
|
[
"Apache-2.0"
] |
permissive
|
huamichaelchen/vectorhub
|
16cf154d2eed3a5df84c44ad89e19d6d550c9d2b
|
5c041c7046f613989ba8ae1d9e4e6e3db110a684
|
refs/heads/main
| 2023-02-05T16:02:49.431577
| 2020-12-28T15:41:43
| 2020-12-28T15:41:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,432
|
py
|
"""
Vector AI's deployed model. The purpose of this model is to allow developers to easily build encodings and see for themselves
how the embedding works. These models are selected to work out-of-the-box after testing for their success on our end.
To get access to Vector AI, request a username and API key from gh.vctr.ai.
Example:
>>> from vectorhub.text.encoder.vectorai import ViText2Vec
>>> model = ViText2Vec(username, api_key)
>>> model.encode("Hey!")
>>> model.bulk_encode(["hey", "stranger"])
"""
import io
import base64
import numpy as np
import requests
from abc import abstractmethod
from typing import List, Union
from ..base import BaseText2Vec
from ....base import catch_vector_errors
class ViText2Vec(BaseText2Vec):
def __init__(self, username, api_key, url=None, collection_name="base"):
"""
Request a username and API key from gh.vctr.ai!
"""
self.username = username
self.api_key = api_key
if url:
self.url = url
else:
self.url = "https://api.vctr.ai"
self.collection_name = collection_name
self._name = "default"
@catch_vector_errors
def encode(self, text: Union[str, List[str]]):
"""
Convert text to vectors.
"""
if isinstance(text, str):
return requests.get(
url="{}/collection/encode_text".format(self.url),
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": self.collection_name,
"text": text,
},
).json()
elif isinstance(text, list):
return self.bulk_encode(text)
@catch_vector_errors
def bulk_encode(self, texts: List[str]):
"""
Bulk convert text to vectors
"""
return requests.get(
url="{}/collection/bulk_encode_text".format(self.url),
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": self.collection_name,
"texts": texts,
}
).json()
@property
def __name__(self):
if self._name is None:
return "deployed_text"
return self._name
@__name__.setter
def __name__(self, value):
self._name = value
|
[
"jacky.koh@vylar.org"
] |
jacky.koh@vylar.org
|
7df8e7dcb7497bbd8e8adace4d40f0421fc8508e
|
ebc45124b5efbeb3c28ca68486d7cb03e470fa7a
|
/management/views.py
|
12ed387c9631b57a84e797bf3b5376bff669656a
|
[] |
no_license
|
548893684/dormitory
|
a531c900f4c096761e863f21f11558bfe5a17005
|
5b772de6b4b8cce7b1181a7b13a5f3f36631e1b9
|
refs/heads/master
| 2022-09-27T00:43:40.415902
| 2020-05-23T13:22:21
| 2020-05-23T13:22:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
from django.views.generic.base import View
from .models import Room
from django.core import serializers
from django.http import JsonResponse
# View for a two-level cascading (linked) select
class SelectView(View):
def get(self, request):
# Get the parent selection from the GET parameters
typeparent_id = request.GET.get('module', '')
# Filter all children that match the parent; the result is a queryset, so it must be serialized with serializers.serialize()
typesons = serializers.serialize("json", Room.objects.filter(room_building=int(typeparent_id)))
# If any matching rooms exist, return them
if typesons:
return JsonResponse({'typeson': typesons})
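# Illustrative usage (assumed, not part of the original file): with this view mapped to a
# URL such as /select/, a request like GET /select/?module=3 would return JSON of the form
# {"typeson": "<serialized Room objects whose room_building is 3>"} for the front-end
# cascading select to consume.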
|
[
"573248399@qq.com"
] |
573248399@qq.com
|
5ad084261fdef93083afbc9fea4120cc2cc38952
|
aafa404d3d2eb9c61b4cfe54dbbb5fecf4c8bcd3
|
/create_config_file.py
|
07152a13602d62ccbb194ff564a5f99f091db22d
|
[] |
no_license
|
JHoweWowe/RuneLiteBot
|
a336f90254562c225dfde986bff506a27788bab1
|
5a3a2238b67b2027996ba349cc6545dc7d27efec
|
refs/heads/master
| 2023-07-06T19:29:15.321867
| 2021-08-05T16:59:20
| 2021-08-05T16:59:20
| 381,971,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# This file should only be used if the configuration file doesn't work :(
from configparser import ConfigParser
config = ConfigParser()
config['settings'] = {
'bot_type': 'woodcutting',
'woodcutting_find_tree_attempts': '20',
'chop_and_drop_time_seconds': '15',
'drop_number_of_logs_per_cycle': '4',
}
with open('./dev.ini', 'w') as f:
config.write(f)
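# Minimal read-back sketch (illustrative, not part of the original bot): the settings
# written above can be loaded again with the same configparser API.
reader = ConfigParser()
reader.read('./dev.ini')
assert reader['settings']['bot_type'] == 'woodcutting'
assert reader.getint('settings', 'woodcutting_find_tree_attempts') == 20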
|
[
"howejust@gmail.com"
] |
howejust@gmail.com
|
b240df976221a90cb0c2acb49f4ccc4f81d22fbd
|
197420c1f28ccb98059888dff214c9fd7226e743
|
/adaptive_python_cource/82.Char_by_number_1/82.char_by_number_1.py
|
7687c9695c677c2255a94afcbe5a8fa875877c5e
|
[] |
no_license
|
Vovanuch/python-basics-1
|
fc10b6f745defff31364b66c65a704a9cf05d076
|
a29affec12e8b80a1d3beda3a50cde4867b1dee2
|
refs/heads/master
| 2023-07-06T17:10:46.341121
| 2021-08-06T05:38:19
| 2021-08-06T05:38:19
| 267,504,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
'''
Implement a program that returns the character that stands a given distance N after the "\" (backslash) symbol in the ASCII table.
Sample Input 1:
26
Sample Output 1:
v
Sample Input 2:
32
Sample Output 2:
|
'''
n = int(input().strip())
ch = chr(ord('\\') + n)
print(ch)
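# Worked check of the samples above (added note): ord('\\') is 92,
# so N = 26 gives chr(118) == 'v' and N = 32 gives chr(124) == '|'.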
|
[
"vetohin.vladimir@gmail.com"
] |
vetohin.vladimir@gmail.com
|
7058e2a67c748387a97454219ae8dfc4c3aa410c
|
84bbee73c747763364656b70932a8956271a3dc9
|
/general/dictionary.py
|
b7ba74b83a7bf6388346eb2639213450661a5e54
|
[] |
no_license
|
Nkaka23dev/data-structure-python
|
51b7c99de78c02a79fee294d54136d38a3f59083
|
3093ab0ed156f78d917fb76abacf738c6a033f67
|
refs/heads/master
| 2023-08-18T00:11:05.374168
| 2021-09-23T13:08:24
| 2021-09-23T13:08:24
| 339,941,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
posts={
"author":"Nkaka Eric",
"title":"Greetings",
"body":"Hello all my people how are you.",
"posted_date":"26 June 2020"
}
# #two ways of getting value assigned to a certain key.
# print(posts['author'])
# print(posts.get('title'))
# #getting all keys or values
# print(posts.keys())
# print(posts.values())
# #adding and modifying key and value
# posts["author"]="Shyaka willy"
# posts["viewers"]=["Nkaka,eric","Kubwimana Mourice","Kazitunga"]
# print(posts.keys())
# to get keys and values at the same time, use items()
# print(posts.items())
# to remove items, we have pop(), popitem() and del
# print(posts.pop("author"))
# print(posts)
# #this remove the last item
# print(posts.popitem())
# print(posts)
# #delete or clear all items of the list
# del posts
# posts.clear()
# print(posts)
# # loop in dictionary
# for key,post in posts.items():
# print(key+":"+post)
# # copying dictionary
# new_post=posts
# print(new_post)
#or
copied=posts.copy()
print(copied)
|
[
"62412678+Nkaka23dev@users.noreply.github.com"
] |
62412678+Nkaka23dev@users.noreply.github.com
|
00f7d91ff741fec35b98f785ed21c2febd03c95a
|
5fe78169aeae80c6ff43c26930eeda7c8f9978ba
|
/MultiNodeSetup/scripts/run.py
|
ba087ea8240a2fb64c2bc7acb2c004220ab3a90d
|
[] |
no_license
|
medhak19/HF-On-K8S
|
cfc3740602b6c9ff2c9aa0fc14314c6544318e08
|
d6c45b7c392796003c8abf0aa508d0c21d5ef8c1
|
refs/heads/master
| 2020-08-01T02:51:52.265919
| 2019-11-05T06:55:23
| 2019-11-05T06:55:23
| 210,834,198
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,953
|
py
|
import os
import time
BASEDIR = os.path.dirname(__file__)
ORDERER = os.path.join(BASEDIR, "../crypto-config/ordererOrganizations") # it must point to the ordererOrganizations dir
PEER = os.path.join(BASEDIR, "../crypto-config/peerOrganizations") # it must point to the peerOrganizations dir
DESTDIR = os.path.join(BASEDIR, "../deploy-yamls");
### order of run ###
#### orderer
##### namespace(org)
###### single orderer
#### peer
##### namespace(org)
###### ca
####### single peer
def runNamespaces(path):
orgs = os.listdir(path)
for org in orgs:
#print ("Organization : " + org);
if "namespace" in org:
nsYaml = os.path.join(path, org ) #orgYaml namespace.yaml
print ("Orderer YAML: " + nsYaml);
checkAndRun(nsYaml)
time.sleep(5);
def runOrderers(path):
orgs = os.listdir(path)
for org in orgs:
#print ("Organization : " + org);
if ("orderer" in org ) and ("namespace" not in org ):
ordererYaml = os.path.join(path, org )
print ("Orderer YAML: " + ordererYaml);
checkAndRun(ordererYaml)
time.sleep(15);
def runPeers(path):
orgs = os.listdir(path)
print ("Get peer YAMLs..........") ;
for org in orgs:
if "peer" in org:
peerYaml = os.path.join(path, org )
print ("Peer YAML: " + peerYaml);
checkAndRun(peerYaml)
time.sleep(15);
print ("Get cli YAMLs..........") ;
for org in orgs:
if "cli" in org:
cliYaml = os.path.join(path, org )
print ("cli YAML: " + cliYaml);
checkAndRun(cliYaml)
time.sleep(10);
def checkAndRun(f):
if os.path.isfile(f):
os.system("kubectl create -f " + f)
else:
print("file %s no exited"%(f))
if __name__ == "__main__":
print ("Run namespaces.........") ;
runNamespaces(DESTDIR)
print ("Run orderers.........") ;
runOrderers(DESTDIR)
print ("Run peers..........") ;
runPeers(DESTDIR)
|
[
"medhak19@gmail.com"
] |
medhak19@gmail.com
|
0e886a4d40a9e5b4c9fe1dcc857b10bafca18967
|
b4de5f08a92ef804c911f60336ebace7282e2783
|
/innerClass.py
|
9c1890df0c7164820f88c9fdda0d0b4b79a51591
|
[] |
no_license
|
RachanaDontula/beginner-learning
|
201fc6fdf4afa0b47ce91242c6196ec914fd2609
|
f7d5e92c5be95ebc2fc33991e71fdc61bb203d02
|
refs/heads/main
| 2023-01-31T13:23:18.953493
| 2020-12-18T13:59:23
| 2020-12-18T13:59:23
| 322,000,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
""" we can create object of inner class outside the
inner class and inside the outer class
or
we can create object of inner class outside the outer class
but by using outer class name to call it, eg: Student.Laptop()
"""
class Student:
def __init__(self, name, rollno):
self.name = name
self.rollno = rollno
# creating inner class object outside inner
# class and inside outer class
self.lap = self.Laptop()
# the show method prints the object's data, so we don't need print calls outside the class
def show(self):
print(self.name, self.rollno)
self.lap.show()
# creating inner class
class Laptop:
def __init__(self):
self.config = 'i5'
self.ram = 8
self.gen = 7
def show(self):
print(self.config, self.ram, self.gen)
s1 = Student('Rachana',4)
s2 = Student('Sony', 27)
s1.show()
s2.show()
print(id(s1))
print(id(s2))
# creating inner class object outside
# outer class using outer class name to call it
"""
lap1 = Student.Laptop()
"""
|
[
"noreply@github.com"
] |
RachanaDontula.noreply@github.com
|
741dd3a381d4895e9db8da9f7b3a3cc655a75898
|
e9bf5fb440305c7b17935438fd515ca2541babc4
|
/app.py
|
66c2e919c287c39720a77384ffaff4bf30654c3c
|
[] |
no_license
|
jyothiesai/guvij
|
748518673509d4849803fc22b03cd6b2b0b3392f
|
fdcd29f5548c12095f259ff2f74024317787b110
|
refs/heads/master
| 2020-04-16T01:42:44.848569
| 2019-08-07T13:24:44
| 2019-08-07T13:24:44
| 165,183,822
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
#jyothi
A=int(raw_input())
B=int(raw_input())
C=int(raw_input())
for i in range(1,C+1):
ap=((2*A+((C-1)*B))*C)//2
print(ap)
|
[
"noreply@github.com"
] |
jyothiesai.noreply@github.com
|
9fcfd57118f55b03c24f90b9e897a4733c0869a9
|
fd72c01cb6b3d32995d8e82151d230f805022b9f
|
/misc/grd_utils.py
|
1dcb8a7984913a735dfb0e971d491134923c7f48
|
[
"MIT"
] |
permissive
|
zokooo/Sub-GC
|
1093a51bb19a30283803d0e72fe4264e34a18b37
|
b99ede5163be8378d56b834a66b702b23a76e4e2
|
refs/heads/master
| 2023-05-25T10:43:29.245025
| 2021-06-16T17:36:52
| 2021-06-16T17:36:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,583
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
import json
import random
import time
import os
import sys
def get_grounding_material(infos_path, data, sents, sorted_subgraph_ind, att_weights, sort_ind, \
wd_to_lemma, lemma_det_id_dict, det_id_to_det_wd, \
grd_output, use_full_graph=False, grd_sGPN_consensus=True):
'''
Make up the material that is required by grounding evaluation protocol:
find the object region / graph node that has maximum attention weight for each noun word
'''
# for simplicity, just load the graph file again
f_img_id = data['infos'][0]['id']
mask_path = 'data/flickr30k_graph_mask_1000_rm_duplicate/'+str(f_img_id)+'.npz'
sg_path = 'data/flickr30k_sg_output_64/'+str(f_img_id)+'.npz'
img_wh = np.load('data/flickr30k_img_wh.npy',allow_pickle=True,encoding='latin1').tolist()
w, h = img_wh[f_img_id]
bbox = np.load(sg_path,allow_pickle=True,encoding='latin1')['feat'].tolist()['boxes'] # bbox from SG detector
boxes = bbox * max(w, h) / 592 # resize box back to image size
# select best subgraph / sentence to evaluate
subg_index = 0
if grd_sGPN_consensus: # if True, select the sentence ranked by sGPN+consensus; if False, select best sentence ranked by sGPN
model_path = infos_path.split('/')
consensus_rerank_file = model_path[0] + '/' + model_path[1] + '/consensus_rerank_ind.npy'
rerank_ind = np.load(consensus_rerank_file,allow_pickle=True,encoding='latin1').tolist()
subg_index = rerank_ind[f_img_id][0]
sent_used = sents[subg_index]
grd_wd = sent_used.split()
if not use_full_graph: # sub-graph captioning model
# select best sub-graphs ranked by sGPN or sGPN+consensus-reranking
best_subgraph_ind = sorted_subgraph_ind[subg_index].item() + 5 # the index in sampled sub-graph; first 5 are ground-truth sub-graph
graph_mask = np.load(mask_path,allow_pickle=True,encoding='latin1')['feat'].tolist()['subgraph_mask_list'][best_subgraph_ind]
obj_ind_this = graph_mask[1].nonzero()[0] # the index in full graph
att2_ind = torch.max(att_weights.data[sort_ind[subg_index].item()], dim=1)[1][:len(grd_wd)] # get maximum attention index for each word position
else: # model that use full scene graph
obj_ind_this = np.arange(36).astype('int')
att2_ind = torch.max(att_weights.data[subg_index], dim=1)[1][:len(grd_wd)] # get maximum attention index for each word position
# sentence wd -> lemma -> whether lemma can be matched to detection class words -> if yes, get the detection class name
tmp_result = {'clss':[], 'idx_in_sent':[], 'bbox':[]}
for wd_j in range(len(grd_wd)):
if grd_wd[wd_j] not in wd_to_lemma.keys():
print('\n\n{} is not in wd_to_lemma\n\n'.format(grd_wd[wd_j]))
continue
lemma = wd_to_lemma[grd_wd[wd_j]]
if lemma in lemma_det_id_dict: # lemma_det_dict: 478 detection classes, key is word, value is class id
# att2_ind --> find the subgraph object --> find its position in full graph --> get box
tmp_result['bbox'].append(boxes[obj_ind_this[att2_ind[wd_j]]].tolist()) # bounding box corresponding to maximum attention
tmp_result['clss'].append(det_id_to_det_wd[lemma_det_id_dict[lemma]]) # detection class word
tmp_result['idx_in_sent'].append(wd_j)
grd_output[f_img_id].append(tmp_result)
|
[
"yzhong52@wisc.edu"
] |
yzhong52@wisc.edu
|
0ab6d700dc31f0dbf919f3e4e06303f7b31b39bb
|
8161c66001d548424d47334317d5d6d36715a474
|
/lib/utils.py
|
936fd000a46012a94ef374eeb36da04a579680f9
|
[] |
no_license
|
PenXLa/Machine-Learning-Portable
|
50e255d2fd2db5fe176dce491aafcbcd47d9872f
|
b05cea5e0c61eea9585cd3a9d16792f80ea48fd8
|
refs/heads/main
| 2023-08-30T11:53:02.380286
| 2021-11-10T06:12:32
| 2021-11-10T06:12:32
| 423,283,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,107
|
py
|
import os
from pathlib import Path
from tqdm import tqdm
# Detect the current runtime environment
in_colab = False
num_workers = 0
try:
import google.colab as colab # running on Colab
colab.drive.mount("/content/drive", force_remount=True) # mount Google Drive
in_colab = True
num_workers = 2
except:
pass
# Path configuration
root_path = Path(__file__).parents[1] # project root directory
data_path = root_path / "data" # data directory
models_path = root_path / "models" # models directory
# Persistence directory: locally it is the project directory, on Colab it is Google Drive
root_drive = root_path
data_drive = data_path
models_drive = models_path
if in_colab:
root_drive = Path("/content/drive/MyDrive/Machine-Learning-Portable Sync") # Google Drive sync root directory
data_drive = root_drive / "data"
models_drive = root_drive / "models"
# Download data from Kaggle.
# If path is not given, download into the data directory by default.
# Returns the path of the downloaded file.
# [Warning] Due to a limitation of the kaggle API, the real file name is not retrieved;
# {key}.zip is simply used as the file name, which might cause errors.
def kaggle_download(key, path=data_path):
import subprocess
# On Colab, check whether the kaggle credentials are configured
kaggle_path = os.path.expanduser("~/.kaggle/kaggle.json")
if in_colab and not os.path.exists(kaggle_path):
Path(kaggle_path).parent.mkdir(parents=True, exist_ok=True)
from shutil import copyfile
copyfile("/content/drive/MyDrive/kaggle.json", kaggle_path)
subprocess.call(["kaggle", "competitions", "download", "-c", key, "-p", path])
return os.path.join(path, f"{key}.zip")
def unzip(file, targetdir):
from zipfile import ZipFile
with ZipFile(file) as zip_ref:
for file in tqdm(zip_ref.namelist(), desc="Unzip"):
zip_ref.extract(member=file, path=targetdir)
def unrar(file, targetdir):
from rarfile import RarFile
with RarFile(file) as rf:
for file in tqdm(rf.namelist(), desc="Unrar"):
rf.extract(member=file, path=targetdir)
# If no extraction path is given, extract into a new folder with the same name as the archive
def extractAll(file, targetdir=None, delete=False):
file = Path(file)
if not file.exists():
raise RuntimeError("Compressed file not exists")
if targetdir is None:
targetdir = Path(file).parent / file.stem
Path(targetdir).mkdir(parents=True, exist_ok=True)
if file.suffix.lower() == ".rar":
unrar(file, targetdir)
elif file.suffix.lower() == ".zip":
unzip(file, targetdir)
else:
raise RuntimeError("Unsupported format")
if delete:
os.remove(file)
# Download data from Kaggle and extract it.
# After extraction the files go into data/dir_name (i.e. always under data; some flexibility is traded for convenience).
# If dir_name is not given, it defaults to the same name as key.
# Returns the extraction directory.
def kaggle_download_extract(key, dir_name=None):
if dir_name is None:
dir_name = key
zipfile = kaggle_download(key, data_path) # temporarily download to the data directory
extractAll(zipfile, os.path.join(data_path, dir_name), delete=True)
# Timer helper: returns True if more than t seconds have passed since this function last returned True.
# channel is a timing channel; different channels are timed independently. channel can be of any type.
_last_t = {}
def time_passed(t, channel=None):
global _last_t
import time
now = time.time()
if channel not in _last_t:
_last_t[channel] = now
if now - _last_t[channel] >= t:
_last_t[channel] = now
return True
else:
return False
# Interval helper: returns True once every n calls.
_loop_counter = {}
def loop_passed(n, channel=None):
global _loop_counter
if channel not in _loop_counter:
_loop_counter[channel] = 0
if _loop_counter[channel] < n-1:
_loop_counter[channel] += 1
return False
else:
_loop_counter[channel] = 0
return True
# Create the folder if it does not exist
def mksure(path):
p = Path(path)
if not p.is_dir():
p = p.parent
p.mkdir(parents=True, exist_ok=True)
return path
|
[
"921417536@qq.com"
] |
921417536@qq.com
|
2e7e22b17d66826aa86e2735ef5ee4ca5b4eaf57
|
7f097c8aafba8ea79b7d2e5277fa3c8e1826b6b7
|
/app.py
|
9f9c15141cb32b5ef4be221f6e7f090f986037d0
|
[] |
no_license
|
itilakGH/surfs_up
|
50dcdf8a268f6a87c5437ffb8b7de746388cdd1d
|
5b58deb9b7a5bd2662a043bf860272a0252de0c3
|
refs/heads/master
| 2022-07-03T23:16:25.160644
| 2020-05-10T18:56:20
| 2020-05-10T18:56:20
| 261,632,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,112
|
py
|
import datetime as dt
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#import sqlite3
engine = create_engine('sqlite:///hawaii.sqlite')
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
app = Flask(__name__)
@app.route("/")
def welcome():
return(
'''
Welcome to the Climate Analysis API!\n
Available Routes:\n
/api/v1.0/precipitation\n
/api/v1.0/stations\n
/api/v1.0/tobs\n
/api/v1.0/temp/start/end\n
'''
)
@app.route("/api/v1.0/precipitation")
def precipitation():
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
precipitation = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= prev_year).all()
precip = {date: prcp for date, prcp in precipitation}
return jsonify(precip)
@app.route("/api/v1.0/stations")
def stations():
results = session.query(Station.station).all()
stations = list(np.ravel(results))
return jsonify(stations)
@app.route("/api/v1.0/tobs")
def temp_monthly():
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
results = session.query(Measurement.tobs).\
filter(Measurement.station == 'USC00519281').\
filter(Measurement.date >= prev_year).all()
temps = list(np.ravel(results))
return jsonify(temps)
@app.route("/api/v1.0/temp/<start>")
@app.route("/api/v1.0/temp/<start>/<end>")
def stats(start=None, end=None):
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
if not end:
results = session.query(*sel).\
filter(Measurement.date <= start).all()
temps = list(np.ravel(results))
return jsonify(temps)
results = session.query(*sel).\
filter(Measurement.date >= start).\
filter(Measurement.date <= end).all()
temps = list(np.ravel(results))
return jsonify(temps)
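# Illustrative usage (assumed, not part of the original file): run the app with
# `flask run` (or add app.run()), then a request such as
# GET /api/v1.0/temp/2017-06-01/2017-06-30 returns [min, avg, max] temperatures as JSON.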
|
[
"irentilak@gmail.com"
] |
irentilak@gmail.com
|
c99f08813a8dcb71e251730f8b08e6097aec3447
|
ef904b441a35e3541f62f3a8745d306604e326d2
|
/profiles/migrations/0005_auto_20170709_1446.py
|
aa141641e76cbcc90ea422635248f4721d8bf1aa
|
[] |
no_license
|
dayoadeyemi/profile_displays
|
51499436714433b55d384b090ca188d66debfc6e
|
ec1d6120fa24983abcec15b14e03a0152b120bf7
|
refs/heads/master
| 2020-12-02T06:39:12.517477
| 2017-07-14T18:07:49
| 2017-07-14T18:07:49
| 96,870,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,738
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-09 13:46
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0004_auto_20170709_1430'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='client_types',
),
migrations.AddField(
model_name='profile',
name='client_types',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=256), default=[], size=None),
preserve_default=False,
),
migrations.RemoveField(
model_name='profile',
name='consultation_types',
),
migrations.AddField(
model_name='profile',
name='consultation_types',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=256), default=[], size=None),
preserve_default=False,
),
migrations.RemoveField(
model_name='profile',
name='counselling_areas',
),
migrations.AddField(
model_name='profile',
name='counselling_areas',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=256), default=[], size=None),
preserve_default=False,
),
migrations.DeleteModel(
name='ClientType',
),
migrations.DeleteModel(
name='ConsultationType',
),
migrations.DeleteModel(
name='CounsellingArea',
),
]
|
[
"dayoadeyemi1991@hotmail.com"
] |
dayoadeyemi1991@hotmail.com
|
d7df037cd3bd3a374ebab724073acc2e63500a18
|
35c5c0abdba70dcfe168dbdb4952bc12958df329
|
/program3/animate_poly.py
|
9da552980707a0c5b995d3481c8aada7254dbf31
|
[] |
no_license
|
yellowahra/4553-SpatialDS-ahla
|
9b143332161d3799a28a073637bb9f1bf8c0c6c5
|
b3066e7277ee26ed46cb1cb0d7a7af69e093b005
|
refs/heads/master
| 2020-04-15T07:57:21.713206
| 2016-10-13T03:33:09
| 2016-10-13T03:33:09
| 41,447,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,180
|
py
|
"""
@author - AHLA CHO
@date - 10/05/2015
@description - Three polygons and three points are moving;
if a point is inside a polygon, its color changes,
and if two polygons collide, they change direction.
"""
import pantograph
import math
import sys
import copy
"""
Point and Rectangle classes.
This code is in the public domain.
Point -- point with (x,y) coordinates
Rect -- two points, forming a rectangle
"""
class Point:
"""A point identified by (x,y) coordinates.
supports: +, -, *, /, str, repr
length -- calculate length of vector to point from origin
distance_to -- calculate distance between two points
as_tuple -- construct tuple (x,y)
clone -- construct a duplicate
integerize -- convert x & y to integers
floatize -- convert x & y to floats
move_to -- reset x & y
slide -- move (in place) +dx, +dy, as spec'd by point
slide_xy -- move (in place) +dx, +dy
rotate -- rotate around the origin
rotate_about -- rotate around another point
"""
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
def __add__(self, p):
"""Point(x1+x2, y1+y2)"""
return Point(self.x+p.x, self.y+p.y)
def __sub__(self, p):
"""Point(x1-x2, y1-y2)"""
return Point(self.x-p.x, self.y-p.y)
def __mul__( self, scalar ):
"""Point(x1*x2, y1*y2)"""
return Point(self.x*scalar, self.y*scalar)
def __div__(self, scalar):
"""Point(x1/x2, y1/y2)"""
return Point(self.x/scalar, self.y/scalar)
def __str__(self):
return "(%s, %s)" % (self.x, self.y)
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.x, self.y)
def length(self):
return math.sqrt(self.x**2 + self.y**2)
def distance_to(self, p):
"""Calculate the distance between two points."""
return (self - p).length()
def as_tuple(self):
"""(x, y)"""
return (self.x, self.y)
def clone(self):
"""Return a full copy of this point."""
return Point(self.x, self.y)
def integerize(self):
"""Convert co-ordinate values to integers."""
self.x = int(self.x)
self.y = int(self.y)
def floatize(self):
"""Convert co-ordinate values to floats."""
self.x = float(self.x)
self.y = float(self.y)
def move_to(self, x, y):
"""Reset x & y coordinates."""
self.x = x
self.y = y
def slide(self, p):
'''Move to new (x+dx,y+dy).
Can anyone think up a better name for this function?
slide? shift? delta? move_by?
'''
self.x = self.x + p.x
self.y = self.y + p.y
def slide_xy(self, dx, dy):
'''Move to new (x+dx,y+dy).
Can anyone think up a better name for this function?
slide? shift? delta? move_by?
'''
self.x = self.x + dx
self.y = self.y + dy
def rotate(self, rad):
"""Rotate counter-clockwise by rad radians.
Positive y goes *up,* as in traditional mathematics.
Interestingly, you can use this in y-down computer graphics, if
you just remember that it turns clockwise, rather than
counter-clockwise.
The new position is returned as a new Point.
"""
s, c = [f(rad) for f in (math.sin, math.cos)]
x, y = (c*self.x - s*self.y, s*self.x + c*self.y)
return Point(x,y)
def rotate_about(self, p, theta):
"""Rotate counter-clockwise around a point, by theta degrees.
Positive y goes *up,* as in traditional mathematics.
The new position is returned as a new Point.
"""
result = self.clone()
result.slide_xy(-p.x, -p.y)
result = result.rotate(theta)
result.slide_xy(p.x, p.y)
return result
def set_direction(self,direction):
assert direction in ['N','NE','E','SE','S','SW','W','NW']
self.direction = direction
def update_position(self):
if self.direction == "N":
self.y -= 1
if self.direction == "NE":
self.y -= 1
self.x += 1
if self.direction == "E":
self.x += 1
if self.direction == "SE":
self.x += 1
self.y += 1
if self.direction == "S":
self.y += 1
if self.direction == "SW":
self.x -= 1
self.y += 1
if self.direction == "W":
self.x -= 1
if self.direction == "NW":
self.y -= 1
self.x -= 1
class Rect:
"""A rectangle identified by two points.
The rectangle stores left, top, right, and bottom values.
Coordinates are based on screen coordinates.
origin top
+-----> x increases |
| left -+- right
v |
y increases bottom
set_points -- reset rectangle coordinates
contains -- is a point inside?
overlaps -- does a rectangle overlap?
top_left -- get top-left corner
bottom_right -- get bottom-right corner
expanded_by -- grow (or shrink)
"""
def __init__(self, pt1, pt2):
"""Initialize a rectangle from two points."""
self.set_points(pt1, pt2)
def set_points(self, pt1, pt2):
"""Reset the rectangle coordinates."""
(x1, y1) = pt1.as_tuple()
(x2, y2) = pt2.as_tuple()
self.left = min(x1, x2)
self.top = min(y1, y2)
self.right = max(x1, x2)
self.bottom = max(y1, y2)
def contains(self, pt):
"""Return true if a point is inside the rectangle."""
x,y = pt.as_tuple()
return (self.left <= x <= self.right and
self.top <= y <= self.bottom)
def overlaps(self, other):
"""Return true if a rectangle overlaps this rectangle."""
return (self.right > other.left and self.left < other.right and
self.top < other.bottom and self.bottom > other.top)
def top_left(self):
"""Return the top-left corner as a Point."""
return Point(self.left, self.top)
def bottom_right(self):
"""Return the bottom-right corner as a Point."""
return Point(self.right, self.bottom)
def expanded_by(self, n):
"""Return a rectangle with extended borders.
Create a new rectangle that is wider and taller than the
immediate one. All sides are extended by "n" points.
"""
p1 = Point(self.left-n, self.top-n)
p2 = Point(self.right+n, self.bottom+n)
return Rect(p1, p2)
def __str__( self ):
return "<Rect (%s,%s)-(%s,%s)>" % (self.left,self.top, self.right,self.bottom)
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, Point(self.left, self.top), Point(self.right, self.bottom))
class Polygon:
"""A polygon contains a sequence of points on the 2D plane
and connects them together.
"""
def __init__(self, pts=[]):
"""Initialize a polygon from list of points."""
self.set_points(pts)
def set_points(self, pts):
"""Reset the poly coordinates."""
self.minX = sys.maxsize
self.minY = sys.maxsize
self.maxX = sys.maxsize * -1
self.maxY = sys.maxsize * -1
self.points = []
for p in pts:
x,y = p
if x < self.minX:
self.minX = x
if x > self.maxX:
self.maxX = x
if y < self.minY:
self.minY = y
if y > self.maxY:
self.maxY = y
self.points.append(Point(x,y))
self.mbr = Rect(Point(self.minX,self.minY),Point(self.maxX,self.maxY))
"""
@function get_points
Return a sequence of tuples of the points of the polygon.
"""
def get_points(self):
generic = []
for p in self.points:
generic.append(p.as_tuple())
return generic
"""
@function
determine if a point is inside a given polygon or not
Polygon is a list of (x,y) pairs.
"""
def point_inside_polygon(self, p):
n = len(self.points)
inside =False
p1x,p1y = self.points[0].as_tuple()
for i in range(n+1):
p2x,p2y = self.points[i % n].as_tuple()
if p.y > min(p1y,p2y):
if p.y <= max(p1y,p2y):
if p.x <= max(p1x,p2x):
if p1y != p2y:
xinters = (p.y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or p.x <= xinters:
inside = not inside
p1x,p1y = p2x,p2y
return inside
def is_collision(self, poly):
"""Check if two pologies collide with each other by examing if their mbrs overlap."""
return self.mbr.overlaps(poly.mbr) or poly.mbr.overlaps(self.mbr)
def set_direction(self, direction):
"""Set direction for all points of the pology."""
for p in self.points:
p.set_direction(direction)
def update_position(self):
"""Update positions of all points of the pology, and the mbr."""
px, py = self.points[0].x, self.points[0].y
for p in self.points:
p.update_position()
dx, dy = self.points[0].x - px, self.points[0].y - py
# update mbr
mbr = self.mbr
mbr.left += dx
mbr.right += dx
mbr.top += dy
mbr.bottom += dy
def __str__( self ):
return "<Polygon \n Points: %s \n Mbr: %s>" % ("".join(str(self.points)),str(self.mbr))
def __repr__(self):
return "%s %s" % (self.__class__.__name__,''.join(str(self.points)))
class Driver(pantograph.PantographHandler):
def setup(self):
"""Set up the points, color, directions....
"""
self.p1 = Point(300, 100)
self.p2 = Point(self.width/2, self.height/2)
self.p3 = Point(700, 200)
self.p1.set_direction("SE")
self.p2.set_direction("N")
self.p3.set_direction("NW")
self.p1.color = "#0F0"
self.p2.color = "#0F0"
self.p3.color = "#0F0"
self.poly1 = Polygon([(405, 367),(444, 413),(504, 384),(519, 307),(453, 248),(380, 250),(365, 278),(374, 325)])
self.poly2 = Polygon([(80,163),(90, 74),(145,60),(210,69)])
self.poly3 = Polygon([(236,144), (317,179), (323,229), (187,299), (150,280)])
self.poly1.set_direction("SE")
self.poly2.set_direction("NE")
self.poly3.set_direction("SW")
def drawShapes(self):
"""Draw points and polygons on the canvas."""
self.draw_rect(0, 0, self.width, self.height, color= "#000")
self.draw_polygon(self.poly2.get_points(), color = "#000")
self.draw_polygon(self.poly1.get_points(), color = "#000")
self.draw_polygon(self.poly3.get_points(), color = "#000")
self.fill_oval(self.p1.x, self.p1.y, 5, 5, self.p1.color)
self.fill_oval(self.p2.x, self.p2.y, 5, 5, self.p2.color)
self.fill_oval(self.p3.x, self.p3.y, 5, 5, self.p3.color)
"""
@function hitWall
Check whether points or polygons hit a wall; if so, change their directions.
"""
def hitWall(self):
for p in [self.p1, self.p2, self.p3]:
axis = self.__point_hit_wall(p)
if axis:
p.set_direction(self.__reflection_direction(p.direction, axis))
for poly in [self.poly1, self.poly2, self.poly3]:
for p in poly.points:
axis = self.__point_hit_wall(p)
if axis:
poly.set_direction(self.__reflection_direction(p.direction, axis))
break
"""
@function __point_hit_wall
Check if a point hit the wall.
"""
def __point_hit_wall(self, p):
axis = None
if p.x >= self.width or p.x <= 0:
axis = 'y'
if p.y >= self.height or p.y <= 0:
axis = 'x'
return axis
"""
@function __reflection_direction
Reflect the moving direction after hit a wall.
"""
def __reflection_direction(self, direction, axis):
return {'E':'W', 'W':'E', 'S':'N', 'N':'S', 'NW':'SW' if axis == 'x' else 'NE', 'NE':'SE' if axis == 'x' else 'NW',\
'SW':'NW' if axis == 'x' else 'SE', 'SE':'NE' if axis == 'x' else 'SW' }[direction]
"""
@function pointsInPolygon
Check if any of the three points is inside one of the three polygons.
If so, change the color of the point.
"""
def pointsInPolygon(self):
for p in [self.p1, self.p2, self.p3]:
for poly in [self.poly1, self.poly2, self.poly3]:
if poly.point_inside_polygon(p):
p.color = "#F00"
break
else:
p.color = "#0F0"
"""
@function polygonCollide
Check if one of the three polygons collides with another.
If so, reverse their directions.
"""
def polygonCollide(self, prepoly1, prepoly2, prepoly3):
if self.poly1.is_collision(self.poly2) and not prepoly1.is_collision(prepoly2):
print('collision 1 2')
self.poly1.set_direction(self.__reverse_direction(self.poly1.points[0].direction))
self.poly2.set_direction(self.__reverse_direction(self.poly2.points[0].direction))
elif self.poly1.is_collision(self.poly3) and not prepoly1.is_collision(prepoly3):
print('collision 1 3')
self.poly1.set_direction(self.__reverse_direction(self.poly1.points[0].direction))
self.poly3.set_direction(self.__reverse_direction(self.poly3.points[0].direction))
elif self.poly3.is_collision(self.poly2) and not prepoly3.is_collision(prepoly2):
print('collision 3 2')
self.poly3.set_direction(self.__reverse_direction(self.poly3.points[0].direction))
self.poly2.set_direction(self.__reverse_direction(self.poly2.points[0].direction))
"""
@function __reverse_direction
Reverse the direction after collision.
"""
def __reverse_direction(self, direction):
return {'E':'W', 'W':'E', 'S':'N', 'N':'S', 'NW':'SE', 'NE':'SW', 'SE':'NW', 'SW':'NE'}[direction]
"""
@function update
Update points and polygons after an interval: position, direction and color.
"""
def update(self):
self.clear_rect(0, 0, self.width, self.height) # remove entire previous draw
self.p1.update_position()
self.p2.update_position()
self.p3.update_position()
prepoly1, prepoly2, prepoly3 = copy.deepcopy(self.poly1), copy.deepcopy(self.poly2), copy.deepcopy(self.poly3)
for poly in [self.poly1, self.poly2, self.poly3]:
poly.update_position()
self.pointsInPolygon()
self.hitWall()
self.polygonCollide(prepoly1, prepoly2, prepoly3)
self.drawShapes()
if __name__ == '__main__':
app = pantograph.SimplePantographApplication(Driver)
app.run()
|
[
"ahracho216@gmail.com"
] |
ahracho216@gmail.com
|
c763e82f565660d259ebe1f7df8e09c610f1e643
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/F3M4PhqC4JdX28Qmx_2.py
|
e2d2b534da8fcf7ce4a9c3c2f395cac870ce0686
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
"""
**Mubashir** has started his journey from home. Given a string of
`directions` (N=North, W=West, S=South, E=East), he will walk for one minute
in each direction. Determine whether a set of directions will lead him back to
the starting position or not.
### Examples
back_to_home("EEWE") ➞ False
back_to_home("NENESSWW") ➞ True
back_to_home("NEESSW") ➞ False
### Notes
N/A
"""
def back_to_home(directions):
north = 0
south = 0
east = 0
west = 0
for i in directions:
if i == "N":
north = north + 1
if i == "S":
south = south + 1
if i == "E":
east = east + 1
if i == "W":
west = west + 1
if (north - south == 0) and (east - west == 0):
return True
else:
return False
print(back_to_home("NNEESSWW"))
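# Minimal sanity checks against the docstring examples above (added for illustration):
assert not back_to_home("EEWE")
assert back_to_home("NENESSWW")
assert not back_to_home("NEESSW")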
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
0c90c1e1fcfd54c1febcc38d681e1a711667b562
|
d48261d78cc71adfec615d96308f71d057f5b596
|
/flask/crawling.py
|
c9868b7fac25f18bcb6a2485ae1af6584af02dc0
|
[
"MIT"
] |
permissive
|
dayoungMM/TIL
|
97cc33977d90bcac131460f498a228a50d300cf0
|
b844ef5621657908d4c256cdfe233462dd075e8b
|
refs/heads/master
| 2020-08-16T11:13:08.243190
| 2020-08-10T08:29:40
| 2020-08-10T08:29:40
| 215,495,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
import requests
from bs4 import BeautifulSoup
url = 'http://www.op.gg/summoner/userName=hide+on+bush'
req = requests.get(url).text
data = BeautifulSoup(req,"html.parser")
tier = data.select_one("div.tabItem.Content.SummonerLayoutContent.summonerLayout-summary > div.SideContent > div.TierBox.Box > div > div.TierRankInfo > div.TierRank").text
win = data.select_one("div.tabItem.Content.SummonerLayoutContent.summonerLayout-summary > div.SideContent > div.TierBox.Box > div > div.TierRankInfo > div.TierInfo > span.WinLose > span.wins").text[:-1]
print(tier)
print(win)
|
[
"dalbeetm@gmail.com"
] |
dalbeetm@gmail.com
|
8785a1e1540d68240f224920fcb097fd73a6d483
|
f584f3bfacb51a58b1bb3cdce2c4dd75c637c115
|
/house_price/house_price/middlewares.py
|
6c4083f8b989ef4763e5c6f316a45138c5aa7b46
|
[] |
no_license
|
cherryMonth/machine_learning
|
22d25557807a8fa89c8cbbcbc2998e922ab84787
|
1e8d30add10ae46043b76e664e4250a3e2b22e3f
|
refs/heads/master
| 2021-06-26T15:38:01.521896
| 2020-10-20T07:01:48
| 2020-10-20T07:01:48
| 152,702,488
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,605
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class HousePriceSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class HousePriceDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
[
"1115064450@qq.com"
] |
1115064450@qq.com
|
75c66dcb65846adfe916f3d6a503c31bec74ec4e
|
80780f3e25eaf0fdefd91ba7c22eac09b46e8f8f
|
/bert_encoder/helper.py
|
c542af2013ff62567eb769f0114b259af24fdb3d
|
[
"MIT"
] |
permissive
|
4AI/bert-encoder
|
e4f5d6c06d98ea04f9a2163b16c557fce13db3f0
|
781b663f8e9f87fcdd14fce8469323cc4c60e74f
|
refs/heads/master
| 2020-06-05T05:14:36.413819
| 2019-06-25T04:26:46
| 2019-06-25T04:26:46
| 192,325,235
| 4
| 1
|
MIT
| 2019-06-25T04:26:47
| 2019-06-17T10:26:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,803
|
py
|
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys
import uuid
import pickle
__all__ = ['set_logger', 'import_tf']
class NTLogger:
def __init__(self, context, verbose):
self.context = context
self.verbose = verbose
def info(self, msg, **kwargs):
print('I:%s:%s' % (self.context, msg), flush=True)
def debug(self, msg, **kwargs):
if self.verbose:
print('D:%s:%s' % (self.context, msg), flush=True)
def error(self, msg, **kwargs):
print('E:%s:%s' % (self.context, msg), flush=True)
def warning(self, msg, **kwargs):
print('W:%s:%s' % (self.context, msg), flush=True)
def set_logger(context, verbose=False):
if os.name == 'nt': # for Windows
return NTLogger(context, verbose)
logger = logging.getLogger(context)
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
formatter = logging.Formatter(
'%(levelname)-.1s:' + context + ':[%(filename).3s:%(funcName).3s:%(lineno)3d]:%(message)s', datefmt=
'%m-%d %H:%M:%S')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG if verbose else logging.INFO)
console_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(console_handler)
return logger
def import_tf(device_id=-1, verbose=False, use_fp16=False):
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if device_id < 0 else str(device_id)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' if verbose else '3'
os.environ['TF_FP16_MATMUL_USE_FP32_COMPUTE'] = '0' if use_fp16 else '1'
os.environ['TF_FP16_CONV_USE_FP32_COMPUTE'] = '0' if use_fp16 else '1'
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.DEBUG if verbose else tf.logging.ERROR)
return tf
|
[
"mitree@sina.com"
] |
mitree@sina.com
|
92c6e398cf0e69452797dfee69cb0b9d2de5cce9
|
308265e5beb273751012988331c9889b25e3b96a
|
/python/basicUse.py
|
526a38a67bee1cc1786a3443f1ccd1f2d8d1cb91
|
[] |
no_license
|
BitterPotato/CodeClips
|
fba257f7bba6e7467a06580d462675f262f87abd
|
bb92f94d46f5866143f788a0b4a24a88f2c40d6f
|
refs/heads/master
| 2020-12-02T22:17:39.455253
| 2017-07-14T12:27:59
| 2017-07-14T12:27:59
| 96,109,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
import urllib.request
import os
import re
def getURLFileSize(url):
request = urllib.request.Request(url, headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)'})
request.get_method = lambda : 'HEAD'
f = urllib.request.urlopen(request)
print(f.headers)
if "Content-Length" in f.headers:
size = int (f.headers["Content-Length"])
else:
size = len (f.read ())
return size
def tranverseFileSize():
dirpath = 'xxxxxxx'
oszlist = os.listdir(dirpath)
for file in oszlist:
print(os.path.getsize(dirpath + '\\' + file))
# ===== file reading/ writing & regular expression =====
file = open("reader.txt")
wfile = open("writer.txt", 'w')
while 1:
line = file.readline()
if not line:
break
match = re.search('[0-9]*', line)
if match:
wfile.write(match.group() + '\n')
else:
wfile.write(line)
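# Minimal usage sketch (added for illustration; the URL below is a placeholder, not part
# of the original snippet): getURLFileSize() issues a HEAD request and falls back to
# reading the body when the server does not report Content-Length.
if __name__ == '__main__':
    print(getURLFileSize('https://example.com/'))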
|
[
"yangwj116@qq.com"
] |
yangwj116@qq.com
|
f106b31fa6c7eddf6acc9f7db578e28b95f6ca7a
|
0c1e27127922e26d82cb8eb9b474aeba5e5b5452
|
/day21.py
|
22adfa43af3631f58bac038618215ab3b908f473
|
[] |
no_license
|
cbeach512/dailycodingproblem
|
0b3ceec2bca1459ddfdc2c6271e10e419066a8b3
|
c3395799f112adc253e3304613f64e479db9f26d
|
refs/heads/master
| 2020-05-23T12:08:48.854510
| 2019-11-01T19:33:50
| 2019-11-01T19:33:50
| 186,751,915
| 0
| 0
| null | 2019-11-01T19:33:51
| 2019-05-15T04:59:02
|
Python
|
UTF-8
|
Python
| false
| false
| 941
|
py
|
#!/usr/bin/env python3
"""Problem - Day 21
Given an array of time intervals (start, end) for classroom lectures (possibly overlapping), find the minimum number of rooms required.
For example, given [(30, 75), (0, 50), (60, 150)], you should return 2.
"""
def roomsNeeded(times):
if not times:
return 0
rooms_needed = 1
times.sort()
booked = [times[0]]
passed_times = []
for time in times[1:]:
if time[0] >= booked[-1][1]:
booked.append(time)
else:
passed_times.append(time)
if passed_times:
rooms_needed += roomsNeeded(passed_times)
return rooms_needed
def main():
times1 = [(30, 75), (0, 50), (60, 150)]
print(roomsNeeded(times1))
times2 = [(110, 150), (50, 80), (0, 10), (140, 150), (100, 140),
(40, 80), (80, 100), (70, 100), (130, 140), (90, 110)]
print(roomsNeeded(times2))
if __name__ == '__main__':
main()
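# An alternative sketch (added for illustration, not part of the original solution):
# sweep the intervals in sorted order and keep a min-heap of current end times, which
# gives the same answer without the recursive pass over the leftover intervals.
import heapq

def rooms_needed_heap(times):
    if not times:
        return 0
    ends = []  # min-heap of end times for rooms currently in use
    for start, end in sorted(times):
        if ends and ends[0] <= start:
            heapq.heappop(ends)  # a room has freed up, reuse it
        heapq.heappush(ends, end)
    return len(ends)

print(rooms_needed_heap([(30, 75), (0, 50), (60, 150)]))  # 2, matching roomsNeeded above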
|
[
"cbeach512@gmail.com"
] |
cbeach512@gmail.com
|
951cd0971275b159e7a187e4260a5809357784d4
|
9b39224a75d23e24ca78a9e6f8f6433077df181f
|
/python/bin/paddle2onnx
|
c0e2041e9f93c30de258648c82403caf97eaaf63
|
[
"MIT"
] |
permissive
|
Yale1417/dazhou-dw
|
a9f1ec9270a9ee0fdec64f11e129bf6673295779
|
902b4b625cda4c9e4eb205017b8955b81f37a0b5
|
refs/heads/main
| 2023-07-13T07:00:34.065368
| 2021-08-20T03:45:56
| 2021-08-20T03:45:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
#!/home/latent-lxx/Latent/Latent/dazhou-dw/python/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from paddle2onnx.command import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"latentsky@gmail.com"
] |
latentsky@gmail.com
|
|
62fe8695d8ef863b66e223fcad6c8acee2c788f6
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Python modules/MR_FRAOption.py
|
a5d4e2002f28b66ffe25911a1ec975bccb15f8cb
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,476
|
py
|
'''
Purpose :Market Risk feed files],[Updated TermNB and TermUNIT]
Department and Desk :[IT],[MR]
Requester: :[Natalie Austin],[Susan Kruger]
Developer :[Douglas Finkel / Henk Nel],[Willie van der Bank]
CR Number :[264536,289168],[816235 04/11/11]
Date CR Requestor Developer Change
----------------------------------------------------------------------------------------
2020-09-11 CHG0128302 Garth Saunders Heinrich Cronje https://absa.atlassian.net/browse/CMRI-776
'''
import ael, string, acm, PositionFile, MR_MainFunctions
InsL = []
# OPENFILE ##########################################################################################################
def OpenFile(temp,FileDir,Filename,PositionName,*rest):
filename = FileDir + Filename
PositionFilename = FileDir + PositionName
outfile = open(filename, 'w')
outfileP = open(PositionFilename, 'w')
outfile.close()
outfileP.close()
del InsL[:]
InsL[:] = []
return filename
# OPENFILE ##########################################################################################################
# WRITE - FILE ######################################################################################################
def Write(i,FileDir,Filename,PositionName,*rest):
filename = FileDir + Filename
PositionFilename = FileDir + PositionName
ins = acm.FInstrument[i.insaddr]
# trade = acm.FTrade[t.trdnbr]
context = acm.GetDefaultContext()
if (i.insaddr) not in InsL:
InsL.append(i.insaddr)
outfile = open(filename, 'a')
#Base record
BASFLAG = 'BAS'
HeaderName = 'Cap/Floor'
OBJECT = 'Cap/FloorSPEC'
TYPE = 'Cap/Floor'
NAME = MR_MainFunctions.NameFix(i.insid)
IDENTIFIER = 'insaddr_'+str(i.insaddr)
CurrencyCAL = ''
CurrencyDAYC = ''
CurrencyPERD = ''
CurrencyUNIT = i.curr.insid
CapFLAG = ''
if i.call_option:
CapFLAG = 'True'
elif not i.call_option:
CapFLAG = 'False'
for l in i.und_insaddr.legs():
EffectiveDATE = MR_MainFunctions.Datefix(l.start_day)
MaturityDATE = MR_MainFunctions.Datefix(l.end_day)
CouponPrepayENUM = 'In Fine'
CapDigitalPayVAL = ''
StateProcFUNC = '@cash flow generator'
TermNB = ''
TermUNIT = ''
'''
for l in i.und_insaddr.legs():
if l.rolling_period not in ('0d','0m','0y'):
TermNB = getattr(l,'rolling_period.count')
TermUNIT = getattr(l,'rolling_period.unit')
else:
TermNB = ''
TermUNIT = 'Maturity'
'''
TermNB = ''
TermUNIT = 'Maturity'
TermCAL = ''
ResetRuleRULE = ''
ResetRuleBUSD = ''
ResetRuleCONV = ''
ResetRuleCAL = ''
CouponGenENUM = 'Backward'
FixedCouponDateNB = ''
BusDayRuleRULE = ''
BusDayRuleBUSD = ''
BusDayRuleCONV = ''
BusDayRuleCAL = ''
try:
cashflow = acm.FCashFlow.Select01("leg = '%s' and startDate <= '%s' and endDate >= '%s'" % (leg.Oid(), acm.Time().TimeNow(), acm.Time().TimeNow()), '')
calc = cashflow.Calculation()
LastResetRateVAL = (calc.ForwardRate(cs) * 100) - cashflow.Spread()
except:
LastResetRateVAL = ''
for c in i.und_insaddr.cash_flows():
for r in c.resets():
LastResetRateVAL = (r.value / 100)
NextResetRateVAL = ''
UndrCrvIndXREF = ''
for l in i.und_insaddr.legs():
UndrCrvIndXREF = 'SCI_' + str(getattr(l, 'float_rate').insid) + '_' + str(getattr(l, 'reset_type')) + '_' + str(getattr(l, 'reset_period.count')) + str(getattr(l, 'reset_period.unit')) + '_' + str(l.reset_day_offset) + str(l.reset_day_method)
try:
DiscountCurveXREF = ins.MappedDiscountLink().Value().Link().YieldCurveComponent().Curve().Name()
except:
DiscountCurveXREF = ins.MappedDiscountLink().Value().Link().YieldCurveComponent().Name()
CouponProratedFLAG = ''
TheoModelXREF = 'CapFloor'
MarketModelXREF = ''
FairValueModelXREF = ''
SettlementProcFUNC = ''
outfile.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n'%(BASFLAG, HeaderName, OBJECT, TYPE, NAME, IDENTIFIER, CurrencyCAL, CurrencyDAYC, CurrencyPERD, CurrencyUNIT, CapFLAG, EffectiveDATE, MaturityDATE, CouponPrepayENUM, CapDigitalPayVAL, StateProcFUNC, TermNB, TermUNIT, TermCAL, ResetRuleRULE, ResetRuleBUSD, ResetRuleCONV, ResetRuleCAL, CouponGenENUM, FixedCouponDateNB, BusDayRuleRULE, BusDayRuleBUSD, BusDayRuleCONV, BusDayRuleCAL, LastResetRateVAL, NextResetRateVAL, UndrCrvIndXREF, DiscountCurveXREF, CouponProratedFLAG, TheoModelXREF, MarketModelXREF, FairValueModelXREF, SettlementProcFUNC))
#Roll Over Cap Strike Rates
BASFLAG = 'rm_ro'
HeaderName = 'Cap/Floor : Cap Strike Rates'
ATTRIBUTE = 'Cap Strike Rates'
OBJECT = 'Cap/FloorSPEC'
CapStrikeRatesDATE = EffectiveDATE
CapStrikeRatesENUM = ''
CapStrikeRatesCAL = ''
for l in i.und_insaddr.legs():
CapStrikeRatesDAYC = MR_MainFunctions.DayCountFix(l.daycount_method)
CapStrikeRatesPERD = 'simple'
CapStrikeRatesUNIT = '%'
CapStrikeRatesVAL = i.strike_price
outfile.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n'%(BASFLAG, HeaderName, ATTRIBUTE, OBJECT, CapStrikeRatesDATE, CapStrikeRatesENUM, CapStrikeRatesCAL, CapStrikeRatesDAYC, CapStrikeRatesPERD, CapStrikeRatesUNIT, CapStrikeRatesVAL))
#Roll Over Cap Notional Principal
BASFLAG = 'rm_ro'
HeaderName = 'Cap/Floor : Cap Notional Principal'
ATTRIBUTE = 'Cap Notional Principal'
OBJECT = 'Cap/FloorSPEC'
CapNotnalPrincDATE = EffectiveDATE
CapNotnalPrincENUM = ''
CapNotnalPrincCAL = ''
CapNotnalPrincDAYC = CapStrikeRatesDAYC
CapNotnalPrincPERD = ''
CapNotnalPrincUNIT = i.curr.insid
CapNotnalPrincVAL = i.contr_size
outfile.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n'%(BASFLAG, HeaderName, ATTRIBUTE, OBJECT, CapNotnalPrincDATE, CapNotnalPrincENUM, CapNotnalPrincCAL, CapNotnalPrincDAYC, CapNotnalPrincPERD, CapNotnalPrincUNIT, CapNotnalPrincVAL))
outfile.close()
#Position
for trades in i.trades():
if MR_MainFunctions.ValidTradeNo(trades) == 0:
if MR_MainFunctions.IsExcludedPortfolio(trades) == False:
PositionFile.CreatePosition(trades, PositionFilename)
return i.insid
|
[
"nencho.georogiev@absa.africa"
] |
nencho.georogiev@absa.africa
|
0bcd4629e096741ddaefb9625d4d79420cbb4f3c
|
872fbbb0ebf65ebafe29c4d03bbb1e1a786f5895
|
/venv/Scripts/pip3-script.py
|
c018fd2503b1b2bc05a63a91f45d501a3f34fb29
|
[] |
no_license
|
imklesley/Processamento-de-Imagens
|
25edbee0979530f2643b69a8e09907fd7d4c944a
|
7e86243c4837e45c442981cd55c750c3cbc267bd
|
refs/heads/master
| 2020-05-14T22:28:31.312499
| 2020-04-22T00:47:29
| 2020-04-22T00:47:29
| 181,979,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
#!C:\Users\klesley.goncalves\PycharmProjects\onedrive\AlgoritmosPI\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"imklesley@gmail.com"
] |
imklesley@gmail.com
|
32f19b66e0a9795e30cd42f69218bf8de8187d57
|
de43b1f24df7bbff757e6c37feffd244331bd05c
|
/LSTM_CRF.py
|
24bf8a2dd6f89f667ddcc767f200f87913736915
|
[] |
no_license
|
yuanxiaoheben/multi_task_ner_intent_analysis
|
4d1fcf01c6cb56b7957c7dfedfb37ae8fd10c62e
|
a710ad58ea73787336dfc501abe912463a93547d
|
refs/heads/master
| 2022-07-17T07:11:52.477006
| 2020-05-23T11:45:53
| 2020-05-23T11:45:53
| 266,324,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,280
|
py
|
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from CRF import crf
max_seq_length = 100
PAD_TAG = "<PAD>"
UNKNOWN_TOKEN = "<UNK>"
START_TAG = "<SOS>"
STOP_TAG = "<EOS>"
tag_to_ix = {PAD_TAG:0,START_TAG:1,STOP_TAG:2,
"O": 3, "B-D": 4, "B-T": 5,"B-S": 6,"B-C": 7,"B-P": 8,"B-B": 9,
"D": 10, "T": 11,"S": 12,"C": 13,"P": 14,"B": 15
}
class BiLSTM(nn.Module):
def __init__(self, vocab_size, max_len, embedding_dim, hidden_dim, batch_size, dropout_prob, word_embeddings=None):
super(BiLSTM, self).__init__()
if not word_embeddings is None:
self.word_embeds,self.word_embedding_dim = self._load_embeddings(word_embeddings)
else:
self.word_embedding_dim = embedding_dim
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.batch_size = batch_size
self.dropout = nn.Dropout(p=dropout_prob)
self.max_len = max_len
self.lstm = torch.nn.LSTM(self.word_embedding_dim,hidden_dim // 2, 2, batch_first=True,bidirectional=True, dropout=dropout_prob)
self.hidden = self.init_hidden()
def init_hidden(self):
return (Variable(torch.zeros(4,self.batch_size, self.hidden_dim // 2).cuda()),
Variable(torch.zeros(4,self.batch_size, self.hidden_dim // 2).cuda()))
def _load_embeddings(self,embeddings):
word_embeddings = torch.nn.Embedding(embeddings.size(0), embeddings.size(1))
word_embeddings.weight = torch.nn.Parameter(embeddings)
emb_dim = embeddings.size(1)
return word_embeddings,emb_dim
def forward(self, sentence):
self.hidden = self.init_hidden()
embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
embeds = embeds.view(self.batch_size,self.max_len,-1)
embeds = self.dropout(embeds)
lstm_out, self.hidden = self.lstm(embeds, self.hidden)
return lstm_out
class rnn_crf(nn.Module):
def __init__(self, bilstm, num_tags,batch_size,max_len,dropout_prob):
super().__init__()
self.batch_size = batch_size
self.max_len = max_len
self.bilstm = bilstm
self.crf = crf(num_tags,batch_size)
        self.hidden2tag = nn.Linear(bilstm.hidden_dim, num_tags)
        self.dropout = nn.Dropout(p=dropout_prob)
    def _get_lstm_features(self, sentence, mask):
        # run the BiLSTM encoder defined above and project its output to tag scores
        lstm_out = self.bilstm(sentence)
        lstm_out = self.dropout(lstm_out)
        lstm_feats = self.hidden2tag(lstm_out)
        lstm_feats *= mask.unsqueeze(2)
        return lstm_feats
def forward(self, x, y):
mask = x.gt(0).float()
lstm_feats = self._get_lstm_features(x,mask)
Z = self.crf.forward(lstm_feats, mask)
score = self.crf.score(lstm_feats, y, mask)
return torch.mean(Z - score) # NLL loss
def decode(self, x): # for inference
mask = x.gt(0).float()
h = self._get_lstm_features(x, mask)
return self.crf.decode(h, mask)
|
[
"noreply@github.com"
] |
yuanxiaoheben.noreply@github.com
|
f93c88a597971bfd272ceb312de511d9bfee132d
|
e60a342f322273d3db5f4ab66f0e1ffffe39de29
|
/parts/zodiac/zope/interface/tests/test_sorting.py
|
f70932c31b7998bc77397830019339d22f8bb160
|
[] |
no_license
|
Xoting/GAExotZodiac
|
6b1b1f5356a4a4732da4c122db0f60b3f08ff6c1
|
f60b2b77b47f6181752a98399f6724b1cb47ddaf
|
refs/heads/master
| 2021-01-15T21:45:20.494358
| 2014-01-13T15:29:22
| 2014-01-13T15:29:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
/home/alex/myenv/zodiac/eggs/zope.interface-4.0.5-py2.7-linux-i686.egg/zope/interface/tests/test_sorting.py
|
[
"alex.palacioslopez@gmail.com"
] |
alex.palacioslopez@gmail.com
|
bc2c4b12f9331ea2a352e78f94593e21b8e803ab
|
2ae84bcfeba7bdb3e234eae4bdb3bce9009f2f98
|
/scripts/contextMiddleware.py
|
933244b3a3cd9b797e48b61c6c1027785f94fe4e
|
[] |
no_license
|
jagorn/adaptive_controller
|
f18691c93e4499a3188af40b68b1a5de15749f75
|
8df4be31734b59213e269b10f87a469476ccf076
|
refs/heads/master
| 2021-01-10T10:31:39.454201
| 2015-06-11T23:25:24
| 2015-06-11T23:25:24
| 36,082,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,085
|
py
|
#!/usr/bin/env python
#
# Author: Francesco Trapani
#
# Class for the estimation of the contextual model.
# This class handles an ASP reasoner, which takes in input context assertions and outputs a contextual model.
# When initialized, the static knowledge stored in an lp file is loaded.
# Every time an input message is received, the new assertions are added to the knowledge, and a new model is estimated.
import rospy
import rospkg
import gringo
import contextCommunication
class ContextMiddleware:
__context_source = '/asp/context.lp'
__communication = None
__publisher = None
__solver = None
__future = None
__interrupted = None
__model = []
def __init__(self):
"""
        Class initialization.
The solver is initialized and the static knowledge is loaded from a file.
"""
self.__communication = contextCommunication.ContextCommunication()
package = rospkg.RosPack()
context_path = package.get_path('adaptive_controller') + self.__context_source
self.__solver = gringo.Control()
self.__solver.load(context_path)
self.__solver.ground([("base", [])])
self.__future = self.__solver.solve_async(None, self.__on_model, self.__on_finish)
self.__future.wait()
self.__publisher = rospy.Publisher(self.__communication.out_topic, self.__communication.out_message, latch=True, queue_size=10)
rospy.Subscriber(self.__communication.in_topic, self.__communication.in_message, self.__on_received_context)
def __on_received_context(self, input_msg):
"""
Callback for ContextInput messages reception.
The assertions contained in the incoming messages are loaded, and a new model is estimated
"""
log_message = "ContextMiddleware - input received:\n"
self.__future.interrupt()
atoms2values = self.__communication.in_message2atoms_values(input_msg)
for atom, value in atoms2values.iteritems():
self.__solver.assign_external(atom, value)
log_message += str(atom) + " = " + str(value) + "\n"
rospy.loginfo(log_message)
self.__interrupted = True
self.__model = []
self.__future = self.__solver.solve_async([], self.__on_model, self.__on_finish)
if self.__future.get() == gringo.SolveResult.SAT and self.__interrupted is False:
self.__publish_context()
def __on_model(self, model):
self.__model[:] = model.atoms(gringo.Model.SHOWN)
def __on_finish(self, result, interrupted):
self.__interrupted = interrupted
def __publish_context(self):
"""
Publishes a message with all the assertions contained in the new current context model
"""
context_msg = self.__communication.atoms2out_message(self.__model)
self.__publisher.publish(context_msg)
# ROS log
log_message = "ContextMiddleWare - model published:\n"
for atom in self.__model:
log_message += atom.__str__() + "\n"
rospy.loginfo(log_message)
|
[
"jagorn90@gmail.com"
] |
jagorn90@gmail.com
|
7f76b6f55e2d2737ce361ccdee423c96d8e1e245
|
e3c69a7715c87313ffcc3d537a48d1fa87fc8d22
|
/Assignment4.6.py
|
c668a8ebd6ddfe6bfc38b84cb5a440ffb2018ae6
|
[] |
no_license
|
JEPHTAH-DAVIDS/Python-for-Everybody-Programming-for-Everybody-
|
3b5c5a05318956e47bdd911d1b1e2f3a60ec60aa
|
81bc27a7a3c38ee05f853c564b450e466185a935
|
refs/heads/main
| 2023-08-05T15:38:18.687307
| 2021-09-22T21:48:14
| 2021-09-22T21:48:14
| 409,170,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
'''4.6 Write a program to prompt the user for hours and rate per hour using input to compute gross pay.
Pay should be the normal rate for hours up to 40 and time-and-a-half for the hourly rate for all hours worked above 40 hours.
Put the logic to do the computation of pay in a function called computepay() and use the function to do the computation.
The function should return a value. Use 45 hours and a rate of 10.50 per hour to test the program (the pay should be 498.75).
You should use input to read a string and float() to convert the string to a number. Do not worry about error checking the user
input unless you want to - you can assume the user types numbers properly. Do not name your variable sum or use the sum() function.'''
def computepay(h,r):
if h <= 40:
pay = h * r
if h > 40:
h = h - 40
pay = 40 * r + (h * r * 1.5)
return pay
hrs = input("Enter Hours:")
rate = input("rate:")
hrs = float(hrs)
rate = float(rate)
p = computepay(hrs, rate)
print("Pay", p)
|
[
"noreply@github.com"
] |
JEPHTAH-DAVIDS.noreply@github.com
|
9613ce7c9e7be3a620ab2d54de4303375a9ff72d
|
4db5677932f5bbaa030ed05aa35a457841646492
|
/DataProcessing/SensorData/splitReadingsByTimestamp.py
|
a41e67ed7dea5103a38f85fa9926ff9c4250f790
|
[] |
no_license
|
magnumresearchgroup/TerrainRoughnessPrediction
|
4dd63ebbb7ad3005b29e3162cee5247546fb558e
|
0a3bb440cc2f5453dcd1b0c0e4d5f877546bdc85
|
refs/heads/master
| 2023-03-24T22:13:00.953074
| 2021-03-10T20:29:55
| 2021-03-10T20:29:55
| 346,482,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,166
|
py
|
import csv
import shutil
from tempfile import NamedTemporaryFile
import os
def proAccelCal(subdir, file):
csv_file = open(os.path.join(subdir, file), "rt")
with open(subdir + '/accelerometer_calibrated_split.csv', 'wb') as new_file:
reader = csv.DictReader(csv_file)
fieldname = ['timestamp (s)', 'timestamp_ms (ms)', 'accel_x (counts)',
'accel_y (counts)', 'accel_z (counts)', 'calibrated_accel_x (g)',
'calibrated_accel_y (g)', 'calibrated_accel_z (g)', 'calibrated_accel_x (m/s^2)',
'calibrated_accel_y (m/s^2)', 'calibrated_accel_z (m/s^2)']
writer = csv.DictWriter(new_file, fieldnames=fieldname)
writer.writeheader()
for row in reader:
msParse = [row['sample_time_offset (ms)'].split('|'), row['accel_x (counts)'].split('|'),
row['accel_y (counts)'].split('|'), row['accel_z (counts)'].split('|'),
row['calibrated_accel_x (g)'].split('|'), row['calibrated_accel_y (g)'].split('|'),
row['calibrated_accel_z (g)'].split('|')]
for period in range(len(msParse[0])):
time_ms = int(float(row['timestamp_ms (ms)'])) + int(msParse[0][period])
time_s = int(float(row['timestamp (s)']))
if time_ms >= 1000:
time_ms = time_ms % 1000
time_s += 1
writer.writerow({
'timestamp (s)': time_s,
'timestamp_ms (ms)': time_ms,
'accel_x (counts)': msParse[1][period],
'accel_y (counts)': msParse[2][period],
'accel_z (counts)': msParse[3][period],
'calibrated_accel_x (g)': msParse[4][period],
'calibrated_accel_y (g)': msParse[5][period],
'calibrated_accel_z (g)': msParse[6][period],
'calibrated_accel_x (m/s^2)': 9.80665 * float(msParse[4][period]),
'calibrated_accel_y (m/s^2)': 9.80665 * float(msParse[5][period]),
'calibrated_accel_z (m/s^2)': 9.80665 * float(msParse[6][period]),
})
csv_file.close()
new_file.close()
def proGyroCal(subdir, file):
csv_file = open(os.path.join(subdir, file), "rt")
with open(subdir + '/gyroscope_calibrated_split.csv', 'wb') as new_file:
reader = csv.DictReader(csv_file)
fieldname = ['timestamp (s)', 'timestamp_ms (ms)', 'gyro_x (counts)',
'gyro_y (counts)', 'gyro_z (counts)', 'calibrated_gyro_x (deg/s)',
'calibrated_gyro_y (deg/s)', 'calibrated_gyro_z (deg/s)']
writer = csv.DictWriter(new_file, fieldnames=fieldname)
writer.writeheader()
for row in reader:
msParse = [row['sample_time_offset (ms)'].split('|'), row['gyro_x (counts)'].split('|'),
row['gyro_y (counts)'].split('|'), row['gyro_z (counts)'].split('|'),
row['calibrated_gyro_x (deg/s)'].split('|'), row['calibrated_gyro_y (deg/s)'].split('|'),
row['calibrated_gyro_z (deg/s)'].split('|')]
for period in range(len(msParse[0])):
time_ms = int(float(row['timestamp_ms (ms)'])) + int(msParse[0][period])
time_s = int(float(row['timestamp (s)']))
if time_ms >= 1000:
time_ms = time_ms % 1000
time_s += 1
writer.writerow({
'timestamp (s)': time_s,
'timestamp_ms (ms)': time_ms,
'gyro_x (counts)': msParse[1][period],
'gyro_y (counts)': msParse[2][period],
'gyro_z (counts)': msParse[3][period],
'calibrated_gyro_x (deg/s)': msParse[4][period],
'calibrated_gyro_y (deg/s)': msParse[5][period],
'calibrated_gyro_z (deg/s)': msParse[6][period],
})
csv_file.close()
new_file.close()
def proMagn(subdir, file):
csv_file = open(os.path.join(subdir, file), "rt")
with open(subdir + '/magnetometer_split.csv', 'wb') as new_file:
reader = csv.DictReader(csv_file)
fieldname = ['timestamp (s)', 'timestamp_ms (ms)', 'mag_x (counts)',
'mag_y (counts)', 'mag_z (counts)']
writer = csv.DictWriter(new_file, fieldnames=fieldname)
writer.writeheader()
for row in reader:
msParse = [row['sample_time (ms)'].split('|'), row['mag_x (counts)'].split('|'),
row['mag_y (counts)'].split('|'), row['mag_z (counts)'].split('|')]
for period in range(len(msParse[0])):
time_ms = int(float(row['timestamp_ms (ms)'])) + int(msParse[0][period])
time_s = int(float(row['timestamp (s)']))
if time_ms >= 1000:
time_ms = time_ms % 1000
time_s += 1
writer.writerow({
'timestamp (s)': time_s,
'timestamp_ms (ms)': time_ms,
'mag_x (counts)': msParse[1][period],
'mag_y (counts)': msParse[2][period],
'mag_z (counts)': msParse[3][period]
})
csv_file.close()
new_file.close()
def main():
# change this path to the directory which contains all the folders of data
filePath = ""
for subdir, dirs, files in os.walk(filePath):
for file in files:
_, folder = os.path.split(subdir)
if file == "magnetometer.csv":
print("\nprocessing: " + os.path.join(folder, file))
proMagn(subdir, file)
elif file == "accelerometer_calibrated.csv":
print("\nprocessing: " + os.path.join(folder, file))
proAccelCal(subdir, file)
elif file == "gyroscope_calibrated.csv":
print("\nprocessing: " + os.path.join(folder, file))
proGyroCal(subdir, file)
if __name__ == "__main__":
main()
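if __name__ == "__main__":
    # Tiny illustrative example (added; the values below are made up) of the splitting
    # idea used by the functions above: each raw row packs several samples into
    # pipe-delimited fields, and millisecond offsets roll over into the seconds column
    # once they pass 1000 ms.
    demo_row = {'timestamp (s)': '100', 'timestamp_ms (ms)': '950',
                'sample_time_offset (ms)': '0|40|80', 'accel_x (counts)': '1|2|3'}
    offsets = demo_row['sample_time_offset (ms)'].split('|')
    values = demo_row['accel_x (counts)'].split('|')
    for offset, value in zip(offsets, values):
        t_ms = int(float(demo_row['timestamp_ms (ms)'])) + int(offset)
        t_s = int(float(demo_row['timestamp (s)']))
        if t_ms >= 1000:
            t_ms = t_ms % 1000
            t_s += 1
        print(t_s, t_ms, value)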
|
[
"gabriela.gresenz@gmail.com"
] |
gabriela.gresenz@gmail.com
|
78a6c2fb6f5fe8debd90859769c70eaf59e60a9c
|
909a3702702aa7424640c4cf0f8275eac5e6572c
|
/xmiddleware/__init__.py
|
2e840218bde88b0258aca415b598eb055502352c
|
[] |
no_license
|
xml-star/django5hna8
|
7784c1c7dd8be4c1a82ab1cb8c9d609992fedd35
|
b6032951dff09ee5465d1a4fb1640bc2db99cc7f
|
refs/heads/master
| 2023-04-18T21:18:33.372539
| 2021-05-06T08:31:04
| 2021-05-06T08:31:04
| 364,839,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32
|
py
|
#coding:utf-8
__author__ = "ila"
|
[
"xml@qq.com"
] |
xml@qq.com
|
4b525ed1e0d68aebf75291826bfa4fe07130c8e0
|
46ce4e453cb0abcd3eec97646d2e91d57fdf36e8
|
/5.9_ArrayPrimes.py
|
0f2c9af89e0a48ece490d3267afcf6f1af52750b
|
[] |
no_license
|
dzhang379-school/interview-prep
|
4b1cdbae8d0ae0c0678e7df0a84d55ac00219c73
|
211fa3124e27dbfbcd2b5a46256f87b109ff0124
|
refs/heads/master
| 2021-10-24T18:55:23.555281
| 2019-03-27T21:30:03
| 2019-03-27T21:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
import math
import time
def primes(a):
primes = []
for i in range(2, a):
j = 1
while j <= len(primes) and primes[j-1] <= math.sqrt(i):
if i % primes[j-1] == 0:
j = 0
break
j += 1
if j:
primes.append(i)
return primes
def primes2(a):
    nums = list(range(2, a))
    primes = []
    # repeatedly take the smallest remaining number as a prime and sieve out its multiples
    while nums:
        p = nums[0]
        primes.append(p)
        nums = [x for x in nums if x % p != 0]
    return primes
start = time.time()
print(primes(180))
print(time.time() - start)
start = time.time()
print(primes2(180))
print(time.time() - start)
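# Added consistency check (illustrative): with the sieve above accumulating its primes,
# both implementations should produce the same list.
assert primes(180) == primes2(180), "prime lists differ"
print("both implementations agree for n = 180")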
|
[
"davis.zhang@vanderbilt.edu"
] |
davis.zhang@vanderbilt.edu
|
4f4792c1cb3ec7a2663e185f597e685b04fe2352
|
27986764b989c474ff70a2a2f7cfc5bb404a1ffe
|
/core/migrations/0019_auto_20180222_1032.py
|
8e20e328c9bde8ee9e406fdaef55d098d820a675
|
[] |
no_license
|
Specialistik/e-market
|
7b326d08bc5878e11bf3847d1c5737a6ed1593cd
|
f44d2e2bcffbf40c638da987efddcaef1da52f41
|
refs/heads/master
| 2020-03-27T13:25:39.283604
| 2018-09-08T02:18:08
| 2018-09-08T02:18:08
| 146,608,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-02-22 07:32
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0018_auto_20180222_1030'),
]
operations = [
migrations.AlterField(
model_name='complexterritory',
name='representative',
field=models.OneToOneField(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u0422\u043e\u0440\u0433\u043e\u0432\u044b\u0439 \u043f\u0440\u0435\u0434\u0441\u0442\u0430\u0432\u0438\u0442\u0435\u043b\u044c'),
),
]
|
[
"widua33@gmail.com"
] |
widua33@gmail.com
|
21a95c049ff1ec133582d4a43892de6659a137ea
|
38e4b7d0ee9f30497ae0b55454483ea72c44d254
|
/jupyter/Data_Analysis.py
|
0b299c8c2ad3bddb88a30da052df45a27c4a51f1
|
[] |
no_license
|
AlexSanford13/Planes_vs_Animals
|
32fff130c9842d83226bbf57b0cb44e3c90130fd
|
767cf6d80f29f4550dc95544ba2f5ffa87ff9a6e
|
refs/heads/master
| 2020-03-22T04:17:12.138404
| 2018-08-01T18:38:08
| 2018-08-01T18:38:08
| 139,487,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,931
|
py
|
# coding: utf-8
# # FAA Data Analysis: Take 1
# ### This is a test analysis from the Hack Day
#
# These first two cells import the file using the CSV loader. Loading it this way is much simpler than parsing the data by hand: the csv module already understands the CSV format, so it can read the file and give me the outputs I am looking for far more readily than a manual import would.
# In[1]:
import csv # Load up the CSV module
with open('Pennsylvania_Condensed.csv') as file_handler:
reader = csv.DictReader(file_handler) # Load the file into the CSV module
animal_strike_dict = [row for row in reader] # Read all the data into a variable as a list
animal_strike_dict[0:1]
# ## Counting the species hit by plane as a dictionary
#
# The following cell counts through each species in the SPECIES column of the CSV file. It adds each new value it finds to my dictionary and increments the count for values it has already seen. The result is unordered (entries appear in the order they are encountered in the file). I used the DictReader import rather than the list import, since it lets me access a column by the name in the header. This allows me to name the column rather than count across to it.
# In[2]:
# Create a dictionary to store the animals struck
species_counter = dict()
for row in animal_strike_dict:
animal = row['SPECIES']
if animal not in species_counter:
species_counter[animal] = 1
else:
species_counter[animal] += 1
print(species_counter)
# In[4]:
species_csv = open('species_csv.csv', 'w')
with species_csv:
writer = csv.writer(species_csv)
writer.writerows(species_counter)
print("Write Complete")
# Below are more cells creating dictionaries for a variety of the columns in the CSV file, read from the list of dictionaries loaded above.
# In[5]:
# Create a dictionary to store the part of the flight in which the animal was struck
flight_phase = dict()
for row in animal_strike_dict:
phase = row['PHASE_OF_FLT']
if phase not in flight_phase:
flight_phase[phase] = 1
else:
flight_phase[phase] += 1
print(flight_phase)
# In[6]:
# Create a dictionary to store the airport where the strike happened
airport_count = dict()
for row in animal_strike_dict:
airport = row['AIRPORT']
if airport not in airport_count: # makes the rule that creates the new value if it doesn't exist
airport_count[airport] = 1
else: # counts up if the value already exists
airport_count[airport] += 1
print(airport_count)
# Below, I created a dictionary that records the amount of damage done. The loop goes over the dataset and adds up the number of reports of each damage type.
# In[7]:
damage_counter_dictionary = dict()
for row in animal_strike_dict: # Chooses the Damage Cell
damage = row['DAMAGE']
if damage not in damage_counter_dictionary:
damage_counter_dictionary[damage] = 1
else:
damage_counter_dictionary[damage] += 1
print(damage_counter_dictionary)
# Next, I imported Counter and created a counter that counts using that method. It produces the same output, but in the end gives a more interesting way to analyze the data.
# In[8]:
from collections import Counter
damage_counter = Counter() # Empty counter for damage
for row in animal_strike_dict:
damage_counter.update(row['DAMAGE'])
damage_counter
# Here I took the damage codes used in the dataset and mapped each code letter to a readable description. This way I can present how much damage was done in terms that people can understand.
# In[9]:
#Creating the Data Dictionary from the Key on the FAA Website
damage_lookup_dictionary = {"N": "not any",
"M": "minor",
"M?": "unknown",
"S": "substantial",
"D": "destroyed",
"A": "over $2,000,000 (Military)",
"B": "$500,000 to $2,000,000 (Military)",
"C": "$50,000 - $500,000 (Military)",
"E": "less than $50,000 (Military)",
"?": "an unreported amount",}
# Finally, I took the keys and displayed each output in sentence form. I took the counter and the lookup dictionary created in the cells above and built strings as outputs. These strings combine the variables so the information reads as a sentence that is easy to understand.
# In[10]:
for damage_key in damage_counter:
    damage_count = damage_counter[damage_key] # Creates variable for the count
    damage_name = damage_lookup_dictionary[damage_key] # Creates variable for the readable name of the key
print("There was {} damage done to {} aircraft".format(damage_name, damage_count))
# Here I take the same approach of creating a dictionary counting each animal that was struck by a plane. This is in plain dictionary form, so it is not as flexible to work with as a Counter.
# In[11]:
species_counter_dictionary = dict()
for row in animal_strike_dict: # Chooses the Damage Cell
species = row['SPECIES']
if species not in species_counter_dictionary:
species_counter_dictionary[species] = 1
else:
species_counter_dictionary[species] += 1
print(species_counter_dictionary)
# Here I attempted to count the number of each animal that was hit by the plane. This was difficult. My first error is displayed here.
#
#
# ````
# ValueError Traceback (most recent call last)
# <ipython-input-66-c51fc6875d37> in <module>()
# 4
# 5 for row in animal_strike_dict:
# ----> 6 species_counter.update(row['SPECIES'])
# 7
# 8 species_counter.most_common() # Displays the counter
#
# ValueError: dictionary update sequence element #0 has length 1; 2 is required
# ````
#
# After some troubleshooting I found that the error came from misnaming the species_counter counter at the beginning of the for loop, which produced the error shown above.
#
#
# Below, I fixed the issue but it was counting by letter, not by value.
# In[12]:
from collections import Counter # Imports the counter
species_counter = Counter() # Empty counter creator
for row in animal_strike_dict:
species_counter.update(row['SPECIES'])
species_counter # Displays the counter
# I took this code and both the dictionary and the list version and put them in a new notebook, and got the same issues.
#
# After discussing it and some troubleshooting, I found the issue was a mix of more-Pythonic and less-Pythonic approaches. In the previous iterations the counter treated each string as a sequence and counted it one letter at a time. The next block of code reads each value as a whole string and counts it as a single item rather than letter by letter.
# In[13]:
from collections import Counter # Imports the counter
species_counter = Counter([row['SPECIES'] for row in animal_strike_dict]) # Empty counter creator
species_counter.most_common() # Displays the counter
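# Added illustrative snippet (not an original notebook cell; 'DEER' is just an example
# value): updating a Counter with a bare string counts its letters, while wrapping the
# string in a list counts the whole value once.
letter_counter = Counter()
letter_counter.update('DEER')             # counts 'D', 'E', 'E', 'R' separately
whole_string_counter = Counter(['DEER'])  # counts the whole string once
print(letter_counter, whole_string_counter)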
# In[14]:
species_csv = open('species_csv.csv', 'w')
with species_csv:
writer = csv.writer(species_csv)
writer.writerows(species_counter)
print("Write Complete")
# The code below counts the months. I attempted to use the same methods as above, continuing to practice with counters and different variables to see how things change. This is done using both a plain counter and a display ordered by month, which gives a picture of the chronological timeline of strikes by month.
# In[15]:
from collections import Counter
month_counter = Counter() # Empty counter for damage
for row in animal_strike_dict:
month_counter.update(row['INCIDENT_MONTH'])
month_counter
# In[16]:
#Creating the Data Dictionary from the Key on the FAA Website
month_lookup_dictionary = {"1": "January",
"2": "February",
"3": "March",
"4": "April",
"5": "May",
"6": "June",
"7": "July",
"8": "August",
"9": "September",
"10": "October",
"11": "November",
"12": "December",
"0": "Not Known"}
# Below I take the lookup dictionary and the counter shown above and output sentences that are easier to read. They are displayed in the order they were added to the counter, so there is no real order yet. I hope to order these sentences either chronologically within the year or by the number of strikes per month; a version sorted by month number is sketched at the end of this notebook.
# In[17]:
for month_key in month_counter:
    month_count = month_counter[month_key] # Creates variable for the count
    month_name = month_lookup_dictionary[month_key] # Creates variable for the month name
print("In {} there were {} animal strikes".format(month_name, month_count))
# I hope to use these tools and eventually expand them to look at strikes per month over time, to see whether certain years are more frequent and whether the general pattern holds throughout this framework. I also hope to graph these points across the three decades in the data set to see the change over time.
# Below is a counter that imports the infomration from month an then counts each incident by month. It then sorts these by most to least frequent months.
# In[18]:
from collections import Counter # Imports the counter
month_counter = Counter([row['INCIDENT_MONTH'] for row in animal_strike_dict]) # Empty counter creator
month_counter.most_common() # Displays the counter
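# Added sketch (not an original notebook cell): print the month sentences in calendar
# order by sorting the month keys numerically; the '0' (Not Known) bucket sorts first.
for month_key in sorted(month_counter, key=int):
    month_name = month_lookup_dictionary[month_key]
    print("In {} there were {} animal strikes".format(month_name, month_counter[month_key]))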
|
[
"alexanderleesanford@gmail.com"
] |
alexanderleesanford@gmail.com
|
ea5881c84e2d7c72a2589b891cc49e7d1d52593a
|
f0143f0a96626fc4f121a65ddf008dca209d6166
|
/backend/settings.py
|
aa39da428a1e1eaf4f59d73f552659907871491d
|
[] |
no_license
|
fmojica30/tektrackerV2
|
bc7727c4c8736f08e9ff9bfcc83256b6cd10c579
|
d2b8cbe7c99826cb4a78ea34bcc861104383dd4f
|
refs/heads/master
| 2023-01-16T04:55:48.039370
| 2020-03-05T20:22:25
| 2020-03-05T20:22:25
| 245,257,299
| 0
| 0
| null | 2023-01-05T09:11:39
| 2020-03-05T20:07:13
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,822
|
py
|
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import django_heroku
import dj_database_url
import dotenv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dotenv_file = os.path.join(BASE_DIR, ".env")
if os.path.isfile(dotenv_file):
dotenv.load_dotenv(dotenv_file)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm=ln4i5q)7gisgurqm^5)t3dkcltcf7(qzla&7mi%86s&lvf-h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
#3rd party apps
'rest_framework'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware', # Whitenoise Middleware
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {}
DATABASES['default'] = dj_database_url.config(conn_max_age=600)
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
django_heroku.settings(locals())
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Place static in the same location as webpack build files
STATIC_ROOT = os.path.join(BASE_DIR, 'build', 'static')
STATICFILES_DIRS = []
# If you want to serve user uploaded files add these settings
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'build', 'media')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
del DATABASES['default']['OPTIONS']['sslmode']
|
[
"fernando.mojica@utexas.edu"
] |
fernando.mojica@utexas.edu
|
b9839a83fa04c3c7d5e63958af93be0800935ab9
|
5accec0b658076d65f2294e28ff51717a2c4e3f5
|
/account/admin.py
|
16085186e3da4ad9b04d615d926e102b40be8327
|
[] |
no_license
|
boundshunter/myblog_12
|
5d35f9b04b205a3b8bbcbe6b2eacb170074c6e2f
|
ac46c69d4de0ddec3f4c489abc734fc594ea3f8b
|
refs/heads/master
| 2020-05-09T03:15:47.898537
| 2019-05-31T10:40:56
| 2019-05-31T10:40:56
| 180,981,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
from django.contrib import admin
from .models import UserProfile
# Register your models here.
class UserProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'birth', 'phone', )
list_filter = ('user', 'phone',)
admin.site.register(UserProfile, UserProfileAdmin)
|
[
"76686094@qq.com"
] |
76686094@qq.com
|
9d09983250e3b34f7c8c3e3ece8a0d63845d942d
|
2d7820a82ae720dbd160f8a1c196eec838937c83
|
/tests/tsoc_c_test.py
|
cc577d97a074c4072202b86bd32ca7d3a4c71b9b
|
[] |
no_license
|
uglyluigi/TediouSMS
|
d0b0639c960b4aa71a0b67f869025baf84d0f62c
|
9cd48d0e02fd58b4b062debafca317f7c8286069
|
refs/heads/master
| 2020-03-10T19:32:01.779346
| 2018-05-14T18:36:24
| 2018-05-14T18:36:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
import sys
sys.path.append('../')
from time import sleep as shleep
from tediousms import telesocket
if __name__ == "__main__":
big_socc = telesocket.Telesocket("localhost", 42069, "localhost", 42069, print)
big_socc.send_message("Hey! A message!")
|
[
"5006946+BillPepsiOfPepsiCo@users.noreply.github.com"
] |
5006946+BillPepsiOfPepsiCo@users.noreply.github.com
|
e2102b7e1f8e067942746eb5d16acd0f0e6aef02
|
dab52390ea50d5e68d1034ee67d08e2a10483d30
|
/baseCode.py
|
4f9b61da378fcce4fe33fb9d52417c12ec64010a
|
[] |
no_license
|
JesseLee-zj/Jesse
|
1614c1b32d56db704a49d18f1b90e46f5763e39b
|
70ddb0c63d132e63e038b44e7eb052bedb29e77d
|
refs/heads/master
| 2020-09-30T14:44:53.677289
| 2019-12-11T09:42:56
| 2019-12-11T09:42:56
| 227,309,324
| 1
| 0
| null | 2019-12-11T09:42:57
| 2019-12-11T08:03:34
| null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
import os
import sys
def hello():
print("hello!")
if __name__=="__main__":
hello()
|
[
"noreply@github.com"
] |
JesseLee-zj.noreply@github.com
|
2948fb4b17c532e41fdf6e31a6c6b2e8cb282c3e
|
3a33cb14cf2ed7c1651532a975401f6927c361d0
|
/importer.py
|
858f2efcaee97b472bed0ad2070d86c65097c313
|
[] |
no_license
|
Ezzara/Mango-leaf-identification
|
65747482e71bf0295ae9ae98717b77ce2ecbebcc
|
3f4e2da06c8ac3cea1c7a1a155316f27ccf219ce
|
refs/heads/master
| 2023-01-02T08:50:14.970731
| 2020-10-22T08:33:02
| 2020-10-22T08:33:02
| 266,675,221
| 0
| 1
| null | 2020-10-22T08:33:04
| 2020-05-25T03:33:24
|
Python
|
UTF-8
|
Python
| false
| false
| 632
|
py
|
#importing array of image
import cv2
#import numpy as np
import glob
X_data = []
files = glob.glob ("dataset/*.jpg")
for myFile in files:
image = cv2.imread (myFile)
#resize
scale_percent = 10 # percent of original size
width = int(image.shape[1] * scale_percent / 100)
height = int(image.shape[0] * scale_percent / 100)
dim = (width, height)
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
#append resized image
X_data.append (resized)
'''
#to test the imported image
cv2.imshow('Hello World', X_data[1])
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
|
[
"noreply@github.com"
] |
Ezzara.noreply@github.com
|
b139c4cbdcbe9c6bdafe3a4242857a14a8267202
|
445d171f940bf44f169d9fbf46b03df3a5e2b1d2
|
/Waldenpy/05_Time.py
|
950d8b41718f29b26088ff3551cda8937807239d
|
[] |
no_license
|
YancarloOjeda/GitWalden
|
c804f8d62d7d46b1a65bf838940129614a3808b9
|
350f9a7e0003e644d430f171e2d1c44950219c2a
|
refs/heads/master
| 2023-03-05T19:59:09.751544
| 2021-02-17T18:34:37
| 2021-02-17T18:34:37
| 278,161,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
"""
Walden Modular Equipment SAS
Time
2020
"""
import Walden as w
#Variables
SesionTime = 10
#Control
CounterTime = 0
#Program
#Use Get_Time() to start time recording
TempTime = w.Get_Time()
while(CounterTime <= SesionTime):
#Use Timer to calculate the log time - Timer(Counter,Temporal time, rate seconds)
CounterTime = w.Timer(CounterTime,TempTime,.05)
#Use Get_Time() get current time
TempTime = w.Get_Time()
print(CounterTime)
|
[
"laurent.ac.ags@gmail.com"
] |
laurent.ac.ags@gmail.com
|
dc0755a55ce75ca7b9b98acb9d32c4c04663b834
|
f4e7385ddfe8c93612542a7f40ca39e0ad7ee803
|
/SANDBOX/python3/5_loops_branches/break_continue.py
|
79c4541a06eb52eb032923a4bc8989a1757f1bf9
|
[] |
no_license
|
glennlopez/Python.Playground
|
f266f8a0427d6ab07d006ebf9619976e49d61b48
|
e7e013b2d446d60d98bb1a1e611398eec4a2f3b8
|
refs/heads/master
| 2021-01-02T08:46:44.149994
| 2019-09-24T22:03:50
| 2019-09-24T22:03:50
| 21,843,220
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
starting = 0
ending = 20
current = starting
step = 6
while current < ending:
if current + step > ending:
        # break out of the loop if the next step would overshoot ending
break
if current % 2:
        # skip the rest of the loop body when current is odd (not divisible by 2)
continue
current += step
print(current)
|
[
"glennlopez@gmail.com"
] |
glennlopez@gmail.com
|
37cdd12e404a77afe13de58fb43b1400d1784fdf
|
5e39212b8c5f0321da2389daeadad0641bdb13c3
|
/cogs/diagnostics.py
|
185895de428f94a1111573a7ad58c3a54fa86a4a
|
[] |
no_license
|
tnorico/inferno-bot
|
048d5e3af40f74d23bc7fbb54fd8211d5616fce9
|
88f4765a298fdc28b121e9a3bd4158e42e71d056
|
refs/heads/master
| 2022-09-17T07:51:19.347584
| 2020-06-01T08:35:12
| 2020-06-01T08:35:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,214
|
py
|
from discord.ext import commands
import datetime
import discord
start_time = datetime.datetime.utcnow()
class diagnostics(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print(" ")
print(" ██▓ ███▄ █ █████▒▓█████ ██▀███ ███▄ █ ▒█████ ")
print("▓██▒ ██ ▀█ █ ▓██ ▒ ▓█ ▀ ▓██ ▒ ██▒ ██ ▀█ █ ▒██▒ ██▒")
print("▒██▒▓██ ▀█ ██▒▒████ ░ ▒███ ▓██ ░▄█ ▒▓██ ▀█ ██▒▒██░ ██▒")
print("░██░▓██▒ ▐▌██▒░▓█▒ ░ ▒▓█ ▄ ▒██▀▀█▄ ▓██▒ ▐▌██▒▒██ ██░")
print("░██░▒██░ ▓██░░▒█░ ░▒████▒░██▓ ▒██▒▒██░ ▓██░░ ████▓▒░")
print("░▓ ░ ▒░ ▒ ▒ ▒ ░ ░░ ▒░ ░░ ▒▓ ░▒▓░░ ▒░ ▒ ▒ ░ ▒░▒░▒░ ")
print(" ▒ ░░ ░░ ░ ▒░ ░ ░ ░ ░ ░▒ ░ ▒░░ ░░ ░ ▒░ ░ ▒ ▒░ ")
print(" ▒ ░ ░ ░ ░ ░ ░ ░ ░░ ░ ░ ░ ░ ░ ░ ░ ▒ ")
print(" ░ ░ ░ ░ ░ ░ ░ ░ ")
print(" ")
print(f"[{self.bot.user.name}] Is Ready!")
servers = self.bot.guilds
#guilds = len(self.bot.guilds)
servers.sort(key=lambda x: x.member_count, reverse=True)
y = 0
for x in self.bot.guilds:
y += x.member_count
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"{y} Users! | i/help"))
@commands.command()
async def uptime(self, ctx):
uptime = datetime.datetime.utcnow() - start_time
uptime = str(uptime).split('.')[0]
await ctx.send(f"`Current Uptime:` "+''+uptime+'')
@commands.command()
async def ping(self, ctx):
message = await ctx.send(content="`Pinging...`")
await message.edit(content=f":ping_pong: `PONG!` - Latency is {round(self.bot.latency * 1000)}ms")
@commands.command()
async def invite(self, ctx):
embed = discord.Embed(
description=f"[Click This Link To Add Inferno To Your Server!](https://discordapp.com/api/oauth2/authorize?client_id=691776029899096065&permissions=8&scope=bot)",
timestamp=datetime.datetime.utcnow(),
colour=0xc57694
)
embed.add_field(name="Requested By", value="{}".format(ctx.author.mention), inline=True)
embed.set_author(name=f"Inferno Invite Requested!", icon_url=ctx.guild.icon_url)
embed.set_footer(text=f"{ctx.guild.name}")
embed.set_thumbnail(url=ctx.guild.icon_url)
await ctx.send(embed=embed)
@commands.command()
async def stats(self, ctx):
servers = self.bot.guilds
guilds = len(self.bot.guilds)
servers.sort(key=lambda x: x.member_count, reverse=True)
y = 0
for x in self.bot.guilds:
y += x.member_count
embed = discord.Embed(
description=f"These Are The General Statistics and Other Information For Inferno, I Have Included Some Additional Information To Anyone Who Wishes To Know More About Inferno!",
timestamp=datetime.datetime.utcnow(),
colour=0xc57694
)
embed.set_author(name=f"Inferno Information!", icon_url=self.bot.user.avatar_url)
embed.add_field(name="**General Statistics**", value=f"`Current Users` — {y}\n`Current Guilds` — {guilds}\n`Created Date` — 23/03/2020\n`Creator` — <@183554278735085568>", inline=False)
embed.add_field(name="**Additional Information**", value=f"`Language` — Python\n`Version` — 3.8.2\n`OS` — Windows 10\n", inline=False)
embed.set_footer(text=f"{ctx.guild.name}")
embed.set_thumbnail(url=self.bot.user.avatar_url)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(diagnostics(bot))
|
[
"noreply@github.com"
] |
tnorico.noreply@github.com
|
60b91f0e8caf15f502c8af1ad0aa0bb67367b4ea
|
f959c9fd84e8a3e15289c1870c0c3ac48e0f56fd
|
/main.py
|
9d53805ce172e079ea46065fb75fb17a50de679d
|
[] |
no_license
|
Souta-Hayashi/Madlib
|
689cc9f98043eddab72ea1808e1f0f76d5bc15b7
|
62fc56c1388346b46690663ee75dbe7c5402c31d
|
refs/heads/main
| 2023-09-05T20:05:37.908102
| 2021-10-31T07:00:18
| 2021-10-31T07:00:18
| 423,075,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
from sample_madlibs import love, motorcar
import random
if __name__ == "__main__":
m = random.choice([love, motorcar])
m.madlib()
|
[
"you@example.com"
] |
you@example.com
|
1e810e0adfdae37c1b9071bef543f420e28849b9
|
98283ca88d19fe200c2ab065d5d43a8df03c7883
|
/project1/settings.py
|
85af0d6fdf8b56e87cc41b31f2b5e975424b4bdf
|
[] |
no_license
|
akashpassi007/Django1
|
2181c6dfbe117440c659b2586ce72fb0fde36f0a
|
38e37c7cb1708a6566f9b5eb60b83472b534610c
|
refs/heads/master
| 2022-11-17T08:25:36.802065
| 2020-07-20T09:54:55
| 2020-07-20T09:54:55
| 281,070,231
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,292
|
py
|
"""
Django settings for project1 project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f4fb3e4m0l+loe*0zl3k4ynm8_sg6u31n(ky@efstxwu&zf_n3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'Home.apps.HomeConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Added Manually
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
|
[
"akashpassi0@gmail.com"
] |
akashpassi0@gmail.com
|
9ce373b18f9684ed69a1d96120742cae51b6aa2f
|
8c1ea60783fff7dc9e8158db6dc430c77069d962
|
/trove/tests/unittests/mgmt/test_datastore_controller.py
|
fc36c7adc43d8799712f88e6ec9cfa17be3fb13a
|
[
"Apache-2.0"
] |
permissive
|
zjtheone/trove
|
d10d7d4a259dc4a802228362e3864d2df1648775
|
3508e3e4d7925b59ab8d344e4d4952fd79366e15
|
refs/heads/master
| 2020-04-05T23:08:25.284121
| 2017-04-26T10:20:24
| 2017-04-26T10:20:24
| 50,925,493
| 0
| 0
| null | 2016-02-02T14:20:01
| 2016-02-02T14:20:01
| null |
UTF-8
|
Python
| false
| false
| 6,930
|
py
|
# Copyright [2015] Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
from mock import Mock, patch, MagicMock, PropertyMock
from testtools.matchers import Is, Equals
from trove.common import exception
from trove.common import remote
from trove.datastore import models as datastore_models
from trove.extensions.mgmt.datastores.service import DatastoreVersionController
from trove.tests.unittests import trove_testtools
class TestDatastoreVersionController(trove_testtools.TestCase):
def setUp(self):
super(TestDatastoreVersionController, self).setUp()
self.controller = DatastoreVersionController()
self.version = {
"version": {
"datastore_name": "test_dsx",
"name": "test_vr1",
"datastore_manager": "mysql",
"image": "154b350d-4d86-4214-9067-9c54b230c0da",
"packages": ["mysql-server-5.6"],
"active": True,
"default": False
}
}
self.tenant_id = Mock()
context = Mock()
self.req = Mock()
self.req.environ = Mock()
self.req.environ.__getitem__ = Mock(return_value=context)
def test_get_schema_create(self):
schema = self.controller.get_schema('create', self.version)
self.assertIsNotNone(schema)
self.assertTrue('version' in schema['properties'])
def test_validate_create(self):
body = self.version
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_create_blankname(self):
body = self.version
body['version']['name'] = " "
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.assertThat(len(errors), Is(1))
self.assertThat(errors[0].message,
Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))
def test_validate_create_blank_datastore(self):
body = self.version
body['version']['datastore_name'] = ""
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
error_messages = [error.message for error in errors]
self.assertThat(len(errors), Is(2))
self.assertIn("'' is too short", error_messages)
self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages)
@patch.object(remote, 'create_nova_client')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load',
side_effect=exception.DatastoreVersionNotFound)
@patch.object(datastore_models, 'update_datastore_version')
def test_create_datastore_versions(self, mock_ds_version_create,
mock_ds_version_load,
mock_ds_load, mock_nova_client):
body = self.version
mock_ds_load.return_value.name = 'test_dsx'
self.controller.create(self.req, body, self.tenant_id)
mock_ds_version_create.assert_called_with(
'test_dsx', 'test_vr1', 'mysql',
'154b350d-4d86-4214-9067-9c54b230c0da',
'mysql-server-5.6', True)
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
def test_show_ds_version(self, mock_ds_version_load):
id = Mock()
self.controller.show(self.req, self.tenant_id, id)
mock_ds_version_load.assert_called_with(id)
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
def test_delete_ds_version(self, mock_ds_version_load, mock_ds_load):
ds_version_id = Mock()
ds_version = Mock()
mock_ds_version_load.return_value = ds_version
self.controller.delete(self.req, self.tenant_id, ds_version_id)
ds_version.delete.assert_called_with()
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
@patch.object(datastore_models.DatastoreVersions, 'load_all')
def test_index_ds_version(self, mock_ds_version_load_all,
mock_ds_version_load_by_uuid):
mock_id = Mock()
mock_ds_version = Mock()
mock_ds_version.id = mock_id
mock_ds_version_load_all.return_value = [mock_ds_version]
self.controller.index(self.req, self.tenant_id)
mock_ds_version_load_all.assert_called_with(only_active=False)
mock_ds_version_load_by_uuid.assert_called_with(mock_id)
@patch.object(remote, 'create_nova_client')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
@patch.object(datastore_models, 'update_datastore_version')
def test_edit_datastore_versions(self, mock_ds_version_update,
mock_ds_version_load,
mock_nova_client):
body = {'image': '21c8805a-a800-4bca-a192-3a5a2519044d'}
mock_ds_version = MagicMock()
type(mock_ds_version).datastore_name = PropertyMock(
return_value=self.version['version']['datastore_name'])
type(mock_ds_version).name = PropertyMock(
return_value=self.version['version']['name'])
type(mock_ds_version).image_id = PropertyMock(
return_value=self.version['version']['image'])
type(mock_ds_version).packages = PropertyMock(
return_value=self.version['version']['packages'])
type(mock_ds_version).active = PropertyMock(
return_value=self.version['version']['active'])
type(mock_ds_version).manager = PropertyMock(
return_value=self.version['version']['datastore_manager'])
mock_ds_version_load.return_value = mock_ds_version
self.controller.edit(self.req, body, self.tenant_id, Mock())
mock_ds_version_update.assert_called_with(
'test_dsx', 'test_vr1', 'mysql',
'21c8805a-a800-4bca-a192-3a5a2519044d',
'mysql-server-5.6', True)
|
[
"skm.net@gmail.com"
] |
skm.net@gmail.com
|
cdaa718eda61ecdac959615d75b705025165eefd
|
0d8e4859792c76ec5376ecf26f37c7e3c2acefb7
|
/hw3/src/my_model.py
|
ea016f1220ef544d0ab457a7f1e06d53c31a49e0
|
[] |
no_license
|
bugaosuni59/Deep-Learning-homework
|
ac7612e381c1351060dc951ff7563c59b2204474
|
6ba057db8eaebbbeb88d442ecb10064b95f01d31
|
refs/heads/master
| 2020-07-15T23:27:45.031932
| 2019-06-17T14:46:15
| 2019-06-17T14:46:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,725
|
py
|
import math
import torch
import torch.nn as nn
from torch.nn import Parameter
class MyGRUModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, n_token, n_input, n_hid, n_layers, dropout=0.5):
super(MyGRUModel, self).__init__()
self.drop_rate = dropout
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(n_token, n_input)
self.decoder = nn.Linear(n_hid, n_token)
self.rnn_type = 'GRU'
self.n_hid = n_hid
self.n_layers = n_layers
self.training = True
# init RNN
self.w_inputs = []
self.w_hiddens = []
self.b_inputs = []
gate_size = 3 * n_hid
self._all_weights = []
for layer in range(n_layers):
layer_input_size = n_input if layer == 0 else n_hid
w_ih = Parameter(torch.Tensor(gate_size, layer_input_size))
w_hh = Parameter(torch.Tensor(gate_size, n_hid))
b_ih = Parameter(torch.Tensor(gate_size))
setattr(self, 'w_ih' + str(layer), w_ih)
setattr(self, 'w_hh' + str(layer), w_hh)
setattr(self, 'b_ih' + str(layer), b_ih)
self.w_inputs.append(w_ih)
self.w_hiddens.append(w_hh)
self.b_inputs.append(b_ih)
self.reset_parameters()
self.init_weights()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.n_hid)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
encoded = self.encoder(input)
emb = self.drop(encoded)
hidden, output = self.process_layers(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
return next(self.parameters()).new_zeros(self.n_layers, bsz, self.n_hid)
def process_layers(self, input, hidden):
next_hidden = []
for l in range(self.n_layers):
all_output = []
hy, output = self.process_states(input, hidden[l], l)
next_hidden.append(hy)
all_output.append(output)
input = torch.cat(all_output, input.dim() - 1)
if l < self.n_layers - 1:
input = self.drop(input)
next_hidden = torch.cat(next_hidden, 0).view(self.n_layers, *next_hidden[0].size())
return next_hidden, input
def process_states(self, input, hidden, layer):
output = []
steps = range(input.size(0))
for i in steps:
hidden = self.gru_cell(input[i], hidden, layer)
output.append(hidden)
output = torch.cat(output, 0).view(input.size(0), *output[0].size())
return hidden, output
def gru_cell(self, input, hidden, layer):
w_input = self.w_inputs[layer]
w_hidden = self.w_hiddens[layer]
b_input = self.b_inputs[layer]
gi = torch.addmm(b_input, input, w_input.t())
gh = hidden.matmul(w_hidden.t())
i_r, i_i, i_n = gi.chunk(3, 1)
h_r, h_i, h_n = gh.chunk(3, 1)
resetgate = (i_r + h_r).sigmoid()
updategate = (i_i + h_i).sigmoid()
temp_hidden = (i_n + resetgate * h_n).tanh()
new_hidden = temp_hidden + updategate * (hidden - temp_hidden)
return new_hidden
|
[
"13654840887@163.com"
] |
13654840887@163.com
|
6e4d29b3fc199f844faaea54ac101743bff00933
|
870208962d013032316e82eaed342d570cb88212
|
/collector/url_collector.py
|
3d5abd2da874014195bdec79ae4fd5c9ba48955b
|
[] |
no_license
|
cerulean85/ttawb
|
b86c3f2a0f3b8f39dae272f3e350ecd63d5878fc
|
5a0b59516bb9137e86d8b057a1100a5ae24fe77b
|
refs/heads/main
| 2023-08-31T14:52:52.516128
| 2021-10-08T07:44:09
| 2021-10-08T07:44:09
| 414,887,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,445
|
py
|
import time
from multiprocessing import Process
from bs4 import BeautifulSoup
import parallel as prl
import config as cfg
import kkconn
import modules.collect.dir as dir
from datetime import datetime, timedelta
conf = cfg.get_config(path=dir.config_path)
chromeDriver = cfg.get_chrome_driver(config_path=dir.config_path)
def date_range(start, end):
start = datetime.strptime(start, "%Y-%m-%d")
end = datetime.strptime(end, "%Y-%m-%d")
dates = [(start + timedelta(days=i)).strftime("%Y-%m-%d") for i in range((end-start).days+1)]
return dates
def collect_urls(work):
work_type = "collect_url"
print(work_type)
# for work in works:
# prl.stop_procs(work)
# for work in works:
channel = work["channel"]
direct_page_download = conf[channel]["direct_page_download"]
data_load_method = conf[channel]["data_load_method"]
if not direct_page_download:
if data_load_method == "page_nav":
collect_urls_by_page_nav(work)
# prl.add_proc(work, Process(target=collect_urls_by_page_nav, args=(work,)))
else:
collect_urls_by_inf_scorll(work)
# prl.add_proc(work, Process(target=collect_urls_by_inf_scorll, args=(work,)))
# prl.start_procs(work_type)
def get_url(work, target_page_no):
url_set = set([])
if int(target_page_no) > 0:
channel = work["channel"]
keyword = work["keyword"]
start_date = work["start_date"]
end_date = work["end_date"]
url = cfg.get_collect_url(channel, target_page_no, keyword, start_date, end_date, config_path=dir.config_path)
chromeDriver.get(url)
conf = cfg.get_config(path=dir.config_path)
time.sleep(conf[channel]["delay_time"]) # Chrome Driver needs time to receive the page source
soup = BeautifulSoup(chromeDriver.page_source, "html.parser")
try:
items = soup.find_all('a')
for item in items:
url_set.add(item["href"])
except Exception as e:
print(e)
return url_set
def collect_urls_by_page_nav(work):
target_page_no = 1
while True:
try:
url_list = []
url_set = get_url(work, target_page_no)
for url in list(url_set):
url_list.append(url)
if len(url_list) > 0:
kkconn.kafka_producer(url_list, work)
print("Inserted {} URLS: {}".format(work["channel"], len(url_list)))
target_page_no += 1
except Exception as e:
print(e)
def collect_urls_by_inf_scorll(work):
pass
if __name__ == "__main__":
work_list = [{
"channel": "nav",
"keyword": "코로나 백신",
"start_date": "2021-09-26",
"end_date": "2021-09-28",
"work_type": "collect_url",
"work_group_no": 2,
# "work_no": 1
}]
# print(work_list)
# exit()
for work in work_list:
date_list = date_range("2021-09-26", "2021-09-28")
for date in date_list:
work["start_date"] = date
work["end_date"] = date
collect_urls(work)
# class Collector:
#
# def __init__(self):
# self.conf = cfg.get_config(path=dir.config_path)
# self.chromeDriver = cfg.get_chrome_driver(config_path=dir.config_path)
# else:
# Start collecting web pages immediately after the insert
# self.collect_urls3(work)
# def extract_texts(self, works):
# work_type = "extract_text"
# print(work_type)
#
# for work in works:
# prl.stop_procs(work)
#
# p_count = 4
# conf = cfg.get_config(path=dir.config_path)
# for work in works:
# channel = work["channel"]
# keyword = work["keyword"]
# work_group_no = work["work_group_no"]
# work_no = work["work_no"]
#
# target_path = conf["storage"]["save_dir"] + channel + '/'
# file_list = [file for file in os.listdir(target_path) if ".html" in file]
# total_file_count = len(file_list)
# finished_file_count = Value('i', 0)
# unit_count = int(len(file_list) / p_count)
# remain_count = len(file_list) % p_count
#
# ei = 0
# for i in range(0, p_count):
# si = unit_count * i
# ei = unit_count * (i + 1)
#
# prl.add_proc(work, Process(target=self.extract_feature, args=(channel, target_path, file_list[si:ei],
# finished_file_count, total_file_count,
# keyword, work_group_no, work_no)))
#
# if remain_count > 0:
# si = ei
# ei = ei + remain_count
# prl.add_proc(work, Process(target=self.extract_feature, args=(channel, target_path, file_list[si:ei],
# finished_file_count, total_file_count,
# keyword, work_group_no, work_no)))
#
# prl.start_procs(work_type)
# def extract_feature(self, channel, target_path, file_list, finished_file_count, total_file_count, keyword, work_group_no, work_no):
# tfe.create_doc_text_blocks(channel, target_path, file_list, finished_file_count, total_file_count, keyword, work_group_no, work_no)
# # DBHandler().map_reduce(channel, "text", db_name="whateverdot", collection_name="docs")
#
#
# def extract_contents(self, works):
#
# work_type = "extract_content"
# print(work_type)
#
# for work in works:
# prl.stop_procs(work)
#
# dbh = DBHandler()
# zhpk = ZHPickle()
# for work in works:
# work_group_no = work["work_group_no"]
# work_no = work["work_no"]
# channel = work["channel"]
# if channel == "nav":
# continue
#
# # Load the parent tag paths of the article body
# # Load the trained {text: rank} dictionary
# dy1_ptp = zhpk.load("./modules/eda/statics_result/pickles/" + channel + "_y1_ptp_list.pickle")
# text_rank_dct = zhpk.load("./modules/eda/statics_result/pickles/" + channel + "_text_rank_dct.pickle")
#
# # Fetch the target features from the DB and build a Pandas DataFrame
# # Attach freq_rank to target_ds
# target_ds = dbh.find_item({"work_group_no": work_group_no, "work_no": work_no}, db_name="whateverdot", collection_name="docs")
# zhpd = ZHPandas()
# text_list = []
# ptp_list = []
# for d in target_ds:
# text_list.append(d["text"])
# ptp_list.append(d["ptp"])
#
# target_ds = zhpd.create_data_frame_to_dict({
# "text": text_list,
# "ptp": ptp_list
# })
#
# target_rank_list = []
# for i in range(len(target_ds)):
# text = target_ds.loc[i, "text"]
# target_rank_list.append(1 if text_rank_dct.get(text) is None else text_rank_dct[text])
#
# df_rank_list = pd.DataFrame(target_rank_list, columns=["freq_rank"])
# df_added_rank = pd.concat([target_ds, df_rank_list], axis=1)
#
# # Filter by the body's parent tag path, Pandas ["text", "ptp"]
# result = df_added_rank.query("ptp in " + str(dy1_ptp))
#
# # freq_nav = 200
# # freq_jna = 4
#
# result = result[result.freq_rank <= 4].reset_index()
#
# data_list = []
# for index in range(len(result)):
# print(result.loc[index, ["text", "ptp"]])
# data_list.append({"work_group_no": work_group_no, "work_no": work_no, "text": result.loc[index, "text"]})
#
# dbh.insert_item_many(data_list, db_name="whateverdot", collection_name="contents")
# print("Inserted {} - {} contents".format(channel, len(data_list)))
# if __name__ == "__main__":
#
# dbh = DBHandler()
# zhpd = ZHPandas()
# collector = Collector()
#
# target_ds = dbh.find_item({"work_group_no": 9, "work_no": 49},
# db_name="whateverdot", collection_name="docs")
# text_list = []
# ptp_list = []
# for d in target_ds:
# text_list.append(d["text"])
# ptp_list.append(d["ptp"])
#
# target_ds = zhpd.create_data_frame_to_dict({
# "text": text_list,
# "ptp": ptp_list
# })
# print(target_ds.head())
#
# zhpk = ZHPickle()
# target_rank_list = []
# dy1_ptp = zhpk.load("../eda/statics_result/pickles/nav_y1_ptp_list.pickle")
#
# text_rank_dct = zhpk.load("../eda/statics_result/pickles/nav_text_rank_dct.pickle")
# for i in range(len(target_ds)):
# text = target_ds.loc[i, "text"]
# target_rank_list.append(1 if text_rank_dct.get(text) is None else text_rank_dct[text])
#
# df_rank_list = pd.DataFrame(target_rank_list, columns=["freq_rank"])
# df_added_rank = pd.concat([target_ds, df_rank_list], axis=1)
#
# # Filter by the body's parent tag path, Pandas ["text", "ptp"]
# result = df_added_rank.query("ptp in " + str(dy1_ptp))
# result = result[result.freq_rank <= 200].reset_index()
#
# data_list = []
# for index in range(len(result)):
# data_list.append({"work_group_no": 9, "work_no": 49, "text": result.loc[index, "text"]})
#
# dbh.insert_item_many(data_list, db_name="whateverdot", collection_name="contents")
#
#
#
#
#
#
#
# {"work_group_no": work_group_no, "work_no": work_no, "text": result.loc[index, "text"]}
# print(result.text)
# collector.migrate_labeling_data()
# work_list2 = []
# work3 = {
# "channel": "nav",
# "keyword": "코로나_백신",
# "start_dt": "2021-01-01",
# "end_dt": "2021-01-03",
# "work_type": "extract_feature",
# "work_group_no": 9,
# "work_no": 49
# }
#
# work_list2.append(work3)
#
# collector = Collector()
# collector.extract_features(work_list2)
# time.sleep(30)
#
# work_list2 = []
# work2 = {}
# work2["channel"] = "nav"
# work2["keyword"] = "코로나_백신"
# work2["start_dt"] = "2021-01-01"
# work2["end_dt"] = "2021-01-03"
# work2["work_type"] = "collect_doc"
# work2["work_group_no"] = "11"
# work2["work_no"] = "100"
# work_list2.append(work2)
#
# work3 = {}
# work3["channel"] = "jna"
# work3["keyword"] = "코로나_백신"
# work3["start_dt"] = "2021-01-01"
# work3["end_dt"] = "2021-01-03"
# work3["work_type"] = "collect_doc"
# work3["work_group_no"] = "12"
# work3["work_no"] = "100"
# work_list2.append(work3)
#
# collector.collect_docs(work_list2)
|
[
"zhwan85@gmail.com"
] |
zhwan85@gmail.com
|
9939a9df96b6bbddda388438ccb089d2b008617b
|
8dfd4877abd96bfa5f03aa9231bd68782b0424fd
|
/entertainment.py
|
1c030cc08ef615a52ac1599823bac87a132c76a6
|
[] |
no_license
|
aditya6jvas/Udacity-Trailer-website
|
b15a966c823c9e9e05ffe50c36132e593cb619f8
|
aba4533e76c2c273fbe44fd930accf3b555be8fe
|
refs/heads/master
| 2021-01-13T16:48:43.314776
| 2017-06-23T18:20:15
| 2017-06-23T18:20:15
| 95,019,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,336
|
py
|
import mvie
import fresh_tomatoes
# mvie.Movie(Title,Plot,Poster Link,youtube link)
# First Movie
usual_suspects = mvie.Movie("The Usual Suspects",
"Five criminals meet during a routine police line-\
up.Upon their release, they plan to pull off a dangerous\
heist involving precious emeralds worth three million dollars.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BYTViNjMyNmUtNDFkNC00ZDRlLThmMDUtZDU2YWE4NGI2ZjVmXkEyXkFqcGdeQXVyNjU0OTQ0OTY@._V1_UX182_CR0,0,182,268_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=oiXdPolca5w")
# Second Movie
sixth_sense = mvie.Movie("Sixth Sense",
"Child psychologist, starts treating a young boy who acts\
as a medium of communication between Crowe and a slew of unhappy spirits.",
"http://www.impawards.com/1999/posters/sixth_sense_ver1.jpg", # noqa
"https://www.youtube.com/watch?v=VG9AGf66tXM")
# Third Movie
perks_ob_wallflower = mvie.Movie("The Perks of being wallflower",
"Charlie, a 15-year-old introverted\
bright Pittsburgh boy, enters high school\
and is nervous about his new life.\
He is befriended by his seniors who\
show him the way to the real world.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMzIxOTQyODU1OV5BMl5BanBnXkFtZTcwMDQ4Mjg4Nw@@._V1_UX182_CR0,0,182,268_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=XO3-PumyjoI")
# Fourth Movie
fight_club = mvie.Movie("Fight Club",
"Discontented with his capitalistic lifestyle, a\
white-collared insomniac forms an underground fight club with Tyler, a \
careless soap salesman.\
The project soon spirals down into something sinister.",
"http://www.gstatic.com/tv/thumb/movieposters/23069/p23069_p_v8_ad.jpg", # noqa
"https://www.youtube.com/watch?v=BdJKm16Co6M")
# Fifth Movie
pursuit_of_happiness = mvie.Movie("The Pursuit of Happiness",
"Chris Gardner takes up an unpaid internship in\
a brokerage firm after he loses his life's earnings selling a product he\
invested in. His wife leaves him and he is left with the custody of his son.",
"http://www.gstatic.com/tv/thumb/movieposters/162523/p162523_p_v8_ad.jpg", # noqa
"https://www.youtube.com/watch?v=89Kq8SDyvfg")
# Sixth Movie
wolf_of_wallstreet = mvie.Movie("The wolf og wall street",
"Introduced to life in the fast lane through\
stockbroking, Jordan Belfort takes a hit after a Wall Street crash.\
He teams up with Donnie Azoff, cheating his way to the\
top as his relationships slide.",
"http://sociologylegacy.pbworks.com/f/1426548534/6738.jpg", # noqa
"https://www.youtube.com/watch?v=idAVRvQeYAE")
movies = [usual_suspects,
sixth_sense,
perks_ob_wallflower,
fight_club,
pursuit_of_happiness,
wolf_of_wallstreet] # List of all movies
fresh_tomatoes.open_movies_page(movies) # Link to fresh_tomatoes file
|
[
"noreply@github.com"
] |
aditya6jvas.noreply@github.com
|
737b34e8ff42a42d8849a58daf7968e491fef8c3
|
15e49badc73de83095d85400a2c5c8be41ea01b9
|
/workspace-2/Ex-2/ex2/eval_space.py
|
f5e43c871a877d1fa88371421b1dba52c1445551
|
[] |
no_license
|
vishalkr92/NLP
|
5452bd1a79bf12ba64b827465e93a0e081657368
|
1ddecd095eb340c21b43b73d5708604d3259812b
|
refs/heads/master
| 2020-03-08T11:47:32.893096
| 2017-06-11T00:42:55
| 2017-06-11T00:42:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
#!/usr/bin/env python
from __future__ import division
import sys
from collections import defaultdict
tot1 = match = tot2 = 0
for line1, line2 in zip(open(sys.argv[1]), open(sys.argv[2])):
words1 = line1.split()
words2 = line2.split()
counts1 = defaultdict(int)
counts2 = defaultdict(int)
for word in words1:
counts1[word] += 1
for word in words2:
counts2[word] += 1
for word in counts1:
tot1 += counts1[word]
match += min(counts1[word], counts2[word])
for word in counts2:
tot2 += counts2[word]
print "recall= %.3f precision= %.3f F1= %.3f" % (match / tot1,
match / tot2,
2*match / (tot1+tot2) )
|
[
"chattepu@oregonstate.edu"
] |
chattepu@oregonstate.edu
|
5941093a6396fbedbe78998fc2240dbf6e599f76
|
4770c6f3aa2e7f2f861ddb65c87eaed3080e273a
|
/BoostedProducer/crab/config/QCD_Pt_170to300_TuneCUETP8M1_13TeV_pythia8.py
|
78f205b331cf60868c06d8f49d49445b27bafbcb
|
[] |
no_license
|
skudella/BoostedTTH
|
e62a6f0def7cb89d74405d9ce336ccb8641d2952
|
594abe59f50bee45411970d1848f4b935fc29aea
|
refs/heads/CMSSW_8_0_8
| 2020-12-28T09:27:03.643477
| 2016-08-16T08:03:54
| 2016-08-16T08:03:54
| 65,825,294
| 0
| 0
| null | 2016-08-16T14:05:12
| 2016-08-16T14:05:12
| null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = 'QCD_Pt_170to300_TuneCUETP8M1_13TeV_pythia8'
config.General.workArea = 'crab_projects'
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = '/nfs/dust/cms/user/shwillia/AddHiggsTagger/CMSSW_7_4_6_patch6/src/BoostedTTH/BoostedProducer/test/boostedProducer_cfg.py'
config.JobType.outputFiles = ['BoostedTTH_MiniAOD.root']
config.section_("Data")
config.Data.inputDataset = '/QCD_Pt_170to300_TuneCUETP8M1_13TeV_pythia8/RunIISpring15DR74-Asympt25ns_MCRUN2_74_V9-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
config.Data.publication = False
#config.Data.totalUnits = 5
config.Data.publishDBS = 'phys03'
config.Data.publishDataName = 'BoostedTTH_MiniAOD'
config.section_("Site")
config.Site.storageSite = 'T2_DE_DESY'
|
[
"shawn.williamson@kit.edu"
] |
shawn.williamson@kit.edu
|
1fb2085c8a73bd8f6f079618abbb96103995372c
|
49061a53c82687f1ba506c4b8bb17d27fdf51ab5
|
/btre/urls.py
|
42dd23968184d48b83bea0460e5f02402e3e73c4
|
[] |
no_license
|
88tuts/btre_project
|
6c0aa138bf7f8c0589a22760db4f5f5004556844
|
23644f0f5b7b868e6a398ddeb256233e0f4bab06
|
refs/heads/master
| 2020-08-10T14:35:27.076558
| 2019-10-12T15:52:59
| 2019-10-12T15:52:59
| 214,361,018
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('pages.urls')),
path('listings/', include('listings.urls')),
path('accounts/', include('accounts.urls')),
path('contacts/', include('contacts.urls')),
path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"88tuts@gmail.com"
] |
88tuts@gmail.com
|
373319cf154006e1ed50f343d88ca46f56004103
|
4b653379f3d9a3493004605df2ccf05df188c6c2
|
/set2/ch09.py
|
f0febc69afc36612d04aeea11be8a394f5ea210b
|
[] |
no_license
|
mohamed-aziz/cryptopals
|
076755cc75afbe61ade9b76e98dc47b923ebf4ce
|
71a340c1645a1a3466391fb997982f9cfd7437bf
|
refs/heads/master
| 2021-05-07T08:56:18.964338
| 2019-12-07T20:09:59
| 2019-12-07T20:09:59
| 109,444,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
def paddpkcs7(s, n=16):
s = str(s)
char = ((n - len(s)) % n) or n
return s + char * chr(char)
|
[
"medazizknani@gmail.com"
] |
medazizknani@gmail.com
|
fda95dbe3b911f374bef78d6ff5916c46e598e45
|
88283b3b03993c3e6f3af58442768f7908b596b9
|
/trust_production/models.py
|
b45ac0937219f071e8b13d454026755c3c677b84
|
[
"MIT"
] |
permissive
|
KenjiMoreno/otree_trust_example
|
da9e48a7b20f85af23d3dbe21cd23dd39a27a907
|
cee815806668f8562034eb1a52f0353574f517bd
|
refs/heads/master
| 2022-12-07T13:01:56.320561
| 2020-08-19T22:12:08
| 2020-08-19T22:12:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,641
|
py
|
from otree.api import (
models,
widgets,
BaseConstants,
BaseSubsession,
BaseGroup,
BasePlayer,
Currency as c,
currency_range,
)
doc = """
This is a standard 2-player trust game where the amount sent by player 1 gets
tripled. The trust game was first proposed by
<a href="http://econweb.ucsd.edu/~jandreon/Econ264/papers/Berg%20et%20al%20GEB%201995.pdf" target="_blank">
Berg, Dickhaut, and McCabe (1995)
</a>.
"""
class Constants(BaseConstants):
name_in_url = 'trust_prod'
players_per_group = 2
num_rounds = 2
instructions_template = 'trust_production/instructions.html'
instructions_button = "trust_production/Instructions_Button.html"
contact_template = "initial_quiz/Contactenos.html"
# Initial amount allocated to each player
endowment = c(100)
multiplier = 3
n_rounds = num_rounds # for using it on the instructions in initial quiz
class Subsession(BaseSubsession):
def vars_for_admin_report(self):
sent_amounts = [
g.sent_amount for g in self.get_groups() if g.sent_amount != None
]
sent_back_amounts = [
g.sent_back_amount for g in self.get_groups() if g.sent_back_amount != None
]
if sent_amounts and sent_back_amounts:
return dict(
avg_sent_amount=sum(sent_amounts) / len(sent_amounts),
range_sent_amount = str([int(min(sent_amounts)), int(max(sent_amounts))]) + " puntos",
avg_sent_back_amount=sum(sent_back_amounts) / len(sent_back_amounts),
range_sent_back_amount = str([int(min(sent_back_amounts)), int(max(sent_back_amounts))]) + " puntos"
)
else:
return dict(
avg_sent_amount='(no data)',
range_sent_amount='(no data)',
avg_sent_back_amount='(no data)',
range_sent_back_amount='(no data)',
)
class Group(BaseGroup):
sent_amount = models.CurrencyField(
min=0, max=Constants.endowment, doc="""Amount sent by P1"""
)
sent_back_amount = models.CurrencyField(doc="""Amount sent back by P2""", min=c(0))
def sent_back_amount_max(self):
return self.sent_amount * Constants.multiplier
def set_payoffs(self):
p1 = self.get_player_by_id(1)
p2 = self.get_player_by_id(2)
p1.payoff = Constants.endowment - self.sent_amount + self.sent_back_amount
p2.payoff = self.sent_amount * Constants.multiplier - self.sent_back_amount
class Player(BasePlayer):
def role(self):
return {1: 'A', 2: 'B'}[self.id_in_group]
|
[
"a20141676@pucp.pe"
] |
a20141676@pucp.pe
|
5fea4201e6dd6434e660e167ed3e389c643f7bbb
|
3fbb03e707435c9c4c77ecac8861de68f083db12
|
/manage.py
|
4bbca4be9b1028a906b0148bd3ab77a14cb5093e
|
[] |
no_license
|
Renata77/nhfpc_v2
|
19a1a10ff61989b7af635e8cc918c40bbf7ba450
|
a90f5a8d9718dffc77ed4eeea8fcf013bf323e7a
|
refs/heads/master
| 2021-08-19T01:24:44.412453
| 2017-11-24T10:08:15
| 2017-11-24T10:08:28
| 111,884,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nhfpc_v2.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"yq_lv@outlook.com"
] |
yq_lv@outlook.com
|
6a4167fc2b2971c71a90f7c81342dcc470001773
|
99b08c082acf651799e2bbc7c8c7678d01a6b850
|
/esque/clients.py
|
af69cdc2c339f537f0cc0e24f7f47e5148dbef63
|
[
"MIT"
] |
permissive
|
Bibob7/esque
|
3324d8e6620093aac8df8bb6eb9fd1c3eedcedc7
|
759a244bde453020690e50e418bed93a91988a23
|
refs/heads/master
| 2020-05-28T10:28:30.693391
| 2019-05-17T13:04:50
| 2019-05-17T13:04:50
| 188,970,189
| 0
| 0
| null | 2019-05-28T06:45:14
| 2019-05-28T06:45:14
| null |
UTF-8
|
Python
| false
| false
| 2,336
|
py
|
from typing import Optional, Tuple
import click
import confluent_kafka
import pendulum
from confluent_kafka import TopicPartition
from esque.config import Config, PING_GROUP_ID, PING_TOPIC
from esque.errors import raise_for_kafka_error, raise_for_message
from esque.helpers import delivery_callback, delta_t
DEFAULT_RETENTION_MS = 7 * 24 * 60 * 60 * 1000
class Consumer:
def __init__(self):
self._config = Config().create_confluent_config()
self._config.update(
{
"group.id": PING_GROUP_ID,
"error_cb": raise_for_kafka_error,
# We need to commit offsets manually once we"re sure it got saved
# to the sink
"enable.auto.commit": True,
"enable.partition.eof": False,
# We need this to start at the last committed offset instead of the
# latest when subscribing for the first time
"default.topic.config": {"auto.offset.reset": "latest"},
}
)
self._consumer = confluent_kafka.Consumer(self._config)
self._assign_exact_partitions(PING_TOPIC)
def consume_ping(self) -> Optional[Tuple[str, int]]:
msg = self._consumer.consume(timeout=10)[0]
raise_for_message(msg)
msg_sent_at = pendulum.from_timestamp(float(msg.value()))
delta_sent = pendulum.now() - msg_sent_at
return msg.key(), delta_sent.microseconds / 1000
def _assign_exact_partitions(self, topic: str) -> None:
self._consumer.assign([TopicPartition(topic=topic, partition=0, offset=0)])
class Producer:
def __init__(self):
self._config = Config().create_confluent_config()
self._config.update(
{"on_delivery": delivery_callback, "error_cb": raise_for_kafka_error}
)
self._producer = confluent_kafka.Producer(self._config)
def produce_ping(self):
start = pendulum.now()
self._producer.produce(
topic=PING_TOPIC, key=str(0), value=str(pendulum.now().timestamp())
)
while True:
left_messages = self._producer.flush(1)
if left_messages == 0:
break
click.echo(
f"{delta_t(start)} | Still {left_messages} messages left, flushing..."
)
|
[
"jannik.hoffjann@real-digital.de"
] |
jannik.hoffjann@real-digital.de
|
ab80d0a8e6f5b52411767cfc9ad0aa34dc2b9eb0
|
30606312289c221030be643f75f51562c5f8f090
|
/assignment4.py
|
79a4637bf0c5dcf3c892339054669cca3efd9910
|
[] |
no_license
|
keerthiradelli/assignment-4
|
39dea4c8ab2966b8fa423d51dd960fb0396f05e3
|
9cfedf46d80655c4ed7d0aede3820e3f22516220
|
refs/heads/master
| 2020-06-05T09:31:24.109102
| 2019-06-17T18:01:36
| 2019-06-17T18:01:36
| 192,393,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,475
|
py
|
Python 3.6.7 (v3.6.7:6ec5cf24b7, Oct 20 2018, 13:35:33) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> Python 3.6.7 (v3.6.7:6ec5cf24b7, Oct 20 2018, 13:35:33) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> import numpy
>>> from keras.datasets import mnist
Using TensorFlow backend.
>>> from keras.models import Sequential
>>> from keras.layers import Dense
>>> from keras.layers import Dropout
>>> from keras.layers import Flatten
>>> from keras.layers.convolutional import Conv2D
>>> from keras.layers.convolutional import MaxPooling2D
>>> from keras.utils import np_utils
>>> from keras import backend as K
>>> K.set_image_dim_ordering('th')
>>> # fix random seed for reproducibility
... seed = 7
>>> numpy.random.seed(seed)
>>> # load data
... (X_train, y_train), (X_test, y_test) = mnist.load_data()
>>> # reshape to be [samples][pixels][width][height]
... X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
>>> X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
>>> # normalize inputs from 0-255 to 0-1
... X_train = X_train / 255
>>> X_test = X_test / 255
>>> # one hot encode outputs
... y_train = np_utils.to_categorical(y_train)
>>> y_test = np_utils.to_categorical(y_test)
>>> num_classes = y_test.shape[1]
>>> def baseline_model():
... # create model
... model = Sequential()
... model.add(Conv2D(32, (5, 5), input_shape=(1, 28, 28), activation='relu'))
... model.add(MaxPooling2D(pool_size=(2, 2)))
... model.add(Dropout(0.2))
... model.add(Flatten())
... model.add(Dense(128, activation='relu'))
... model.add(Dense(128, activation='relu'))
... model.add(Dense(num_classes, activation='softmax'))
... # Compile model
... model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
... return model
...
>>> # build the model
... model = baseline_model()
WARNING:tensorflow:From C:\Users\Keerthi\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
2019-06-17 23:04:55.217456: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
WARNING:tensorflow:From C:\Users\Keerthi\AppData\Local\Programs\Python\Python36\lib\site-packages\keras\backend\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.
Instructions for updating:
Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
>>> # Fit the model
... model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)
WARNING:tensorflow:From C:\Users\Keerthi\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\ops\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Train on 60000 samples, validate on 10000 samples
Epoch 1/10
- 52s - loss: 0.2448 - acc: 0.9289 - val_loss: 0.0799 - val_acc: 0.9754
Epoch 2/10
- 51s - loss: 0.0696 - acc: 0.9789 - val_loss: 0.0540 - val_acc: 0.9829
Epoch 3/10
- 51s - loss: 0.0485 - acc: 0.9848 - val_loss: 0.0446 - val_acc: 0.9857
Epoch 4/10
- 51s - loss: 0.0367 - acc: 0.9887 - val_loss: 0.0388 - val_acc: 0.9874
Epoch 5/10
- 58s - loss: 0.0297 - acc: 0.9908 - val_loss: 0.0384 - val_acc: 0.9870
Epoch 6/10
- 55s - loss: 0.0249 - acc: 0.9918 - val_loss: 0.0358 - val_acc: 0.9883
Epoch 7/10
- 56s - loss: 0.0209 - acc: 0.9933 - val_loss: 0.0283 - val_acc: 0.9905
Epoch 8/10
- 55s - loss: 0.0164 - acc: 0.9945 - val_loss: 0.0323 - val_acc: 0.9900
Epoch 9/10
- 57s - loss: 0.0153 - acc: 0.9948 - val_loss: 0.0333 - val_acc: 0.9894
Epoch 10/10
- 54s - loss: 0.0129 - acc: 0.9955 - val_loss: 0.0340 - val_acc: 0.9887
<keras.callbacks.History object at 0x0000021F3FA1D780>
>>> # Final evaluation of the model
... scores = model.evaluate(X_test, y_test, verbose=0)
>>> print("CNN Error: %.2f%%" % (100-scores[1]*100))
CNN Error: 1.13%
>>>
|
[
"noreply@github.com"
] |
keerthiradelli.noreply@github.com
|
ca5f582568cb95a608ee1042b2354ed50a7f7cd7
|
a8ffa5c777b56a339bd23e35ddce40bbe1bb0fa5
|
/q56_circle.py
|
f49cbbbc46c3322ba29ba17d803fe403c03c054a
|
[] |
no_license
|
rbmiao/python_tutorials
|
b9ae455cdd2d874751d29b90541b0d301b5e7dc7
|
53eaa85a965fa9d9a217a20f17e48bcfbc929759
|
refs/heads/master
| 2020-05-20T07:57:55.041901
| 2019-05-07T19:20:57
| 2019-05-07T19:20:57
| 185,463,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
if __name__ == '__main__':
from Tkinter import *
canvas = Canvas(width=800, height=600, bg='yellow')
canvas.pack(expand=YES, fill=BOTH)
k = 1
j = 1
for i in range(0,26):
canvas.create_oval(310 - k,250 - k,310 + k,250 + k, width=1)
k += j
j += 0.3
mainloop()
|
[
"rbmiao@gmail.com"
] |
rbmiao@gmail.com
|
44d0c455fe11971444f813e7f51388585af62ee3
|
32cf24b3865bfe5a11f8cf2c0a8902f8e1be9e67
|
/switch/clapper/clapper.py
|
443b8427743ebe7a3365b065efc559701747a769
|
[] |
no_license
|
mayhem/hippomanor-automation
|
e98d89c41b7de28530b5993742de88228b2a88f9
|
b81dbb1011119947b0261399dc986f0b474122d4
|
refs/heads/master
| 2023-05-24T23:48:04.802177
| 2021-02-04T20:17:59
| 2021-02-04T20:17:59
| 162,196,327
| 0
| 0
| null | 2023-05-22T21:46:36
| 2018-12-17T22:15:01
|
Python
|
UTF-8
|
Python
| false
| false
| 5,196
|
py
|
from math import fmod, fabs
from utime import sleep_ms as sleep, ticks_ms as ticks
import ubinascii
import machine
import network
import json
from umqtt.simple import MQTTClient
from machine import I2C
from neopixel import NeoPixel
import net_config
import _config as config
CLIENT_ID = ubinascii.hexlify(machine.unique_id())
# modified
COMMAND_TOPIC = b"lips/command"
#COMMAND_TOPIC = b"home/%s/set" % config.NODE_ID
STATE_TOPIC = b"home/%s/state" % config.NODE_ID
DISCOVER_TOPIC = b"homeassistant/switch/%s/config" % config.NODE_ID
REDISCOVER_TOPIC = b"rediscover"
LED_PIN = 14
# Global object handle
cl = None
def handle_message(topic, msg):
cl.handle_message(topic, msg)
class ServiceObject(object):
FUCK_IT_DROP_EVERYTHING = 850
def __init__(self):
self.client = MQTTClient(CLIENT_ID, net_config.MQTT_SERVER, net_config.MQTT_PORT)
self.np = NeoPixel(machine.Pin(LED_PIN, machine.Pin.OUT), 1)
self.sensor_total = 0
self.sensor_count = 0
self.sensor_floor = 0
self.train_count = 0
self.led_off_time = 0
self.cool_off_time = 0
self.buffer = []
self.buffer_size = 6 # samples
self.peaks = []
self.states = [ False, False ]
def handle_message(self, topic, msg):
pass
# if topic == REDISCOVER_TOPIC:
# self.send_discover_msg()
def add_or_replace_sample(self, value):
self.buffer.append(value)
while len(self.buffer) > self.buffer_size:
self.buffer.pop(0)
def calculate_buffer_stats(self):
""" Returns a tuple (total energy, avg energy) """
total = 0
for sample in self.buffer:
total += sample
return (total, float(total) / len(self.buffer))
def set_color(self, red, green, blue):
self.np[0] = (red, green, blue)
self.np.write()
def clear(self):
self.set_color(0,0,0)
def clear_state(self):
self.peaks = []
self.set_color(2, 2, 2)
sleep(25)
self.clear()
sleep(150)
self.clear()
def send_discover_msg(self):
self.client.publish(DISCOVER_TOPIC,
json.dumps({
"command_topic": COMMAND_TOPIC,
"name" : config.NODE_NAME,
}))
def setup(self):
for i in range(5):
self.set_color(128, 60, 0)
sleep(100)
self.set_color(128, 0, 128)
sleep(100)
self.clear()
ap_if = network.WLAN(network.AP_IF)
ap_if.active(False)
sta_if = network.WLAN(network.STA_IF)
sta_if.active(True)
sta_if.connect(net_config.WIFI_SSID, net_config.WIFI_PASSWORD)
print("connecting to wifi....")
led = 0
while not sta_if.isconnected():
if led:
self.set_color(16, 0, 16)
else:
self.clear()
led = not led
sleep(200)
print("Connected with IP", sta_if.ifconfig()[0])
self.clear()
self.client.set_callback(handle_message)
self.client.connect()
# self.send_discover_msg()
def loop(self):
if ticks() >= self.led_off_time:
self.led_off_time = 0
self.clear()
if ticks() < self.cool_off_time:
return
try:
sensor_value = machine.ADC(0).read()
except OSError as err:
print("Cannot read sensor:", err)
return
self.sensor_total += sensor_value
self.sensor_count += 1
if self.sensor_count == 1000:
self.sensor_floor = self.sensor_total / self.sensor_count
#print("F ", self.sensor_floor)
self.sensor_count = 0
self.sensor_total = 0
if self.sensor_floor == 0:
return
sensor_value = sensor_value - self.sensor_floor
self.add_or_replace_sample(sensor_value)
total, avg = self.calculate_buffer_stats()
if total > 10:
self.peaks.append(ticks())
if len(self.peaks) == 1:
self.set_color(16, 0, 0)
elif len(self.peaks) == 2:
self.set_color(0, 16, 0)
elif len(self.peaks) == 3:
self.set_color(0, 0, 16)
else:
self.clear_state()
return
self.buffer = []
sleep(150)
self.clear()
if self.peaks:
# check for the clock wrapping round
if self.peaks[-1] > ticks():
print("Clock wrap detected!")
self.peaks = []
self.clear()
if self.peaks[-1] + self.FUCK_IT_DROP_EVERYTHING <= ticks():
if len(self.peaks) == 2:
self.states[0] = not self.states[0]
self.client.publish(COMMAND_TOPIC, "TOGGLE")
if len(self.peaks) == 3:
self.client.publish(COMMAND_TOPIC, "MODE")
self.clear_state()
if __name__ == "__main__":
cl = ServiceObject()
cl.setup()
while True:
cl.loop()
|
[
"rob@musicbrainz.org"
] |
rob@musicbrainz.org
|
9331b0babc7068e6005a3257b9c921948c944d2d
|
a86a216665c29e957e05600a1be07110d1b2cda6
|
/journey/blog/migrations/0001_initial.py
|
70508b9107bb33880bc6b163852409aed2130d79
|
[
"MIT"
] |
permissive
|
jfondeur/vaccine
|
2f6e8aad5c10be49285ca8801111c0c4b6927db8
|
fba4b4980803073f8e1934331df6032bdf79f090
|
refs/heads/main
| 2023-01-22T05:38:24.802359
| 2020-11-27T20:58:31
| 2020-11-27T20:58:31
| 315,332,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
# Generated by Django 3.0.8 on 2020-07-16 23:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"jean@fondeurs.com"
] |
jean@fondeurs.com
|