| blob_id (stringlengths 40 40) | directory_id (stringlengths 40 40) | path (stringlengths 3 281) | content_id (stringlengths 40 40) | detected_licenses (listlengths 0 57) | license_type (stringclasses, 2 values) | repo_name (stringlengths 6 116) | snapshot_id (stringlengths 40 40) | revision_id (stringlengths 40 40) | branch_name (stringclasses, 313 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k to 668M, ⌀) | star_events_count (int64, 0 to 102k) | fork_events_count (int64, 0 to 38.2k) | gha_license_id (stringclasses, 17 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 107 values) | src_encoding (stringclasses, 20 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4 to 6.02M) | extension (stringclasses, 78 values) | content (stringlengths 2 6.02M) | authors (listlengths 1 1) | author (stringlengths 0 175) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a54e6ee53269fe07a76bb0cdfd2d888cd08d3167
|
ede3dcfe6093a6c726949832ca5be6d3883c5945
|
/Milestone2/hogwild2/tests/test_svm.py
|
70a28cc4d2f8200d5f3318a7e6e38dd310f2069f
|
[] |
no_license
|
JustineWeb/project_systems
|
26cb5236ecbbe79fdae51fd7dc1c3e7bc93e05cb
|
c579d2a749bfab79bbcdd2a9a46479a39401bb08
|
refs/heads/master
| 2022-01-06T22:45:14.463467
| 2019-05-16T12:06:14
| 2019-05-16T12:06:14
| 177,611,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
from hogwild.svm import SVM
mock_data = [{1: 0.1, 2: 0.2},
{1: 0.2, 4: 0.9},
{3: 0.9, 8: 1},
{4: 0.4, 5: 0.7}]
mock_labels = [1, -1, 1, -1]
mock_delta_w = {1: 0.01, 2: 0.02, 3: 0.03, 4: 0.04, 5: 0.05, 6: 0, 7: 0, 8: 0.08}
def test_fit():
svm = SVM(1, 1e-5, 9)
expected_result = {1: -0.100001,
2: 0.2,
3: 0.9,
4: -1.29999199999,
5: -0.6999909999899999,
8: 1.0}
result = svm.fit(mock_data, mock_labels)
assert expected_result == result
def test_predict():
svm = SVM(1, 1e-5, 9)
svm.fit(mock_data, mock_labels)
expected_result = [1]
result = svm.predict([{2: 0.8, 3: 0.9}])
assert expected_result == result
|
[
"alexis.mermet@epfl.ch"
] |
alexis.mermet@epfl.ch
|
14968dd918294b12c09c5485726652af73463636
|
580d8c8ee860ea8d6c522fd943b37f37a6a31712
|
/Week1/05-aa-nt-converter.py
|
563953c5a4c3f48005902efabb19f344c9fc58f3
|
[] |
no_license
|
charmquark1/cmdb-lab
|
4e485f973eec2f5473760722dc1bd0401485fb76
|
3a82fe708f42f9d13493cb99f580a5587881a125
|
refs/heads/master
| 2021-01-11T19:34:54.462104
| 2016-12-19T14:54:42
| 2016-12-19T14:54:42
| 68,961,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
#!/usr/bin/env python
"""
Parse amino acid sequence in FASTA file.
Convert amino acid to 3 nucleotide seq, append to variable 'new'
When you see a -, replace with ---
Print gene id (ident), converted seq, new line \n, print new seq
Usage: xxx.py 04-AA.fa 02-nt.fa
"""
import fasta_fixed
import sys
import itertools
#inputs: 1) amino acid FASTA, 2) original nt FASTA
AA_query = open(sys.argv[1])
nt_query = open(sys.argv[2])
#prepare inputs for parallel parsing
AA_seq = []
nt_seq = []
for ident, sequence in fasta_fixed.FASTAReader(AA_query):
AA_seq.append(sequence)
for ident1, sequence in fasta_fixed.FASTAReader(nt_query):
nt_seq.append(sequence)
# parse parallel
# read ith element of aa sequence. If not "-", then take three first elements from nt_seq file and add to empty string, new
# at the end of the gene, append string new to list. Then restart for loop for next gene.
# I made list to make it easier to format for later.
list=[]
for aa, nt in itertools.izip(AA_seq, nt_seq):
new = ''
nt_pos = 0
for i in range(0, len(aa)):
if aa[i] == '-':
new = new + ("---")
else:
codon = nt[nt_pos:nt_pos+3] #take 3 characters
new = new + codon
nt_pos = nt_pos + 3
#print new
list.append(new)
print ">x\n" +"\n>x\n".join(list)
|
[
"ninarao42@gmail.com"
] |
ninarao42@gmail.com
|
8a961656b96feb84ed09e52341f6db90ae7faeee
|
30272f4069293049848369f674ff7a8e88e30ac9
|
/PowerSpectrumFunctions.py
|
8e19c74085abafde3847571c977825abf8d71e79
|
[] |
no_license
|
AstroJames/anisoReconstruct
|
e40121fdfdf0e90575c210c20a3ae471a46fa558
|
9b79fd78eb47e44592a5487bd1df9b81658dd31f
|
refs/heads/master
| 2022-04-06T23:47:07.820106
| 2020-03-02T00:38:31
| 2020-03-02T00:38:31
| 235,967,999
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,628
|
py
|
from header import *
def PowerSpectrum(data,type=None,variance=None):
"""
Calculate the power spectrum.
INPUTS:
----------
data - The 2D image data
n - the coef. of the fourier transform = grid size.
type - either '2D' or 'aziAverage' for 2D or averaged power spectrum calculations.
variance - passed to the azimuthal averaging if you want to calculate the variance of
the average.
OUTPUTS:
----------
Pspec - the 2D power spectrum
k - the 2D k vector array
"""
data = data.astype(float) # make sure the data is float type
# This removes the k = 0 wavevalue and makes integrating easier.
data = data/data.mean() - 1 # Pre-processing following Federrath et al. 2016
ft = (1./ (data.shape[0]*data.shape[1] ) )*fftpack.fft2(data) # 2D fourier transform
ft_c = fftpack.fftshift(ft) # center the transform so k = (0,0) is in the center
PSpec = np.abs(ft_c*np.conjugate(ft_c)) # take the power spectrum
    # Take the azimuthal average of the power spectrum, if required.
if type == 'aziAverage':
if not variance:
PSpec = azimuthalAverage(PSpec)
            return PSpec
else:
PSpec, var = azimuthalAverage(PSpec,variance=True)
return PSpec, var
# Create the kx and ky vector components.
kx = np.round(np.linspace( -( PSpec.shape[0] + 1 )/2, ( PSpec.shape[0] + 1 )/2, PSpec.shape[0]))
ky = np.round(np.linspace( -( PSpec.shape[1] + 1 )/2, ( PSpec.shape[1] + 1 )/2, PSpec.shape[1]))
kx, ky = np.meshgrid(kx,ky,indexing="xy")
k = np.hypot(kx,ky)
return PSpec, k
def PowerSpectrumAveraging(files,densOrderd,run):
"""
    This function averages over the power spectra and returns a dictionary containing the
    averaged power spectra.
INPUTS:
----------
    files - all of the file names for each simulation
densOrdered - the density dictionary ordered by plot order
run - if the function needs to be rerun for recompiling of the density plots
OUTPUTS:
----------
PSpecAver - the average power spectrum as a dictionary, for each of the simulations
PSpecVar - the variance power spectrum as a dictionary, for each of the simulations
"""
    if run == 0: # if the power spectra need to be recompiled.
PSpecAver = {}
PSpecVar = {}
fileCounter = 0
# Average the power spectrum, from 5T to 10T
for iter in xrange(50,101):
#print("Currently on iteration {}".format(iter))
# Load the density files
try:
density = LoadPickles(files,iter)
except IndexError:
#print("Index error, I'm going to break the loop.")
break
plotCounter = 0
for i in xrange(0,5):
for j in xrange(0,4):
if fileCounter == 0:
dens = density[densOrderd[plotCounter]] # column density
PSpec, k = PowerSpectrum(dens) # the power spectrum and wavevector
PSpecAver[densOrderd[plotCounter]] = PSpec # add a new power spectrum to the dictionary
PSpecVar[densOrderd[plotCounter]] = PSpec**2 # for constructing the variance
else:
dens = density[densOrderd[plotCounter]] # column density
PSpec, k = PowerSpectrum(dens) # the power spectrum and wavevector
PSpecAver[densOrderd[plotCounter]] += PSpec # add the power spectrum together
PSpecVar[densOrderd[plotCounter]] += PSpec**2 # for constructing the variance
plotCounter +=1 #update the plot
fileCounter +=1 #update the file
# Average the power spectrum and take the log10 transform
for key in PSpecAver.keys():
PSpecAver[key] = PSpecAver[key]/fileCounter
PSpecVar[key] = (PSpecVar[key]/fileCounter - PSpecAver[key]**2)**0.5
save_obj(PSpecAver,"AveragePSpec")
save_obj(PSpecVar,"StdPSpec")
else:
PSpecAver = load_obj("AveragePSpec.pkl")
PSpecVar = load_obj("StdPSpec.pkl")
return PSpecAver, PSpecVar
def calculateIsoVar(PowerSpectrum,k,var2D):
"""
Assuming isotropy of the kz, this function calculates R = sigma^2_2 / sigma^2_3
INPUTS:
------------------------------------------------------------------------------------------
PowerSpectrum - the 2D power spectrum.
k - the k wavevector as a 2D grid.
    var2D - the variance of the 2D column density.
OUTPUTS:
------------------------------------------------------------------------------------------
R - the ratio between the 2D and 3D variance
var3D - the estimated 3D variance
"""
# Differentials for integration
dkx = k[0,0]-k[0,1]
dky = k[0,0]-k[1,0]
dk = np.hypot(dkx,dky)
# Calculate the integrals over the 2D and 3D power spectrum, assuming isotropy
P2D = 2* np.pi* sum( sum( PowerSpectrum ) ) * dk
P3D = 4* np.pi* sum( sum( PowerSpectrum * k ) ) * dk
# Calculate R from Brunt et al. 2010, and the 3D variance.
R = P2D / P3D
var3D = var2D / R
return R, var3D
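
# --- Hypothetical usage sketch (not part of the original file) ---
# Assumes the star import from `header` above provides numpy as np and
# scipy's fftpack, as the functions in this module already expect. A random
# field stands in for a real column-density map.
if __name__ == '__main__':
    field = np.random.rand(64, 64)
    PSpec, k = PowerSpectrum(field)                          # 2D spectrum and wavevector grid
    R, var3D = calculateIsoVar(PSpec, k, var2D=field.var())  # 2D-to-3D variance ratio
    print(R, var3D)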
|
[
"jamesbeattie@James-MacBook-Pro.local"
] |
jamesbeattie@James-MacBook-Pro.local
|
caf70606137c0215e3fb64625ec643e1dc3b2668
|
4ee3f1ce9d06815fbefa6c674d1e00fda7c1dec1
|
/exercises.py
|
7faa60fb61740354ef8009359c125e8b8c5f7807
|
[] |
no_license
|
Seal125/binary_tree_basic
|
79bce998c34c56bbe938784f2be8048da66206d6
|
957df46130f8c0e68bb8cf7145d0d01aee60e34f
|
refs/heads/master
| 2021-04-17T11:05:05.827069
| 2020-03-23T14:47:25
| 2020-03-23T14:47:25
| 249,440,206
| 0
| 0
| null | 2020-03-23T13:35:43
| 2020-03-23T13:35:43
| null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
class Node:
def __init__(self, value = None):
self.value = value
self.left = None
self.right = None
def inorder(root):
    values = []
    def add(node):
        if node:
            add(node.left)
            values.append(node.value)  # append to the local result list, not the list builtin
            add(node.right)
    add(root)
    return values
def is_unival_tree(tree):
    value = tree.value
    is_unival = True
    def check(node):
        nonlocal is_unival
        if node:
            if node.value != value:
                is_unival = False
            check(node.left)
            check(node.right)
    check(tree)
    return is_unival
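
# --- Hypothetical usage sketch (not part of the original exercise file) ---
# Exercises the Node, inorder() and is_unival_tree() definitions above on a
# small three-node tree.
if __name__ == '__main__':
    root = Node(1)
    root.left = Node(1)
    root.right = Node(1)
    print(inorder(root))         # [1, 1, 1]
    print(is_unival_tree(root))  # True
    root.right.value = 2
    print(is_unival_tree(root))  # False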
|
[
"stephaniesmith12514@gmail.com"
] |
stephaniesmith12514@gmail.com
|
12f5a26fff930d6da4d651c4f4dd72d554d44fc2
|
b40fe0bd5f7678926baabdff48df9cff8ec673b6
|
/lzyServer/manage.py
|
c78694823ca24da087e46e4630fb555c6131b801
|
[] |
no_license
|
huangzhongkai/lzy
|
9f3c413c68b3d57677c06c2f588289d2b2889577
|
f204294785589173cd11b6363c68590a8fc24dff
|
refs/heads/master
| 2021-10-26T00:45:31.699647
| 2019-04-09T05:19:42
| 2019-04-09T05:19:42
| 79,820,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lzyServer.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"huangzhongkai@huangzhongkaideMacBook-Pro.local"
] |
huangzhongkai@huangzhongkaideMacBook-Pro.local
|
8ad21205f4c323d5f5949973e2286fd410352fdf
|
24b4dcd555dd3e644467aec13edd671afdd3f49c
|
/SU2/opt/UQ.py
|
fe95153422af7b38c4541bf5320368b4442f680a
|
[] |
no_license
|
garcgutierrez/adj_sto_su2
|
2c8294b65dcef8faf4bf1f453a413bca429a6751
|
22ec37839ed0a08f5dbe1935d18205f085b28a70
|
refs/heads/master
| 2022-11-16T22:13:59.666403
| 2020-07-14T15:38:22
| 2020-07-14T15:38:22
| 279,776,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
from pylab import *
import chaospy as cp
class UQ(object):
def __init__(self):
self.alpha_dist = cp.Uniform(-0.5,0.5)
self.Ma_dist = cp.Uniform(0.1,0.2)
self.T = cp.Uniform(273, 274)
self.distribution = cp.J(self.alpha_dist, self.Ma_dist)
self.computeQuadrature()
def computeQuadrature(self, nOrder=2, ruleN='C'):
self.absissas, self.weights = cp.generate_quadrature(
order = nOrder, dist=self.distribution, rule=ruleN)
self.Machs = around(array(self.absissas)[1,:],2)
self.AOAs = around(array(self.absissas)[0,:],3)
self.Nquadrature = len(self.Machs)
self.polynomial_expansion = cp.orth_ttr(nOrder, self.distribution)
def computeProperties(self, numArray, debug=True):
if(debug):
print('shape: {}'.format(shape(numArray)))
print('Nq:{}'.format(self.Nquadrature))
print('Variables:{}'.format(numArray))
self.poly_approx = cp.fit_quadrature(
self.polynomial_expansion, self.absissas,
self.weights, numArray)
mean = cp.E(self.poly_approx, self.distribution)
sigma = cp.Std(self.poly_approx, self.distribution)
return mean, sigma
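
# --- Hypothetical usage sketch (not part of the original file) ---
# The quantity of interest below is a made-up linear function of Mach number
# and angle of attack evaluated at each quadrature node, purely to show how
# computeProperties() propagates node values through the chaos expansion.
# Assumes the same (pre-4.0) chaospy API used above.
if __name__ == '__main__':
    uq = UQ()
    qoi = 2.0 * uq.Machs + 0.5 * uq.AOAs   # placeholder model output per node
    mean, sigma = uq.computeProperties(qoi)
    print('mean = {}, sigma = {}'.format(mean, sigma))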
|
[
"garcgutierrez@gmail.com"
] |
garcgutierrez@gmail.com
|
155fa4c41fd0a7c40be7e863303ef3b568645e29
|
faae5e2e431cc55089324510715b5bc91732ff42
|
/DecisionTree.py
|
b874cf8d205050881abc97fc35a0b050a75094f3
|
[] |
no_license
|
sk929/MLLearning
|
ac9e84d9bbf0c8dfa7ad23b8941925320ed8c083
|
ca5a9992b1fc40105a722b447ded6da20db32238
|
refs/heads/master
| 2022-07-20T01:29:05.298884
| 2020-05-26T13:38:25
| 2020-05-26T13:38:25
| 266,844,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,174
|
py
|
# Code you have previously used to load data
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
# Path of the file to read
iowa_file_path = '../input/home-data-for-ml-course/train.csv'
home_data = pd.read_csv(iowa_file_path)
# Create target object and call it y
y = home_data.SalePrice
# Create X
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = home_data[features]
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Specify Model
iowa_model = DecisionTreeRegressor(random_state=1)
# Fit Model
iowa_model.fit(train_X, train_y)
# Make validation predictions and calculate mean absolute error
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE: {:,.0f}".format(val_mae))
def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
model.fit(train_X, train_y)
preds_val = model.predict(val_X)
mae = mean_absolute_error(val_y, preds_val)
return(mae)
candidate_max_leaf_nodes = [5, 25, 50, 100, 250, 500]
# Write loop to find the ideal tree size from candidate_max_leaf_nodes
node = {}
for leaf_nodes in candidate_max_leaf_nodes:
node[leaf_nodes]= get_mae(leaf_nodes, train_X, val_X, train_y, val_y)
# Store the best value of max_leaf_nodes (it will be either 5, 25, 50, 100, 250 or 500)
best_tree_size = min(node,key = node.get)
# Fill in argument to make optimal size and uncomment
final_model = DecisionTreeRegressor(max_leaf_nodes = best_tree_size , random_state=1)
# fit the final model and uncomment the next two lines
final_model.fit(X, y)
# Random Forest
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor(random_state=1)
forest_model.fit(train_X, train_y)
melb_preds = forest_model.predict(val_X)
print(mean_absolute_error(val_y, melb_preds))
|
[
"noreply@github.com"
] |
noreply@github.com
|
1981adb6d51f44d042af9407d1b2ef43e248447e
|
6784941fe6b67b5531a6154becc9d9a641cd64d9
|
/ActualizaDDBB.py
|
c330d03d7fe9392fa24943b1577a64adb505fd50
|
[] |
no_license
|
alexistdk/todo-list
|
07ba52926d94b2c05b8cca0854549cebed6e335b
|
62da1526d57fccc9f8d2c7d255efc1bd7dfe0fe8
|
refs/heads/main
| 2022-12-30T06:15:12.107125
| 2020-10-22T02:05:17
| 2020-10-22T02:05:17
| 213,752,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,411
|
py
|
from datetime import date
from ConectarDDBB import *
class ActualizaDDBB(ConectarDDBB):
@classmethod
def crear_tarea(cls, titulo, descripcion, id_usuario):
try:
db = cls.conexion()
cursor = db.cursor()
fecha = date.today()
cursor.execute(cls.insertar_tarea(), (fecha, titulo, descripcion, 0, id_usuario))
except Error:
print("Error ", Error)
finally:
db.commit()
@classmethod
def existe_tarea(cls, titulo, id_usuario):
try:
db = cls.conexion()
cursor = db.cursor()
            cursor.execute(cls.busca_tarea(), (titulo, id_usuario))
return True
except Error:
print("Error ", Error)
@classmethod
def actualizar_tarea(cls, id_tarea):
try:
db = cls.conexion()
cursor = db.cursor()
descripcion = input("Descripción nueva: ")
cursor.execute(cls.actualizar_descripcion(), (descripcion, id_tarea))
except Error:
print("No existe la tarea!", Error)
finally:
db.commit()
@classmethod
def cambiar_estado(cls, id_tarea):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.actualizar_estado(), (id_tarea, ))
except Error:
print("Error ", Error)
finally:
db.commit()
@classmethod
def listar_tareas(cls, id_usuario):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.seleccionar_tareas(), (id_usuario, ))
records = cursor.fetchall()
print("\nLista de tareas\n ")
for row in records:
print("ID = ", row[0])
print("Fecha = ", row[1])
print("Título = ", row[2])
print("Descripción = ", row[3])
print("Estado = ", row[4], "\n")
except Error:
print("Error al leer la lista de tareas", Error)
@classmethod
def eliminar_tarea(cls, id_tarea):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.borrar_tarea(), (id_tarea,))
except Error:
print("Error al eliminar la tarea", Error)
finally:
db.commit()
@classmethod
def registrar_usuario(cls, nombre_usuario, email, contrasenia):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.registrarusuario(), (nombre_usuario, email, contrasenia))
except Error:
print("Error", Error)
finally:
db.commit()
@classmethod
def loguear_usuario(cls, nombre_usuario, contrasenia):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.existe_usuario(), (nombre_usuario, contrasenia))
return cursor.fetchone()[0]
except Error:
print("Error", Error)
finally:
db.commit()
@classmethod
def id_usuario(cls, nombre_usuario):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.retorna_id_usuario(), (nombre_usuario, ))
return cursor.fetchone()[0]
except Error:
print("Error", Error)
|
[
"alexisndelgado@gmail.com"
] |
alexisndelgado@gmail.com
|
074e39bc74b5205dfecb5d90f2cd5a25847b0312
|
bb93b0907ed8f7c8c0e2bed23dcf2fe948c39b8d
|
/08-tuples.py
|
34abd356c6c615e4a40e1344285aeda269431484
|
[] |
no_license
|
hue113/complete-python
|
103b0e8b2c74a6a85a0c69227790fa17cada7e19
|
c82ba9dd9a8c7ef2b84e2e6b8b33ba44f3974049
|
refs/heads/master
| 2023-03-21T17:30:30.292050
| 2021-03-14T22:40:16
| 2021-03-14T22:40:16
| 347,771,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31
|
py
|
# Tuple: like an immutable list
|
[
"huepham113@gmail.com"
] |
huepham113@gmail.com
|
3166d19669d179ae390fe0176d83516606e617ba
|
9c991a8b7bbdda40d9115d685122cf63627a1ace
|
/Week 1/Day1Practice/madlib.py
|
c5e85cfff4c6a4833517dcdcabfcb373a663a619
|
[] |
no_license
|
Zacros7164/unit1
|
321844820178e16909df52f5620319e1aeeb0d4a
|
76d737067a685af110f6ec00ee315136c3cad51a
|
refs/heads/master
| 2020-04-06T11:26:00.023411
| 2019-02-12T14:06:00
| 2019-02-12T14:06:00
| 157,416,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
print "Madlibs!"
name = raw_input("Please give me a name. ")
subject = raw_input("Please give me a school subject. ")
print name + "'s favorite subject in school is " + subject + "."
|
[
"Zacros7164@gmail.com"
] |
Zacros7164@gmail.com
|
4a4c5276c3bf38dc20522b4f06a995c51f55462c
|
79d471c012ec9220836cf529d6062803c6fadb03
|
/localizer.py
|
9e8eb0782ede0b517412d026a4c69f2c0423f56f
|
[] |
no_license
|
Knevari/histogram-filter
|
4e4c6604258478f14c1d1bd0604faed3d7a56859
|
47930837ff816769dbc9d6cb7f9ccc19d75040d3
|
refs/heads/master
| 2020-12-09T21:37:18.329609
| 2020-01-17T14:22:27
| 2020-01-17T14:22:27
| 233,422,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
from helpers import normalize, blur
def initialize_beliefs(grid):
height = len(grid)
width = len(grid[0])
area = height * width
belief_per_cell = 1.0 / area
beliefs = []
for i in range(height):
row = []
        for j in range(width):
row.append(belief_per_cell)
beliefs.append(row)
return beliefs
def sense(color, grid, beliefs, p_hit, p_miss):
new_beliefs = []
height = len(grid)
width = len(grid[0])
for i in range(height):
row = []
        for j in range(width):
hit = (grid[i][j] == color)
row.append(beliefs[i][j] * (hit * p_hit + (1-hit) * p_miss))
new_beliefs.append(row)
return normalize(new_beliefs)
def move(dy, dx, beliefs, blurring):
height = len(beliefs)
width = len(beliefs[0])
new_G = [[0.0 for i in range(width)] for j in range(height)]
for i, row in enumerate(beliefs):
for j, cell in enumerate(row):
new_i = (i + dy) % height
new_j = (j + dx) % width
new_G[int(new_i)][int(new_j)] = cell
return blur(new_G, blurring)
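
# --- Hypothetical usage sketch (not part of the original file) ---
# Only initialize_beliefs() is exercised here, since sense() and move()
# additionally need normalize() and blur() from the project's helpers module.
if __name__ == '__main__':
    grid = [['r', 'g', 'g'],
            ['g', 'r', 'g']]
    beliefs = initialize_beliefs(grid)
    print(beliefs)  # six cells, each starting at 1/6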
|
[
"mateus7319@gmail.com"
] |
mateus7319@gmail.com
|
d2e18daba5039bfa0fe53bdc30e97c234ded7ec8
|
bbfa9cdfd5f09c833ab9190cd4ad5a46e7a515e7
|
/effective-python/2020-05/item_61.py
|
863a8f8f00e61d939277ee2b82426ba026599225
|
[] |
no_license
|
alexchonglian/readings
|
775204e013a2301f08fee96c5e8b116842faebcb
|
03cb6cb266d8d2376db411e9b12e9b6cd1f2b33b
|
refs/heads/master
| 2022-12-02T13:56:56.878477
| 2021-06-18T05:53:14
| 2021-06-18T05:53:14
| 218,573,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,393
|
py
|
import random
random.seed(1234)
import logging
from pprint import pprint
from sys import stdout as STDOUT
# Write all output to a temporary directory
import atexit
import gc
import io
import os
import tempfile
TEST_DIR = tempfile.TemporaryDirectory()
atexit.register(TEST_DIR.cleanup)
# Make sure Windows processes exit cleanly
OLD_CWD = os.getcwd()
atexit.register(lambda: os.chdir(OLD_CWD))
os.chdir(TEST_DIR.name)
def close_open_files():
everything = gc.get_objects()
for obj in everything:
if isinstance(obj, io.IOBase):
obj.close()
atexit.register(close_open_files)
def example(i): print(f'\n==== Example {i} ====')
example(1)
class EOFError(Exception):
pass
class ConnectionBase:
def __init__(self, connection):
self.connection = connection
self.file = connection.makefile('rb')
def send(self, command):
line = command + '\n'
data = line.encode()
self.connection.send(data)
def receive(self):
line = self.file.readline()
if not line:
raise EOFError('Connection closed')
return line[:-1].decode()
example(2)
import random
WARMER = 'Warmer'
COLDER = 'Colder'
UNSURE = 'Unsure'
CORRECT = 'Correct'
class UnknownCommandError(Exception):
pass
example(3)
example(4)
example(5)
example(6)
class Session(ConnectionBase):
def __init__(self, *args):
super().__init__(*args)
self._clear_state(None, None)
def _clear_state(self, lower, upper):
self.lower = lower
self.upper = upper
self.secret = None
self.guesses = []
def loop(self):
while command := self.receive():
parts = command.split(' ')
if parts[0] == 'PARAMS':
self.set_params(parts)
elif parts[0] == 'NUMBER':
self.send_number()
elif parts[0] == 'REPORT':
self.receive_report(parts)
else:
raise UnknownCommandError(command)
def set_params(self, parts):
assert len(parts) == 3
lower = int(parts[1])
upper = int(parts[2])
self._clear_state(lower, upper)
def next_guess(self):
if self.secret is not None:
return self.secret
while True:
guess = random.randint(self.lower, self.upper)
if guess not in self.guesses:
return guess
def send_number(self):
guess = self.next_guess()
self.guesses.append(guess)
self.send(format(guess))
def receive_report(self, parts):
assert len(parts) == 2
decision = parts[1]
last = self.guesses[-1]
if decision == CORRECT:
self.secret = last
print(f'Server: {last} is {decision}')
example(7)
example(8)
example(9)
example(10)
import contextlib
import math
class Client(ConnectionBase):
def __init__(self, *args):
super().__init__(*args)
self._clear_state()
def _clear_state(self):
self.secret = None
self.last_distance = None
@contextlib.contextmanager
def session(self, lower, upper, secret):
print(f'Guess a number between {lower} and {upper}!'
f' Shhhhh, it\'s {secret}.')
self.secret = secret
self.send(f'PARAMS {lower} {upper}')
try:
yield
finally:
self._clear_state()
self.send('PARAMS 0 -1')
def request_numbers(self, count):
for _ in range(count):
self.send('NUMBER')
data = self.receive()
yield int(data)
if self.last_distance == 0:
return
def report_outcome(self, number):
new_distance = math.fabs(number - self.secret)
decision = UNSURE
if new_distance == 0:
decision = CORRECT
elif self.last_distance is None:
pass
elif new_distance < self.last_distance:
decision = WARMER
elif new_distance > self.last_distance:
decision = COLDER
self.last_distance = new_distance
self.send(f'REPORT {decision}')
return decision
example(11)
import socket
from threading import Thread
def handle_connection(connection):
with connection:
session = Session(connection)
try:
session.loop()
except EOFError:
pass
def run_server(address):
with socket.socket() as listener:
# Allow the port to be reused
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(address)
listener.listen()
while True:
connection, _ = listener.accept()
thread = Thread(target=handle_connection,
args=(connection,),
daemon=True)
thread.start()
example(12)
def run_client(address):
with socket.create_connection(address) as connection:
client = Client(connection)
with client.session(1, 5, 3):
results = [(x, client.report_outcome(x))
for x in client.request_numbers(5)]
with client.session(10, 15, 12):
for number in client.request_numbers(5):
outcome = client.report_outcome(number)
results.append((number, outcome))
return results
example(13)
def main():
address = ('127.0.0.1', 1234)
server_thread = Thread(
target=run_server, args=(address,), daemon=True)
server_thread.start()
results = run_client(address)
for number, outcome in results:
print(f'Client: {number} is {outcome}')
main()
example(14)
class AsyncConnectionBase:
def __init__(self, reader, writer): # Changed
self.reader = reader # Changed
self.writer = writer # Changed
async def send(self, command):
line = command + '\n'
data = line.encode()
self.writer.write(data) # Changed
await self.writer.drain() # Changed
async def receive(self):
line = await self.reader.readline() # Changed
if not line:
raise EOFError('Connection closed')
return line[:-1].decode()
example(15)
example(16)
example(17)
example(18)
example(19)
class AsyncSession(AsyncConnectionBase): # Changed
def __init__(self, *args):
super().__init__(*args)
self._clear_values(None, None)
def _clear_values(self, lower, upper):
self.lower = lower
self.upper = upper
self.secret = None
self.guesses = []
async def loop(self): # Changed
while command := await self.receive(): # Changed
parts = command.split(' ')
if parts[0] == 'PARAMS':
self.set_params(parts)
elif parts[0] == 'NUMBER':
await self.send_number() # Changed
elif parts[0] == 'REPORT':
self.receive_report(parts)
else:
raise UnknownCommandError(command)
def set_params(self, parts):
assert len(parts) == 3
lower = int(parts[1])
upper = int(parts[2])
self._clear_values(lower, upper)
def next_guess(self):
if self.secret is not None:
return self.secret
while True:
guess = random.randint(self.lower, self.upper)
if guess not in self.guesses:
return guess
async def send_number(self): # Changed
guess = self.next_guess()
self.guesses.append(guess)
await self.send(format(guess)) # Changed
def receive_report(self, parts):
assert len(parts) == 2
decision = parts[1]
last = self.guesses[-1]
if decision == CORRECT:
self.secret = last
print(f'Server: {last} is {decision}')
example(20)
example(21)
example(22)
example(23)
class AsyncClient(AsyncConnectionBase): # Changed
def __init__(self, *args):
super().__init__(*args)
self._clear_state()
def _clear_state(self):
self.secret = None
self.last_distance = None
@contextlib.asynccontextmanager # Changed
async def session(self, lower, upper, secret): # Changed
print(f'Guess a number between {lower} and {upper}!'
f' Shhhhh, it\'s {secret}.')
self.secret = secret
await self.send(f'PARAMS {lower} {upper}') # Changed
try:
yield
finally:
self._clear_state()
await self.send('PARAMS 0 -1') # Changed
async def request_numbers(self, count): # Changed
for _ in range(count):
await self.send('NUMBER') # Changed
data = await self.receive() # Changed
yield int(data)
if self.last_distance == 0:
return
async def report_outcome(self, number): # Changed
new_distance = math.fabs(number - self.secret)
decision = UNSURE
if new_distance == 0:
decision = CORRECT
elif self.last_distance is None:
pass
elif new_distance < self.last_distance:
decision = WARMER
elif new_distance > self.last_distance:
decision = COLDER
self.last_distance = new_distance
await self.send(f'REPORT {decision}') # Changed
# Make it so the output printing is in
# the same order as the threaded version.
await asyncio.sleep(0.01)
return decision
example(24)
import asyncio
async def handle_async_connection(reader, writer):
session = AsyncSession(reader, writer)
try:
await session.loop()
except EOFError:
pass
async def run_async_server(address):
server = await asyncio.start_server(
handle_async_connection, *address)
async with server:
await server.serve_forever()
example(25)
async def run_async_client(address):
# Wait for the server to listen before trying to connect
await asyncio.sleep(0.1)
streams = await asyncio.open_connection(*address) # New
client = AsyncClient(*streams) # New
async with client.session(1, 5, 3):
results = [(x, await client.report_outcome(x))
async for x in client.request_numbers(5)]
async with client.session(10, 15, 12):
async for number in client.request_numbers(5):
outcome = await client.report_outcome(number)
results.append((number, outcome))
_, writer = streams # New
writer.close() # New
await writer.wait_closed() # New
return results
example(26)
async def main_async():
address = ('127.0.0.1', 4321)
server = run_async_server(address)
asyncio.create_task(server)
results = await run_async_client(address)
for number, outcome in results:
print(f'Client: {number} is {outcome}')
logging.getLogger().setLevel(logging.ERROR)
asyncio.run(main_async())
logging.getLogger().setLevel(logging.DEBUG)
|
[
"alexchonglian@gmail.com"
] |
alexchonglian@gmail.com
|
1d851a0b72fbdf9725b48f0991a89504fbb6cf55
|
e3d6acf088991d776ed17b61e464ef128b83e6da
|
/src/enums/type.py
|
ce89b3f44f25b45100f5afe9e4030269c107e187
|
[
"Apache-2.0"
] |
permissive
|
antamb/google-personal-assistant
|
407c6a0e420d667810571bcb5b58a5a3130bde1b
|
a81d1e65cd5d42e963bd359482a0ba7e3879a1d5
|
refs/heads/master
| 2020-12-03T02:09:26.805036
| 2017-07-01T09:12:06
| 2017-07-01T09:12:06
| 95,910,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
from enum import Enum
class Type(Enum):
OTHER = 1
EVENT = 2
PERSON = 3
UNKNOWN = 4
LOCATION = 5
WORK_OF_ART = 6
ORGANIZATION = 7
CONSUMER_GOOD = 8
entities_type = {
Type.EVENT: 'Event',
Type.PERSON: 'Person',
Type.UNKNOWN: 'Unknown',
Type.OTHER: 'Other types',
Type.LOCATION: 'Location',
Type.WORK_OF_ART: ' Work of art',
Type.ORGANIZATION: 'Organization',
Type.CONSUMER_GOOD: 'Consumer goods',
}
def get_type_from_value(value):
value_type = Type.UNKNOWN
for t in Type:
if entities_type[t] == value:
value_type = t
return value_type
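
# --- Hypothetical usage sketch (not part of the original file) ---
# Shows the round trip between Type members and their display strings.
if __name__ == '__main__':
    print(entities_type[Type.PERSON])       # 'Person'
    print(get_type_from_value('Person'))    # Type.PERSON
    print(get_type_from_value('Nonsense'))  # Type.UNKNOWN (fallback)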
|
[
"anta.aidara@gmail.com"
] |
anta.aidara@gmail.com
|
4f07499c2074eb8c88a885caeb77b365c77adf2b
|
60fdc04010f1de5ed8017ae6f9d455feab94c33a
|
/juego con tortuga 8.py
|
5bf62bb0d7652da17d3342d7d845031f68dbc925
|
[] |
no_license
|
JDHINCAMAN/Python_examples
|
af7ef6c4c6df196dd15bf602c967cc56ec088b27
|
010b2b415fc9c61a4dcfd7728d3d7a7231b531c8
|
refs/heads/main
| 2023-03-23T12:09:38.245610
| 2021-03-23T14:05:55
| 2021-03-23T14:05:55
| 350,734,987
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
import turtle
t = turtle.Pen()
t.reset()
for x in range(1,38):
t.forward(100)
t.left(175)
|
[
"noreply@github.com"
] |
noreply@github.com
|
0d72f76083eab3990a6815596501ba6a7019de76
|
ebb081aea082ea8964c6de96d8ee4063e2660eba
|
/question_set.py
|
fbbaeb74231f4b8bbcc8727ace37848045609470
|
[] |
no_license
|
tramlam-ng/QuestionAnsweringSystem
|
8298f79764917e09e9ae34510cbedaf3b87f0d94
|
ca28ef59fe8eaf7136bf9c71a2d88c2b63ffac74
|
refs/heads/master
| 2022-01-14T15:47:13.364148
| 2019-01-12T13:57:25
| 2019-01-12T13:57:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
data1=pd.read_csv('WikiQA-train.tsv',delimiter='\t',encoding='utf-8')
data2=pd.read_csv('WikiQA-test.tsv',delimiter='\t',encoding ='utf-8')
data = data1.append(data2, ignore_index=True)
#Extracting the unique questions along with their questionID
def extract_questions(data):
new_data=data.drop(['DocumentID','DocumentTitle','Label'],axis=1)
d=new_data.drop_duplicates()
return d
d=extract_questions(data)
d.to_csv('questions.csv',index=False)
|
[
"noreply@github.com"
] |
noreply@github.com
|
21dad83cf27d3b9f8a2e6cff7584c09f606351a6
|
aff5cc92f38213a45323d7dede291dd918e96519
|
/simulation/crystal_mode_code/plane_transistion_plot.py
|
7c7ad320310ff374257a8f598b9f15e2ec976c37
|
[] |
no_license
|
nistpenning/calc
|
bd475b75a36ba93e74356a37529d0f9dac30a083
|
15d651bcc5c067032041b5ad9cf0be38169bb750
|
refs/heads/master
| 2021-01-18T22:59:31.619436
| 2015-11-03T23:44:05
| 2015-11-03T23:44:05
| 32,483,830
| 3
| 1
| null | 2015-06-17T16:58:16
| 2015-03-18T20:54:43
|
Matlab
|
UTF-8
|
Python
| false
| false
| 4,645
|
py
|
__author__ = 'sbt'
"""
Makes a plot of the rotation frequency of the
2-1 plane transition for a given configuration of the ion trap.
"""
from mode_analysis_code import ModeAnalysis
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
if __name__ == "__main__":
# Select the trapping and wall potentials which will be used
# for all future runs
trappotential = (0.0, -873, -1000)
wallpotential = 0.1
precision_solving = True
# Determines the number of ions to find the transition frequency for.
nionlist = [19, 20, 26, 37, 50, 61, 75, 91, 110, 127, 130, 169, 190, 217, 231, 300, 331]
currentfrequency = 93
transistionfrequencies = []
# Iterate through number of ions to test for stability
for N in nionlist:
if N > 100:
# Set to false to decrease run time for the biggest crystals
# and run with un-perturbed crystals
# (potentially not global energy minimum)
precision_solving = True
# Instantiate a crystal and see if it is stable
crystal = ModeAnalysis(N=N, Vtrap=trappotential, Ctrap=1.0, ionmass=None,
B=4.4588, frot=currentfrequency, Vwall=wallpotential,
wall_order=2, quiet=False, precision_solving=precision_solving)
crystal.run()
# Increase the frequency until stability is lost- most important for the first
# crystal tested
while crystal.is_plane_stable():
print("Crystal of", N, "is currently at", currentfrequency,
"increasing to ", currentfrequency + 1)
currentfrequency += 1
crystal = ModeAnalysis(N=N, Vtrap=trappotential, Ctrap=1.0, ionmass=None,
B=4.4588, frot=currentfrequency, Vwall=wallpotential,
wall_order=2,
quiet=False, precision_solving=precision_solving)
crystal.run()
        # When stability is lost, reduce the frequency to find where it resumes
while not crystal.is_plane_stable():
print("Found turning point: reducing frequency from", currentfrequency, "to ",
currentfrequency - 1)
currentfrequency -= 1
crystal = ModeAnalysis(N=N, Vtrap=trappotential, Ctrap=1.0, ionmass=None,
B=4.4588, frot=currentfrequency, Vwall=wallpotential,
wall_order=2,
quiet=False, precision_solving=precision_solving)
crystal.run()
# Once stability has resumed the lowest frequency at which 1->2 transition occurs is stored
print("Transistion frequency is", currentfrequency + 1, " for number of ions", crystal.Nion)
transistionfrequencies.append(currentfrequency + 1)
print("Transitions found:")
print("nions:", nionlist)
print("frequencies", transistionfrequencies)
#########################################
transfreq=transistionfrequencies
nions=nionlist
shells=[1,2,3,4,5,6,7,8,9,10]
shelln=[7,19,37,61,91,127,169,217,271,331]
def func(x, a, b, c):
return a * np.exp(-b * x) + c
fig = plt.figure(figsize=(14, 12))
plt.rcParams['font.size'] = 16
ax = fig.add_subplot(1,1,1)
for i in range(len(transfreq)):
if nions[i] in shelln:
plt.plot(transfreq[i],nions[i],"o",color='red')
else:
plt.plot(transfreq[i],nions[i],"o",color='blue')
plt.title("1-2 Plane Transistion for $V_{Mid}=-.873, \ V_{Center}=-1.0 \ (kV) V_{Wall} =1 V$", y=1.02)
plt.xlabel("Transistion Frequency (kHz)")
plt.ylabel("Number of Ions")
major_ticks = np.arange(min(transfreq),max(transfreq),2)
minor_ticks = np.arange(min(transfreq),max(transfreq),.5)
print(major_ticks)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
yticks=np.arange(0,400,25)
yticksmin=np.arange(0,400,5)
ax.set_yticks(yticks)
ax.set_yticks(yticksmin, minor=True)
fig = plt.grid(True)
fig = plt.xlim([min(transfreq)*.99,max(transfreq)*1.01])
popt, pcov = curve_fit(func, transfreq, nions,p0=[127,.1,122])
print(popt)
x=np.linspace(min(transfreq)*.99,max(transfreq)*1.01,200)
plt.plot(x, func(x, *popt), 'r-', label="Fitted Curve",color="black")
plt.legend(loc=1)
for N in shelln:
plt.plot([min(transfreq)*.99,max(transfreq)*1.01],[N,N],"--",color='black')
for N in shells:
plt.text(max(transfreq)*1.013,shelln[N-1],"%d" %N)
plt.show()
|
[
"storrisi@u.rochester.edu"
] |
storrisi@u.rochester.edu
|
8b06643905de8fc715a65a1df5347cc97d12961b
|
dfcddf4ed51bc48c4bd6288e3517fd8629000fbd
|
/app/http/responses/__init__.py
|
cb1798211886d0480004cb258bb6cdb3ead1bd94
|
[] |
no_license
|
ugabiga/flask-boilerplate
|
508548d1f713c9f4412e43c68dd59d9a6210882d
|
5a317a80295aacf9bfc8c7c1a5736d2d5b22fc98
|
refs/heads/master
| 2022-08-30T16:28:58.332410
| 2022-08-23T12:03:34
| 2022-08-23T12:03:34
| 208,466,604
| 1
| 0
| null | 2022-08-23T12:03:35
| 2019-09-14T16:12:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
from typing import Any, Tuple, Type
import marshmallow as ma
from flask import jsonify
from flask.wrappers import Response
from core.use_cases.output import Failure, Output
def build_success_output_with_schema(
output: Output, schema_class: Type[ma.Schema], many: bool = None
) -> Tuple[Response, int]:
output_schema = schema_class().dump(output.get_data(), many=many)
return build_success_response(output_schema, output.get_meta())
def build_success_response(data: Any, meta: dict = None) -> Tuple[Response, int]:
response = {"data": data}
if meta is not None:
response["meta"] = meta
return jsonify(response), 200
def build_failure_response(output: Failure) -> Tuple[Response, int]:
return jsonify(error=output.get_type(), error_message=output.get_message()), 400
def build_response(
output: Output, schema_class: Type[ma.Schema] = None, many: bool = None
) -> Tuple[Response, int]:
if output.is_success() and schema_class is not None:
return build_success_output_with_schema(output, schema_class, many)
if output.is_success():
return build_success_response(output.get_data(), output.get_meta())
if isinstance(output, Failure):
return build_failure_response(output)
return build_failure_response(
Failure.build_empty_internal_response_error("in_response_builder")
)
|
[
"ugabiga@gmail.com"
] |
ugabiga@gmail.com
|
31519fa2a14b4aedde98b2f3a8defd664bd00223
|
69ef0b99e5b2a1fde4780501e87725a618c7889f
|
/abc/python3/hello.py
|
346ba43f5dd4dcecc557910820aefe3bf7f003ce
|
[] |
no_license
|
wsz-/real_hub
|
350f5133ec55fb0357a1c76e72ac6f93757352cb
|
f1b4d3140bc8c723076bba79fbaf8c0495592314
|
refs/heads/master
| 2021-01-10T21:14:13.724398
| 2012-11-02T04:42:25
| 2012-11-02T04:42:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
import sys
argvs=sys.argv
#print(len(argvs))
base="Hello"
err='''Usage:
hello
hello -p
hello -p $str
(case-insensitive)'''
arg_len=len(argvs)
if arg_len==1 :
    print(base,'world!')
elif arg_len==2:
if argvs[1].lower()=='-p':
        print(base,'world!')
else:
print(err);
elif arg_len==3:
if argvs[1].lower()=='-p':
print(base,argvs[2])
else:
print(err)
else:
print(err)
|
[
"cisir92@gmail.com"
] |
cisir92@gmail.com
|
8cbeb7315d0f6c9e820555d49e344399fd8269ca
|
992c31a3bda2467e9d90ec8989f15a4cd38bae2b
|
/drone.py
|
e51fef99bd7c3de22cda760187dc7caf67aed65a
|
[] |
no_license
|
aleksandarnikov/dwm
|
a35a83f720e75e85d23039a091d280675d716797
|
3de3f0795955fd30056e4b71cd1b92ef33950ccd
|
refs/heads/main
| 2023-01-06T16:17:42.826991
| 2020-11-14T12:11:44
| 2020-11-14T12:11:44
| 312,095,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
import paho.mqtt.client as mqtt
import time
import random
import sys
name = sys.argv[1]
client = mqtt.Client(name)
client.connect("localhost")
def on_publish(client, userdata, result):
print("data published \n")
pass
client.on_publish = on_publish
# ret = client.publish("dwm/node/abc1/uplink/location", '{"position":{"x":1.3936733,"y":1.174517,"z":-0.26708269,"quality":81},"superFrameNumber":136}')
x = 5
y = 4
dx = 0.06
dy = 0.05
while True:
ddx = x + dx
ddy = y + dy
if ddx >= 10 or ddx < 0:
dx = -dx
continue
if ddy >= 10 or ddy < 0:
dy = -dy
continue
x = ddx
y = ddy
ret = client.publish("dwm/node/" + name + "/uplink/location", '{"position":{"x":' + str(x) + ',"y":' + str(y) + ',"z":-0.26708269,"quality":81},"superFrameNumber":136}')
print(x, y)
time.sleep(0.01)
# ret2 = client.publish("abc", "xyz")
client.loop_start() #start the loop
time.sleep(10)
|
[
"aleksandar.nikov@netcetera.com"
] |
aleksandar.nikov@netcetera.com
|
b5fd5e255e2b4a38a8967b95ec48bf042b24c2d1
|
939e8a8838ff66f72655a7c103bf79b31ccd6966
|
/MyApp/models.py
|
94b7a286c938b6311a14ab45019c9aed1b7cf375
|
[] |
no_license
|
github653224/ApiTest
|
3647292471fe11d8a124e0bd41061a2de3add5ed
|
9c1fc9c05dce38a4e2618c43943f8f44090ab4f2
|
refs/heads/master
| 2023-02-03T16:54:21.599640
| 2020-12-18T10:17:01
| 2020-12-18T10:17:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,410
|
py
|
from django.db import models
# Create your models here.
class DB_tucao(models.Model):
    user = models.CharField(max_length=30,null=True) # name of the person posting the feedback
    text = models.CharField(max_length=1000,null=True) # feedback content
    ctime = models.DateTimeField(auto_now=True) # creation time
    def __str__(self):
        return self.text+ str(self.ctime)
class DB_home_href(models.Model):
    name = models.CharField(max_length=30,null=True) # hyperlink name
    href = models.CharField(max_length=2000,null=True) # hyperlink target
    def __str__(self):
        return self.name
class DB_project(models.Model):
    name = models.CharField(max_length=100,null=True) # project name
    remark = models.CharField(max_length=1000,null=True) # project notes
    user = models.CharField(max_length=15,null=True) # name of the project creator
    other_user = models.CharField(max_length=200,null=True) # other project members
    def __str__(self):
        return self.name
class DB_apis(models.Model):
    project_id = models.CharField(max_length=10,null=True) # project id
    name = models.CharField(max_length=100,null=True) # API name
    api_method = models.CharField(max_length=10,null=True) # request method
    api_url = models.CharField(max_length=1000,null=True) # url
    api_header = models.CharField(max_length=1000,null=True) # request headers
    api_login = models.CharField(max_length=10,null=True) # whether to send the login session
    api_host = models.CharField(max_length=100,null=True) # host
    des = models.CharField(max_length=100,null=True) # description
    body_method = models.CharField(max_length=20,null=True) # request body encoding
    api_body = models.CharField(max_length=1000,null=True) # request body
    result = models.TextField(null=True) # response body; stored as a large text field because it can be very long
    sign = models.CharField(max_length=10,null=True) # whether to verify the signature
    file_key = models.CharField(max_length=50,null=True) # file key
    file_name = models.CharField(max_length=50,null=True) # file name
    public_header = models.CharField(max_length=1000,null=True) # global variable - request headers
    last_body_method = models.CharField(max_length=20,null=True) # previous request body encoding
    last_api_body = models.CharField(max_length=1000,null=True) # previous request body
    def __str__(self):
        return self.name
class DB_apis_log(models.Model):
    user_id = models.CharField(max_length=10,null=True) # owning user id
    api_method = models.CharField(max_length=10,null=True) # request method
    api_url = models.CharField(max_length=1000,null=True) # url
    api_header = models.CharField(max_length=1000,null=True) # request headers
    api_login = models.CharField(max_length=10,null=True) # whether to send the login session
    api_host = models.CharField(max_length=100,null=True) # host
    body_method = models.CharField(max_length=20,null=True) # request body encoding
    api_body = models.CharField(max_length=1000,null=True) # request body
    sign = models.CharField(max_length=10,null=True) # whether to verify the signature
    file_key = models.CharField(max_length=50,null=True) # file key
    file_name = models.CharField(max_length=50,null=True) # file name
    def __str__(self):
        return self.api_url
class DB_cases(models.Model):
    project_id = models.CharField(max_length=10,null=True) # owning project id
    name = models.CharField(max_length=50,null=True) # test case name
    def __str__(self):
        return self.name
class DB_step(models.Model):
    Case_id = models.CharField(max_length=10,null=True) # id of the owning test case
    name = models.CharField(max_length=50,null=True) # step name
    index = models.IntegerField(null=True) # execution order
    api_method = models.CharField(max_length=10,null=True) # request method
    api_url = models.CharField(max_length=1000,null=True) # url
    api_host = models.CharField(max_length=100,null=True) # host
    api_header = models.CharField(max_length=1000,null=True) # request headers
    api_body_method = models.CharField(max_length=10,null=True) # request body encoding
    api_body = models.CharField(max_length=10,null=True) # request body
    get_path = models.CharField(max_length=500,null=True) # extract from response - path expression
    get_zz = models.CharField(max_length=500,null=True) # extract from response - regex
    assert_zz = models.CharField(max_length=500,null=True) # assert on response - regex
    assert_qz = models.CharField(max_length=500,null=True) # assert on response - full-text match
    assert_path = models.CharField(max_length=500,null=True) # assert on response - path expression
    mock_res = models.CharField(max_length=1000,null=True) # mock response value
    public_header = models.CharField(max_length=1000,null=True) # global variable - request headers
    def __str__(self):
        return self.name
class DB_project_header(models.Model):
    project_id = models.CharField(max_length=10,null=True) # owning project id
    name = models.CharField(max_length=20,null=True) # name of the header variable
    key = models.CharField(max_length=20,null=True) # header key
    value = models.TextField(null=True) # header value; stored as a large text field because cookies can reach several thousand characters
    def __str__(self):
        return self.name
class DB_host(models.Model):
    host = models.CharField(max_length=100,null=True) # host value
    des = models.CharField(max_length=100,null=True) # host description
    def __str__(self):
        return self.host
|
[
"wangzijia@xiaozhu.com"
] |
wangzijia@xiaozhu.com
|
a666a99db10c5f01012215a5c6ee570d7c03bffa
|
09e4bd1f19806b0ed223066be6fa381fb2b65598
|
/monitor/task.py
|
9ba0e801e38687e93741f9f85cf61d276d4c6df7
|
[] |
no_license
|
icellus/shell_scripts
|
f220a90f37a8070b04302a3be80ef03a58517134
|
7dc4d85b5b7fcd6ff98ebc6bdfa6ae4d3df55c48
|
refs/heads/master
| 2021-08-27T16:43:12.397195
| 2021-08-23T02:40:07
| 2021-08-23T02:40:07
| 143,691,816
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/05/24 00:00
# @Desc : Scheduled task: run a command at the required time interval
# @File : task.py
# @Software: PyCharm
import time, os
from monitor import task
def roll_back(cmd, inc = 60):
while True:
        # call the target method / function
task()
time.sleep(inc)
roll_back("echo %time%", 30)
|
[
"2283411628@qq.com"
] |
2283411628@qq.com
|
54316a4f35c167022b648ae75bf34184134084ad
|
b0c0706e4c4f41a729ec235e31ba90385eb44845
|
/coinlist/migrations/0002_auto_20180502_1107.py
|
77d0be8b066d8e33d3c6253e4f0c6ef73b7a80a7
|
[] |
no_license
|
kupreeva/TopCoin
|
d7a6a56e6df869c0f978024c9e34351c75a0a580
|
babe9e306a38ab4dbd457b6c3e579fa0c3cf86f4
|
refs/heads/master
| 2020-03-14T23:22:51.481252
| 2018-05-02T15:40:09
| 2018-05-02T15:40:09
| 131,843,712
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-05-02 11:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coinlist', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='coin',
name='coins_daily',
field=models.FloatField(),
),
]
|
[
"kristine.kupreeva@gmail.com"
] |
kristine.kupreeva@gmail.com
|
2bf07793bfef24a2bed035690bb6849533f776bc
|
1239393937f155fd5090c41f462262098fa6c6c1
|
/dev/docs/source/conf.py
|
20af5dc2d5e88af3e123d49e2e27b9d9573e3297
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause"
] |
permissive
|
hyunjinb/XlsxWriter
|
af4fe17c11b81c05ba8ec6adf27d0f6d1d632399
|
b4c4b499ffb3db8e0fa1b306880bcbcb3675fd4d
|
refs/heads/master
| 2021-01-23T13:42:00.785444
| 2017-09-05T23:17:06
| 2017-09-05T23:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,926
|
py
|
# -*- coding: utf-8 -*-
#
# XlsxWriter documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 28 00:12:14 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'XlsxWriter'
copyright = u'2013-2017, John McNamara'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.9'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/2/': None}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# # 'nosidebar': True,
# 'sidebarbgcolor': '#F2F2F2',
# 'relbarbgcolor': '#9CB640',
# 'linkcolor': '#9CB640',
# 'sidebarlinkcolor': '#9CB640',
# 'footerbgcolor': '#FFFFFF',
# 'footertextcolor': '#9CB640',
# 'headtextcolor': '#9CB640',
# 'codebgcolor': '#FFFFFF',
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "XlsxWriter Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_images/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'XlsxWriterdoc'
# Remove permalinks.
html_add_permalinks = ""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'XlsxWriter.tex', u'Creating Excel files with Python and XlsxWriter',
u'John McNamara', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_images/logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'xlsxwriter', u'XlsxWriter Documentation',
[u'John McNamara'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'XlsxWriter', u'XlsxWriter Documentation',
u'John McNamara', 'XlsxWriter', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'XlsxWriter'
epub_author = u'John McNamara'
epub_publisher = u'John McNamara'
epub_copyright = u'2013-2017, John McNamara'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
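# ---------------------------------------------------------------------------
# Illustrative note (added, not part of the original conf.py): with settings
# like the above, the documentation would typically be built with commands
# such as the following; the source and output paths are assumptions.
#   sphinx-build -b html . _build/html
#   sphinx-build -b latex . _build/latex
# ---------------------------------------------------------------------------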
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
88c0d4f7001e4d7f2d2a994d979b9b99a1ed7d08
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/ashvin/icml2020/hand/buffers/pen1.py
|
c92cde36156496ccf82fa584986ffbc35a17a452
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,576
|
py
|
"""
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy, BinnedGMMPolicy
from rlkit.torch.networks import Clamp
if __name__ == "__main__":
variant = dict(
num_epochs=1001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
),
buffer_policy_class=BinnedGMMPolicy,
buffer_policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
num_gaussians=11,
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
use_validation_buffer=True,
)
search_space = {
'env': ["pen-sparse-v0", "door-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.5, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
'qf_kwargs.output_activation': [Clamp(max=0)],
'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [11, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
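# Illustrative sketch (assumed behaviour, not taken from rlkit itself): the
# sweeper iterates the Cartesian product of the `search_space` values and
# overlays each combination onto `variant` (dotted keys address nested dicts),
# roughly:
#   from itertools import product
#   combos = list(product(*search_space.values()))  # 2 envs x 3 seeds = 6 runs here
# so run_variants() receives six fully specified variants.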
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|
796df0bd81da274209df3eab5785899295b1efb8
|
143fa4b592ca6cbd420d78ceb6991ecce58370cb
|
/src/anpocs44.py
|
9920404c4f039c612ad78e4676f3c7ed73642beb
|
[
"MIT"
] |
permissive
|
vmussa/anpocs-scraper
|
3b07d9f861275404acc870910682aa79604a20b2
|
dd042f3765bea7e699b77bcf323738e761e70b17
|
refs/heads/main
| 2023-04-28T23:12:01.330071
| 2021-05-17T23:29:11
| 2021-05-17T23:29:11
| 356,652,869
| 3
| 3
|
MIT
| 2021-05-15T00:41:53
| 2021-04-10T17:37:15
|
Python
|
UTF-8
|
Python
| false
| false
| 4,128
|
py
|
"""Código para a aquisição dos dados dos Encontros Anuais da ANPOCS."""
from bs4 import BeautifulSoup
import pandas as pd
import re
from tqdm import tqdm
import sys
from os import mkdir, sep
from os.path import abspath, dirname, exists
import requests
from helium import (
start_chrome, click, get_driver, kill_browser, find_all, S
)
EVENT_ID = 44
BASE_URLS = [
"https://www.anpocs2020.sinteseeventos.com.br/atividade/hub/gt",
"https://www.anpocs2020.sinteseeventos.com.br/atividade/hub/simposioposgraduada"
]
def get_page_source(url):
    """Gets a soup object for non-interactive pages."""
    r = requests.get(url)
    soup = BeautifulSoup(r.content, 'html.parser')
    return soup
def get_urls(base_urls):
    """Gets all URLs of the pages to be scraped."""
urls = []
for base_url in base_urls:
soup = get_page_source(base_url)
urls_sources = soup.select("h5 > a")
urls += [a['href'] for a in urls_sources]
return urls
def get_interactive_page_source(url):
    """Gets the fully rendered page source."""
    # start Chrome to render the page source
try:
start_chrome(url, headless=True)
except Exception:
print(
"Erro: você precisa instalar o Google Chrome e o ChromeDriver par"
"a executar esse raspador."
)
sys.exit(1)
driver = get_driver()
    # click every "Veja mais!" button to reveal the abstract data
print(f"Raspando a página \"{driver.title}\". Isso pode demorar alguns segundos...")
buttons = find_all(S("//span[@onClick]"))
for _ in tqdm(range(len(buttons))):
click("Veja mais!")
print('Fim da raspagem da página.')
    # get a soup object from the page source rendered by helium
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    # close Chrome
    kill_browser()
    return soup
def get_page_data(soup):
    """Gets the data of the papers presented in a session."""
    # get the text data via the CSS selectors of each field
authors = [autor.text for autor in soup.select('i')]
titles = [titulo.text for titulo in soup.select('li > b')]
abstract_source = soup.find_all('div', id=re.compile('^resumoFull'))
abstracts = [abstract.text.strip() for abstract in abstract_source]
session = soup.select_one('h3.first').text.strip()
    # build a dict with the collected data
data = {
'autores': authors,
'titulo': titles,
'resumo': abstracts,
'sessao': session,
'id_evento': EVENT_ID
}
return data
def export_all_pages_data(urls):
    """Gets the paper data of all sessions and exports it to CSV."""
for url in urls:
soup = get_interactive_page_source(url)
data = get_page_data(soup)
df = pd.DataFrame(data)
output_path = f"{dirname(dirname(abspath(__file__)))}{sep}output{sep}"
filename = "resumos_anpocs44.csv"
if exists(output_path+filename):
df.to_csv(
output_path + filename,
mode='a',
index=False,
header=False
)
else:
try:
mkdir(output_path)
df.to_csv(output_path + filename, index=False)
except FileExistsError:
df.to_csv(output_path + filename, index=False)
def main():
print(
"Carregando algumas informações. A raspagem do 44º Encontro Anual da "
"ANPOCS iniciará em breve..."
)
urls = get_urls(BASE_URLS)
    # check whether files from previous scrapes already exist in the output folder
output_path = f"{dirname(dirname(abspath(__file__)))}{sep}output{sep}"
filename = "resumos_anpocs44.csv"
if exists(output_path+filename):
raise Exception(
"Os dados raspados já estão na pasta output. "
"Remova-os da pasta antes de rodar o raspador."
)
export_all_pages_data(urls)
print("O 44º Encontro foi raspado com sucesso.")
if __name__ == "__main__":
main()
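# Illustrative usage (assumed, not stated in the original file): run from the
# repository root so the relative output/ path resolves, e.g.
#   python src/anpocs44.py
# Google Chrome and a matching ChromeDriver must be installed for helium's
# start_chrome() call to succeed, as the error message above indicates.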
|
[
"vtrmussa@gmail.com"
] |
vtrmussa@gmail.com
|
cd154db704763f331c942f98c0e560adc5f97522
|
96681aca57fa55e82aeb7d9ca56041f20498bf37
|
/account/forms.py
|
fb3724a6e226c77ad07cd4721c0a98f1a1b1666d
|
[] |
no_license
|
karyshev63rus/docent63
|
191c57ae6310df91b5e7a5657ffab2f3fdb2249f
|
67c4312db1be3c79c287814fda6d91b039520cfe
|
refs/heads/master
| 2023-07-10T23:44:23.209061
| 2021-08-14T18:44:50
| 2021-08-14T18:44:50
| 373,348,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,044
|
py
|
from django import forms
from django.contrib.auth.models import User
from .models import Profile
class UserRegistrationForm(forms.ModelForm):
password = forms.CharField(label='Пароль',
widget=forms.PasswordInput)
password2 = forms.CharField(label='Повторите пароль',
widget=forms.PasswordInput)
class Meta:
model = User
fields = ('username', 'first_name', 'email')
labels = {
'username': 'Логин',
'first_name': 'Имя',
'email': 'Адрес эл. почты',
}
def clean_password2(self):
cd = self.cleaned_data
if cd['password'] != cd['password2']:
raise forms.ValidationError("Пароли не совпадают")
return cd['password2']
class UpdateUserForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
widgets = {
'first_name': forms.TextInput(
attrs={'class': 'form-control'}
),
'last_name': forms.TextInput(
attrs={'class': 'form-control'}
),
'email': forms.EmailInput(
attrs={'class': 'form-control'}
),
}
class UpdateProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('phone_number', 'address', 'postal_code', 'city', 'country')
widgets = {
'phone_number': forms.TextInput(
attrs={'class': 'form-control'}
),
'address': forms.TextInput(
attrs={'class': 'form-control'}
),
'postal_code': forms.TextInput(
attrs={'class': 'form-control'}
),
'city': forms.TextInput(
attrs={'class': 'form-control'}
),
'country': forms.TextInput(
attrs={'class': 'form-control'}
)
}
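# Illustrative sketch (added; the view code below is an assumption, not part of
# this app):
#   form = UserRegistrationForm(request.POST)
#   if form.is_valid():
#       new_user = form.save(commit=False)
#       new_user.set_password(form.cleaned_data['password'])
#       new_user.save()
# clean_password2() above is what rejects mismatched passwords before saving.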
|
[
"karyshev63rus@gmail.com"
] |
karyshev63rus@gmail.com
|
969d035c63ace1f7b4c413e93f06400bb2d2bf34
|
119437adb7830659307c18b79a9cc3f6bfc6fe40
|
/transformers_learning/english_sequence_labeling/torch_model_train.py
|
234011630b2febd960451887847252ee4bdd95c0
|
[] |
no_license
|
percent4/PyTorch_Learning
|
478bec35422cdc66bf41b4258e29fbcb6d24f60c
|
24184d49032c9c9a68142aff89dabe33adc17b52
|
refs/heads/master
| 2023-03-31T03:01:19.372830
| 2023-03-17T17:02:39
| 2023-03-17T17:02:39
| 171,400,828
| 16
| 7
| null | 2023-09-02T08:53:26
| 2019-02-19T03:47:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,513
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2021/1/31 15:01
# @Author : Jclian91
# @File : torch_model_train.py
# @Place : Yangpu, Shanghai
import json
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from transformers import BertForTokenClassification, BertTokenizer, BertConfig
from util import event_type, train_file_path, test_file_path
from util import MAX_LEN, BERT_MODEL_DIR, TRAIN_BATCH_SIZE, VALID_BATCH_SIZE, EPOCHS, LEARNING_RATE
from load_data import read_data
# tokenizer and label_2_id_dict
with open("{}_label2id.json".format(event_type), "r", encoding="utf-8") as f:
tag2idx = json.loads(f.read())
idx2tag = {v: k for k, v in tag2idx.items()}
class CustomDataset(Dataset):
def __init__(self, tokenizer, sentences, labels, max_len):
self.len = len(sentences)
self.sentences = sentences
self.labels = labels
self.tokenizer = tokenizer
self.max_len = max_len
def __getitem__(self, index):
sentence = str(self.sentences[index])
inputs = self.tokenizer.encode_plus(
sentence,
None,
add_special_tokens=True,
max_length=self.max_len,
truncation=True,
padding="max_length",
# pad_to_max_length=True,
return_token_type_ids=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
        # pad/truncate a copy so the stored label list is not mutated on every access
        label = list(self.labels[index])
        label = (label + [0] * MAX_LEN)[:MAX_LEN]
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'tags': torch.tensor(label, dtype=torch.long)
}
def __len__(self):
return self.len
# Creating the customized model
class BERTClass(torch.nn.Module):
def __init__(self):
super(BERTClass, self).__init__()
config = BertConfig.from_pretrained("./bert-base-uncased", num_labels=len(list(tag2idx.keys())))
self.l1 = BertForTokenClassification.from_pretrained('./bert-base-uncased', config=config)
# self.l2 = torch.nn.Dropout(0.3)
# self.l3 = torch.nn.Linear(768, 200)
def forward(self, ids, mask, labels):
output_1 = self.l1(ids, mask, labels=labels)
# output_2 = self.l2(output_1[0])
# output = self.l3(output_2)
return output_1
def flat_accuracy(preds, labels):
flat_preds = np.argmax(preds, axis=2).flatten()
flat_labels = labels.flatten()
return np.sum(flat_preds == flat_labels)/len(flat_labels)
def valid(model, testing_loader):
model.eval()
eval_loss = 0; eval_accuracy = 0
nb_eval_steps, nb_eval_examples = 0, 0
with torch.no_grad():
for _, data in enumerate(testing_loader):
ids = data['ids'].to(dev, dtype=torch.long)
mask = data['mask'].to(dev, dtype=torch.long)
targets = data['tags'].to(dev, dtype=torch.long)
output = model(ids, mask, labels=targets)
loss, logits = output[:2]
logits = logits.detach().cpu().numpy()
label_ids = targets.to('cpu').numpy()
accuracy = flat_accuracy(logits, label_ids)
eval_loss += loss.mean().item()
eval_accuracy += accuracy
nb_eval_examples += ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss/nb_eval_steps
print("Validation loss: {}".format(eval_loss))
print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
if __name__ == '__main__':
# Preparing for CPU or GPU usage
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tokenizer = BertTokenizer.from_pretrained('./{}'.format(BERT_MODEL_DIR))
# Creating the Dataset and DataLoader for the neural network
train_sentences, train_labels = read_data(train_file_path)
train_labels = [[tag2idx.get(l) for l in lab] for lab in train_labels]
test_sentences, test_labels = read_data(test_file_path)
test_labels = [[tag2idx.get(l) for l in lab] for lab in test_labels]
print("TRAIN Dataset: {}".format(len(train_sentences)))
print("TEST Dataset: {}".format(len(test_sentences)))
training_set = CustomDataset(tokenizer, train_sentences, train_labels, MAX_LEN)
testing_set = CustomDataset(tokenizer, test_sentences, test_labels, MAX_LEN)
train_params = {'batch_size': TRAIN_BATCH_SIZE, 'shuffle': True, 'num_workers': 0}
test_params = {'batch_size': VALID_BATCH_SIZE, 'shuffle': True, 'num_workers': 0}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
# train the model
model = BERTClass()
model.to(dev)
optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
for epoch in range(EPOCHS):
model.train()
for _, data in enumerate(training_loader):
ids = data['ids'].to(dev, dtype=torch.long)
mask = data['mask'].to(dev, dtype=torch.long)
targets = data['tags'].to(dev, dtype=torch.long)
loss = model(ids, mask, labels=targets)[0]
# optimizer.zero_grad()
if _ % 50 == 0:
print(f'Epoch: {epoch}, Batch: {_}, Loss: {loss.item()}')
optimizer.zero_grad()
loss.backward()
optimizer.step()
# model evaluate
valid(model, testing_loader)
torch.save(model.state_dict(), '{}_ner.pth'.format(event_type))
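    # Illustrative reload sketch (added; only the file name comes from the
    # torch.save call above, everything else is an assumption):
    #   model = BERTClass()
    #   model.load_state_dict(torch.load('{}_ner.pth'.format(event_type), map_location='cpu'))
    #   model.eval()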
|
[
"1137061634@qq.com"
] |
1137061634@qq.com
|
0fc64ab80d0fe321eccbc84cf5dfdc3c647f3803
|
966b8ce654c67bbabd4c5166e7bb6e2a7086d172
|
/xml_read2.py
|
8c0a0ffc82d17475694707f66371dacbfe122d34
|
[] |
no_license
|
muzklj/learn_code
|
c4a316fcdd4d8348fb7959b66194a60d9f89b010
|
7dde268175391c2d4a2911fd40074ada5e7016a4
|
refs/heads/main
| 2023-08-08T16:25:37.540950
| 2023-07-24T00:31:42
| 2023-07-24T00:31:42
| 396,725,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
'''
Author: MuZonghan
Date: 2021-07-23 15:20:07
LastEditTime: 2021-08-19 15:14:07
Description: count the number of object categories in the xml files
FilePath: /4pcodes/learncodes/xml_read2.py
'''
import os
import xml.dom.minidom
xml_path = '/home/trunk/muzklj/5datasets/bigdata/img-txt2/sec-all1-xml/'
files = os.listdir(xml_path)
gt_dict = {}
if __name__ == '__main__':
for xm in files:
xmlfile = xml_path + xm
        dom = xml.dom.minidom.parse(xmlfile) # parse the xml document
        root = dom.documentElement # get the document element object
filenamelist = root.getElementsByTagName("filename")
filename = filenamelist[0].childNodes[0].data
objectlist = root.getElementsByTagName("object")
for objects in objectlist:
namelist = objects.getElementsByTagName("name")
objectname = namelist[0].childNodes[0].data
if objectname == '-':
print(filename)
if objectname in gt_dict:
gt_dict[objectname] += 1
else:
gt_dict[objectname] = 1
dic = sorted(gt_dict.items(), key=lambda d: d[1], reverse=True)
print(dic)
# print(len(dic))
|
[
"“muzklj@163.com”"
] |
“muzklj@163.com”
|
efa28e9d986d4fe70e7cfe524ef2a44c04fde8b2
|
38ce870a1a4a9862b5d054aca31f5c0337c82ead
|
/arduino/libraries/ledtable/documentation/pixelorder_double_spiral.py
|
533bf98979abf1d143249c85d3f17016b55d2896
|
[
"MIT"
] |
permissive
|
pmerlin/ledtable
|
6a4cde37f6987be1a2ae6567aece1ec48c5bc60b
|
a94d276f8a06e0f7f05f5cc704018c899e56bd9f
|
refs/heads/master
| 2020-04-05T18:38:14.014614
| 2017-01-10T17:15:26
| 2017-01-10T17:15:26
| 157,106,815
| 1
| 0
|
MIT
| 2018-11-11T18:10:14
| 2018-11-11T18:10:14
| null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
def s(x, y, w, h):
if y == 0: return x
return s(y - 1, w - 1 - x, h - 1, w) + w
def s2(x, y, w, h):
m = min(x, y, w-1-x, h-1-y)
    # pixels on all rings outside ring m; the closed form below is equivalent
    # to the summation and is the value actually used
    outer = sum((w - m_*2) * 2 + (h - m_*2) * 2 - 4 for m_ in range(m))
    outer = m * 2 * (w + h - 2*m)
_x = x - m;
_y = y - m;
_w = w - 2 * m;
_h = h - 2 * m;
if _y == 0: return outer + _x;
elif _x == _w - 1: return outer + _w + _y - 1;
elif _y == _h - 1: return outer + _w + _h + _w - 3 - _x;
elif _x == 0: return outer + _w + _h + _w + _h - 4 - _y;
else: return "!{}{}".format(x, y)
w = 10
h = 14
for y in range(h):
print(*(s(x, y, w, h) for x in range(w)), sep = "\t")
print('--------------')
for y in range(h):
print(*(s2(x, y, w, h) for x in range(w)), sep = "\t")
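# Added commentary (not in the original): s() builds the index by peeling off the
# top row and recursing on the rotated remainder, while s2() computes the index of
# the same clockwise inward spiral in closed form from the ring number m, so the
# two printed tables are expected to agree.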
|
[
"niccokunzmann@rambler.ru"
] |
niccokunzmann@rambler.ru
|
70c3c06f681b066ac0388b0d3c1198b4074e9724
|
7f24023d365e013ec0924844c1a872edfb0c75b4
|
/tests/trac/trac-0186/check.py
|
08b3119a43dd3dd72dd22febf93509b88bca7eca
|
[
"Python-2.0",
"MIT",
"Apache-2.0"
] |
permissive
|
pabigot/pyxb
|
cd42c024607572c6363682d389e9296caf3f2857
|
5ee5ba54c9f702dc9c9efc2731ee547ecd4dae4a
|
refs/heads/next
| 2023-05-11T03:23:19.599756
| 2023-04-29T20:38:15
| 2023-04-29T20:45:13
| 20,547,850
| 130
| 63
|
Apache-2.0
| 2021-08-19T16:52:18
| 2014-06-06T01:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 493
|
py
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.utils.domutils
import resources
import unittest
class ExternalTrac0186 (unittest.TestCase):
def testXBIngress (self):
instance = resources.XBIngress(match='all', action1='none', digits1='', action2='none', digits2='')
def testXBMatch (self):
instance = resources.XBMatch('all')
if '__main__' == __name__:
unittest.main()
|
[
"pab@pabigot.com"
] |
pab@pabigot.com
|
d8c137dda1852fc28941eac7e6a8c8a76905993e
|
9bde6cafb4273d721229448d115853ff2f5994a6
|
/myblog/blog/models.py
|
29739ca1865621b4e4224bca3f600e41f915a179
|
[] |
no_license
|
davejonesbkk/myblog
|
11eb30b4d75270b3e99f172f27f05ce31e318f93
|
4a5cbeb47154004ef239b16e63155997b1c9afe6
|
refs/heads/master
| 2021-01-17T17:43:28.465235
| 2016-05-31T02:02:07
| 2016-05-31T02:02:07
| 59,930,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
from django.db import models
from django_markdown.models import MarkdownField
from django.core.urlresolvers import reverse
class EntryQuerySet(models.QuerySet):
def published(self):
return self.filter(publish=True)
class Entry(models.Model):
title = models.CharField(max_length=200)
body = models.TextField()
slug = models.SlugField(max_length=200, unique=True)
publish = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
objects = EntryQuerySet.as_manager()
def __str__(self):
return self.title
class Meta:
verbose_name = 'Blog Entry'
verbose_name_plural = 'Blog Entries'
ordering = ["-created"]
|
[
"davejonesbkk@gmail.com"
] |
davejonesbkk@gmail.com
|
a6247ca012289c8bc806e6836e82eb8bd9df5793
|
9b01f09991618b13deeb75044c66a721253eba52
|
/Baysim.py
|
f066c87bfbada580e68b3787a8cd3935c53a8ec3
|
[] |
no_license
|
BlackDragonBayliss/question-bank-app
|
2f0c5e1fb87395c1e607064639637029a219c154
|
2a8d3c05cf554b092981c44b7f05271e83bdf4ae
|
refs/heads/master
| 2020-04-27T09:04:30.279021
| 2019-06-21T21:08:38
| 2019-06-21T21:08:38
| 174,199,735
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
from StateStoreComposite import StateStoreComposite
def main():
instanceStateStoreComposite = StateStoreComposite()
if __name__ == "__main__": main()
|
[
"wallacecarr4@gmail.com"
] |
wallacecarr4@gmail.com
|
58b16c0c8049c70a9146960656d3f2f7323cab0e
|
b251a605a8f4cf62970df3d7c2e75a46fc2445b2
|
/sva.py
|
aac2a7ae497d54e722449331dc4c7be943a41429
|
[] |
no_license
|
wheatfields/Q
|
f9fefed09cc598ab3feb872bc87f8dda27c166e1
|
a5dd5593b559c2ceae1d6d41337af944f5000e6f
|
refs/heads/main
| 2023-08-03T22:59:08.843170
| 2021-07-02T05:30:59
| 2021-07-02T05:30:59
| 376,154,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53,030
|
py
|
# -*- coding: utf-8 -*-
"""
@author: adamw
"""
import pandas as pd
class sva:
"""
Initialise with a path to the document & a sheet name.
"""
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
# initiate nested classes
self.dlr_parameters = self.dlr_parameters(path, sheet_name)
self.termination_rates = self.termination_rates(path, sheet_name)
self.stress_margins = self.stress_margins(path, sheet_name)
# =============================================================================
@classmethod
def table_import(cls, path,
sheet_name,
columns,
row_start, row_end,
header_row,
clear_first_n_rows = None,
index_col=None,
trim_column_names = None,
trim_index_name = None):
rows = row_end - row_start
if header_row is not None:
if isinstance(header_row, list)==False:
header = header_row - 1
else:
header = header_row
else:
header = None
# [Will always be reference 0]
table = pd.DataFrame(pd.read_excel(path,
sheet_name = sheet_name,
header = header,
usecols = columns,
nrows = rows,
index_col = index_col)
)
# SVA sometimes has a blank row between header and the start of the data
if clear_first_n_rows is not None:
table = table.iloc[clear_first_n_rows:]
# The way read_excel works means that if the header has already been 'seen'
# in previous columns, it will add a trailing '.[number]'. This removes it.
if trim_column_names is not None:
table.columns = table.columns.map(str)
table.columns = table.columns.str.replace(r'\.\d+$', '')
if trim_index_name is not None:
table.index.name = table.index.name.split('.')[0]
return table
# =============================================================================
# 1
def claims_reporting_delay(self):
"""
"""
claims_reporting_delay = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'B:J',
row_start = 11, row_end = 305,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return claims_reporting_delay
def claim_delay_factors(self):
"""
"""
claim_delay_factors = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'L:T',
row_start = 11, row_end = 305,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return claim_delay_factors
# =============================================================================
# 2
def claims_expense_reserve(self):
"""
"""
claims_expense_reserve = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'W:Z',
row_start = 11, row_end = 18,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return claims_expense_reserve
def operating_expense_perc_premium(self):
"""
"""
operating_expense_perc_premium = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AB:AE',
row_start = 11, row_end = 18,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return operating_expense_perc_premium
def budgeted_trustee_expense(self):
"""
"""
budgeted_trustee_expense = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AG:AI',
row_start = 11, row_end = 23,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return budgeted_trustee_expense
def projected_trustee_expense(self):
"""
"""
projected_trustee_expense = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AK:AM',
row_start = 11, row_end = 21,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return projected_trustee_expense
# =============================================================================
# 3
def ip_continuance_rates(self):
"""
"""
ip_continuance_rates = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AP:AT',
row_start = 11, row_end = 52,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
# Manually renaming index here.
ip_continuance_rates.index.rename('Month', inplace=True)
return ip_continuance_rates
class dlr_parameters:
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
def salary_replacement_ratio(self):
"""
"""
salary_replacement_ratio = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 12,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return salary_replacement_ratio
def continuing_retirement_benefit(self):
"""
"""
continuing_retirement_benefit = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 13,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return continuing_retirement_benefit
def assumed_avg_age_at_disability(self):
"""
"""
assumed_avg_age_at_disability = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 14,
header_row = 11,
clear_first_n_rows = 2,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return assumed_avg_age_at_disability
def assumed_default_salary(self):
"""
"""
assumed_default_salary = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 15,
header_row = 11,
clear_first_n_rows = 3,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return assumed_default_salary
def payment_ratio(self):
"""
"""
payment_ratio = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 16,
header_row = 11,
clear_first_n_rows = 4,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return payment_ratio
def reopened_claims_reserves_loading(self):
"""
"""
reopened_claims_reserves_loading = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 17,
header_row = 11,
clear_first_n_rows = 5,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return reopened_claims_reserves_loading
def claim_index_rate(self):
"""
"""
claim_index_rate = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 18,
header_row = 11,
clear_first_n_rows = 6,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return claim_index_rate
def benefit_indexation_month(self):
"""
"""
benefit_indexation_month = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 19,
header_row = 11,
clear_first_n_rows = 7,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return benefit_indexation_month
def ip_ibnr_adjustment(self):
"""
"""
ip_ibnr_adjustment = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AY:AZ',
row_start = 11, row_end = 15,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return ip_ibnr_adjustment
# =============================================================================
# 4
def appeals_reserve_assumptions(self):
"""
"""
appeals_reserve_assumptions = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BC:BE',
row_start = 11, row_end = 15,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return appeals_reserve_assumptions
def perc_of_appealed_claims_accepted(self):
"""
"""
perc_of_appealed_claims_accepted= self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BC:BE',
row_start = 11, row_end = 17,
header_row = 11,
clear_first_n_rows = 5,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
perc_of_appealed_claims_accepted.rename(index={0:'GOV', 1:'NONGOV'}, inplace=True)
return perc_of_appealed_claims_accepted
# =============================================================================
# 5
def decline_rate(self):
"""
"""
decline_rate = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BH:BK',
row_start = 11, row_end = 12,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return decline_rate
def decline_rate_delay(self):
"""
"""
decline_rate_delay = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BH:BI',
row_start = 14, row_end = 21,
header_row = 14,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return decline_rate_delay
def simultaneous_ip_tpd_decline(self):
"""
"""
simultaneous_ip_tpd_decline = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BK:BM',
row_start = 14, row_end = 22,
header_row = 14,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return simultaneous_ip_tpd_decline
# =============================================================================
# 6
def expected_loss_ratio_gov(self):
"""
"""
expected_loss_ratio_gov = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BP:BS',
row_start = 11, row_end = 84,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return expected_loss_ratio_gov
def expected_loss_ratio_nongov(self):
"""
"""
expected_loss_ratio_nongov = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BU:BX',
row_start = 11, row_end = 84,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return expected_loss_ratio_nongov
# =============================================================================
# 7
def payment_delay_factors(self):
"""
"""
payment_delay_factors = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CA:CG',
row_start = 11, row_end = 35,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return payment_delay_factors
# 7
def payment_delay_factors_discrete(self):
"""
"""
payment_delay_factors_discrete = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CI:CO',
row_start = 11, row_end = 35,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return payment_delay_factors_discrete
# =============================================================================
# 8
def average_claim_size(self):
"""
"""
average_claim_size = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CR:DA',
row_start = 11, row_end = 12,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return average_claim_size
def acs_ip_linked_tpd(self):
"""
"""
acs_ip_linked_tpd = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CR:CV',
row_start = 20, row_end = 32,
header_row = 20,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return acs_ip_linked_tpd
def acs_by_notification_delay_q(self):
"""
"""
acs_by_notification_delay_q = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CW:CY',
row_start = 20, row_end = 85,
header_row = 20,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return acs_by_notification_delay_q
def perc_si_at_ip_doe(self):
"""
"""
perc_si_at_ip_doe = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CZ:DA',
row_start = 19, row_end = 20,
header_row = 19,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = None,
trim_index_name = None)
return perc_si_at_ip_doe
def tpd_si_scales_by_age(self):
"""
"""
tpd_si_scales_by_age = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CZ:DA',
row_start = 22, row_end = 76,
header_row = 22,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return tpd_si_scales_by_age
# =============================================================================
# 9
class termination_rates:
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
def age_rates(self):
"""
"""
age_rates = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'DD:DF',
row_start = 11, row_end = 57,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return age_rates
def duration_of_claim_g_wp_oc(self):
"""
"""
duration_of_claim_g_wp_oc = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'DH:EF',
row_start = 10, row_end = 134,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
# Data adjustments here to correctly index table.
# Note: Consider 'melting' multi-index tables for use in models.
df = duration_of_claim_g_wp_oc.copy()
# info = duration_of_claim_g_wp_oc[1].copy()
index = df[0:4]
index = index.fillna(method='ffill', axis=1)
df = df[4:]
df.columns = pd.MultiIndex.from_arrays(index.values)
df.index.name = 'Duration of Claim (months)'
# duration_of_claim_g_wp_oc = tuple([df, info])
duration_of_claim_g_wp_oc = df
return duration_of_claim_g_wp_oc
def smoker_status(self):
"""
"""
smoker_status = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'EH:EI',
row_start = 10, row_end = 12,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
smoker_status.rename(columns={smoker_status.columns[0]: "smoker_status" }, inplace = True)
return smoker_status
def benefit_type(self):
"""
"""
benefit_type = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'EK:EL',
row_start = 10, row_end = 12,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
benefit_type.rename(columns={benefit_type.columns[0]: "benefit_type" }, inplace = True)
return benefit_type
def policy_duration_factor(self):
"""
"""
policy_duration_factor = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'EN:ER',
row_start = 10, row_end = 23,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
# Data adjustments here to correctly index table.
# Note: Consider 'melting' multi-index tables for use in models.
df = policy_duration_factor.copy()
# info = policy_duration_factor[1].copy()
index = df[0:2]
index = index.fillna(method='ffill', axis=1)
df = df[2:]
df.columns = pd.MultiIndex.from_arrays(index.values)
df.index.name = 'Curtate Policy Year'
# policy_duration_factor = tuple([df, info])
policy_duration_factor = df
return policy_duration_factor
# =============================================================================
# 10
class stress_margins:
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
self.random = self.random(path, sheet_name)
self.future = self.future(path, sheet_name)
class random:
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
def random_all(self):
random_all = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 16, row_end = 26,
header_row = 16,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return random_all
def death(self):
death = self.random_all().iloc[0,0]
return death
def death_ibnr(self):
death_ibnr = self.random_all().iloc[1,0]
return death_ibnr
def death_rbna(self):
death_rbna = self.random_all().iloc[2,0]
return death_rbna
def tpd(self):
tpd = self.random_all().iloc[3,0]
return tpd
def tpd_ibnr(self):
tpd_ibnr = self.random_all().iloc[4,0]
return tpd_ibnr
def tpd_rbna(self):
tpd_rbna = self.random_all().iloc[5,0]
return tpd_rbna
def ip(self):
ip = self.random_all().iloc[6,0]
return ip
def ip_dlr(self):
ip_dlr = self.random_all().iloc[7,0]
return ip_dlr
def ip_ibnr(self):
ip_ibnr = self.random_all().iloc[8,0]
return ip_ibnr
def ip_rbna(self):
ip_rbna = self.random_all().iloc[9,0]
return ip_rbna
class future:
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
def future_all(self):
future_all = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 27, row_end = 37,
header_row = 27,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return future_all
def death(self):
death = self.future_all().iloc[0,0]
return death
def death_ibnr(self):
death_ibnr = self.future_all().iloc[1,0]
return death_ibnr
def death_rbna(self):
death_rbna = self.future_all().iloc[2,0]
return death_rbna
def tpd(self):
tpd = self.future_all().iloc[3,0]
return tpd
def tpd_ibnr(self):
tpd_ibnr = self.future_all().iloc[4,0]
return tpd_ibnr
def tpd_rbna(self):
tpd_rbna = self.future_all().iloc[5,0]
return tpd_rbna
def ip(self):
ip = self.future_all().iloc[6,0]
return ip
def ip_dlr(self):
ip_dlr = self.future_all().iloc[7,0]
return ip_dlr
def ip_ibnr(self):
ip_ibnr = self.future_all().iloc[8,0]
return ip_ibnr
def ip_rbna(self):
ip_rbna = self.future_all().iloc[9,0]
return ip_rbna
def time_to_react_future(self):
time_to_react_future = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 39, row_end = 40,
header_row = 39,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return time_to_react_future
def event_pandemic_death(self):
event_pandemic_death = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 42, row_end = 46,
header_row = 42,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return event_pandemic_death
def event_pandemic_tpd(self):
event_pandemic_tpd = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 42, row_end = 46,
header_row = 42,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[1,0]
return event_pandemic_tpd
def event_pandemic_ip(self):
event_pandemic_ip = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 42, row_end = 46,
header_row = 42,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[2,0]
return event_pandemic_ip
def prop_disabled_after_wp(self):
prop_disabled_after_wp = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 42, row_end = 46,
header_row = 42,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[3,0]
return prop_disabled_after_wp
def lapse_stress(self):
lapse_stress = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 48, row_end = 50,
header_row = 48,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return lapse_stress
def servicing_expense_stress(self):
servicing_expense_stress = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 48, row_end = 50,
header_row = 48,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[1,0]
return servicing_expense_stress
# =============================================================================
# 11
def reinsurance(self):
reinsurance = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FT:FY',
row_start = 11, row_end = 14,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return reinsurance
def catastrophe_pl(self):
catastrophe_pl = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FT:FY',
row_start = 21, row_end = 23,
header_row = 21,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0, 4]
return catastrophe_pl
def catastrophe_capital(self):
catastrophe_capital = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FT:FY',
row_start = 21, row_end = 23,
header_row = 21,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[1, 4]
return catastrophe_capital
# =============================================================================
# 12
def par_loadings(self):
par_loadings = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GB:GC',
row_start = 10, row_end = 11,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return par_loadings
def stamp_duty(self):
stamp_duty = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GB:GC',
row_start = 13, row_end = 15,
header_row = 13,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return stamp_duty
def investment_earnings_b0(self):
investment_earnings_b0 = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GB:GC',
row_start = 16, row_end = 17,
header_row = 16,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return investment_earnings_b0
# =============================================================================
# 13
def contingency_margin_start(self):
contingency_margin_start = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GF:GG',
row_start = 10, row_end = 11,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return contingency_margin_start
def contingency_margin(self):
contingency_margin = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GF:GH',
row_start = 13, row_end = 14,
header_row = 13,
clear_first_n_rows = None,
index_col = None,
trim_column_names = True,
trim_index_name = None)
return contingency_margin
# =============================================================================
# 14
def notification_delay(self):
notification_delay = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GK:GM',
row_start = 11, row_end = 12,
header_row = 11,
clear_first_n_rows = None,
index_col = None,
trim_column_names = True,
trim_index_name = None)
return notification_delay
# =============================================================================
# 15
def cmm_impact_termination_rates_start(self):
cmm_impact_termination_rates = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GP:GQ',
row_start = 11, row_end = 13,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return cmm_impact_termination_rates
def cmm_impact_termination_rates_perc(self):
cmm_impact_termination_rates_perc = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GP:GQ',
row_start = 11, row_end = 13,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[1,0]
return cmm_impact_termination_rates_perc
# =============================================================================
# 16
def covid19_impact_termination_rates(self):
covid19_impact_termination_rates = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GS:GT',
row_start = 11, row_end = 16,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return covid19_impact_termination_rates
# =============================================================================
# 17
def covid19_adjustment_ip_dlr(self):
covid19_adjustment_ip_dlr = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GV:GW',
row_start = 11, row_end = 27,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return covid19_adjustment_ip_dlr
# =============================================================================
# 18
def expected_lr_combined_capital(self):
expected_lr_combined_capital = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GY:HB',
row_start = 11, row_end = 90,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return expected_lr_combined_capital
# =============================================================================
# 19
def gov_tpd_linked_to_ip(self):
gov_tpd_linked_to_ip = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HD:HF',
row_start = 11, row_end = 23,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return gov_tpd_linked_to_ip
def tpd_linked_reporting_delay(self):
tpd_linked_reporting_delay = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HH:HI',
row_start = 11, row_end = 65,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return tpd_linked_reporting_delay
def conversion_rates(self):
conversion_rates = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HK:HM',
row_start = 11, row_end = 26,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return conversion_rates
# =============================================================================
# 20
def claims_reporting_delay_tpd_ip(self):
claims_reporting_delay_tpd_ip = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HO:HQ',
row_start = 11, row_end = 305,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return claims_reporting_delay_tpd_ip
def claims_delay_factors_tpd_ip(self):
claims_delay_factors_tpd_ip = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HS:HU',
row_start = 11, row_end = 305,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return claims_delay_factors_tpd_ip
# =============================================================================
# 21
def missing_subcase_reserve(self):
missing_subcase_reserve = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HW:HX',
row_start = 11, row_end = 15,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return missing_subcase_reserve
# =============================================================================
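# Illustrative usage sketch (added; the workbook path and sheet name below are
# assumptions, not taken from the original repository):
#   book = sva(path='sva_assumptions.xlsx', sheet_name='SVA')
#   continuance = book.ip_continuance_rates()
#   death_margin = book.stress_margins.random.death()
#   age_rates = book.termination_rates.age_rates()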
|
[
"68405635+wheatfields@users.noreply.github.com"
] |
68405635+wheatfields@users.noreply.github.com
|
d165a083b3e3a41120522e9b4cd22520c188909d
|
029fa717816e977e736100128168c1c66161541d
|
/aries_cloudagent/wallet/tests/test_key_pair.py
|
b81062d8563ac7d8651bf77dad80875a2f3da169
|
[
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] |
permissive
|
estrehle/aries-cloudagent-python
|
5cd0ac23851268d435b9eafe6b59e6efdb26ad90
|
1460b2d32c933944b4677cf25a78c4ace07346c8
|
refs/heads/main
| 2023-09-04T10:31:36.141037
| 2021-11-10T12:16:16
| 2021-11-10T12:16:16
| 424,557,794
| 1
| 0
|
Apache-2.0
| 2021-11-04T10:41:01
| 2021-11-04T10:41:01
| null |
UTF-8
|
Python
| false
| false
| 3,954
|
py
|
from asynctest import TestCase as AsyncTestCase
import json
from ...storage.error import StorageNotFoundError
from ..util import bytes_to_b58
from ..key_type import KeyType
from ...core.in_memory import InMemoryProfile
from ...storage.in_memory import InMemoryStorage
from ..key_pair import KeyPairStorageManager, KEY_PAIR_STORAGE_TYPE
class TestKeyPairStorageManager(AsyncTestCase):
test_public_key = b"somepublickeybytes"
test_secret = b"verysecretkey"
async def setUp(self):
self.profile = InMemoryProfile.test_profile()
self.store = InMemoryStorage(self.profile)
self.key_pair_mgr = KeyPairStorageManager(self.store)
async def test_create_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert record.tags == {"verkey": verkey, "key_type": KeyType.ED25519.key_type}
assert value["verkey"] == verkey
assert value["secret_key"] == bytes_to_b58(self.test_secret)
assert value["metadata"] == {}
assert value["key_type"] == KeyType.ED25519.key_type
async def test_get_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
key_pair = await self.key_pair_mgr.get_key_pair(verkey)
assert key_pair["verkey"] == verkey
assert key_pair["secret_key"] == bytes_to_b58(self.test_secret)
assert key_pair["metadata"] == {}
assert key_pair["key_type"] == KeyType.ED25519.key_type
async def test_get_key_pair_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.get_key_pair("not_existing_verkey")
async def test_delete_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
await self.key_pair_mgr.delete_key_pair(verkey)
# should be deleted now
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.delete_key_pair(verkey)
async def test_delete_key_pair_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.delete_key_pair("non_existing_verkey")
async def test_update_key_pair_metadata(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
metadata={"some": "data"},
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert value["metadata"] == {"some": "data"}
await self.key_pair_mgr.update_key_pair_metadata(verkey, {"some_other": "data"})
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert value["metadata"] == {"some_other": "data"}
async def test_update_key_pair_metadata_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.update_key_pair_metadata("non_existing_verkey", {})
|
[
"timo@animo.id"
] |
timo@animo.id
|
89eb8d3c440b20fc430683ddb303868c4dfccc4a
|
480459352928aa307317bac9d7c8f0efe427023c
|
/getting_started/config.py
|
e7586e40cad89565f5e9aa9ede9c5d8236c38670
|
[
"MIT-0"
] |
permissive
|
seeq12/amazon-lookout-for-equipment
|
031a265095d7c153d086af6c2b97c17c2bbf835b
|
cb760aa0f9e2dad8fce13ed7c50282a10e320b40
|
refs/heads/main
| 2023-06-28T20:30:55.880074
| 2021-07-27T15:00:00
| 2021-07-27T15:00:00
| 390,020,084
| 0
| 0
| null | 2021-07-27T14:45:26
| 2021-07-27T14:45:25
| null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
# Update the name of the bucket you want to use
# to store the intermediate results of this getting
# started:
BUCKET = '<<YOUR_BUCKET>>'
# You can leave these other parameters to these
# default values:
PREFIX_TRAINING = 'getting_started/training-data/'
PREFIX_LABEL = 'getting_started/label-data/'
PREFIX_INFERENCE = 'getting_started/inference-data'
DATASET_NAME = 'getting-started-pump'
MODEL_NAME = f'{DATASET_NAME}-model'
INFERENCE_SCHEDULER_NAME = f'{DATASET_NAME}-scheduler'
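# Illustrative sketch (assumed usage, not taken from the original notebooks):
# these constants are typically combined into S3 locations, e.g.
#   training_data_uri = f's3://{BUCKET}/{PREFIX_TRAINING}'
#   label_data_uri = f's3://{BUCKET}/{PREFIX_LABEL}'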
|
[
"michoara@amazon.fr"
] |
michoara@amazon.fr
|
447a75ff7f1e949a3c268918e94f8ab08d58da0f
|
68cd659b44f57adf266dd37789bd1da31f61670d
|
/2020-01/python/18188_다오의데이트.py
|
7c55c44e597a14f68e338a66b4a4458c5ab95c41
|
[] |
no_license
|
01090841589/solved_problem
|
c0c6f5a46e4d48860dccb3b0288aa5b56868fbca
|
bbea2f31e5fe36cad100bc514eacd83545fb25b1
|
refs/heads/master
| 2023-07-02T23:55:51.631478
| 2021-08-04T13:57:00
| 2021-08-04T13:57:00
| 197,157,830
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
import sys
sys.stdin = open("다오의데이트.txt")
DIR = [[-1, 0], [0, 1], [1, 0], [0, -1]]
def go_dao(y, x, k, route):
global result, rts
if result:
return
if k >= A:
return
flag = 1
for i in range(4):
if can[k][i]:
Y = y+DIR[i][0]
X = x+DIR[i][1]
if 0 <= Y < H and 0 <= X < W:
if MAP[Y][X] != '@':
if MAP[Y][X] == 'Z':
rts = route+arr[i]
result = 1
return
flag = 0
go_dao(Y, X, k+1, route+arr[i])
H, W = map(int, input().split())
MAP = [list(input()) for _ in range(H)]
for h in range(H):
for w in range(W):
if MAP[h][w] == 'D':
y = h
x = w
result = 0
rts = ''
A = int(input())
arr = ['W', 'D', 'S', 'A']
can = [[0, 0, 0, 0] for _ in range(A)]
for i in range(A):
B, C = map(str, input().split())
can[i][arr.index(B)] = 1
can[i][arr.index(C)] = 1
go_dao(y, x, 0, '')
if result:
print("YES")
print(rts)
else:
print("NO")
|
[
"chanchanhwan@naver.com"
] |
chanchanhwan@naver.com
|
1ef85ed8de5af85610939be3fd8aaef0b637de4c
|
2127976c32452664cbe5bc46e858f6c1059300fc
|
/spotify.py
|
f1e603f01287548e6f8b5a843597b714959fb8f9
|
[] |
no_license
|
Luis199/spotify
|
8c536680652d99b5b63c85859eb6b0e626107057
|
961fd0970305ee8bab8ed9105dad3c07a646a297
|
refs/heads/master
| 2023-02-09T00:57:01.231526
| 2021-01-05T19:59:16
| 2021-01-05T19:59:16
| 282,363,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
birdy_uri = 'spotify:artist:2WX2uTcsvV5OnS0inACecP'
spotify = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials())
results = spotify.artist_albums(birdy_uri, album_type='album')
albums = results['items']
while results['next']:
results = spotify.next(results)
albums.extend(results['items'])
for album in albums:
print(album['name'])
# export SPOTIPY_CLIENT_ID='3750a2d0a4494d3385dbbda87871bab2'
# export SPOTIPY_CLIENT_SECRET='81acd20ff18642b9b9c941d811dfa2de'
# export SPOTIPY_REDIRECT_URI='your-app-redirect-url'
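# Note (added): with no arguments, SpotifyClientCredentials() reads
# SPOTIPY_CLIENT_ID and SPOTIPY_CLIENT_SECRET from the environment, so the
# export lines above must be run in the shell before launching the script, e.g.
#   export SPOTIPY_CLIENT_ID='...'
#   export SPOTIPY_CLIENT_SECRET='...'
#   python spotify.py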
|
[
"luiscasado620@gmail.com"
] |
luiscasado620@gmail.com
|
f56222a598de1c4002c0712cef364ba7722e2078
|
6ddfd7082e9126a88ce9357250c96137af5228e5
|
/PIplot.py
|
cb18ffd6cc8c8676f6a12389cc7671e112066070
|
[] |
no_license
|
ryantro/ICE-Rb-Cell-Absorption-Spectrum-Plot
|
861ef2f970273c1b546319daf345d87be5e094e4
|
aca2f7d2145e18b9a8c9bec6c88fdf18d0b320f5
|
refs/heads/master
| 2022-11-25T13:28:47.851825
| 2020-07-31T22:07:35
| 2020-07-31T22:07:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,821
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 16:57:41 2020
@author: ryan.robinson
"""
import time
import os
from serial import serialwin32 as serial
import numpy as np
import sys, string, subprocess
import nidaqmx
class ICE:
def __init__(self,BoxNum,SlotNum):
self.BoxNum = int(BoxNum)
self.SlotNum = int(SlotNum)
return None
IceTimeout = .1 #Communication Timeout (seconds)
IceByteRead = 256 #Number of bytes to read on ser.read()
IceDelay = .01 #Delay in seconds after sending Ice Command to ensure execution
###Functions###
def setSlot(self,SlotNum):
self.SlotNum = SlotNum
print('Changed Slot To: '+str(SlotNum))
return None
def wait(self,num):
'''Forces program to wait num seconds.
Note: Shortest Delay--> 1ms'''
time.sleep(num)
return None
def IceSend(self, CommandInput):
'''Function that sends a serial string command to ICE Box
Input: ICE Box Number[int], ICE Slot Number[int], CommandInput[str]
Output: None (unless print line uncommented)/Read buffer always emptied!
Note 1: Enter a slot number outside range(1-8) and function sends command directly
to master board (ex. '#PowerOff' Command)
        Note 2: COM Port is opened/closed each time function is run'''
#Open Port w/ ICE COM Default Settings
IceSer = serial.Serial(port='COM'+str(int(self.BoxNum)),baudrate=115200,timeout=self.IceTimeout,parity='N',stopbits=1,bytesize=8)
self.wait(.001)
#Define Command and Send (perform read after each command to maintain synchronicity)
if int(self.SlotNum) in range(1,9): #If a Valid Slot Number is input, send command to slot num
#Define Commands
MasterCommand = str('#slave ' + str(int(self.SlotNum)) + '\r\n')
SlaveCommand = str(str(CommandInput) + '\r\n')
#Send Commands/Close Port
IceSer.write(MasterCommand.encode())
self.wait(self.IceDelay)
IceOutputSlave = IceSer.read(self.IceByteRead).decode() #Read Buffer
self.wait(self.IceDelay)
IceSer.write(SlaveCommand.encode())
self.wait(self.IceDelay)
IceOutputReturn = IceSer.read(self.IceByteRead).decode() #Read Buffer
self.wait(self.IceDelay)
IceSer.close() #Close COM Port
#Return Output
            print(' ')
            print('Master Board Return: ', IceOutputSlave)
            print('Slave Board Return: ', IceOutputReturn)
            return IceOutputReturn
        else: #Command sent only to Master Board (preceding '#', no slot num to specify)
            #Define Command
            MasterCommand = str('#' + str(CommandInput) + '\r\n')
            #Send Commands/Close Port
            IceSer.write(MasterCommand.encode())
            self.wait(self.IceDelay)
            IceOutputReturn = IceSer.read(self.IceByteRead).decode() #Read Buffer
            self.wait(self.IceDelay)
            IceSer.close() #Close COM Port
            #Return Output
            print(' ')
            print('Master Board Return: ', IceOutputReturn)
            return IceOutputReturn
# GET DATA FROM NI-DAQmx
def nidaxgrab():
with nidaqmx.Task() as task:
task.ai_channels.add_ai_voltage_chan("Dev1/ai0")
data = task.read(number_of_samples_per_channel=1)
power = ' '.join([str(elem) for elem in data])
return power
def CurrentSet(IB,current):
    return IB.IceSend('CurrSet '+str(current))
def makefolder(newpath):
if not os.path.exists(newpath):
os.makedirs(newpath)
return newpath
def loggingLoops(IB,iArray):
'''
Creates a directory and logs laser current and laser power.
The purpose of this is to find at which current mode-hops occur by seeing a sharp change in power
'''
logDir = makefolder(os.getcwd()+'\\testlogging\\'+time.strftime("%Y-%m-%d_%H-%M-%S"))
    print('Log Directory: %s' % logDir)
IB.IceSend('CurrLim 125')
### OPEN FILE ###
PIData = open(logDir+'\\PIData.csv', 'a+')
### LOGGING LOOPS ###
for i in iArray:
setCurrent = IB.IceSend('CurrSet '+str(i))
time.sleep(1) #Maybe this needs to be greater
        line = str(setCurrent)+','+str(nidaxgrab())+'\n'
print(line)
PIData.write(line)
### CLOSE FILE ###
PIData.close()
return None
def main():
BoxNum = input('Box Num: ')
SlotNum = input('Slot Num of CS1 Board: ')
IB = ICE(BoxNum,SlotNum)
iArray = np.linspace(0,100,100)
iArray = np.round(iArray,1)
loggingLoops(IB,iArray)
return None
if(__name__=="__main__"):
main()
|
[
"ryan.robinson@Vescent.local"
] |
ryan.robinson@Vescent.local
|
c1af50c6c4bae299368467230953b197828dfb68
|
8f4b481b2e92d4a29822d7ea4756d9d51af8ed10
|
/RDF/single_frame/rdf_drug_initial.py
|
47dac791467bd6943960044a52681100c84319f0
|
[] |
no_license
|
Zilu-Zhang/MD-simulation-data-analysis
|
fbe4d4b94ea3506dfa0fe084e7279ad364f0f108
|
21da1d96418a89f80fd827aef0f0206934046543
|
refs/heads/main
| 2023-05-30T16:16:09.314265
| 2021-06-08T13:20:03
| 2021-06-08T13:20:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
import mdtraj as md
import numpy as np
import os
import os.path
import pandas as pd
import openpyxl as pxl
from statistics import mean
from math import sqrt
def dis(ref, tag):
x = ref[0] - tag[0]
y = ref[1] - tag[1]
z = ref[2] - tag[2]
return sqrt(x**2 + y**2 + z**2)
n_frames = 1
for filename in os.listdir('./'):
if filename.endswith('.pdb'):
excipient_name = filename[17:-4]
traj = md.load(filename)
top = traj.topology
ori = 0
total = np.empty(12 * n_frames)
        i = 0 # changeable
start = 0
position = np.zeros((13,3))
for j in range(12):
res = top.residue(j)
length = res.n_atoms
x = mean(traj.xyz[i, start:start + length, 0])
y = mean(traj.xyz[i, start:start + length, 1])
z = mean(traj.xyz[i, start:start + length, 2])
position[j][:] = x, y, z
start += length
        position[-1][:] = mean(position[:-1, 0]), mean(position[:-1, 1]), mean(position[:-1, 2])
distance = np.zeros(12)
for h in range(12):
distance[h] = dis(position[-1], position[h])
total[ori:ori + 12] = distance
ori += 12
r_range = np.array([0, 5])
bin_width = 0.05
n_bins = int((r_range[1] - r_range[0]) / bin_width)
g_r, edges = np.histogram(total, range=r_range, bins=n_bins)
g_r = g_r / (12 * n_frames)
r = 0.5 * (edges[1:] + edges[:-1])
df = pd.DataFrame({'r': r, 'g_r': g_r})
if not os.path.isfile('rdf_drug_0.xlsx'):
df.to_excel('rdf_drug_0.xlsx', '%s' % excipient_name, index = True)
else:
excel_book = pxl.load_workbook('rdf_drug_0.xlsx')
with pd.ExcelWriter('rdf_drug_0.xlsx', engine = 'openpyxl') as writer:
writer.book = excel_book
writer.sheets = {worksheet.title: worksheet for worksheet in excel_book.worksheets}
df.to_excel(writer, '%s' % excipient_name, index = True)
writer.save()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b1918d70a960ef445232d6b1b21ffd44d9848c48
|
71c7683331a9037fda7254b3a7b1ffddd6a4c4c8
|
/Phys/Urania/examples/KsPiZeroMM_angularPDF.py
|
a83417211276319e5a15c72d57e48769a1b46477
|
[] |
no_license
|
pseyfert-cern-gitlab-backup/Urania
|
edc58ba4271089e55900f8bb4a5909e9e9c12d35
|
1b1c353ed5f1b45b3605990f60f49881b9785efd
|
refs/heads/master
| 2021-05-18T13:33:22.732970
| 2017-12-15T14:42:04
| 2017-12-15T14:42:04
| 251,259,622
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,684
|
py
|
from Urania.Helicity import *
from Urania.SympyBasic import *
from os import *
DiLeptonSpins = [0,1,2] ## DMS: I doubt we'll need 2, probably we'll only
## have Pwave (J=1) from the photon, plus maybe some S-wave (J=0)
### transAmp=1 : Changes to transversity amplitude basis
A = doKsPizeroMuMu(DiLeptonSpins ) ## This is now in Urania.Helicity
### massage a bit the expression to make it more suitable for fitting
pdf_split = DecomposeAmplitudes(A,TransAmplitudes.values())
phys = 0
for key in pdf_split: phys += StrongPhases(key)*pdf_split[key]
### change the free variables to cosines
x = USymbol("helcosthetaK","c\\theta_{K}",real = True)
y = USymbol("helcosthetaL", "c\\theta_{l}", real = True)
z = USymbol("helphi" , "\\phi", real = True)
CThL = Cos(ThetaL)
CThK = Cos(ThetaK)
def changeFreeVars(function):
### Phi now as in DTT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
function = function.subs( Sin(2*ThetaK), 2*Sin(ThetaK)*Cos(ThetaK))
function = function.subs( Sin(2*ThetaL), 2*Sin(ThetaL)*Cos(ThetaL))
function = function.subs( Cos(2*ThetaK), 2*Cos(ThetaK)**2 - 1)
function = function.subs( Cos(2*ThetaL), 2*Cos(ThetaL)**2 - 1)
function = function.subs( Sin(ThetaK), Sqrt(1-Cos(ThetaK)**2))
function = function.subs( Sin(ThetaL), Sqrt(1-Cos(ThetaL)**2))
function = function.subs([(CThK,x),(CThL,y), (Phi, -z)])
return function
func = changeFreeVars(phys)
### Print out to a latex document
from Urania.LatexFunctions import *
flatex = file("Kspizeromm_PDF.tex","w")
begintex(flatex)
begin_multline(flatex)
i = 0
for key in pdf_split.keys():
if i > 20:
i = 0
multline_break(flatex)
if pdf_split[key]:
flatex.write(Ulatex(key) + "\t" + Ulatex(pdf_split[key]) + "\\\\" + "\n")
i += 1
end_multline(flatex)
flatex.write("\\end{document}\n")
flatex.close()
system("pdflatex " + "Kspizeromm_PDF")
print "angular function saved in Kspizeromm_PDF.pdf"
print "Now making RooFit class as well"
##BREAK
##### Generate and compile a fitting class corresponding to "A"
### Trial 1, w/o analytical integrals
from Urania.RooInterfaces import *
potential_list = [x,y,z]+TransAmpModuli.values() + TransAmpPhases.values()
final_list = []
for thing in potential_list:
if thing in func.atoms(): final_list.append(thing)
op = RooClassGenerator(func, final_list ,"RooKspizeroMM")
### Define intermediate variables to be calculated once
op.makePdf(integrable = 1)
op.doIntegral(1,(y,-1,1))#,(y,-1,1),(z,-Pi,Pi))
##op.doIntegral(2,(x,-1,1),(y,-1,1))
##op.doIntegral(3,(x,-1,1),(z,-Pi,Pi))
##op.doIntegral(4,(y,-1,1),(z,-Pi,Pi))
op.overwrite()
op.invoke()
|
[
"liblhcb@cern.ch"
] |
liblhcb@cern.ch
|
3e35560a675840b2ed59a45d39e280ce612af5c6
|
4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5
|
/suning/api/union/UnionInfomationGetRequest.py
|
5a52d242f32e5e4c7c3d65d8e1872c3832f9291a
|
[] |
no_license
|
shijingyu/sunningAPI
|
241f33b0660dc84635ce39688fed499f5c57a5da
|
4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5
|
refs/heads/master
| 2020-04-24T22:15:11.584028
| 2019-02-24T06:41:20
| 2019-02-24T06:41:20
| 172,305,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# -*- coding: utf-8 -*-
'''
Created on 2016-1-27
@author: suning
'''
from suning.api.abstract import AbstractApi
class UnionInfomationGetRequest(AbstractApi):
'''
'''
def __init__(self):
AbstractApi.__init__(self)
self.goodsCode = None
self.setParamRule({
'goodsCode':{'allow_empty':False}
})
def getApiBizName(self):
return 'getUnionInfomation'
def getApiMethod(self):
return 'suning.netalliance.unioninfomation.get'
|
[
"945090896@qq.com"
] |
945090896@qq.com
|
2916961de45167313f15922e1456df4053e14745
|
afe57be84b5dde07967be0e23f677ed85ab8d4da
|
/posts/urls.py
|
a1c411fb775d8e4ef1bf807f6402c87b547e111e
|
[] |
no_license
|
furgot100/CarHub
|
0c3fdab1529c589e04eabe94c615ce953d4501b1
|
417de07dce488b50971396d200c721e0869382a2
|
refs/heads/master
| 2022-04-28T10:27:47.879441
| 2020-04-22T18:24:54
| 2020-04-22T18:24:54
| 247,791,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
from django.urls import path
from .views import PostCreateView, PostDetailView, PostListView, HomeView, PostDeleteView, ProductListView, ProductDetailView, ProductCreateView, EventListView, EventDetailView, EventCreateView
from django.conf import settings
from django.conf.urls.static import static
app_name = 'posts'
urlpatterns = [
path('', HomeView.as_view(), name="home"),
path('blog/', PostListView.as_view(), name='post-list-page'),
path('new/', PostCreateView.as_view(), name='post-new-page' ),
path('blog/<str:slug>/', PostDetailView.as_view(), name='post-details-page'),
# path('<slug>/delete', PostDeleteView.as_view(), name='post-delete-page')
path('store/', ProductListView.as_view(), name="store-list"),
path('store/<str:slug>/', ProductDetailView.as_view(), name='store-item'),
path('store/new', ProductCreateView.as_view(), name='store-new'),
path('event/', EventListView.as_view(), name="event-list"),
path('event/<str:slug>/', EventDetailView.as_view(), name="event-detail"),
path('event/new', EventCreateView.as_view(), name='event-new'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"frtsang40@gmail.com"
] |
frtsang40@gmail.com
|
ba96f4130d8855366f57a4652b61e6a6af74ad00
|
b7dec7dcffc5290e8f7856baccdb42d84e9e11e8
|
/thesis/urls.py
|
bcb9bc2faf6e5143e317aaa599f36e9f70626730
|
[] |
no_license
|
lthebe/cp_thesis
|
406fd5441f7e0944ebf9e0439c9ce3a16cd7df63
|
573f9c339e57f33895e9924b04f3792ceb50e9e1
|
refs/heads/master
| 2021-06-14T13:27:18.702167
| 2017-01-31T13:56:14
| 2017-01-31T13:56:14
| 80,521,434
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
"""thesis URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from support.views import SupportOrderView, SupportFinalCheckoutView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('social.apps.django_app.urls', namespace='social')),
url(r'^community/', include('community.urls')),
url(r'', include('people.urls', namespace='people')),
url(r'^posts/', include("posts.urls", namespace="posts")),
url(r'^(?P<pk>\d+)/support/', SupportOrderView.as_view(), name='sponser'),
url(r'^finalize-support/', SupportFinalCheckoutView.as_view(), name="finalize-support"),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"laxmi.thebe@gmail.com"
] |
laxmi.thebe@gmail.com
|
bf42c98bb55e0b61192663d6f96ce710d0f07d01
|
eecc738c416a9ed5ccac250cb1d676a7f104d2fe
|
/landmarkEmo/dontuse/test.py
|
256c616a63876e779034b5636e4b4a24dc3e1020
|
[] |
no_license
|
cosmic119/CNN
|
b331d35e048fd24ad73dcbd5d0481220314d89c2
|
a1016323ef2f89020d793fe66e0d4db850a0359a
|
refs/heads/master
| 2021-04-15T03:33:22.834015
| 2018-03-23T02:22:36
| 2018-03-23T02:22:36
| 126,423,542
| 0
| 0
| null | 2018-03-23T02:44:01
| 2018-03-23T02:44:01
| null |
UTF-8
|
Python
| false
| false
| 10,279
|
py
|
# -*- coding: utf-8 -*-
"""
niektemme/tensorflow-mnist-predict 를 참조하였음
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download the data needed for the test program from the repository
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()
"""
모델 생성에 필요한 데이터 정의
x : 인풋레이어에 사용할 변수 정의
y : 아웃풋레이어에 사용할 변수 정의
w : 784 X 10 개의 초기값 0을 갖는 메트릭스 생성
b : 10개짜리 배열 생성
y = x * w + b
x (784) * w(784*10) = x*w(10)
x*w(10) + b(10) = y(10)
위에처럼 메트릭스 연산이 수행되기 때문에 위와 같이 데이터 사이즈를 잡은 것이다.
"""
x = tf.placeholder(tf.float32, [None, 784])
y_= tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Method that creates and returns initial values of the desired matrix shape
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
# Returns a tensor of the desired shape with all values initialized to 0.1
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
"""
필터에 대해서 설명하고자 하면 CNN 의 동작 원리를 설명해야만한다.
[5, 5, 1, 32] 는 5X5 사이즈의 단위 필터를 사용해서 인풋데이터
(여기서는 28 X 28 사이즈 메트릭스)를 CNN연산을 하겠다는 것이다.
Stride 가 [1,1] 이라고 하면 28X28크기 행렬을 5X5 사이즈의
메트릭스로가로세로 한칸씩 이동하면서 필터에 연산하겠다는 의미가 된다.
결과적으로 아웃풋은 24X24 사이즈가 된다. 왜냐하면 5X5 사이즈의
메트릭스로 이동할 수 있는 한계가 있기 때문이다.
(메트릭스 끝부분 까지 이동할 수 없음)
이러한 경우 패딩 옵션을 사용하여 0으로 태두리를 채워넣어 메특릭스
사이즈를 동일하게 유지할 수도 있다
참조:http://deeplearning4j.org/convolutionalnets.html
"""
def conv2d(x, W):
# tf.nn.conv2d(input, filter, strides, padding, use_cudnn
# _on_gpu=None, data_format=None, name=None)
    # strides = [1, stride, stride, 1]: the step size used to move the filter (mask) matrix
    # padding='SAME' adds padding so the matrix does not shrink at the next level
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
"""
보통은 이렇게 생성한 메트릭스를 max pooling 을 사용하여 다시 한번 간소화한다.
위에서 필터링이 마스크에 대한 & 연산이었다면, max Pooling은 메트릭스에서 가장
큰 값 하나만 뽑아서 사용하는 방법이다. 아래와 같은 max pooling 정의
(mask [2,2] , stride[2,2] )를 4X4 메트릭스에 적용하면 2X2 메트릭스가 될 것이다
"""
# x : [batch, height, width, channels]
# Finds and extracts the largest value in each 2x2 window, moving 2 steps horizontally and vertically
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
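# A short worked example of the shapes implied by the comments above (a sketch,
# assuming the SAME padding and 2x2 / stride-2 pooling used in this script):
#   input image:          28 x 28 x 1
#   after conv2d (SAME):  28 x 28 x 32   (24 x 24 without padding, since 28 - 5 + 1 = 24)
#   after max_pool_2x2:   14 x 14 x 32
#   second conv + pool:    7 x  7 x 64   -> flattened to 7 * 7 * 64 = 3136 values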
# [filter_height, filter_width, in_channels, out_channels]
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
"""
Layer 1
아래의 3줄로써 인풋 레이어에 대한 정의는 완료된다. 28X28 행렬 하나를 넣고
28X28행렬(원래는 24X24가 되지만 Padding 적용) 32개를 만들고 다시 max pool
(2,2)를 사용하여 14X14 메트릭스 32개를 리턴하는 레이어를 정의하였다
메트릭스 단위로 정리하면 인풋 1개, 아웃풋 32개 이다 트
"""
# Reshape the input data matrix. Multiple samples of 784 values each are reshaped
# to the form [-1, 28, 28, 1]: -1 for however many samples there are, each one a
# [28x28] matrix, where every pixel holds a single value rather than RGB data.
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
"""
Layer 2
1번 레이어에서 아웃풋을 32개를 전달하였음으로 2번 레이어의 인풋은
14X14 메트릭스 32개 그리고 아웃풋은 동일한 max pool 을 적용하여 8x8 메트릭스
64개를 출력한다. 정리하면 인풋 32개(14X14) 아웃풋 64개(7X7) 이 된다
"""
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
"""
Layer 3
현재 최종 데이터의 수는 7 X 7 X 64 = 3136 개 이지만 1024 개 를 사용한다
1024는 임의의 선택 값이다
"""
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
"""
Drop Out
Dropout 은 데이터 간의 연과 관계가 큰 데이터들을 제거함으로써 과적합 문제를
해결하는 기법의 하나이다.
"""
# drop out 연산의 결과를 담을 변수
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
"""
Out put Layer
마지막으로 1024개의 노드에서 10개의 (0~9까지 숫자)에 대한 확률을 Soft Max 를
이용하여 도출할 수 있도록 한다
"""
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# Define loss and optimizer
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
"""
Train & Save Model
"""
saver = tf.train.Saver()
sess.run(tf.initialize_all_variables())
# Train iteratively: 20000 steps with batches of 50
for i in range(20000):
batch = mnist.train.next_batch(50)
    # Check the model's accuracy periodically (every 100 steps here)
if i%100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g"%(i, train_accuracy))
    # batch[0]: 28x28 images, batch[1]: digit labels, keep_prob: dropout keep ratio
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
# Save the values of all variables used by the model
save_path = saver.save(sess, "model2.ckpt")
print ("Model saved in file: ", save_path)
# Finally, check the accuracy of the model on the test set
print("test accuracy %g"%accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
sess.close()
|
[
"gksthf3178@naver.com"
] |
gksthf3178@naver.com
|
213fc24bf448ac094d3843b30e9c24e1aaa77fcc
|
402b45344b310c76c37f354c30a82d1934667735
|
/crawl_push.py
|
2435ece7d58e2e06518fb2919706a8d32263b4d1
|
[] |
no_license
|
yuktmitash21/Crawler
|
5cba755cba669a55d9e38d5e95166760cc0417cd
|
d9fb5710f5ca3dea9c3516e0ed7b963d4a07ff83
|
refs/heads/main
| 2023-03-13T23:25:45.524622
| 2021-02-07T13:24:53
| 2021-02-07T13:24:53
| 336,657,238
| 0
| 0
| null | 2021-02-07T13:24:54
| 2021-02-06T23:20:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
import pandas as pd
import requests #Pushshift accesses Reddit via an url so this is needed
import json #JSON manipulation
import csv #To Convert final table into a csv file to save to your machine
import time
import datetime
def getPushshiftData(query, after, before, limit):
url = 'https://api.pushshift.io/reddit/search/submission/?&before=' + before + '&after=' + after + '&q=' + query + '&sort_type=score&sort=desc&subreddit=wallstreetbets&size=' + limit
#Print URL to show user
print(url)
#Request URL
r = requests.get(url)
#Load JSON data from webpage into data variable
data = json.loads(r.text)
#return the data element which contains all the submissions data
return data['data']
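# A small sketch of the URL this helper builds (the timestamps below are made-up examples):
# getPushshiftData('GME', '1609804800', '1609891200', '500') requests
# https://api.pushshift.io/reddit/search/submission/?&before=1609891200&after=1609804800&q=GME&sort_type=score&sort=desc&subreddit=wallstreetbets&size=500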
tickers = ["GME", "SPY", "AMC", "BB", "TSLA", "PLNTR", "CRSR", "NOK", "AAPL", "SNAP"]
before = datetime.datetime(2021, 1, 5)
later = before + datetime.timedelta(days=1)
map = {}
allData = []
for ticker in tickers:
for i in range(0, 30):
data = getPushshiftData(ticker, str(int(before.timestamp())), str(int(later.timestamp())), str(1000))
print (len(data), ticker)
for dat in data:
allData.append({ 'score': dat.get('score') or 0, 'num_comments': dat.get('num_comments') or 0, 'created': dat.get('created_utc') or 0, 'title': dat.get('title') or '', 'body': dat.get('selftext') or '', 'upvote_ratio': dat.get('upvote_ratio') or ''})
before = later
later += datetime.timedelta(days=1)
time.sleep(2)
map[ticker] = allData
allData = []
before = datetime.datetime(2021, 1, 5)
later = before + datetime.timedelta(days=1)
json_string = json.dumps(map)
with open('data-gme.json', 'w') as f:
json.dump(json_string, f)
|
[
"ymitash3@gatech.edu"
] |
ymitash3@gatech.edu
|
e8492bd500e419e50fa3815209d4889eb2e4e971
|
c761f3fbce728e61cbcf5179f1d3f27e1e5625cd
|
/register_key.py
|
1328baddc2fe4d7e5f91b2052b07daa49e53649f
|
[] |
no_license
|
philopon/usermon
|
16033d41436efe2cf4971bcd3b25f99cf82de318
|
7f97db09a65466e2133d4304f9fe5ba212299598
|
refs/heads/master
| 2021-01-18T16:51:56.457593
| 2017-04-21T13:06:12
| 2017-04-21T13:06:12
| 86,775,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
#!/usr/bin/env python3
def main():
import sys
import os
import pwd
import pamela
pw = pwd.getpwuid(os.getuid())
ssh_dir = os.path.join(pw.pw_dir, '.ssh')
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
os.makedirs(ssh_dir, mode=0o700, exist_ok=True)
with open(auth_keys, 'a') as f:
for key in sys.stdin:
print(key.strip(), file=f)
os.chmod(auth_keys, 0o600)
if __name__ == '__main__':
main()
|
[
"philopon.dependence@gmail.com"
] |
philopon.dependence@gmail.com
|
a0f37bf8594ae4e002a3cbda9f0f4fb8efd4c144
|
038dc1f463fba1889264de89369791d7359b4f86
|
/requested_events/views.py
|
c8f249e07f7a2491ed5370300a6202ac0483e330
|
[] |
no_license
|
paishrikrishna/BE-Project
|
079d979fd1a2b158dadc8f9d72d1153f8c17aa21
|
e0949c2523b8fc3d0f0edfd86eaf8717ff824a60
|
refs/heads/master
| 2023-04-13T11:53:31.312949
| 2021-05-04T13:19:34
| 2021-05-04T13:19:34
| 313,569,032
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,337
|
py
|
from django.shortcuts import render
from calander.form import events_form
from calander.models import events
from .form import req_events_form
from .models import req_events
from login_page.login_form import login_form
from login_page.models import login_model
from new_users.models import new_login_model
# Create your views here.
def req_events_index_page(request,user,auth):
if request.method=="POST":
if request.POST['action']=="Add Event":
try:
events_form().save()
except:
obj = events.objects.get(organizer='n/a')
obj.organizer = request.POST['Organizer']
obj.content = request.POST['Agenda']
obj.date = request.POST['New_date']
obj.save()
obj = req_events.objects.get(id=int(request.POST['row']))
obj.delete()
elif request.POST['action']=="Delete Event":
obj = req_events.objects.get(id=int(request.POST['row']))
obj.delete()
elif request.POST['action']=="Add User":
try:
login_form().save()
except:
obj = login_model.objects.get(username='n/a')
obj.username = request.POST['username']
obj.password = request.POST['password']
obj.auth = "member"
obj.link = request.POST['link']
obj.email = request.POST['email']
obj.wing = request.POST['wing']
obj.floor = request.POST['floor']
obj.flat = request.POST['flat']
obj.save()
obj = new_login_model.objects.get(email=(request.POST['email']))
obj.delete()
elif request.POST['action']=="Delete User":
obj = new_login_model.objects.get(email=(request.POST['email']))
obj.delete()
obj = list(req_events.objects.all())
organizer , content , date ,ID= [],[],[],[]
for i in obj:
organizer.append(i.organizer)
content.append(i.content)
date.append(i.date)
ID.append(i.id)
obj = list(new_login_model.objects.all())
username,password ,user_ID,floor,wing,link,pswd,ID= [],[],[],[],[],[],[],[]
for i in obj:
username.append(i.username)
password.append(i.email)
user_ID.append(i.flat)
ID.append(i.id)
floor.append(i.floor)
wing.append(i.wing)
link.append(i.link)
pswd.append(i.password)
return render(request,"requested_events.html",{"ID":ID,"floor":floor,"pswd":pswd,"wing":wing,"link":link,"organizer":organizer,"event_dates":date,"content":content,"ID":ID,"user":user,"username":username,"password":password,"user_ID":user_ID,"auth":auth})
|
[
"2017.shrikrishna.pai@ves.ac.in"
] |
2017.shrikrishna.pai@ves.ac.in
|
f29fc6830528398b792fd60578b01a78f12aa4e7
|
41ede4fd3bfba1bff0166bca7aee80dcf21434c6
|
/ayhanyalcinsoy/Desktop/lxde/base/libfm/actions.py
|
ad79cdbb6f0b2d887aa5244a18b52080cbb19379
|
[] |
no_license
|
pisilinux/playground
|
a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c
|
e4e12fff8a847ba210befc8db7e2af8556c3adf7
|
refs/heads/master
| 2022-08-12T23:03:27.609506
| 2022-08-11T18:28:19
| 2022-08-11T18:28:19
| 8,429,459
| 16
| 22
| null | 2022-08-11T18:28:20
| 2013-02-26T09:37:11
|
Python
|
UTF-8
|
Python
| false
| false
| 811
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "libfm-%s" % (get.srcVERSION())
def setup():
autotools.configure("--disable-static \
--sysconfdir=/etc \
--enable-debug \
--enable-udisks \
--enable-demo")
pisitools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
autotools.make()
def install():
pisitools.dosed("data/libfm.conf", "xarchiver", "file-roller")
autotools.install()
pisitools.dodoc("AUTHORS", "COPYING", "TODO")
|
[
"ayhanyalcinsoy@gmail.com"
] |
ayhanyalcinsoy@gmail.com
|
8d5168f30b7e5f51483fcba73a2d034e20b80ae8
|
fe822705c38caf70c8a72433291acb3a729a0539
|
/backend/delivery_app/services/logdata.py
|
2c1474d24434b68bc2bbd67c691a705c785fe72f
|
[] |
no_license
|
tanficial/delivery-food-fighter
|
138db44dbfee33d0f9f4ecd4ea832436910d8878
|
a73a4df208ef94537f6f4374b1a7aa476bc23d3c
|
refs/heads/main
| 2023-09-02T10:54:05.089348
| 2021-11-02T23:56:18
| 2021-11-02T23:56:18
| 421,084,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
from delivery_app.models.logdata import Logdata, db
def add_logdata(id):
"""
log 이벤트 생성 시 logdata DB에 저장
"""
try:
new_logdata = Logdata(post_id = id)
db.session.add(new_logdata)
db.session.commit()
return new_logdata
except Exception:
db.session.rollback()
raise
|
[
"tanficial9574@gmail.com"
] |
tanficial9574@gmail.com
|
987828b08e77fc4ed6a670121d87f280fc0aed0b
|
5e0a7d90b3fd5d16bbc52eb0c8a118b835c17bad
|
/test/maxicode.py
|
e8ab9114db9fee3362ec2c70d7acb04ecd94aac4
|
[
"LicenseRef-scancode-secret-labs-2011",
"MIT",
"BSD-2-Clause"
] |
permissive
|
ehpale/elaphe
|
424abd206ce2af95d6e7de49758ca96cd6f797c8
|
0a0c51ee8627cccc57d557330ba6c2f2c5266960
|
refs/heads/master
| 2022-03-06T19:43:08.986519
| 2022-02-25T18:53:46
| 2022-02-25T18:53:46
| 59,607,180
| 11
| 5
|
NOASSERTION
| 2022-02-25T19:00:33
| 2016-05-24T20:40:54
|
PostScript
|
UTF-8
|
Python
| false
| false
| 812
|
py
|
symbology = 'maxicode'
cases = [
('001.png', 'This is MaxiCode'),
('002.png', 'This is Maxi^067ode', dict(parse=True)),
('003.png', ('152382802^029840^029001^0291Z00004951^029UPSN^02906X610'
'^029159^0291234567^0291/1^029^029Y^029634 ALPHA DR^029P'
'ITTSBURGH^029PA^029^004'), dict(mode=2, parse=True)),
('004.png', ('ABC123^029840^029001^0291Z00004951^029UPSN^02906X610^029'
'159^0291234567^0291/1^029^029Y^029634 ALPHA DR^029PITTSB'
'URGH^029PA^029^004'), dict(mode=3, parse=True)),
('005.png', ('[)>^03001^02996152382802^029840^029001^0291Z00004951^029'
'UPSN^02906X610^029159^0291234567^0291/1^029^029Y^029634 '
'ALPHA DR^029PITTSBURGH^029PA^029^004'), dict(mode=2, parse=True)),
]
|
[
"whosaysni@gmail.com"
] |
whosaysni@gmail.com
|
0ae55acd20bb59d6c3f499e32e0f526820a351d7
|
822d3cd484b54f0531fc205520c765a8321c0613
|
/pyFile/8.面向对象/2.类的属性/9.类方法和静态方法.py
|
a0ccbf84964d8f9059c7feb1ae5efeedb1a3e65a
|
[] |
no_license
|
mghxy123/learnPython
|
31d1cc18deeed5a89864ca0333fe488e0dbf08b4
|
00740e87d55a4dffd78773deaff8689485df31e8
|
refs/heads/master
| 2021-07-21T14:31:02.421788
| 2020-06-27T11:28:01
| 2020-06-27T11:28:01
| 187,751,182
| 0
| 0
| null | 2020-06-07T05:14:05
| 2019-05-21T02:58:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : 9.类方法和静态方法.py
# Author: HuXianyong
# Mail: mghxy123@163.com
# Date : 2019/5/16 0016
# Ordinary function methods in a class
# class Person:
#     def normal_method(): # Is this allowed? Yes - there is no syntax problem and it runs fine; it is just that by convention nobody writes it this way
#         print('normal')
#
# # How is it called?
# Person.normal_method() # Is this allowed? Yes, because it simply calls the function directly
# # Person().normal_method() # Is this allowed? No: this creates an instance, and a method called on an instance must accept the instance object (self), which is not taken here, so it raises an error
# print(Person.__dict__)
# # Static method
# class Person:
#     @staticmethod
#     def class_method():
#         print('this is staticMethod')
# Person.class_method()
# Person().class_method()
# Class methods and static methods
class Person:
@classmethod
    def class_method(cls): # what is cls?
print('this is class method')
print('class = {0.__name__}({0})'.format(cls))
cls.HEIGHT = 170
@staticmethod
def static_method():
print('this is staticMethod')
Person.class_method()
print(Person.__dict__)
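# A small follow-up sketch answering the question above (an addition, not in the original):
# cls is the class object itself, so a @classmethod can be called on the class or on an
# instance, and attributes it sets (like HEIGHT above) land on the class.
# Person().class_method()   # also works; cls is still Person
# print(Person.HEIGHT)      # 170, set on the class by class_method
# Person.static_method()    # a @staticmethod receives neither cls nor self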
|
[
"mghxy123@163.com"
] |
mghxy123@163.com
|
da850d8841ddddfdccfc6bde153467956b91789c
|
78e60a7d8a67ed76244004e8a3ed573fbf396e41
|
/samples/get_zip_codes.py
|
a89c105f5ec1a635d350ba870418f9f735a0bb60
|
[
"MIT"
] |
permissive
|
Crivez/apiclient-python
|
837a9f7cc0453ccd3121311adc7920b5fe6b3e33
|
860fc054f546152a101e29b1af388c381075ac47
|
refs/heads/master
| 2023-06-08T13:24:09.249704
| 2021-06-17T12:16:35
| 2021-06-17T12:16:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
# Search for zip codes in Germany.
COUNTRY_CODE = "DE"
COUNT = 1
try:
res = voxapi.get_zip_codes(COUNTRY_CODE,
count=COUNT)
print(res)
except VoximplantException as e:
print("Error: {}".format(e.message))
|
[
"andrey@voximplant.com"
] |
andrey@voximplant.com
|
e03e0083b6c860f813b2cae42fbca20c5014d738
|
6ba1da25bb624c8bf74f1899f64b450602f12ff4
|
/Example/PY/django/TestDemo/test_function.py
|
257991ced039e50cdddaa45a0e0b660d0048ea62
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-free-unknown",
"FSFAP",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Zoey-dot/pinpoint-c-agent
|
1dd58ce89610a7aafcdda842145a764cebe3f783
|
c76f9e41d8f2a9fdd8b0c90d52bb30e08bbd634d
|
refs/heads/master
| 2021-04-04T06:58:19.145805
| 2020-08-05T02:14:02
| 2020-08-05T02:14:02
| 263,580,530
| 1
| 0
|
Apache-2.0
| 2020-07-01T07:13:35
| 2020-05-13T09:11:23
| null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from pinpoint.plugins.PinpointCommonPlugin import PinpointCommonPlugin
@PinpointCommonPlugin("", __name__)
def test_func1(arg1, arg2):
return "this is test_func1: arg1=%s, arg2=%s"%(arg1, arg2)
class TestUserFunc1(object):
def __init__(self, name, score):
self.name = name
self.score = score
@PinpointCommonPlugin("TestUserFunc1", __name__)
def test_func2(self):
return "%s\'s score is : %s"%(self.name, self.score)
|
[
"su.wei@navercorp.com"
] |
su.wei@navercorp.com
|
5a82456358fe6bffb55775bcea1ef64c6a01c840
|
9f72ad0c091df885df5953286003d23f25216602
|
/Tarefa5/testClient2.py
|
c2396d0f28ac570b6764acb221eabd7f6bccef6c
|
[] |
no_license
|
SD-CC-UFG/felipe.gemmal.sd.ufg
|
0d96f50a34d5052df42454d8a6c7965fa8a2a035
|
472d38137c641570278cee2ae9f957fbdfc81188
|
refs/heads/master
| 2020-03-27T11:50:06.795260
| 2018-12-13T10:26:56
| 2018-12-13T10:26:56
| 146,510,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
# Basic indirect-access client, by Felipe Gemmal
# -*- coding: utf-8 -*-
import os
import socket, string
import sys
nameServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ip = 'localhost'
porta = 12388
print("Conectando")
nameServer.connect((ip,porta))
print("Enviando requisicao")
# type of service being requested; placed here to account for the DNS recv timing
nameServer.send("getAddress".encode('utf-8'))
print("Recebendo resposta")
resposta = str(nameServer.recv(1024).decode('utf-8'))
print(resposta)
nameServer.close()
|
[
"lipegemmal@hotmail.com"
] |
lipegemmal@hotmail.com
|
ec76be96a998db58443e1d0b6cf215fe81c6c74e
|
386cff3bff62a6fb76ba22fd41e3c4f112bae6ba
|
/marathon/subscriber.py
|
c5605c7b54352bef497888ef5530763635bb8f99
|
[] |
no_license
|
davidbliu/scaffolding
|
7c960acdc39528be5d9bed5068809c2b5f02bbc4
|
ff921b669f171075c2f06d195f455fa521b25f50
|
refs/heads/master
| 2016-08-07T21:15:12.797544
| 2014-07-03T15:48:58
| 2014-07-03T15:48:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,792
|
py
|
#!/usr/bin/env python
import argparse
import atexit
import sys
import urlparse
from flask import Flask, request, jsonify
import marathon
from stores import InMemoryStore, SyslogUdpStore
app = Flask(__name__)
# re-initialize later
events = None
event_store = None
def on_exit(marathon_client, callback_url):
marathon_client.delete_event_subscription(callback_url)
@app.route('/events', methods=['POST'])
def event_receiver():
    print 'hello'
    # event = request.get_json()
    # event_store.save(event)
    return ''
@app.route('/events', methods=['GET'])
def list_events():
    print 'i have arrived here'
    # return jsonify({'events': event_store.list()})
    return ''
@app.route('/callback', methods=['GET', 'POST'])
def callback():
print 'callback'
try:
event = request.get_json()
print event
except:
print 'no event'
return jsonify(result={"status": 200})
@app.route('/marathon', methods=['GET'])
def marathon_register():
print 'marathon stuff happening here'
    marathon_url = 'http://localhost:8080'
    callback_url = 'http://localhost:5000/callback'
m = marathon.MarathonClient(marathon_url)
m.create_event_subscription(callback_url)
atexit.register(on_exit, m, callback_url)
return jsonify(result={"status": 200})
if __name__ == '__main__':
print 'cool stuff dude'
# parser = argparse.ArgumentParser(description='Marathon Logging Service')
# parser.add_argument('-m', '--marathon-url', required=True, help='Marathon server URL (http[s]://<host>:<port>[<path>])')
# parser.add_argument('-c', '--callback-url', required=True, help='callback URL for this service (http[s]://<host>:<port>[<path>]/events')
# parser.add_argument('-e', '--event-store', default='in-memory://localhost/', help='event store connection string (default: in-memory://localhost/)')
# parser.add_argument('-p', '--port', type=int, default=5000, help='Port to listen on (default: 5000)')
# parser.add_argument('-i', '--ip', default='0.0.0.0', help='IP to listen on (default: 0.0.0.0)')
# args = parser.parse_args()
# event_store_url = urlparse.urlparse(args.event_store)
# if event_store_url.scheme == 'in-memory':
# event_store = InMemoryStore(event_store_url)
# elif event_store_url.scheme == 'syslog':
# event_store = SyslogUdpStore(event_store_url)
# else:
# print 'Invalid event store type: "{scheme}" (from "{url}")'.format(scheme=event_store_url.scheme, url=args.event_store)
# sys.exit(1)
marathon_url = 'http://localhost:8080'
callback_url = 'http://localhost:5000/callback'
m = marathon.MarathonClient(marathon_url)
m.create_event_subscription(callback_url)
atexit.register(on_exit, m, callback_url)
app.run(port=5000, host='localhost')
|
[
"david.liu@autodesk.com"
] |
david.liu@autodesk.com
|
5a573494952b197ef81f13cde9b7c7b8ce088c5c
|
234d650ff5d906c2e3ce8da37c7b725c694791a0
|
/dxy/items.py
|
6b256d68ace5de8eb111d8b72fffeefaa5badeb1
|
[] |
no_license
|
IvanQin/dxy_spider
|
7846a0aeb96f8725e091be09db20c198c559b36c
|
ed1e73e09986f2397151d369a08586cc7e6574da
|
refs/heads/master
| 2021-01-25T09:21:02.082754
| 2017-06-09T04:15:43
| 2017-06-09T04:15:43
| 93,814,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class DxyItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
link = scrapy.Field()
page = scrapy.Field()
|
[
"yifan.qing@datarx.cn"
] |
yifan.qing@datarx.cn
|
9275404a0fb19e0fa17944bb3c32530ebb0cca93
|
5fdbd06b033464fdd5bc5be7a181422a92e5fc3c
|
/RandomForestWithGPs/GPPython/gp.py
|
3d3681d4d33dcd90192f3a3b1c7e12bd86a8cf17
|
[] |
no_license
|
themasterlink/RandomForestWithGPs
|
02ab4b4473caef734c7234348163b973c03f73df
|
fcbd294b381ecba570ad34aca9eda1e70bf4e95e
|
refs/heads/master
| 2021-01-17T12:49:05.304383
| 2017-09-11T14:40:17
| 2017-09-11T14:40:17
| 59,106,215
| 2
| 2
| null | 2017-06-12T14:07:05
| 2016-05-18T10:34:39
|
C++
|
UTF-8
|
Python
| false
| false
| 6,865
|
py
|
#!/Users/Max/anaconda/bin/python
import numpy as np
import json
from pprint import pprint
import math
import scipy
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib.cm as cm
with open("init.json") as data_file:
data = json.load(data_file)
class GaussianProccess:
def __init__(self, fileName):
lines = open(data["Training"]["path"], "r").read().split("\n")
self.data = []
self.lSquared = float(data["GP"]["l"]) * float(data["GP"]["l"])
self.sigmaNSquared = float(data["GP"]["sigmaN"]) * float(data["GP"]["sigmaN"])
self.labels = []
for line in lines:
if len(line) > 3:
ele = line.split(",")
point = np.array([float(ele[0]), float(ele[1])])
self.data.append(point)
self.labels.append(-1 if int(ele[2]) == 0 else 1)
self.labels = np.asarray(self.labels)
self.dataPoints = len(self.data)
self.K = np.empty([self.dataPoints, self.dataPoints], dtype=float)
for i in range(0, self.dataPoints):
self.K[i][i] = self.sigmaNSquared
for j in range(i + 1, self.dataPoints):
temp = self.kernelOf(self.data[i], self.data[j])
self.K[i][j] = temp
self.K[j][i] = temp
def updatePis(self):
for i in range(0, self.dataPoints):
self.pis[i] = 1.0 / (1.0 + math.exp(-self.labels[i] * self.f[i]))
self.dPis[i] = self.t[i] - self.pis[i]
self.ddPis[i] = -(-self.pis[i] * (1 - self.pis[i])) # - to get minus dd Pi
self.sqrtDDPis[i] = math.sqrt(self.ddPis[i])
def trainF(self):
self.f = np.zeros(self.dataPoints)
self.pis = np.empty(self.dataPoints)
self.dPis = np.empty(self.dataPoints)
self.ddPis = np.empty(self.dataPoints)
self.sqrtDDPis = np.empty(self.dataPoints)
self.t = (self.labels + np.ones(self.dataPoints)) * 0.5
converge = False
eye = np.eye(self.dataPoints)
lastObject = 1e100;
while(not converge):
self.updatePis()
self.W = np.diag(self.ddPis)
self.WSqrt = np.diag(self.sqrtDDPis)
C = eye + np.dot(np.dot(self.WSqrt, self.K), self.WSqrt)
print("K:\n"+str(self.K))
print("inner:\n"+str(C))
self.L = scipy.linalg.cho_factor(C, lower = True)
self.U = scipy.linalg.cho_factor(C, lower = False)
b = np.dot(self.W, self.f) + self.dPis;
nenner = scipy.linalg.cho_solve(self.L, (np.dot(self.WSqrt,np.dot(self.K,b))))
self.a = b - np.dot(self.WSqrt, scipy.linalg.cho_solve(self.U, nenner))
self.f = np.dot(self.K, self.a)
prob = 1.0 / (1.0 + math.exp(-np.dot(self.labels,self.f)))
objective = -0.5 * np.dot(self.f, self.a) + math.log(max(min(prob,1-1e-7),1e-7));
print(objective)
if math.fabs(objective / lastObject - 1.0) < 1e-5:
converge = True
lastObject = objective
print("Trained")
return
def train(self):
converge = False
while(not converge):
            self.trainF()
logZ = -0.5 * np.dot(self.a, self.f) + (-math.log(1 + math.exp(-np.dot(self.labels, self.f)))) + math.log(L.diagonal().sum())
R = np.dot(self.WSqrt, scipy.linalg.cho_solve(self.U, scipy.linalg.cho_solve(self.L, self.WSqrt)))
C = scipy.linalg.cho_solve(self.L, np.dot(self.WSqrt, self.K))
dddPis = np.empty(self.dataPoints)
for i in range(0, self.dataPoints):
ddPis = -self.ddPis[i];
dddPis[i] = - ddPis * (1-self.pis[i]) - self.pis[i] * (1 - ddPis)
            s2 = -0.5 * (self.K.diagonal() - np.dot(C.T,C).diagonal()) * dddPis
#for i in range(0,3):
#C =
self.W = np.diag(self.ddPis)
self.WSqrt = np.diag(self.sqrtDDPis)
C = eye + np.dot(np.dot(self.WSqrt, self.K), self.WSqrt)
self.L = scipy.linalg.cho_factor(C, lower = True)
self.U = scipy.linalg.cho_factor(C, lower = False)
b = np.dot(self.W, self.f) + self.dPis;
nenner = scipy.linalg.cho_solve(self.L, (np.dot(self.WSqrt,np.dot(self.K,b))))
a = b - np.dot(self.WSqrt, scipy.linalg.cho_solve(self.U, nenner))
self.f = np.dot(self.K, a)
prob = 1.0 / (1.0 + math.exp(-np.dot(self.labels,self.f)))
objective = -0.5 * np.dot(self.f, a) + math.log(prob if prob > 1e-7 and prob < 1 - 1e-7 else 1e-7 if prob <= 1e-7 else 1 - 1e-7);
print(objective)
#if math.fabs(objective / lastObject - 1.0) < 1e-5:
converge = True
lastObject = objective
print("Trained")
return
def predict(self, newPoint):
kXStar = np.empty(self.dataPoints)
for i in range(0, self.dataPoints):
kXStar[i] = self.kernelOf(newPoint, self.data[i])
fStar = np.dot(kXStar, self.dPis)
v = scipy.linalg.cho_solve(self.L, np.dot(self.WSqrt,kXStar))
vFStar = math.fabs(self.sigmaNSquared + 1 - np.dot(v,v))
start = fStar - vFStar * 1.5
end = fStar + vFStar * 1.5
stepSize = (end - start) / float(data["GP"]["samplingAmount"])
prob = 0.0
for p in np.arange(start,end,stepSize):
gaussRand = np.random.normal(fStar, vFStar)
height = 1.0 / (1.0 + math.exp(p)) * gaussRand
prob += height * stepSize;
return max(min(prob,1), 0)
def plot(self):
plt.figure(0)
min = np.min(self.data)
max = np.max(self.data)
min -= (max-min) * 0.2
max += (max-min) * 0.2
stepSize = (max - min) / float(data["GP"]["plotRes"]);
listGrid = []
i = 0
for x in np.arange(min,max, stepSize):
print("Done: " + str(float(i) / float(data["GP"]["plotRes"]) * 100) + "%")
i += 1
newList = []
for y in np.arange(min,max, stepSize):
newPoint = [y,x]
prob = self.predict(newPoint)
newList.append(prob)
listGrid.append(newList)
plt.imshow(listGrid, extent=(max, min, min, max), interpolation='nearest', cmap=cm.rainbow)
plt.gca().invert_xaxis()
plt.gca().set_ylim([min, max])
plt.gca().set_xlim([min, max])
for i in range(0,self.dataPoints):
plt.plot(self.data[i][0],self.data[i][1], 'bo' if self.labels[i] == 1 else 'ro')
print("Finished plotting")
plt.show()
def kernelOf(self, x, y):
diff = x - y
return math.exp(- 0.5 / self.lSquared * diff.dot(diff));
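# A brief note on the line above (a reading of the code, not part of the original file):
# this is the squared-exponential (RBF) kernel k(x, y) = exp(-||x - y||^2 / (2 * l^2)),
# where lSquared = l^2 comes from the "GP" section of init.json.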
gp = GaussianProccess(data["Training"]["path"])
gp.trainF()
gp.plot()
|
[
"themasterlink93@googlemail.com"
] |
themasterlink93@googlemail.com
|
9f079bd4933ce55dbaf3592ee17fa1c5dcd75ac5
|
de788449bd7433bbfc7c0574a0d81fd9dd24649f
|
/geoportal/geoportailv3_geoportal/static-ngeo/ngeo/buildtools/test-eof-newline
|
6872c85514ca17a1a053dbfa72b1aa9ce77eb0ca
|
[
"MIT"
] |
permissive
|
Geoportail-Luxembourg/geoportailv3
|
6ab27bed755ff4f933c2f9700e2d6086ae8f5b68
|
45722f46bd5e4650ed3b01b1920de3732f848186
|
refs/heads/master
| 2023-08-18T21:02:45.652482
| 2023-08-02T14:12:56
| 2023-08-02T14:12:56
| 24,669,372
| 25
| 17
|
MIT
| 2023-08-25T13:39:08
| 2014-10-01T07:23:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,302
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2017, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import os
import subprocess
exit_code = 0
FNULL = open(os.devnull, 'w')
for filename in subprocess.check_output(["git", "ls-files"]).decode("utf-8").split("\n"):
if os.path.isfile(filename):
if subprocess.call(
"git check-attr -a '{}' | grep ' text: set'".format(filename),
shell=True, stdout=FNULL) == 0:
size = os.stat(filename).st_size
if size != 0:
with open(filename) as f:
f.seek(size - 1)
if ord(f.read()) != ord("\n"):
print("No new line at end of '{}' file.".format(filename))
exit_code = 2
exit(exit_code)
|
[
"antoine@abt.im"
] |
antoine@abt.im
|
|
d7336abe08b51fb335e57cf3d53ee20b79886453
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/insights/v20160301/_inputs.py
|
5910733c44e6efa9bc7563418d54942acbf6f519
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38,307
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'LocationThresholdRuleConditionArgs',
'ManagementEventAggregationConditionArgs',
'ManagementEventRuleConditionArgs',
'RetentionPolicyArgs',
'RuleEmailActionArgs',
'RuleManagementEventClaimsDataSourceArgs',
'RuleManagementEventDataSourceArgs',
'RuleMetricDataSourceArgs',
'RuleWebhookActionArgs',
'ThresholdRuleConditionArgs',
]
@pulumi.input_type
class LocationThresholdRuleConditionArgs:
def __init__(__self__, *,
failed_location_count: pulumi.Input[int],
odata_type: pulumi.Input[str],
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
A rule condition based on a certain number of locations failing.
:param pulumi.Input[int] failed_location_count: the number of locations that must fail to activate the alert.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition'.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
pulumi.set(__self__, "failed_location_count", failed_location_count)
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition')
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter(name="failedLocationCount")
def failed_location_count(self) -> pulumi.Input[int]:
"""
the number of locations that must fail to activate the alert.
"""
return pulumi.get(self, "failed_location_count")
@failed_location_count.setter
def failed_location_count(self, value: pulumi.Input[int]):
pulumi.set(self, "failed_location_count", value)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
@pulumi.input_type
class ManagementEventAggregationConditionArgs:
def __init__(__self__, *,
operator: Optional[pulumi.Input['ConditionOperator']] = None,
threshold: Optional[pulumi.Input[float]] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
How the data that is collected should be combined over time.
:param pulumi.Input['ConditionOperator'] operator: the condition operator.
:param pulumi.Input[float] threshold: The threshold value that activates the alert.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
if operator is not None:
pulumi.set(__self__, "operator", operator)
if threshold is not None:
pulumi.set(__self__, "threshold", threshold)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter
def operator(self) -> Optional[pulumi.Input['ConditionOperator']]:
"""
the condition operator.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: Optional[pulumi.Input['ConditionOperator']]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def threshold(self) -> Optional[pulumi.Input[float]]:
"""
The threshold value that activates the alert.
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
@pulumi.input_type
class ManagementEventRuleConditionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
aggregation: Optional[pulumi.Input['ManagementEventAggregationConditionArgs']] = None,
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None):
"""
A management event rule condition.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition'.
:param pulumi.Input['ManagementEventAggregationConditionArgs'] aggregation: How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition')
if aggregation is not None:
pulumi.set(__self__, "aggregation", aggregation)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def aggregation(self) -> Optional[pulumi.Input['ManagementEventAggregationConditionArgs']]:
"""
How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
"""
return pulumi.get(self, "aggregation")
@aggregation.setter
def aggregation(self, value: Optional[pulumi.Input['ManagementEventAggregationConditionArgs']]):
pulumi.set(self, "aggregation", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@pulumi.input_type
class RetentionPolicyArgs:
def __init__(__self__, *,
days: pulumi.Input[int],
enabled: pulumi.Input[bool]):
"""
Specifies the retention policy for the log.
:param pulumi.Input[int] days: the number of days for the retention in days. A value of 0 will retain the events indefinitely.
:param pulumi.Input[bool] enabled: a value indicating whether the retention policy is enabled.
"""
pulumi.set(__self__, "days", days)
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter
def days(self) -> pulumi.Input[int]:
"""
the number of days for the retention in days. A value of 0 will retain the events indefinitely.
"""
return pulumi.get(self, "days")
@days.setter
def days(self, value: pulumi.Input[int]):
pulumi.set(self, "days", value)
@property
@pulumi.getter
def enabled(self) -> pulumi.Input[bool]:
"""
a value indicating whether the retention policy is enabled.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "enabled", value)
@pulumi.input_type
class RuleEmailActionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
custom_emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
send_to_service_owners: Optional[pulumi.Input[bool]] = None):
"""
Specifies the action to send email when the rule condition is evaluated. The discriminator is always RuleEmailAction in this case.
:param pulumi.Input[str] odata_type: specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction'.
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_emails: the list of administrator's custom email addresses to notify of the activation of the alert.
:param pulumi.Input[bool] send_to_service_owners: Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction')
if custom_emails is not None:
pulumi.set(__self__, "custom_emails", custom_emails)
if send_to_service_owners is not None:
pulumi.set(__self__, "send_to_service_owners", send_to_service_owners)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="customEmails")
def custom_emails(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
the list of administrator's custom email addresses to notify of the activation of the alert.
"""
return pulumi.get(self, "custom_emails")
@custom_emails.setter
def custom_emails(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "custom_emails", value)
@property
@pulumi.getter(name="sendToServiceOwners")
def send_to_service_owners(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
"""
return pulumi.get(self, "send_to_service_owners")
@send_to_service_owners.setter
def send_to_service_owners(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "send_to_service_owners", value)
@pulumi.input_type
class RuleManagementEventClaimsDataSourceArgs:
def __init__(__self__, *,
email_address: Optional[pulumi.Input[str]] = None):
"""
The claims for a rule management event data source.
:param pulumi.Input[str] email_address: the email address.
"""
if email_address is not None:
pulumi.set(__self__, "email_address", email_address)
@property
@pulumi.getter(name="emailAddress")
def email_address(self) -> Optional[pulumi.Input[str]]:
"""
the email address.
"""
return pulumi.get(self, "email_address")
@email_address.setter
def email_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "email_address", value)
@pulumi.input_type
class RuleManagementEventDataSourceArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
claims: Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']] = None,
event_name: Optional[pulumi.Input[str]] = None,
event_source: Optional[pulumi.Input[str]] = None,
legacy_resource_id: Optional[pulumi.Input[str]] = None,
level: Optional[pulumi.Input[str]] = None,
metric_namespace: Optional[pulumi.Input[str]] = None,
operation_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_location: Optional[pulumi.Input[str]] = None,
resource_provider_name: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
sub_status: Optional[pulumi.Input[str]] = None):
"""
        A rule management event data source. The discriminator field is always RuleManagementEventDataSource in this case.
:param pulumi.Input[str] odata_type: specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'.
:param pulumi.Input['RuleManagementEventClaimsDataSourceArgs'] claims: the claims.
:param pulumi.Input[str] event_name: the event name.
:param pulumi.Input[str] event_source: the event source.
:param pulumi.Input[str] legacy_resource_id: the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param pulumi.Input[str] level: the level.
:param pulumi.Input[str] metric_namespace: the namespace of the metric.
:param pulumi.Input[str] operation_name: The name of the operation that should be checked for. If no name is provided, any operation will match.
:param pulumi.Input[str] resource_group_name: the resource group name.
:param pulumi.Input[str] resource_location: the location of the resource.
:param pulumi.Input[str] resource_provider_name: the resource provider name.
:param pulumi.Input[str] resource_uri: the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param pulumi.Input[str] status: The status of the operation that should be checked for. If no status is provided, any status will match.
:param pulumi.Input[str] sub_status: the substatus.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource')
if claims is not None:
pulumi.set(__self__, "claims", claims)
if event_name is not None:
pulumi.set(__self__, "event_name", event_name)
if event_source is not None:
pulumi.set(__self__, "event_source", event_source)
if legacy_resource_id is not None:
pulumi.set(__self__, "legacy_resource_id", legacy_resource_id)
if level is not None:
pulumi.set(__self__, "level", level)
if metric_namespace is not None:
pulumi.set(__self__, "metric_namespace", metric_namespace)
if operation_name is not None:
pulumi.set(__self__, "operation_name", operation_name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if resource_location is not None:
pulumi.set(__self__, "resource_location", resource_location)
if resource_provider_name is not None:
pulumi.set(__self__, "resource_provider_name", resource_provider_name)
if resource_uri is not None:
pulumi.set(__self__, "resource_uri", resource_uri)
if status is not None:
pulumi.set(__self__, "status", status)
if sub_status is not None:
pulumi.set(__self__, "sub_status", sub_status)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def claims(self) -> Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']]:
"""
the claims.
"""
return pulumi.get(self, "claims")
@claims.setter
def claims(self, value: Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']]):
pulumi.set(self, "claims", value)
@property
@pulumi.getter(name="eventName")
def event_name(self) -> Optional[pulumi.Input[str]]:
"""
the event name.
"""
return pulumi.get(self, "event_name")
@event_name.setter
def event_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_name", value)
@property
@pulumi.getter(name="eventSource")
def event_source(self) -> Optional[pulumi.Input[str]]:
"""
the event source.
"""
return pulumi.get(self, "event_source")
@event_source.setter
def event_source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_source", value)
@property
@pulumi.getter(name="legacyResourceId")
def legacy_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "legacy_resource_id")
@legacy_resource_id.setter
def legacy_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "legacy_resource_id", value)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
the level.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter(name="metricNamespace")
def metric_namespace(self) -> Optional[pulumi.Input[str]]:
"""
the namespace of the metric.
"""
return pulumi.get(self, "metric_namespace")
@metric_namespace.setter
def metric_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_namespace", value)
@property
@pulumi.getter(name="operationName")
def operation_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the operation that should be checked for. If no name is provided, any operation will match.
"""
return pulumi.get(self, "operation_name")
@operation_name.setter
def operation_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operation_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
the resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[pulumi.Input[str]]:
"""
the location of the resource.
"""
return pulumi.get(self, "resource_location")
@resource_location.setter
def resource_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_location", value)
@property
@pulumi.getter(name="resourceProviderName")
def resource_provider_name(self) -> Optional[pulumi.Input[str]]:
"""
the resource provider name.
"""
return pulumi.get(self, "resource_provider_name")
@resource_provider_name.setter
def resource_provider_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_provider_name", value)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> Optional[pulumi.Input[str]]:
"""
the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_uri", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of the operation that should be checked for. If no status is provided, any status will match.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="subStatus")
def sub_status(self) -> Optional[pulumi.Input[str]]:
"""
the substatus.
"""
return pulumi.get(self, "sub_status")
@sub_status.setter
def sub_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_status", value)
@pulumi.input_type
class RuleMetricDataSourceArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
legacy_resource_id: Optional[pulumi.Input[str]] = None,
metric_name: Optional[pulumi.Input[str]] = None,
metric_namespace: Optional[pulumi.Input[str]] = None,
resource_location: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None):
"""
A rule metric data source. The discriminator value is always RuleMetricDataSource in this case.
:param pulumi.Input[str] odata_type: specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource'.
:param pulumi.Input[str] legacy_resource_id: the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param pulumi.Input[str] metric_name: the name of the metric that defines what the rule monitors.
:param pulumi.Input[str] metric_namespace: the namespace of the metric.
:param pulumi.Input[str] resource_location: the location of the resource.
:param pulumi.Input[str] resource_uri: the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource')
if legacy_resource_id is not None:
pulumi.set(__self__, "legacy_resource_id", legacy_resource_id)
if metric_name is not None:
pulumi.set(__self__, "metric_name", metric_name)
if metric_namespace is not None:
pulumi.set(__self__, "metric_namespace", metric_namespace)
if resource_location is not None:
pulumi.set(__self__, "resource_location", resource_location)
if resource_uri is not None:
pulumi.set(__self__, "resource_uri", resource_uri)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="legacyResourceId")
def legacy_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "legacy_resource_id")
@legacy_resource_id.setter
def legacy_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "legacy_resource_id", value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> Optional[pulumi.Input[str]]:
"""
the name of the metric that defines what the rule monitors.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter(name="metricNamespace")
def metric_namespace(self) -> Optional[pulumi.Input[str]]:
"""
the namespace of the metric.
"""
return pulumi.get(self, "metric_namespace")
@metric_namespace.setter
def metric_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_namespace", value)
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[pulumi.Input[str]]:
"""
the location of the resource.
"""
return pulumi.get(self, "resource_location")
@resource_location.setter
def resource_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_location", value)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> Optional[pulumi.Input[str]]:
"""
the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_uri", value)
@pulumi.input_type
class RuleWebhookActionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
service_uri: Optional[pulumi.Input[str]] = None):
"""
Specifies the action to post to service when the rule condition is evaluated. The discriminator is always RuleWebhookAction in this case.
:param pulumi.Input[str] odata_type: specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction'.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload.
:param pulumi.Input[str] service_uri: the service uri to Post the notification when the alert activates or resolves.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction')
if properties is not None:
pulumi.set(__self__, "properties", properties)
if service_uri is not None:
pulumi.set(__self__, "service_uri", service_uri)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="serviceUri")
def service_uri(self) -> Optional[pulumi.Input[str]]:
"""
the service uri to Post the notification when the alert activates or resolves.
"""
return pulumi.get(self, "service_uri")
@service_uri.setter
def service_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_uri", value)
@pulumi.input_type
class ThresholdRuleConditionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
operator: pulumi.Input['ConditionOperator'],
threshold: pulumi.Input[float],
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None,
time_aggregation: Optional[pulumi.Input['TimeAggregationOperator']] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
A rule condition based on a metric crossing a threshold.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition'.
:param pulumi.Input['ConditionOperator'] operator: the operator used to compare the data and the threshold.
:param pulumi.Input[float] threshold: the threshold value that activates the alert.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
:param pulumi.Input['TimeAggregationOperator'] time_aggregation: the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition')
pulumi.set(__self__, "operator", operator)
pulumi.set(__self__, "threshold", threshold)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if time_aggregation is not None:
pulumi.set(__self__, "time_aggregation", time_aggregation)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input['ConditionOperator']:
"""
the operator used to compare the data and the threshold.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input['ConditionOperator']):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def threshold(self) -> pulumi.Input[float]:
"""
the threshold value that activates the alert.
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: pulumi.Input[float]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter(name="timeAggregation")
def time_aggregation(self) -> Optional[pulumi.Input['TimeAggregationOperator']]:
"""
the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric.
"""
return pulumi.get(self, "time_aggregation")
@time_aggregation.setter
def time_aggregation(self, value: Optional[pulumi.Input['TimeAggregationOperator']]):
pulumi.set(self, "time_aggregation", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
|
[
"noreply@github.com"
] |
noreply@github.com
|
8886c4eff59f8379795b40f8995408fb237f04c7
|
c6221e1163b7c1cdb0a1bc6e29da2dcbec04d1b8
|
/Core/game.py
|
4de0c1df275a0f209ceae7e11870ed60d7e2d01a
|
[] |
no_license
|
Dexton/Tesla-V.-Edison-Demo-Prototype
|
674e7620908b2920fde776444756823138580a32
|
7cebdbc24a6c78bdfc460c17a8d62596593cfe82
|
refs/heads/master
| 2021-01-24T03:38:25.794850
| 2011-10-09T00:37:09
| 2011-10-09T00:37:09
| 2,540,805
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
import pyglet
from game_batch import GameBatch
class GameStates:
MAIN_MENU = 0
PLAYING = 1
PAUSED = 2
GAME_OVER = 3
class GameWindow(pyglet.window.Window):
def __init__(self, *args, **kwargs):
""" Creates necesary items and displays the menu """
super(GameWindow, self).__init__(1024, 768, *args, **kwargs)
        # The MainMenu/PauseMenu batches below are commented out (and their
        # classes are not imported), so start directly in the playing state
        # to avoid crashing in on_key_press/on_draw.
        self.game_state = GameStates.PLAYING
#self.main_menu_batch = MainMenu(self, self.width, self.height)
#self.pause_menu_batch = PauseMenu(self, self.width, self.height)
self.game_batch = GameBatch(self, self.width, self.height)
# this next line makes pyglet call self.update at 120Hz
# this has to be the last line in __init__
pyglet.clock.schedule_interval(self.update, 1/120.0)
def update(self, dt):
""" Update game information
dt: time delta, the change in time
"""
def on_key_press(self, symbol, modifiers):
""" Key Press Event Handler
symbol: the symbol(key) pressed
modifiers: the extra keys pressed (ex. Ctrl or Alt)
"""
if self.game_state == GameStates.MAIN_MENU:
self.main_menu_batch.on_key_press(symbol, modifiers)
if self.game_state == GameStates.PLAYING:
self.game_batch.on_key_press(symbol, modifiers)
if self.game_state == GameStates.PAUSED:
self.pause_menu_batch.on_key_press(symbol, modifiers)
def on_draw(self):
""" Draw Screen Event Handler """
self.clear()
if self.game_state == GameStates.MAIN_MENU:
self.main_menu_batch.draw()
if self.game_state == GameStates.PLAYING:
self.game_batch.draw()
if self.game_state == GameStates.PAUSED:
self.pause_menu_batch.draw()
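# A minimal launch sketch (an assumption, not part of the original file):
# create the window and hand control to pyglet's event loop.
if __name__ == '__main__':
    window = GameWindow()
    pyglet.app.run()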
|
[
"loktacar@gmail.com"
] |
loktacar@gmail.com
|
c603c10469c17a5fe10f107b6cfe4f567d52bce1
|
e294a32686c46c520186326be47a48861aaacdad
|
/final number while(終極密碼).py
|
219dcd95b9b4f7a4fc730498539f43d897995102
|
[] |
no_license
|
goodgood9897/python-2020-8
|
6381894fb2e68f35fe3d583aec6761b32e22149c
|
375969d7c457340659b35d1a9fb41479e0b05c09
|
refs/heads/master
| 2022-11-30T06:35:24.609085
| 2020-08-07T09:03:37
| 2020-08-07T09:03:37
| 284,589,524
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
import random
a = 1
b = 100
number = random.randint(1,100)
while True:
    print('Now %d-%d' % (a, b))
    answer = int(input('Please enter number:'))
    if answer < a or answer > b:
print('Please enter again.')
elif answer>number:
b=answer
elif answer<number:
a=answer
elif answer==number:
print('correct~~~!')
break
|
[
"noreply@github.com"
] |
noreply@github.com
|
3ae1533fbdd3e8eab796faa6ec41d76f5cbed112
|
39800224358654c8225aefa25a0daf26e489c33f
|
/reviews/reviews/urls.py
|
78ca205221d204726b1df8f89f1278efb47a3986
|
[] |
no_license
|
dprestsde/Review_system
|
59e5f73716a6ab02e7cecd140519b4505cf1c278
|
b636a568b51189c9f78f874461e6eaa323317868
|
refs/heads/master
| 2021-09-22T21:49:20.164221
| 2018-09-17T12:22:35
| 2018-09-17T12:22:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
"""reviews URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
    path('', include('guestbook.urls')),
]
|
[
"noreply@github.com"
] |
noreply@github.com
|
26afa536ebd4f0faec9f6c755154abae49230382
|
df0f8bc85e3855c37034ce571f5f0ded8c4ebb90
|
/Day_11/AoC_2016_11_2.py
|
6a10cac4796753a46068f92565ebb3116ac4fa7e
|
[] |
no_license
|
jshales4/aoc2016
|
67592f3e40fc631b1d7ae132c70b144d74095ef8
|
3bd8b42dd4363dfec71973cff9e8b19178abb3a1
|
refs/heads/master
| 2021-01-11T20:18:39.566972
| 2017-02-23T06:42:58
| 2017-02-23T06:42:58
| 77,817,897
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,553
|
py
|
##AoC_2016_11.py
import itertools
import sys
from copy import deepcopy
from datetime import datetime
def main():
print datetime.now().strftime('%Y-%m-%d %H:%M:%S')
move_tracker = {}
move_watch = True
#Example case
#floors = [['SG', 'SM', 'PG', 'PM'], ['TG','RG','RM','CG','CM'],['TM'],[]]
floors = [['EG', 'EM', 'DG', 'DM', 'SG', 'SM', 'PG', 'PM'], ['TG','RG','RM','CG','CM'],['TM'],[]] #This runs in two hours without tree cleaning
#floors = [['HM', 'LM'], ['HG'], ['LG'], []]
#floors = [['HM', 'HG'], [], [], []]
elevator = 0
ini_state = Game_State(floors, elevator, 0)
move_tracker[hash(''.join(ini_state.current_setup[0])+ '_' + ''.join(ini_state.current_setup[1]) + '_' +''.join(ini_state.current_setup[2])+ '_' +''.join(ini_state.current_setup[3]) + ''.join(str(elevator)))] = 1
while (move_watch ==True):
moves1 = len(move_tracker)
move_tracker = climb_tree(ini_state, move_tracker)
clean_tree(ini_state)
if moves1==len(move_tracker):
move_watch = False
#make_moves(ini_state, move_tracker)
print_levels(ini_state, 0)
print datetime.now().strftime('%Y-%m-%d %H:%M:%S')
class Game_State:
def __init__(self, current_setup, elevator_pos, moves_made):
self.current_setup = current_setup
self.elevator_pos = elevator_pos
self.moves_made = moves_made
self.move_options = []
self.moves_remain = True
self.solution_flag = False
def add_move (self, new_game_state):
self.move_options.append(new_game_state)
def climb_tree(game_state, move_tracker):
if game_state.solution_flag == True:
return move_tracker
elif len(game_state.move_options)>0 and game_state.moves_remain == True:
for n in game_state.move_options:
move_tracker = climb_tree(n, move_tracker)
return move_tracker
elif game_state.moves_remain == True:
move_tracker = make_moves_eff(game_state, move_tracker)
return move_tracker
else:
game_state.moves_remain = False
return move_tracker
def clean_tree(game_state):
    # iterate over a copy so that removing items doesn't skip siblings
    for n in list(game_state.move_options):
        if n.moves_remain == False:
            game_state.move_options.remove(n)
for p in game_state.move_options:
clean_tree(p)
def iterate_levels(game_state, move_tracker):
results = []
no_changes = True
    # move_options (not the integer moves_made) holds the child states
    if len(game_state.move_options)>0 and game_state.moves_remain == True:
        for n in game_state.move_options:
            results.append(iterate_levels(n, move_tracker)[0])
        return all(results), move_tracker
elif game_state.moves_remain == False:
return True, move_tracker
else:
make_moves_eff(game_state, move_tracker)
return False, move_tracker
def print_levels(game_state, levels_traveled):
if validate_solutions(game_state.current_setup) == True:
print 'Solved', levels_traveled
else:
for n in game_state.move_options:
print_levels(n, levels_traveled + 1)
# def find_depths(game_state, move_tracker):
# if game_state.moves_remain = False:
# next
# elif
def make_moves_eff(game_state, move_tracker):
move_set = decide_movers(game_state.current_setup, game_state.elevator_pos)
move_track = move_tracker
for n in range(len(move_set)):
for p in [-1, 1]:
new_move = attempt_move(deepcopy(game_state.current_setup), move_set[n], deepcopy(game_state.elevator_pos), int(game_state.elevator_pos) + p, deepcopy(game_state.moves_made), move_tracker)
move_track = new_move[1]
if new_move[0] != False:
discovered_move = Game_State(new_move[0].current_setup, new_move[0].elevator_pos, new_move[0].moves_made)
if validate_solutions(new_move[0].current_setup) == True:
discovered_move.solution_flag=True
#print 'Move added to log', discovered_move
game_state.add_move(discovered_move)
if len(game_state.move_options)==0:
game_state.moves_remain = False
return move_tracker
else:
return move_tracker
def make_moves(game_state, move_tracker):
move_set = decide_movers(game_state.current_setup, game_state.elevator_pos)
move_track = move_tracker
for n in range(len(move_set)):
for p in [-1, 1]:
#print 'Current Gamestate: ', game_state
new_move = attempt_move(deepcopy(game_state.current_setup), move_set[n], deepcopy(game_state.elevator_pos), int(game_state.elevator_pos) + p, deepcopy(game_state.moves_made), move_tracker)
move_track = new_move[1]
if new_move[0] != False:
discovered_move = Game_State(new_move[0].current_setup, new_move[0].elevator_pos, new_move[0].moves_made)
#print 'Move added to log', discovered_move
game_state.add_move(discovered_move)
if validate_solutions(new_move[0].current_setup) == True:
print new_move[0].moves_made
if len(game_state.move_options)>0:
print 'New Node.'
for r in range(len(game_state.move_options)):
print 'Options to move from here are', game_state.move_options
make_moves(game_state.move_options[r], move_tracker)
else: print game_state.move_options
def attempt_move(gamestate_setup, moving_pieces, elevator_start, elevator_new, moves_made, move_tracker):
if elevator_new > 3 or elevator_new < 0:
return False, move_tracker
elif validate_move(deepcopy(gamestate_setup[elevator_new]), deepcopy(gamestate_setup[elevator_start]), moving_pieces, elevator_new) == True:
#print 'Setup before move being attempted:', gamestate_setup
#print 'Here is what will be moved:', moving_pieces
#print 'The elevator will be moved to floor ', elevator_new, 'from floor ', elevator_start
#move_tracker.append(hash(frozenset())
new_node = Game_State(gamestate_setup, elevator_new, moves_made + 1)
if len(''.join(moving_pieces)) > 2:
new_node.current_setup[elevator_new].extend(moving_pieces)
else: new_node.current_setup[elevator_new].append(moving_pieces)
new_node.current_setup[elevator_new].sort()
new_node.current_setup[elevator_start] = [x for x in new_node.current_setup[elevator_start] if x not in moving_pieces]
new_node.current_setup[elevator_start].sort()
#setup_new[elevator_new].append(elevator_new)
if validate_solutions(new_node.current_setup) == True:
#print 'Puzzle Solved! ', new_node.moves_made
return new_node, move_tracker
elif hash(''.join(new_node.current_setup[0])+ '_' + ''.join(new_node.current_setup[1]) + '_' +''.join(new_node.current_setup[2])+ '_' +''.join(new_node.current_setup[3]) + ''.join(str(elevator_new))) in move_tracker and move_tracker[hash(''.join(new_node.current_setup[0])+ '_' + ''.join(new_node.current_setup[1]) + '_' +''.join(new_node.current_setup[2])+ '_' +''.join(new_node.current_setup[3]) + ''.join(str(elevator_new)))]<=moves_made+1:
#print "We've already tried this move."
return False, move_tracker
else:
move_tracker[hash(''.join(new_node.current_setup[0])+ '_' + ''.join(new_node.current_setup[1]) + '_' +''.join(new_node.current_setup[2])+ '_' +''.join(new_node.current_setup[3]) + ''.join(str(elevator_new)))] = moves_made + 1
return new_node, move_tracker
else:
#print 'Move Invalid'
return False, move_tracker
def valid_floor(proposed_floor):
microchip_only = True
for n in range(len(proposed_floor)):
if proposed_floor[n][1] == 'G':
microchip_only = False
for n in range(len(proposed_floor)):
if proposed_floor[n][1] == 'M':
if proposed_floor[n][0] + 'G' not in proposed_floor and microchip_only == False:
return False
return True
def validate_move(proposed_floor, old_floor, elevator_passengers, elevator_pos):
old_floor_moved = [x for x in old_floor if x not in elevator_passengers]
if len(''.join(elevator_passengers)) > 2:
if elevator_passengers[0][1] == 'G' and elevator_passengers[1][1] == 'M' and elevator_passengers[0][0] != elevator_passengers[1][0]:
return False
elif elevator_passengers[1][1] == 'G' and elevator_passengers[0][1] == 'M' and elevator_passengers[0][0] != elevator_passengers[1][0]:
return False
else:
proposed_floor.extend(elevator_passengers)
return valid_floor(proposed_floor) * valid_floor(old_floor_moved)
else:
proposed_floor.append(elevator_passengers)
return valid_floor(proposed_floor) * valid_floor(old_floor_moved)
def decide_movers(setup, elevator_pos):
possible_movers = []
possible_movers = list(itertools.combinations(setup[elevator_pos], 2)) + setup[elevator_pos]
return possible_movers
def validate_solutions(setup):
if len(setup[0]) + len(setup[1]) + len(setup[2]) == 0:
return True
else: return False
if __name__=='__main__':
main()
##Just general pseudo-code thoughts: We basically want to take each current setup and determine all possible moves from that setup. We then want to check that move against
##a hash table to make sure we haven't tried making it before, then we can make a new branch of the tree containing all possible moves from that point. Then we can return the amount of moves it took to get there to find the min.
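##A hedged sketch of that state-key idea (names here are illustrative, not
##taken from the solver above): pruning only works if equivalent setups hash
##identically, so each floor is serialized in a fixed order together with the
##elevator position before hashing.
def canonical_key(setup, elevator_pos):
    floors = '_'.join(''.join(sorted(floor)) for floor in setup)
    return hash(floors + '|' + str(elevator_pos))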
|
[
"jshales46@gmail.com"
] |
jshales46@gmail.com
|
a7571ea7181658d263514690d7191439a399b264
|
8c1b60dbbdbc84ae8cbd34f7679540036b04df84
|
/m5.py
|
97a59fdf765e7d91673b005f279dc849831f19c2
|
[] |
no_license
|
KatyaPinich/ECG_classification_project
|
fd654fceaf0df99338a5d083545f0898030be998
|
37c1a21b9fc425be0f86b81272fdecebe96ce327
|
refs/heads/master
| 2023-07-25T09:37:50.177846
| 2020-03-05T12:11:45
| 2020-03-05T12:11:45
| 242,001,325
| 0
| 0
| null | 2023-07-06T21:55:49
| 2020-02-20T22:13:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
import torch.nn as nn
class M5(nn.Module):
def __init__(self, num_classes):
super(M5, self).__init__()
self.conv_block1 = nn.Sequential(
nn.Conv1d(in_channels=1, out_channels=128, kernel_size=80, stride=4),
nn.BatchNorm1d(num_features=128),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4)
)
self.conv_block2 = nn.Sequential(
nn.Conv1d(in_channels=128, out_channels=128, kernel_size=3, stride=1),
nn.BatchNorm1d(num_features=128),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4)
)
self.conv_block3 = nn.Sequential(
nn.Conv1d(in_channels=128, out_channels=256, kernel_size=3, stride=1),
nn.BatchNorm1d(num_features=256),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4)
)
self.conv_block4 = nn.Sequential(
nn.Conv1d(in_channels=256, out_channels=512, kernel_size=3, stride=1),
nn.BatchNorm1d(num_features=512),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4)
)
self.avg_pool = nn.AvgPool1d(8)
self.softmax_layer = nn.Linear(512, num_classes)
def forward(self, x):
x = self.conv_block1(x)
x = self.conv_block2(x)
x = self.conv_block3(x)
x = self.conv_block4(x)
# Global avg pooling
        x = self.avg_pool(x) # [batch_size, 512, 1]
        # Dense
        x = x.view(x.size(0), -1) # [batch_size, 512*1=512]
        x = self.softmax_layer(x) # [batch_size, num_classes]
return x
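# A quick shape-check sketch (assumed, not part of the original training code):
# a dummy batch of single-channel signals of length 9000 is pushed through the
# network; that length is an assumption chosen so the final AvgPool1d(8) sees a
# feature map of length 8 and the classifier receives a 512-d vector.
if __name__ == "__main__":
    import torch
    model = M5(num_classes=4)
    dummy = torch.randn(8, 1, 9000)  # [batch, channels, samples]
    logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([8, 4])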
|
[
"katyapinich@gmail.com"
] |
katyapinich@gmail.com
|
4b5730763abcb86812d2a804110e3fc6c15f7c6c
|
27de78beab7b46b08be620e06f8805d14de155d1
|
/Q3_BP.py
|
4cc4d42282a78fcef65f4b5fefb25989a5c01e7a
|
[] |
no_license
|
kzil88/Quant
|
c3f517cf507cbb97774738c152087a660dc59e31
|
711800349a065bd9534f323337147b494c91c156
|
refs/heads/master
| 2021-04-15T14:41:17.189825
| 2018-12-04T08:40:42
| 2018-12-04T08:40:42
| 126,697,511
| 4
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
import DC
from keras.layers import Flatten
import numpy as np
import keras
import datetime
import pymysql
if __name__ == '__main__':
time_temp = datetime.datetime.now() - datetime.timedelta(days=90)
date_seq_start = time_temp.strftime('%Y-%m-%d')
end_dt = (datetime.datetime.now() -datetime.timedelta(days=1)) .strftime('%Y-%m-%d')
    # Set up the data connection and the backtest date range
dc = DC.data_collect2('000725',date_seq_start,end_dt)
score_list = []
resu_list = []
train = dc.data_train
target = dc.data_target
model = Sequential()
model.add(Dense(64, activation='linear', input_dim=14))
model.add(Dropout(0.5))
model.add(Dense(64, activation='sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='relu'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='logcosh', optimizer=sgd, metrics=['accuracy'])
for i in range(5):
model.fit(train, target, epochs=2000)
score = model.evaluate(train, target, batch_size=128)
print('SCORE:' + str(score[0]))
test_case = np.array([dc.test_case])
ans2 = model.predict(test_case)
resu_list.append(ans2[0][0])
score_list.append(score)
print('RESU '+str(i+1)+' : '+str(ans2[0][0]))
dc.refreshDATA(ans2[0][0])
train = dc.data_train
target = dc.data_target
print(score_list)
print(resu_list)
print(date_seq_start)
print(end_dt)
|
[
"noreply@github.com"
] |
noreply@github.com
|
555920b473ecc5e50b86552eb52b4dc9e1a29a9c
|
522303c2fc1840bd3288b1be2ed1787b77ceff7d
|
/279.py
|
b09688cc59bec97c14657835b17bc17c9e976e62
|
[] |
no_license
|
RickyLiTHU/codePractice
|
0b0fc66fc32a651c5288645c98d0a58acdd6f7a1
|
74988e6d02968acb5fe8da811df6c1e706f2b125
|
refs/heads/master
| 2020-03-23T04:36:42.256827
| 2018-08-30T08:15:39
| 2018-08-30T08:15:39
| 141,093,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
import math  # required by math.ceil / math.sqrt below
class Solution(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
edges = []
for i in range(1, int(math.ceil(math.sqrt(n)))+1):
edges.append(i*i)
depth = 1
nodes = set([n])
while nodes:
nextLevel = set()
for node in nodes:
for e in edges:
if node - e == 0:
return depth
elif node - e > 0:
nextLevel.add(node-e)
else:
break
depth += 1
nodes = nextLevel
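# A small usage sketch (assumed; LeetCode normally calls numSquares itself):
# 12 = 4 + 4 + 4, so the BFS should report a depth of 3.
if __name__ == "__main__":
    print(Solution().numSquares(12))  # expected: 3
    print(Solution().numSquares(13))  # expected: 2 (13 = 4 + 9)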
|
[
"noreply@github.com"
] |
noreply@github.com
|
a0cb0e3618382fd03b6ca832ea20a7034a40057c
|
cc7dcbc2d2b85c4769ab4bfb5f92bbe6f158b1bc
|
/Competitions/Comp4/start_sound.py
|
87e6432903e6abe11567cff4177b2484faedd6c1
|
[] |
no_license
|
MandyMeindersma/Robotics
|
c091e5b248bb067db4631e2de481d18417996933
|
f58916bb293d68c176847363a25eb7270a304965
|
refs/heads/master
| 2023-01-08T07:34:51.223767
| 2023-01-01T05:40:12
| 2023-01-01T05:40:12
| 118,049,272
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
#!/usr/bin/env python
from sound_play.libsoundplay import SoundClient
# from sound_play.msg import SoundRequest
import rospy
import time
rospy.init_node('sound')
soundthing = SoundClient()
time.sleep(1)
# soundthing.play(SoundRequest.NEEDS_UNPLUGGING)
# soundthing.voiceSound("Testing the new A P I")
soundthing.playWave("/home/mandy/winter18/Robotics/Competitions/Comp4/meow.ogg")
print("meow sound started")
time.sleep(3)
soundthing.playWave("/home/mandy/winter18/Robotics/Competitions/Comp4/moo.ogg")
print("woof sound started")
|
[
"meinders@ualberta.ca"
] |
meinders@ualberta.ca
|
c2d9305312002748edb2d0e5470f541784c71352
|
3fc00c49c6b5a5d3edb4f5a97a86ecc8f59a3035
|
/shared_models/test/test_api.py
|
ae9465bb6b3b41416d097c202b1034470650a378
|
[] |
no_license
|
yc-hu/dm_apps
|
9e640ef08da8ecefcd7008ee2d4f8f268ec9062e
|
483f855b19876fd60c0017a270df74e076aa0d8b
|
refs/heads/master
| 2023-04-07T13:13:55.999058
| 2021-04-12T10:19:21
| 2021-04-12T10:19:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,264
|
py
|
from django.test import tag
from django.urls import reverse
from rest_framework import status
from shared_models.test import SharedModelsFactoryFloor as FactoryFloor
from shared_models.test.common_tests import CommonTest
class TestUserAPIListView(CommonTest):
def setUp(self):
super().setUp()
self.user = self.get_and_login_user()
self.test_url = reverse("user-list", args=None)
@tag("api", 'user')
def test_url(self):
self.assert_correct_url("user-list", test_url_args=None, expected_url_path=f"/api/shared/users/")
@tag("api", 'user')
def test_get(self):
# PERMISSIONS
# authenticated users
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# unauthenticated users
self.client.logout()
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# TODO: build up this test!
# # RESPONSE DATA
# valid_user = None
# self.get_and_login_user(user=None)
# response = self.client.get(self.test_url)
# self.assertEqual(len(response.data), 1)
# self.assertEqual(response.data[0]["id"], self.instance.id)
# # or, for lists with pagination...
# self.assertEqual(len(data["results"]), 1)
# self.assertEqual(data["results"][0]["id"], self.instance.id)
#
# # check query params
# object = FactoryFloor.UserFactory()
# data = self.client.get(self.test_url+f"?={object.id}").data
# keys.extend([
# "",
# ])
# self.assert_dict_has_keys(data, keys)
@tag("api", 'user')
def test_unallowed_methods_only(self):
restricted_statuses = [status.HTTP_405_METHOD_NOT_ALLOWED, status.HTTP_403_FORBIDDEN]
self.assertIn(self.client.put(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.delete(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.post(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.patch(self.test_url, data=None).status_code, restricted_statuses)
|
[
"davjfish@gmail.com"
] |
davjfish@gmail.com
|
32965056a1b7a8f68e29a888ddf16692219f8202
|
6f2675eee55b7ebc5adf9c2176ced8cb59fc64d4
|
/dataInterKingdee/interDebug.py
|
f5873ce9a0c97db0f8dd05bed388d20b019fdced
|
[] |
no_license
|
wildmanwang/proDataInter
|
8c2b65fa96ad45b21165d997b1769a28e12fc42a
|
f5a1f1fb195c66bf586bd999465c7e3b16453369
|
refs/heads/master
| 2023-06-07T11:57:16.763251
| 2023-06-03T08:54:56
| 2023-06-03T08:54:56
| 157,559,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
# -*- coding:utf-8 -*-
"""
"""
__author__ = "Cliff.wang"
import os
from interConfig import Settings
#from interProcess import InterProcess
from interControl import InterControl
if __name__ == "__main__":
try:
path = os.path.abspath(os.path.dirname(__file__))
sett = Settings(path, "config")
inter = InterControl(sett)
inter.interInit()
if 1 == 2:
            # Transfer base/master data and business data
inter.interBusiData()
elif 1 == 2:
            # Fetch department IDs and user IDs
pass
except Exception as e:
print(str(e))
|
[
"cliff.w@qq.com"
] |
cliff.w@qq.com
|
a7dcd151d0dd3ea4bc81bb4a0fca9c6818c60ec5
|
f03a0d77c4f5524e8958263962ddb04a120ed6d6
|
/Lab8/wordladder5.py
|
1a1c82c4ebcd985f50f3bbdd129ff28bd4f5c4bc
|
[] |
no_license
|
b3rton/OpenSourceBlog
|
0a54566a6d542a41e2e8018287faef705a66fc35
|
4185c7b46629ac054903229d9a5a027110d5d662
|
refs/heads/master
| 2021-05-30T10:09:43.183994
| 2015-11-13T20:00:52
| 2015-11-13T20:00:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,978
|
py
|
"""
Words/Ladder Graph
------------------
Generate an undirected graph over the 5757 5-letter words in the
datafile words_dat.txt.gz. Two words are connected by an edge
if they differ in one letter, resulting in 14,135 edges. This example
is described in Section 1.1 in Knuth's book [1]_,[2]_.
References
----------
.. [1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Brendt Wohlberg',
'hughdbrown@yahoo.com'])
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
#-------------------------------------------------------------------
# The Words/Ladder graph of Section 1.1
#-------------------------------------------------------------------
def generate_graph(words):
from string import ascii_lowercase as lowercase
G = nx.Graph(name="words")
lookup = dict((c,lowercase.index(c)) for c in lowercase)
def edit_distance_one(word):
for i in range(len(word)):
left, c, right = word[0:i], word[i], word[i+1:]
j = lookup[c] # lowercase.index(c)
for cc in lowercase[j+1:]:
yield left + cc + right
candgen = ((word, cand) for word in sorted(words)
for cand in edit_distance_one(word) if cand in words)
G.add_nodes_from(words)
for word, cand in candgen:
G.add_edge(word, cand)
return G
def words_graph():
"""Return the words example graph from the Stanford GraphBase"""
import gzip
fh=gzip.open('words_dat.txt.gz','r') #5 words
#fh=gzip.open('words4_dat.txt.gz','r') #4 words
words=set()
for line in fh.readlines():
line = line.decode()
if line.startswith('*'):
continue
w=str(line[0:5])
#w=str(line[0:4])
words.add(w)
return generate_graph(words)
if __name__ == '__main__':
from networkx import *
G=words_graph()
print("Loaded words_dat.txt containing 5757 five-letter English words.")
print("Two words are connected if they differ in one letter.")
print("Graph has %d nodes with %d edges"
%(number_of_nodes(G),number_of_edges(G)))
print("%d connected components" % number_connected_components(G))
fiveWordsT = [('chaos','order'),('nodes','graph'),('moron','smart'),('pound','marks')]
fourWordsT = [('cold','warm'),('love','hate')]
test = fiveWordsT
for (source,target) in test:
print("Shortest path between %s and %s is"%(source,target))
try:
sp=shortest_path(G, source, target)
for n in sp:
print(n)
except nx.NetworkXNoPath:
print("None")
|
[
"nathan.spero.berton@gmail.com"
] |
nathan.spero.berton@gmail.com
|
a44db705bdc58cdcecdcd4b8200bf85a3d08fc83
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/samples/cli/accelbyte_py_sdk_cli/group/_get_group_join_request_public_v2.py
|
32ba9735f4911a02f803f73dab69c4e7a260ec52
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Group Service (2.18.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.group import (
get_group_join_request_public_v2 as get_group_join_request_public_v2_internal,
)
from accelbyte_py_sdk.api.group.models import ModelsGetMemberRequestsListResponseV1
from accelbyte_py_sdk.api.group.models import ResponseErrorResponse
@click.command()
@click.argument("group_id", type=str)
@click.option("--limit", "limit", type=int)
@click.option("--offset", "offset", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def get_group_join_request_public_v2(
group_id: str,
limit: Optional[int] = None,
offset: Optional[int] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(get_group_join_request_public_v2_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
result, error = get_group_join_request_public_v2_internal(
group_id=group_id,
limit=limit,
offset=offset,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"getGroupJoinRequestPublicV2 failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
get_group_join_request_public_v2.operation_id = "getGroupJoinRequestPublicV2"
get_group_join_request_public_v2.is_deprecated = False
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
8a35692c001a9c87e06840d701a8da708dedcbb2
|
8186a0b52da5692178c72e865ab05a08d133a412
|
/MachineLearning.py
|
29e2c182c8e664d6888629192f033295d5bcbf63
|
[] |
no_license
|
DanWertheimer/COS802
|
c4e7d8d3a06f04efef998daaa0a57bdbc6232ed0
|
656e3ca62e44f8fda1967af0ba4b5e38120f2e8b
|
refs/heads/master
| 2021-07-21T15:38:31.895643
| 2017-10-30T08:14:31
| 2017-10-30T08:14:31
| 108,819,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,813
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 29 17:23:39 2017
@author: danwertheimer
"""
1209/10000
import pandas as pd
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn import svm
from sklearn.svm import LinearSVC
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import Normalizer
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.pipeline import Pipeline
Data = pd.read_csv("CleanData2.csv",index_col = 0)
Fields =['Insured_First_Name','Insured_Last_Name','Client_ID','Other_Party_Name',\
'Other_Party_Last_Name','Fraudulent_Claim_Reason',\
'Policy_Holder_Street',\
'Policy_Holder_State',\
'Policy_Holder_City',\
'Policy_Holder_Area',\
'Policy_Holder_Postal_Code',\
'Loss_Street',\
'Loss_State',\
'Loss_City',\
'Loss_Area',\
'Loss_Postal_Code']
Data = Data.drop(Fields,axis = 1)
ScaledVariables = ['Amount_Paid','Sum_Insured','Total_Policies_Revenue']
mms = preprocessing.MinMaxScaler()
Normalize = preprocessing.Normalizer()
Data[ScaledVariables] = Normalize.fit_transform(Data[ScaledVariables])
Test1 = Data[Data['Fraudulent_Claim_Indicator'] == 0].sample(n = 10000 )
Test2 = Data[Data['Fraudulent_Claim_Indicator'] == 1]
New = pd.concat([Test1,Test2], axis = 0)
DataX = New[New.columns.difference(['Fraudulent_Claim_Indicator','Date_Of_Birth',\
'Date_Of_Loss','Policy_Start_Date',\
'Policy_End_Date'])]
DataY = New['Fraudulent_Claim_Indicator']
X_train, X_test, y_train, y_test = train_test_split(\
DataX, DataY, test_size=0.8, random_state=48)
glm = LogisticRegression()
glm.fit(X_train,y_train)
glm.score(X_test,y_test)
glmcv = cross_val_score(glm, DataX, DataY, cv=10,scoring = 'roc_auc')
clf = svm.SVC(kernel='linear', C=2).fit(X_train, y_train)
clf.score(X_test, y_test)
clfcv = cross_val_score(clf, DataX, DataY, cv=10,scoring = 'roc_auc')
NNet = MLPClassifier(solver='lbfgs', alpha=1e-5,\
hidden_layer_sizes=(3, 2), random_state=47)
NNet.fit(X_train,y_train);
NNet.score(X_test, y_test)
NNetcv = cross_val_score(NNet, DataX, DataY, cv=10,scoring = 'roc_auc')
###############################################################################
FeatureData = Data
DateFeatures = ['Date_Of_Birth','Date_Of_Loss','Policy_Start_Date',\
'Policy_End_Date']
FeatureData[DateFeatures] = FeatureData[DateFeatures].astype(str)
for i in DateFeatures:
FeatureData[i] = pd.to_datetime(FeatureData[i])
# Creating feature for days between policy start and loss
FeatureData['Days_Between_Policy_Loss'] = FeatureData['Date_Of_Loss'] - FeatureData['Policy_Start_Date']
FeatureData['Days_Between_Policy_Loss'] = FeatureData['Days_Between_Policy_Loss'].apply(lambda x:x.days)
# Creating feature for days between policy loss and policy end
FeatureData['Days_Before_Policy_End_Loss'] = FeatureData['Policy_End_Date'] - FeatureData['Date_Of_Loss']
FeatureData['Days_Before_Policy_End_Loss'] = FeatureData['Days_Before_Policy_End_Loss'].apply(lambda x:x.days)
FeatureData['Number_Of_Claims'] = FeatureData.groupby(['Date_Of_Birth','Policy_Start_Date',\
'Policy_End_Date']).cumcount()+1
# Rescaling New Features
NewFeatures = ['Days_Between_Policy_Loss','Days_Before_Policy_End_Loss','Number_Of_Claims']
FeatureData[NewFeatures] = Normalize.fit_transform(FeatureData[NewFeatures])
###############################################################################
# Retraining Models
Test1 = FeatureData[FeatureData['Fraudulent_Claim_Indicator'] == 0].sample(n = 10000 )
Test2 = FeatureData[FeatureData['Fraudulent_Claim_Indicator'] == 1]
NewFeatureData = pd.concat([Test1,Test2], axis = 0)
DataX = NewFeatureData[NewFeatureData.columns.difference(['Fraudulent_Claim_Indicator','Date_Of_Birth',\
'Date_Of_Loss','Policy_Start_Date',\
'Policy_End_Date'])]
DataY = NewFeatureData['Fraudulent_Claim_Indicator']
# Checking Variable Importance
Tree = ExtraTreesClassifier()
TreeC = Tree.fit(DataX,DataY)
TreeC.feature_importances_
model = SelectFromModel(TreeC, prefit=True)
X_new = model.transform(DataX)
X_train_newfeature, X_test_newfeature, y_train_newfeature, y_test_newfeature = train_test_split(\
X_new, DataY, test_size=0.8, random_state=48)
glm_newfeature = LogisticRegression()
glm_newfeature.fit(X_train_newfeature,y_train_newfeature)
glm_newfeature.score(X_test_newfeature,y_test_newfeature)
glmcv2 = cross_val_score(glm_newfeature, X_new, DataY, cv=10, scoring = 'roc_auc')
clf_newfeature = svm.SVC(kernel='linear', C=1).fit(X_train_newfeature, y_train_newfeature)
clf_newfeature.score(X_test_newfeature, y_test_newfeature)
clfcv2 = cross_val_score(clf_newfeature, X_new, DataY, cv=10, scoring = 'roc_auc')
NNet_newfeature = MLPClassifier(solver='lbfgs', alpha=1e-5,\
hidden_layer_sizes=(3, 2), random_state=47)
NNet_newfeature.fit(X_train_newfeature,y_train_newfeature);
NNet_newfeature.score(X_test_newfeature, y_test_newfeature)
NNetcv2 = cross_val_score(NNet_newfeature, X_new, DataY, cv=10, scoring = 'roc_auc')
Q = Pipeline([
('feature_selection', SelectFromModel(LinearSVC())),
('classification', RandomForestClassifier())
])
Q.fit(X_train_newfeature,y_train_newfeature)
Q.score(X_test_newfeature, y_test_newfeature)
|
[
"noreply@github.com"
] |
noreply@github.com
|
270875ed2be025781a975375972379cf8f211f80
|
dfad28a2e1a0199c0117e551fd1e31804804d5b9
|
/app/auth/views.py
|
d2df7a97666207276aa6648ef9f85af4a25d98bc
|
[
"MIT"
] |
permissive
|
wilbrone/Pitches
|
c33d60b142b43de9ccf60a86cf59acbc262c6711
|
b20d234fd930a6551f26d9cf863c6d1631b62bc2
|
refs/heads/master
| 2022-12-09T08:02:08.631177
| 2019-11-25T23:47:13
| 2019-11-25T23:47:13
| 223,405,696
| 0
| 0
|
MIT
| 2022-12-08T06:55:48
| 2019-11-22T13:09:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
from flask import render_template,redirect,url_for, flash,request
from flask_login import login_user,logout_user,login_required
from . import auth
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "One Minute Perfect Pitch login"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,full_name= form.full_name.data,password = form.password.data)
# saving the data
db.session.add(user)
db.session.commit()
mail_message("Welcome to One Minute Perfect Pitch","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
return render_template('auth/register.html',registration_form = form,title = title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
|
[
"wilbroneokoth@gmail.com"
] |
wilbroneokoth@gmail.com
|
b9e795b45a5b99bd04447a64e926dfb936b8a89e
|
4308886d6562c87b9fff3f5bc3696dd4968209b5
|
/Whats Your Name.py
|
79103203d37c6605d8e1e9fcdd6b7b7e5b911152
|
[] |
no_license
|
rivalTj7/Primera_Tarea_Python
|
e3f10d8f372e55078b30a835851e3f12a5607db1
|
a74ce4af39f0de46e831adc568a2c0bbf61909fb
|
refs/heads/master
| 2023-03-01T17:50:22.619024
| 2021-02-07T07:29:01
| 2021-02-07T07:29:01
| 336,726,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
#11- What's Your Name?
def print_full_name(a, b):
print("Hello "+a+" " +b+"! You just delved into python.")
first_name = 'Ross'
last_name = 'Taylor'
print_full_name(first_name, last_name)
|
[
"rival.alex7@gmail.com"
] |
rival.alex7@gmail.com
|
9f04557904bdeeb5a5b0b9e265605429682ff434
|
a867b1c9da10a93136550c767c45e0d8c98f5675
|
/G_11_RemoveKthNode.py
|
408aa2a8a0bdec884c65ff5c410cb79045ed72b6
|
[] |
no_license
|
Omkar02/FAANG
|
f747aacc938bf747129b8ff35b6648fb265d95b6
|
ee9b245aa83ea58aa67954ab96442561dbe68d06
|
refs/heads/master
| 2023-03-25T19:45:08.153403
| 2021-03-28T07:13:08
| 2021-03-28T07:13:08
| 280,783,785
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
import __main__ as main
from Helper.TimerLogger import CodeTimeLogging
fileName = main.__file__
fileName = fileName.split('\\')[-1]
CodeTimeLogging(Flag='F', filename=fileName, Tag='Linked-List', Difficult='Medium')
from Datastruct.masterLinkedList import l
arr = [1, 2, 3, 4, 5, 6]
# arr = [1, 2]
for i in arr:
l.insertStart(i)
# l.traverseList()
def removeKNodeFromEnd(head, k):
print(f'Removed {k} node: ',end = '')
first = head
second = head
count = 1
while count <= k and second is not None:
second = second.nextNode
count += 1
if second is None:
head.data = first.nextNode.data
head.nextNode = first.nextNode.nextNode
l.traverseList()
return
while second.nextNode is not None:
second = second.nextNode
first = first.nextNode
first.nextNode = first.nextNode.nextNode
l.traverseList()
removeKNodeFromEnd(l.getHead(), 3)
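# Worked example (illustration, not part of the original file): assuming insertStart
# pushes each value to the head, the loop above builds 6 -> 5 -> 4 -> 3 -> 2 -> 1,
# so removing the 3rd node from the end deletes the node holding 3 and leaves
# 6 -> 5 -> 4 -> 2 -> 1.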
|
[
"omkarjoshi4031@live.com"
] |
omkarjoshi4031@live.com
|
62ec86a4fa3abd1261e1c0a8452250ff222b6759
|
dedbf1f67bc741203f685745ecfde3d00f3f3d87
|
/src/simpleseq/encodings.py
|
b5629cc5604c31b327f2eb6875bc5d37b3b73f34
|
[] |
no_license
|
ambrosejcarr/simpleseq
|
1bee31b806dc19b7801ed52d73c47a5482db7d96
|
a9760db8470ccd578e6b82837bed12187389dbb8
|
refs/heads/master
| 2016-08-12T05:58:12.885835
| 2016-02-17T16:33:14
| 2016-02-17T16:33:14
| 50,446,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
class DNA3Bit:
"""
Compact encoding scheme for sequence data.
"""
_str2bindict = {65: 0b100, 67: 0b110, 71: 0b101, 84: 0b011, 78: 0b111,
97: 0b100, 99: 0b110, 103: 0b101, 116: 0b011, 110: 0b111}
_bin2strdict = {0b100: b'A', 0b110: b'C', 0b101: b'G', 0b011: b'T', 0b111: b'N'}
bin_nums = [0b100, 0b110, 0b101, 0b011]
@classmethod
def encode(cls, s: bytes) -> int:
"""Convert string nucleotide sequence into binary, note: string is reversed so
that the first nucleotide is in the LSB position"""
res = 0
for c in s:
res <<= 3
res += cls._str2bindict[c]
return res
@classmethod
def decode(cls, i: int) -> bytes:
"""Convert binary nucleotide sequence into string"""
if i < 0:
message = 'i must be an unsigned (positive) integer, not {0!s}'.format(i)
raise ValueError(message)
r = b''
while i > 0:
r = cls._bin2strdict[i & 0b111] + r
i >>= 3
return r
@staticmethod
def gc_content(i: int) -> float:
"""calculate percentage of i that is G or C"""
gc = 0
length = 0
while i > 0:
length += 1
masked = i & 0b111  # lowest 3 bits encode one nucleotide
if masked == 0b101 or masked == 0b110:  # G or C
gc += 1
i >>= 3
return gc / length
@staticmethod
def seq_len(i: int) -> int:
"""Return the length of a sequence based on its binary representation"""
l = 0
while i > 0:
l += 1
i >>= 3
return l
@staticmethod
def contains(s: int, char: int) -> bool:
"""
return true if the char (bin representation) is contained in seq (binary
representation)
"""
while s > 0:
if char == (s & 0b111):
return True
s >>= 3
return False
@staticmethod
def bitlength(i: int) -> int:
"""return the bitlength of the sequence"""
bitlen = i.bit_length()
# correct for leading T-nucleotide (011) whose leading 0 gets trimmed
if bitlen % 3:
bitlen += 1
return bitlen
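# Minimal usage sketch (not part of the original module): round-trip a short read
# and inspect it; gc_content relies on the G/C check above.
# code = DNA3Bit.encode(b'ACGT')
# DNA3Bit.decode(code)      -> b'ACGT'
# DNA3Bit.seq_len(code)     -> 4
# DNA3Bit.gc_content(code)  -> 0.5 (one G and one C out of four bases)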
|
[
"mail@ambrosejcarr.com"
] |
mail@ambrosejcarr.com
|
ab7c71a677644efe5b14cfcd69d86aae4be88786
|
20766840efca8977b1246c2c8ad05a15388e826c
|
/모듈/insa2.py
|
0b6ccc8964acb9f3544b984470b2522ce8237833
|
[] |
no_license
|
Chaenini/Programing-Python-
|
0780c7880b2d15b7a210f11975a7c851b56a1d3f
|
a4aa9f7b021bae02677815f1a8b74d2420637958
|
refs/heads/master
| 2020-07-10T20:41:25.957058
| 2019-12-09T00:55:30
| 2019-12-09T00:55:30
| 204,366,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6
|
py
|
#139p
|
[
"s2018w16@e-mirim.hs.kr"
] |
s2018w16@e-mirim.hs.kr
|
2866adf3865f8ad42fe7d0810cf0266c2c3ec479
|
77ef4019ee6ce45abf3b5e21f2b33f3998620cd1
|
/base/message.py
|
9b4845620508555ad671bf3bd1d942c5554df6fe
|
[
"MIT"
] |
permissive
|
kevinrpb/rfid-protocols
|
243ef09a248c8b3229f60d93784e13d372baa3f3
|
01543f995f17d92fab0b159cf1c85f4ff65cd402
|
refs/heads/main
| 2023-02-16T03:21:56.322256
| 2021-01-19T17:41:59
| 2021-01-19T17:41:59
| 318,453,516
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
from enum import Enum
from bitarray import bitarray
class MessageKind(Enum):
READER_TO_TAG = 0
TAG_TO_READER = 1
def __str__(self) -> str:
if self == MessageKind.READER_TO_TAG:
return 'READER_TO_TAG'
elif self == MessageKind.TAG_TO_READER:
return 'TAG_TO_READER'
else:
return ''
class Message(object):
def __init__(self, label: str, kind: MessageKind, content: bitarray):
self.label = label
self.kind = kind
self.content = content
def size(self) -> int:
return self.content.length()
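# Minimal usage sketch (not part of the original module): build a reader-to-tag
# message from a bit string and query its size.
# query = Message('query', MessageKind.READER_TO_TAG, bitarray('10110'))
# query.size()  -> 5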
|
[
"kevinrpb@hotmail.com"
] |
kevinrpb@hotmail.com
|
4bbf47389bde47d911e2861fb4f2fc9e2599284a
|
8ebca2bcb8c73daecc912f00fffb5fea8d918c32
|
/Lib/site-packages/tensorflow/contrib/summary/summary_test_util.py
|
9ad53269d8398a006219e01c4ebdc2491fc707b4
|
[] |
no_license
|
YujunLiao/tensorFlowLearing
|
510ed61689a72dcb53347bd3e4653470893ecc4a
|
1a383b5183a409e017657001eda4dc68e4a6bcf9
|
refs/heads/master
| 2022-12-07T16:01:26.942238
| 2019-06-04T16:34:46
| 2019-06-04T16:34:46
| 177,408,903
| 0
| 0
| null | 2022-11-21T21:21:36
| 2019-03-24T12:01:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,874
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to code summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import sqlite3
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.platform import gfile
class SummaryDbTest(test_util.TensorFlowTestCase):
"""Helper for summary database testing."""
def setUp(self):
super(SummaryDbTest, self).setUp()
self.db_path = os.path.join(self.get_temp_dir(), 'DbTest.sqlite')
if os.path.exists(self.db_path):
os.unlink(self.db_path)
self.db = sqlite3.connect(self.db_path)
self.create_db_writer = functools.partial(
summary_ops.create_db_writer,
db_uri=self.db_path,
experiment_name='experiment',
run_name='run',
user_name='user')
def tearDown(self):
self.db.close()
super(SummaryDbTest, self).tearDown()
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
def get_one(db, q, *p):
return db.execute(q, p).fetchone()[0]
def get_all(db, q, *p):
return unroll(db.execute(q, p).fetchall())
def unroll(list_of_tuples):
return sum(list_of_tuples, ())
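# Worked example (illustration, not part of the original module):
# unroll([(1, 2), (3,)]) returns (1, 2, 3), since sum(..., ()) concatenates the tuples.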
|
[
"18916108830@163.com"
] |
18916108830@163.com
|
a5252f74fdb425b662bfc873101bded2e39d470d
|
52e7007ed2b9a9525cfb0c483065bffd6ecbcded
|
/基本操作.py
|
ec4c8ab1568c364a568e001f46a5f8a5c01a427a
|
[] |
no_license
|
hnzhangbinghui/selenium
|
2801618b60c2b7622fbd80945809ccfe5b50309e
|
15e2dbde337abf856038df72263ae1245293a36b
|
refs/heads/master
| 2022-11-14T08:56:16.500728
| 2020-07-12T10:15:48
| 2020-07-12T10:15:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,701
|
py
|
a="asdfggjytehtrwgrevreqf"
print(len(a))
b='123456'
# Concatenate strings with the '+' operator
print(a+b)
# Python does not allow other types inside a '+' expression with strings
# String conversion
age=33
print(int(age))
print(len(str(age)))
print(float(age))
# Converting between strings and lists
# list() converts a tuple or a string into a list (important)
string='Hello,world!!'
l=list(string)
print(l)
# Convert a tuple into a list
uname=('laozhang','zhangbinghui','binghui')
listu=list(uname)
print("list elements:",listu)
# join() concatenates the elements of a sequence with the given separator string into a new string
s='-'
ss=s.join(listu)
print(ss)
# replace() replaces the old substring with the new one; if the third argument max is given, at most max replacements are made
#str.replace(old,new[,max])
str="this is string example... owe,this is really string!!"
print(str.replace('is','was'));
print(str.replace('is','was',3));
name="zhangbinghui"
print(name[0])
print(name[-1])
# The upper bound is excluded (important)
print(name[1:5])
print(len(name))
print(name[1:10:2])
# s[a:b:-2]: with a negative step the two bounds swap meaning, i.e. from index b+1 down to a with step -2
print(name[10:1:-2])
# Built-in string methods
print(name.capitalize())# capitalize the first character
print(name.title())
print(name.upper())
print(name.lower())
# center() returns a string of the given width with the original centered; fillchar is the padding character and defaults to a space.
# The original string is centered and padded up to the given width.
# If width is smaller than the string length, the string is returned unchanged; otherwise it is padded with fillchar.
print(name.center(30,'*'))
# count() counts how many times a substring occurs in the string; optional arguments give the start and end positions of the search.
#str.count(sub, start= 0,end=len(string))
print(name.count('i',0,12))
print(name.count('i'))
# decode() decodes a bytes object with the given encoding; the default encoding is 'utf-8'.
bianma="张冰辉"
name1=bianma.encode('utf-8')
print(name1)
print(bianma.encode('GBK','strict'))
"""str = "菜鸟教程";
str_utf8 = str.encode("UTF-8")
str_gbk = str.encode("GBK")
print(str)
print("UTF-8 编码:", str_utf8)
print("GBK 编码:", str_gbk)
print("UTF-8 解码:", str_utf8.decode('UTF-8','strict'))
print("GBK 解码:", str_gbk.decode('GBK','strict'))"""
"""endswith() 方法用于判断字符串是否以指定后缀结尾,
如果以指定后缀结尾返回True,否则返回False。可选参数"start"与"end"为检索字符串的开始与结束位置"""
#str.endswith(suffix[, start[, end]])
print(name.endswith('hui'))
print(name.endswith('zhagn'))
print(name.endswith('hui',3,5))
print(name.endswith('hui',0,12))
print("aaaaaaaaaaaaaa")
print(name.startswith('zhang'))
print('\n')
# expandtabs() converts tab characters ('\t') in the string into spaces; the default tab size is 8 spaces.
name1="zhang\tbing\thui"
print(name)
print(name1.expandtabs())
print(name1.expandtabs(12))
"""find() 方法检测字符串中是否包含子字符串 str ,如果指定 beg(开始)
和 end(结束) 范围,则检查是否包含在指定范围内,如果指定范围内如果包含指定索引值,
返回的是索引值在字符串中的起始位置。如果不包含索引值,返回-1。"""
print(name.find('bing'))
print(name.find('bing',0,len(name)))
print(name.find('zhagn'))
"""index() 方法检测字符串中是否包含子字符串 str ,如果指定 beg(开始) 和 end(结束) 范围,则检查是否包含在指定范围内,该方法与 python find()方法一样,只不过如果str不在 string中会报一个异常。"""
"""isalnum() 方法检测字符串是否由字母和数字组成"""
"""如果 string 至少有一个字符并且所有字符都是字母或数字则返回 True,否则返回 False"""
print(name.isalnum())
print(bianma.isalnum())
bm="www.baidu.com"
print(bm.isalnum())
print('\n')
"""
Python isalpha() 方法检测字符串是否只由字母组成。
如果字符串至少有一个字符并且所有字符都是字母则返回 True,否则返回 False
"""
daima="abc123"
print(daima.isalnum())
print(daima.isalpha())
print('\n')
"""
ljust() 方法返回一个原字符串左对齐,
并使用空格填充至指定长度的新字符串。
如果指定的长度小于原字符串的长度则返回原字符串。
返回一个原字符串左对齐,并使用空格填充至指定长度的新字符串。
如果指定的长度小于原字符串的长度则返回原字符串。
"""
print(name.ljust(30,'.'))
print(name.ljust(30,'*'))
print(name.center(30,'*'))
print('\n')
"""
lstrip([chars]) 方法用于截掉字符串左边的空格或指定字符。
chars --指定截取的字符。
"""
str1=" zhangbinghui"
print(len(str1))
print(str1.lstrip())
print(len(str1.lstrip()))
str2='22222222zhangbinghui'
print(str2.lstrip('2'))
"""
partition() 方法用来根据指定的分隔符将字符串进行分割。
如果字符串包含指定的分隔符,则返回一个3元的元组,
第一个为分隔符左边的子串,
第二个为分隔符本身,第三个为分隔符右边的子串
"""
a2='www.baidu.com'
print(a2.partition('.'))
"""
Python split() 通过指定分隔符对字符串进行切片,
如果参数 num 有指定值,则分隔 num+1 个子字符串
str.split(str="", num=string.count(str)).
num -- 分割次数。默认为 -1, 即分隔所有。
"""
print(a2.split('.'))
a3='q.w.e.r.t.y.u.i.4.5.6'
a4=a3.split('.')
print(a4)
print(list(a4))
a5='qwtaqtadtlllt'
print(a5.split('t'))
print('\n')
"""
Python splitlines() 按照行('\r', '\r\n', \n')分隔,
返回一个包含各行作为元素的列表,如果参数 keepends 为 False,
不包含换行符,如果为 True,则保留换行符。
str.splitlines([keepends])
keepends -- 在输出结果里是否去掉换行符('\r', '\r\n', \n'),
默认为 False,不包含换行符,如果为 True,则保留换行符。
"""
atr='ab c\n\nde fg\rkl\r\n'
print(atr.splitlines())
print(atr.splitlines(True))
"""
Python strip() 方法用于移除字符串头尾指定的字符(默认为空格)或字符序列。
注意:该方法只能删除开头或是结尾的字符,不能删除中间部分的字符。
str.strip([chars]);
"""
str3='*****zhangbing*hui******'
print(str3.strip('*'))
# swapcase() swaps the case of the letters in the string.
str5='ZHANGbingHUI'
print(str5.swapcase())
"""
Python zfill() 方法返回指定长度的字符串,原字符串右对齐,前面填充0。
width -- 指定字符串的长度。原字符串右对齐,前面填充0。
"""
print(name.zfill(30))
print(name.zfill(20))
print(name,'%o')
print(name,'%s')
|
[
"hnzhangbinghui@163.com"
] |
hnzhangbinghui@163.com
|
48a3c15283ec705f100a9181029b8e252e62f99e
|
a58689339cf11a04280cb6f627da442d2e6d2128
|
/detector.py
|
e101ec893dc4d28dedc51a5a11c210fd2a101bee
|
[] |
no_license
|
thuyngch/CISDL-DMAC
|
d1928fa7023986220d4d7b21d0e8eb73991a98fd
|
4a4e24051dedb4e534291a71ec32571b07ba7217
|
refs/heads/master
| 2020-05-25T12:10:26.804658
| 2019-06-02T14:38:16
| 2019-06-02T14:38:16
| 187,793,220
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: liuyaqi
"""
import torch
import torch.nn as nn
import math
affine_par = True
class Detector(nn.Module):
def __init__(self,pool_stride):
super(Detector, self).__init__()
'The pooling of images needs to be researched.'
self.img_pool = nn.AvgPool2d(pool_stride,stride=pool_stride)
self.input_dim = 3
'Feature extraction blocks.'
self.conv = nn.Sequential(
nn.Conv2d(self.input_dim, 16, 3, 1, 1),
nn.BatchNorm2d(16,affine = affine_par),
nn.ReLU(inplace=True),
nn.Conv2d(16, 32, 3, 1, 1),
nn.BatchNorm2d(32,affine = affine_par),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Conv2d(32, 64, 3, 1, 1),
nn.BatchNorm2d(64,affine = affine_par),
nn.ReLU(inplace=True),
nn.Conv2d(64, 128, 3, 1, 1),
nn.BatchNorm2d(128,affine = affine_par),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
'Detection branch.'
self.classifier_det = nn.Sequential(
nn.Linear(128*8*8,1024),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(1024,2),
)
self._initialize_weights()
def forward(self,x1,x2,m1,m2):
x1 = self.img_pool(x1)
x2 = self.img_pool(x2)
x1 = torch.mul(x1,m1)
x2 = torch.mul(x2,m2)
x1 = self.conv(x1)
x2 = self.conv(x2)
x1 = x1.view(x1.size(0),-1)
x2 = x2.view(x2.size(0),-1)
x12_abs = torch.abs(x1-x2)
x_det = self.classifier_det(x12_abs)
return x_det
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
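# Minimal shape-check sketch (not part of the original module); the input and
# mask sizes below are assumptions chosen so that pool_stride=4 yields the
# 8x8 maps expected by the 128*8*8 classifier input.
# det = Detector(pool_stride=4)
# x = torch.randn(1, 3, 128, 128)
# m = torch.ones(1, 3, 32, 32)
# det(x, x, m, m).shape  -> torch.Size([1, 2])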
|
[
"noreply@github.com"
] |
noreply@github.com
|
5be0edf09990b940847ed51efb8d7cc5cde7d449
|
70ead0a39a0217c3c1bc6b48f902987c883c0868
|
/templatemail/backends/locmem.py
|
87fd0941385de853fc14caa37e8ac9140c79ae53
|
[
"MIT"
] |
permissive
|
timdownsisarealboy/django-template-mail
|
a5f369fff8f3d147f63196705490c1782a9b99bb
|
64ab909da41d1a90c14969687cfd97512eaedc60
|
refs/heads/master
| 2021-01-20T19:06:20.796790
| 2013-07-12T17:18:28
| 2013-07-12T17:18:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from django.core.mail.backends import locmem
from base import BaseEmailBackend
class EmailBackend(locmem.EmailBackend, BaseEmailBackend):
def send_messages(self, email_messages):
email_messages = self.render_messages(email_messages)
return super(EmailBackend, self).send_messages(email_messages)
|
[
"bar.benoit@gmail.com"
] |
bar.benoit@gmail.com
|
3bdd06f837466e17a98dd8946a3ad205b882c0ee
|
8c801a9606722a3ed960c0472c85987254beaab9
|
/VirtEnv2/bin/html2text
|
ef10b835a3d4cb89975973af434d23204ccf1837
|
[] |
no_license
|
boyleconnor/MacWorld
|
0377f24417b09e952edee4b4983ac17eb53be806
|
89fb982a23d5965f452f7c0594fdde16185b966e
|
refs/heads/master
| 2022-07-09T00:28:55.856046
| 2014-07-25T02:06:25
| 2014-07-25T02:06:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
#!/Users/connor/PycharmProjects/MacWorld/VirtEnv2/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'html2text==2014.7.3','console_scripts','html2text'
__requires__ = 'html2text==2014.7.3'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('html2text==2014.7.3', 'console_scripts', 'html2text')()
)
|
[
"cboyle@macalester.edu"
] |
cboyle@macalester.edu
|
|
4cc39e7bddd75222d0771f991900ed2d1d80c680
|
fe1d902383ec4d9884bbc0438461b6960c15bb7d
|
/models/farkas.py
|
6bf3a06d42dec68e0b7ff7aeaf76b2b682f1a936
|
[] |
no_license
|
APooladian/FarkasLayers
|
63f40d58f7965a0094672fbf3ce866407e3b77a3
|
85710800a7dd959c7bb82e97210bec2afc4a426b
|
refs/heads/master
| 2020-07-07T15:36:55.298600
| 2019-11-12T03:49:33
| 2019-11-12T03:49:33
| 203,391,987
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,912
|
py
|
import math
import numpy as np
import torch as th
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.utils import _pair
from .utils import View, Avg2d
from .blocks import Conv
class FarkasLinear(nn.Module):
def __init__(self, in_dim, out_dim, bn=True, nonlinear=True, dropout=0.,
init_type='standard',**kwargs):
"""A linear block, with guaranteed non-zero gradient. The linear layer
is followed by batch normalization (if active) and a ReLU (again, if
active)
Args:
in_dim: number of input dimensions
out_dim: number of output dimensions
bn (bool, optional): turn on batch norm (default: False)
"""
super().__init__()
self.weight = nn.Parameter(th.randn(out_dim-1, in_dim))
self.bias = nn.Parameter(th.randn(out_dim))
self.out_dim = out_dim
self.in_dim = in_dim
self.nonlinear=nonlinear
if bn:
self.bn = nn.BatchNorm1d(out_dim, affine=False)
else:
self.bn = False
if dropout>0.:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = False
self.init_type = init_type
if self.init_type == 'standard':
self.reset_parameters()
elif self.init_type == 'xavier':
nn.init.xavier_normal_(self.weight.data)
elif self.init_type == 'kaiming':
nn.init.kaiming_normal(self.weight.data,mode='fan_in',nonlinearity='relu')
elif self.init_type == 'zero_init':
self.weight.data = nn.Parameter(th.zeros(out_dim,in_dim))
def reset_parameters(self):
n = self.in_dim
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
if self.dropout:
x = self.dropout(x)
y = F.linear(x, self.weight, None)
ybar = (-y).mean(dim=1,keepdim=True)
y = th.cat([y,ybar],dim=1)
bbar = th.max(-(self.bias[0:-1]).mean(),self.bias[-1])
b = th.cat([self.bias[0:-1],bbar.unsqueeze(0)],dim=0)
y = y + b.view(1,self.out_dim)
if self.nonlinear=='leaky_relu':
y = F.leaky_relu(y)
elif self.nonlinear=='selu':
y = F.selu(y)
elif self.nonlinear=='elu':
y = F.elu(y)
elif self.nonlinear:
y = F.relu(y)
if self.bn:
y = self.bn(y)
return y
def extra_repr(self):
s = ('{in_dim}, {out_dim}')
if self.bn:
s += ', batchnorm=True'
else:
s += ', batchnorm=False'
return s.format(**self.__dict__)
class FarkasConv(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, padding=None,
kernel_size=(3,3), bn=True, nonlinear=True, dropout=0.,
init_type='standard',**kwargs):
"""A 2d convolution block, with guaranteed non-zero gradient. The
convolution is followed by batch normalization (if active).
Args:
in_channels: number of input channels
out_channels: number of output channels
stride (int, optional): stride of the convolutions (default: 1)
kernel_size (tuple, optional): kernel shape (default: 3)
bn (bool, optional): turn on batch norm (default: False)
"""
super().__init__()
if out_channels <2:
raise ValueError('need out_channels>=2')
self.weight = nn.Parameter(th.randn(out_channels-1, in_channels, *kernel_size))
self.bias = nn.Parameter(th.randn(out_channels))
self.stride = stride
self.out_channels = out_channels
self.in_channels = in_channels
self.kernel_size=_pair(kernel_size)
if padding is None:
self.padding = tuple([k//2 for k in kernel_size])
else:
self.padding = _pair(padding)
self.nonlinear = nonlinear
if bn:
self.bn = nn.BatchNorm2d(out_channels, affine=False)
else:
self.bn = False
if dropout>0.:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = False
self.init_type = init_type
if self.init_type == 'standard':
self.reset_parameters()
elif self.init_type == 'xavier':
nn.init.xavier_normal_(self.weight.data)
elif self.init_type == 'kaiming':
nn.init.kaiming_normal(self.weight.data,mode='fan_in',nonlinearity='relu')
elif self.init_type == 'zero_init':
self.weight.data = nn.Parameter(th.zeros(out_channels-1, in_channels, *kernel_size))
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
if self.dropout:
x = self.dropout(x)
y = F.conv2d(x, self.weight, None, self.stride, self.padding,
1, 1)
ybar = (-y).mean(dim=1,keepdim=True)
y = th.cat([y,ybar],dim=1)
bbar = th.max( - (self.bias[0:-1]).mean() , self.bias[-1])
b = th.cat([self.bias[0:-1],bbar.unsqueeze(0)],dim=0)
y = y + b.view(1,self.out_channels,1,1)
if self.nonlinear=='leaky_relu':
y = F.leaky_relu(y)
elif self.nonlinear=='selu':
y = F.selu(y)
elif self.nonlinear=='elu':
y = F.elu(y)
elif self.nonlinear:
y = F.relu(y)
if self.bn:
y = self.bn(y)
return y
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.bn:
s += ', batchnorm=True'
else:
s += ', batchnorm=False'
return s.format(**self.__dict__)
class FarkasBlock(nn.Module):
def __init__(self, channels, kernel_size=(3,3), bn=True, nonlinear=True,
dropout = 0., residual=True, weight_init='standard',zero_last=False,**kwargs):
"""A basic 2d ResNet block, with modifications on original ResNet paper
[1]. Every convolution is followed by batch normalization (if active).
The gradient is guaranteed to be non-zero.
Args:
channels: number of input and output channels
kernel_size (tuple, optional): kernel shape (default: 3)
bn (bool, optional): turn on batch norm (default: False)
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun, 2016.
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
super().__init__()
self.in_channels = channels
self.out_channels = channels+1
self.kernel_size = _pair(kernel_size)
self.nonlinear=nonlinear
self.residual = residual
self.conv0 = FarkasConv(channels, channels,
kernel_size=kernel_size, bn=bn, nonlinear=nonlinear, init_type=weight_init)
if zero_last:
self.weight = nn.Parameter(th.zeros(channels,channels,*kernel_size))
self.bias=nn.Parameter(th.zeros(channels+1))
else:
self.weight = nn.Parameter(th.randn(channels, channels, *kernel_size))
self.bias = nn.Parameter(th.randn(channels+1))
self.padding = tuple([k//2 for k in kernel_size])
if bn:
self.bn = nn.BatchNorm2d(channels+1, affine=False)
else:
self.bn = False
if dropout>0.:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = False
self.init_type = weight_init
if not zero_last:
if self.init_type == 'standard':
self.reset_parameters()
elif self.init_type == 'xavier':
nn.init.xavier_normal_(self.weight.data)
elif self.init_type == 'kaiming':
nn.init.kaiming_normal(self.weight.data,mode='fan_in',nonlinearity='relu')
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
if self.dropout:
y = self.dropout(x)
else:
y=x
y = self.conv0(y)
if self.dropout:
y = self.dropout(y)
y = F.conv2d(y, self.weight, None, 1, self.padding,
1, 1)
if self.residual:
ybar = (-x-y).mean(dim=1,keepdim=True)
y = th.cat([x+y,ybar],dim=1)
else:
ybar = (-y).mean(dim=1,keepdim=True)
y = th.cat([y,ybar],dim=1)
bbar = th.max( - (self.bias[0:-1]).mean(),self.bias[-1])
b = th.cat([self.bias[0:-1],bbar.unsqueeze(0)],dim=0)
y = y + b.view(1,self.out_channels,1,1)
if self.nonlinear=='leaky_relu':
y = F.leaky_relu(y)
elif self.nonlinear=='selu':
y = F.selu(y)
elif self.nonlinear=='elu':
y = F.elu(y)
elif self.nonlinear:
y = F.relu(y)
if self.bn:
y = self.bn(y)
return y
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}')
if self.bn:
s += ', batchnorm=True'
else:
s += ', batchnorm=False'
return s.format(**self.__dict__)
class FarkasBottleneck(nn.Module):
def __init__(self, channels, kernel_size=(3,3), bn=True, nonlinear=True,
dropout = 0., residual=True, weight_init='standard',zero_last=False,**kwargs):
"""A basic 2d ResNet block, with modifications on original ResNet paper
[1]. Every convolution is followed by batch normalization (if active).
The gradient is guaranteed to be non-zero.
Args:
channels: number of input and output channels
kernel_size (tuple, optional): kernel shape (default: 3)
bn (bool, optional): turn on batch norm (default: False)
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun, 2016.
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
super().__init__()
self.in_channels = channels
self.out_channels = channels+1
self.kernel_size = _pair(kernel_size)
self.nonlinear = nonlinear
self.residual = residual
self.conv0 = FarkasConv(channels, channels//4,
kernel_size=(1,1), bn=bn,
nonlinear=nonlinear, init_type=weight_init)
self.conv1 = FarkasConv(channels//4, channels//4,
kernel_size=kernel_size, bn=bn,
nonlinear=nonlinear,init_type=weight_init)
if zero_last:
self.weight = nn.Parameter(th.zeros(channels,channels//4, 1,1))
self.bias = nn.Parameter(th.zeros(channels+1))
else:
self.weight = nn.Parameter(th.randn(channels, channels//4, 1,1))
self.bias = nn.Parameter(th.randn(channels+1))
if bn:
self.bn = nn.BatchNorm2d(channels+1, affine=False)
else:
self.bn = False
if dropout>0.:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = False
self.init_type = weight_init
if self.init_type == 'standard':
self.reset_parameters()
elif self.init_type == 'xavier':
nn.init.xavier_normal_(self.weight.data)
elif self.init_type == 'kaiming':
nn.init.kaiming_normal(self.weight.data,mode='fan_in',nonlinearity='relu')
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
if self.dropout:
y = self.dropout(x)
else:
y=x
y = self.conv0(y)
if self.dropout:
y = self.dropout(y)
y = self.conv1(y)
if self.dropout:
y = self.dropout(y)
y = F.conv2d(y, self.weight, None, 1, 0,
1, 1)
if self.residual:
ybar = (-x - y).mean(dim=1,keepdim=True)
y = th.cat([x+y,ybar],dim=1)
else:
ybar = (-y).mean(dim=1,keepdim=True)
y = th.cat([y,ybar],dim=1)
bbar = th.max(-(self.bias[0:-1]).mean(),self.bias[-1])
b = th.cat([self.bias[0:-1],bbar.unsqueeze(0)],dim=0)
y = y + b.view(1,self.out_channels,1,1)
if self.nonlinear=='leaky_relu':
y = F.leaky_relu(y)
elif self.nonlinear=='selu':
y = F.selu(y)
elif self.nonlinear=='elu':
y = F.elu(y)
elif self.nonlinear:
y = F.relu(y)
if self.bn:
y = self.bn(y)
return y
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}')
if self.bn:
s += ', batchnorm=True'
else:
s += ', batchnorm=False'
return s.format(**self.__dict__)
class FarkasNet(nn.Module):
def __init__(self, layers, block=FarkasBlock, in_channels=3,
classes=10, kernel_size=(3,3), nonlinear=True,
conv0_kwargs = {'kernel_size':(3,3), 'stride':1},
conv0_pool=None, downsample_pool=nn.AvgPool2d,
last_layer_nonlinear=False, last_layer_bn=None,
dropout=0.,weight_init='standard',zero_last=False,
bn=True, base_channels=16, **kwargs):
if last_layer_bn is None:
last_layer_bn=bn
super().__init__()
kernel_size = _pair(kernel_size)
def make_layer(n, block, in_channels, out_channels, stride):
sublayers = []
if not in_channels==out_channels:
conv = FarkasConv
sublayers.append(conv(in_channels, out_channels, kernel_size=(1,1),
nonlinear=True, dropout=dropout, bn=bn,init_type=weight_init))
if stride>1:
sublayers.append(downsample_pool(stride))
for k in range(n):
u = k
sublayers.append(block(out_channels+u, kernel_size=kernel_size, dropout=dropout,
bn=bn, nonlinear=nonlinear, weight_init=weight_init,zero_last=zero_last,**kwargs))
return nn.Sequential(*sublayers)
conv = FarkasConv
pdsz = [k//2 for k in conv0_kwargs['kernel_size'] ]
self.layer0 = conv(in_channels, base_channels, padding=pdsz,
**conv0_kwargs, dropout=dropout, bn=bn, nonlinear=nonlinear,weight_init=weight_init)
if conv0_pool:
self.maxpool = conv0_pool
else:
self.maxpool = False
_layers = []
for i, n in enumerate(layers):
if i==0:
_layers.append(make_layer(n, block, base_channels,
base_channels, 1))
else:
u = layers[i-1]
_layers.append(make_layer(n, block, base_channels*(2**(i-1))+u,
base_channels*(2**i), 2))
self.layers = nn.Sequential(*_layers)
self.pool = Avg2d()
u = layers[-1]
self.view = View((2**i)*base_channels+u)
if dropout>0:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = False
self.fc = nn.Linear((2**i)*base_channels+u,classes)
self.nonlinear=nonlinear
self.bn = bn
@property
def num_parameters(self):
return sum([w.numel() for w in self.parameters()])
def forward(self, x):
x = self.layer0(x)
if self.maxpool:
x = self.maxpool(x)
x = self.layers(x)
x = self.pool(x)
x = self.view(x)
if self.dropout:
x = self.dropout(x)
x = self.fc(x)
return x
def FarkasNet18(**kwargs):
m = FarkasNet([3,3,3],block=FarkasBlock,**kwargs)
return m
def FarkasNet50(**kwargs):
m = FarkasNet([3,4,6,3],base_channels=64,block=FarkasBottleneck,**kwargs)
return m
def FarkasNet101(**kwargs):
m = FarkasNet([3,4,23,3],base_channels=64,block=FarkasBottleneck,**kwargs)
return m
def FarkasNet110(**kwargs):
m = FarkasNet([18,18,18],block=FarkasBlock,**kwargs)
return m
def FarkasNet34(**kwargs):
m = FarkasNet([5,5,5],block=FarkasBlock,**kwargs)
return m
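# Minimal usage sketch (not part of the original module); running it requires the
# package context for the relative imports (.utils, .blocks).
# net = FarkasNet18(in_channels=3, classes=10)
# out = net(th.randn(2, 3, 32, 32))
# out.shape  -> torch.Size([2, 10])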
|
[
"aram-alexandre.pooladian@mail.mcgill.ca"
] |
aram-alexandre.pooladian@mail.mcgill.ca
|
2fa4ab95d64e2940ff958f0cf6fc45151207da79
|
67819ca1c5030d936413ddbaa08ed245b7b9358d
|
/app/backend/hiStoryBackend/hiStoryBackend/wsgi.py
|
dd4233b7ffe0739198a5a1c86e59932b7f74728a
|
[] |
no_license
|
bounswe/bounswe2018group7
|
9ac94fb93113571fdd43c2e9b91ea2ba318cce9c
|
9c56cb2f28f189853f4aacdb587b85544f25b2c3
|
refs/heads/master
| 2023-03-05T09:18:43.445698
| 2022-04-23T19:13:44
| 2022-04-23T19:13:44
| 120,274,361
| 12
| 3
| null | 2023-03-03T15:20:37
| 2018-02-05T08:12:49
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hiStoryBackend.settings')
application = get_wsgi_application()
|
[
"cburakaygun@gmail.com"
] |
cburakaygun@gmail.com
|
8a55a174178d00541f365a08542d4d792b52fcc5
|
7f456f36ecb35b2f898f3257a45ec79cf248f4e0
|
/project/source/DQN_old.py
|
92afa0878256b031c322a5ea8741476c00ad77b8
|
[] |
no_license
|
Akihiro-Nishihara/ActionGameAI
|
0dcbd511bf54837dd145ae548452c2e7d1986ffe
|
d3c9e91cb84f1eb6125588338ea2a6e1567def3b
|
refs/heads/master
| 2022-12-08T15:53:13.160680
| 2020-09-16T17:23:12
| 2020-09-16T17:23:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,436
|
py
|
import os
import numpy as np
import datetime
import math
import sys
import shutil
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D
from keras.optimizers import Adam
from keras.utils import plot_model
from collections import deque
from keras import backend as K # Keras does not do the matrix math itself; the backend is what handles it
import tensorflow as tf
import pygame
from project.source import myenv, header as h
FUNC_REWARD = 1 # reward setting for reinforcement learning
LEARNING_RATE = 0.1 # learning rate of the Q-network
# LEARNING_RATE = 0.01 # learning rate of the Q-network
OBS_LEFT = 0
OBS_TOP = -1
OBS_RIGHT = 3
OBS_BOTTOM = 2
SIZE_STATE = (OBS_RIGHT - OBS_LEFT) * (OBS_BOTTOM - OBS_TOP) - 1 + 2 # observed cells (excluding the character's own cell) + coordinates within the cell
SIZE_ACTION = 8
SIZE_HIDDEN = 32
SEED = 1
NUM_EPISODES = 19 # total number of episodes
SIZE_LOOP = 1000
GAMMA = 0.99 # discount factor
# memory_size = 10000 # size of the replay buffer
MEMORY_SIZE = 1000 # size of the replay buffer
BATCH_SIZE = 32 # batch size used to update the Q-network
# MODE PARAMETER
OBSERVE_PLAYER = 'RIGHT'
DQN_MODE = 1 # 1 means DQN, 0 means DDQN
LENDER_MODE = 0 # 0: no rendering after training, 1: render once training has finished
# Definition of the loss function (Huber loss)
def huberloss(_y_true, _y_pred):
EPSILON = 1.0
err = _y_true - _y_pred
condition = K.abs(err) < EPSILON
L2 = K.square(err) / 2
L1 = EPSILON * (K.abs(err) - EPSILON / 2)
loss = tf.where(condition, L2, L1)
return K.mean(loss)
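# Worked example (illustration, not part of the original file): with EPSILON = 1.0,
# an error of 0.5 falls on the quadratic branch and contributes 0.5**2 / 2 = 0.125,
# while an error of 3.0 falls on the linear branch and contributes 1.0 * (3.0 - 0.5) = 2.5.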
# Define the Q-function as a deep-learning network class
class QNetwork:
def __init__(self, _learning_rate=LEARNING_RATE, _state_size=SIZE_STATE, _action_size=SIZE_ACTION,
_hidden_size=SIZE_HIDDEN):
self.model = Sequential()
self.model.add(Dense(_hidden_size, activation='relu', input_dim=_state_size))
self.model.add(Dense(_hidden_size, activation='relu'))
self.model.add(Dense(_hidden_size, activation='relu'))
self.model.add(Dense(_action_size, activation='linear'))
self.optimizer = Adam(lr=_learning_rate)
self.model.compile(loss=huberloss, optimizer=self.optimizer)
# CNN architecture (unfinished)
# self.model = Sequential()
# self.model.add(Conv2D(16, (3, 3), padding='same', input_shape=(5, 5), activation='relu'))
# self.model.add(MaxPool2D(2, 2))
# self.model.add(Flatten())
# self.model.add(Dense(SIZE_HIDDEN, activation='relu'))
# self.model.add(Dense(_action_size, activation='linear'))
# self.optimizer = Adam(lr=_learning_rate)
# self.model.compile(loss=huberloss, optimizer=self.optimizer)
# Train the weights; _memory stores (state, action, reward, next_state) tuples
def replay(self, _memory, _batch_size, _gamma, _targetQN):
inputs = np.zeros((_batch_size, SIZE_STATE))
targets = np.zeros((_batch_size, SIZE_ACTION))
mini_batch = _memory.sample(_batch_size)
# Build the inputs and targets used for training
for i, (state_b, action_b, reward_b, next_state_b) in enumerate(mini_batch):
inputs[i:i + 1] = state_b
target = reward_b
if not (next_state_b == np.zeros(state_b.shape)).all(axis=1):
# Value calculation
retmainQs = self.model.predict(next_state_b)[0]
next_action = np.argmax(retmainQs) # index of the largest element in the array
target = reward_b + _gamma * _targetQN.model.predict(next_state_b)[0][next_action]
targets[i] = self.model.predict(state_b) # output of the Q-network
int_action_b = 1 * action_b['right'] + 2 * action_b['left'] + 4 * action_b['space']
targets[i][int_action_b] = target # teaching signal (target value)
self.model.fit(inputs, targets, epochs=1, verbose=0)
def save_network(self, _path_dir, _name_network):
string_json_model = self.model.to_json()
fp_model = open(_path_dir + '/' + _name_network + '_model.json', 'w')
fp_model.write(string_json_model)
self.model.save_weights(_path_dir + '/' + _name_network + '_weights.hdf5')
# Memory class used to implement experience replay and a fixed target Q-network
class Memory:
def __init__(self, _max_size=1000):
self.buffer = deque(maxlen=_max_size)
def add(self, _experience):
self.buffer.append(_experience)
def sample(self, _batch_size):
# draw sample indices from the buffer (np.random.choice without replacement)
idx = np.random.choice(np.arange(len(self.buffer)), size=_batch_size, replace=False)
return [self.buffer[ii] for ii in idx]
def len(self):
return len(self.buffer)
# Class that decides an action from the current state
class Actor:
# Return the action with the highest predicted reward, with exploration controlled by epsilon
def get_action(self, _state, _episode, _mainQN):
# Epsilon-greedy strategy that gradually prefers the optimal action
# As epsilon shrinks, the probability of taking the optimal action increases.
# epsilon = 0.001 + 0.9 / (1.0 + _episode)
epsilon = 1.0 - (_episode / NUM_EPISODES)
if epsilon <= np.random.uniform(0, 1):
list_return_target_Qs = _mainQN.model.predict(_state)[0] # list of predicted rewards for each action
action = np.argmax(list_return_target_Qs)
else:
action = np.random.choice(list(range(0, SIZE_ACTION)))
dict_action = get_dict_action(action)
return dict_action
def get_dict_action(_int_act):
if _int_act not in range(0, SIZE_ACTION):
print('Error: _int_act in get_list_bin_action is out of range', file=sys.stderr)
os.system('PAUSE')
exit(-1)
# Represent the action as a binary string
str_bin_action = format(_int_act, 'b')
for i in range(int(math.log2(SIZE_ACTION)) - len(str_bin_action)):
str_bin_action = '0' + str_bin_action
list_str_bin_action = list(str_bin_action)
key_right = int(list_str_bin_action[2])
key_left = int(list_str_bin_action[1])
key_space = int(list_str_bin_action[0])
dict_pressed_key = {'right': key_right, 'left': key_left, 'space': key_space}
return dict_pressed_key
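# Worked example (illustration, not part of the original file):
# get_dict_action(5) -> '101' -> {'right': 1, 'left': 0, 'space': 1}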
# Main function
def main():
# env = gym.make('CartPole-v0')
# env = wrappers.Monitor(env, './movie/cartpoleDDQN', video_callable=(lambda ep: ep % 100 == 0)) # when saving videos
# original environment
os.environ['PYTHONHASHSEED'] = str(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)
# rn.seed(SEED)
pygame.init()
pygame.display.set_caption("Action Game AI")
screen = pygame.display.set_mode((h.SCREEN_WIDTH, h.SCREEN_HEIGHT))
screen_sub1 = pygame.display.set_mode((h.SCREEN_WIDTH, h.SCREEN_HEIGHT))
screen_sub2 = pygame.display.set_mode((h.SCREEN_WIDTH, h.SCREEN_HEIGHT))
# env = myenv.MyEnv(_path_file_stage='./stage_sample.txt', _screen=screen)
env = myenv.MyEnv(_path_file_stage='./stage_sample.txt', _screen=screen)
env_sub1 = myenv.MyEnv(_path_file_stage='./stage_sub1.txt', _screen=screen_sub1)
env_sub2 = myenv.MyEnv(_path_file_stage='./stage_sub2.txt', _screen=screen_sub2)
islearned = 0 # flag: training has finished
isrender = 0 # rendering flag
# ---
# Create the networks, the memory, and the Actor
mainQN = QNetwork(_hidden_size=SIZE_HIDDEN, _learning_rate=LEARNING_RATE)
targetQN = QNetwork(_hidden_size=SIZE_HIDDEN, _learning_rate=LEARNING_RATE)
memory = Memory(_max_size=MEMORY_SIZE)
actor = Actor()
# Main routine
for episode in range(NUM_EPISODES):
env.reset()
act_ini = env.action_space.sample()
action = {'right': act_ini[0], 'left': act_ini[1], 'space': act_ini[2]}
state, reward, is_done, _ = env.step(action) # observation, reward, game-over flag, and detailed info after executing action a_t
state = np.reshape(state, [1, SIZE_STATE])
env_sub1.reset()
state_sub1, reward_sub1, is_done_sub1, _ = env_sub1.step(action) # observation, reward, game-over flag, and detailed info after executing action a_t
state_sub1 = np.reshape(state_sub1, [1, SIZE_STATE])
env_sub2.reset()
state_sub2, reward_sub2, is_done_sub2, _ = env_sub2.step(action) # observation, reward, game-over flag, and detailed info after executing action a_t
state_sub2 = np.reshape(state_sub2, [1, SIZE_STATE])
targetQN.model.set_weights(mainQN.model.get_weights())
# Loop over one episode
list_reward = []
count_loop = 0
is_train_sub1 = False
is_train_sub2 = False
# for count_loop in range(SIZE_LOOP):
# print(str(count))
while not is_done:
count_loop += 1
# if (islearned == 1) and LENDER_MODE: # render the cart-pole once training has finished
# env.render()
# time.sleep(0.1)
# print(state[0, 0])
action = actor.get_action(state, episode, mainQN) # decide the action at time t
if count_loop % 20 == 0:
print(action)
# (main game) observation, reward, game-over flag, and detailed info after executing action a_t
next_state, reward, is_done, info = env.step(action)
next_state = np.reshape(next_state, [1, SIZE_STATE])
memory.add((state, action, reward, next_state)) # memory update
state = next_state # state update
list_reward.append(reward)
# Check for termination
if is_done:
if info['GAMEOVER']:
if info['TIME'] == 0:
print('MAIN {0}/{1}: TIME OVER'.format(episode + 1, NUM_EPISODES))
else:
print('MAIN {0}/{1}: FALL GROUND'.format(episode + 1, NUM_EPISODES))
elif info['CLEAR']:
print('MAIN {0}/{1}: CLEAR!'.format(episode + 1, NUM_EPISODES))
else:
print('Error: Wrong information of main stage', file=sys.stderr)
os.system('PAUSE')
exit(-1)
next_state = np.zeros(state.shape)
next_state_sub1 = np.zeros(state_sub1.shape)
next_state_sub2 = np.zeros(state_sub2.shape)
break
if is_train_sub1:
action_sub1 = actor.get_action(state_sub1, episode, mainQN) # decide the action at time t
# (sub game) observation, reward, game-over flag, and detailed info after executing action a_t
next_state_sub1, reward_sub1, is_done_sub1, info_sub1 = env_sub1.step(action_sub1)
next_state_sub1 = np.reshape(next_state_sub1, [1, SIZE_STATE])
memory.add((state_sub1, action_sub1, reward_sub1, next_state_sub1)) # memory update
state_sub1 = next_state_sub1
# If the sub stage reaches the goal, the basic training for the main stage is judged sufficient and its training ends for this episode.
if is_done_sub1:
if info_sub1['GAMEOVER']:
if info_sub1['TIME'] == 0:
print('sub1 {0}/{1}: TIME OVER'.format(episode + 1, NUM_EPISODES))
else:
print('sub1 {0}/{1}: FALL GROUND'.format(episode + 1, NUM_EPISODES))
elif info_sub1['CLEAR']:
print('sub1 {0}/{1}: CLEAR!'.format(episode + 1, NUM_EPISODES))
else:
print('Error: Wrong information of sub1 stage', file=sys.stderr)
os.system('PAUSE')
exit(-1)
is_train_sub1 = False
if is_train_sub2:
action_sub2 = actor.get_action(state_sub2, episode, mainQN) # decide the action at time t
# (sub game) observation, reward, game-over flag, and detailed info after executing action a_t
next_state_sub2, reward_sub2, is_done_sub2, info_sub2 = env_sub2.step(action_sub2)
next_state_sub2 = np.reshape(next_state_sub2, [1, SIZE_STATE])
memory.add((state_sub2, action_sub2, reward_sub2, next_state_sub2)) # memory update
state_sub2 = next_state_sub2
# If the sub stage reaches the goal, the basic training for the main stage is judged sufficient and its training ends for this episode.
if is_done_sub2:
if info_sub2['GAMEOVER']:
if info_sub2['TIME'] == 0:
print('sub2 {0}/{1}: TIME OVER'.format(episode + 1, NUM_EPISODES))
else:
print('sub2 {0}/{1}: FALL GROUND'.format(episode + 1, NUM_EPISODES))
elif info_sub2['CLEAR']:
print('sub2 {0}/{1}: CLEAR!'.format(episode + 1, NUM_EPISODES))
else:
print('Error: Wrong information of sub2 stage', file=sys.stderr)
os.system('PAUSE')
exit(-1)
is_train_sub2 = False
# Train and update the Q-network weights
if (memory.len() > BATCH_SIZE) and not is_done:
mainQN.replay(memory, BATCH_SIZE, GAMMA, targetQN)
if DQN_MODE:
targetQN.model.set_weights(mainQN.model.get_weights())
print('{0}/{1}: {2}'.format(episode + 1, NUM_EPISODES, sum(list_reward) / len(list_reward)))
# print(count_loop)
dt_now = datetime.datetime.now()
str_time = dt_now.strftime('%Y-%m-%d_%H-%M-%S')
path_dirs = '../network/model_{0}'.format(str_time)
os.makedirs(path_dirs, exist_ok=True)
mainQN.save_network(_path_dir=path_dirs, _name_network='mainQN')
plot_model(mainQN.model, to_file=path_dirs + '/Qnetwork.png', show_shapes=True) # visualize the Q-network
shutil.copy('./stage_sample.txt', path_dirs)
if __name__ == '__main__':
main()
|
[
"ocean90light@gmail.com"
] |
ocean90light@gmail.com
|
b7c80a21298c1316985aac7e42d5886a612a3783
|
a95f9fb15eccaf4c8a25549aeb52fb1bb517f8ce
|
/label_extractor.py
|
98bffcf2ee33da1d7d0999b0f1516d714e3bf091
|
[] |
no_license
|
Kirich2323/ml
|
441144b26eac19f10c0b773e6c9aff82fa58246d
|
5ced175d0e5ffedeb56edb07c809decc77e1154f
|
refs/heads/master
| 2020-03-11T18:27:33.204495
| 2018-04-26T01:10:47
| 2018-04-26T01:10:47
| 130,177,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,625
|
py
|
import xml.etree.ElementTree as ET
import re
class BaseLabelExtractor:
def __init__(self, *args, **kwargs):
pass
def get_labels(self, data):
ans = []
for f in data:
ans.append(self.extract_label(f))
return ans
class ProblemExtractor(BaseLabelExtractor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def extract_label(self, item):
r = r'(.+)-(.+)-(\d+).*'
m = re.search(r, item)
return m.group(2)
class VerdictExtractor(BaseLabelExtractor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.xml = kwargs.get("xml", "")
self.root = ET.parse(self.xml).getroot()
self.teams = {}
for session in self.root[0][1:]:
#print(session.attrib['alias'])
tasks = []
for problem in session:
task = []
for solution in problem:
task.append(solution.attrib['accepted'])
tasks.append(task)
self.teams[session.attrib["alias"]] = tasks
def extract_label(self, item):
r = r'(.+)-(.+)-(\d+)\..*'
m = re.search(r, item)
print(item)
print(m.group(1))
print(m.group(2))
print(m.group(3))
print(self.teams[m.group(1)])
print(self.teams[m.group(1)][ord(m.group(2))-ord('a')])
print(self.teams[m.group(1)][ord(m.group(2))-ord('a')][int(m.group(3)) - 1])
print('-'*40)
return self.teams[m.group(1)][ord(m.group(2))-ord('a')][int(m.group(3)) - 1]
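# Minimal usage sketch (not part of the original module); the file name below is a
# made-up example of the '<team>-<problem>-<attempt>' pattern the regex expects.
# ProblemExtractor().get_labels(['teamA-b-3.cpp'])  -> ['b']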
|
[
"ivadik2323@gmail.com"
] |
ivadik2323@gmail.com
|
b695146b5baec03e2372b136427fca9502a8b6e6
|
6a47b50684e9a0dcbf145acea402bd97e298c89d
|
/Python Programs/helloAll.py
|
10f799d276892bb3a0572f7e1c3782922396aac3
|
[
"MIT"
] |
permissive
|
AkshayPradeep6152/letshack
|
a37e132c408aa68a2232cbab7eadaafb58267e26
|
f820e438921c6706fb2565379db6681184676698
|
refs/heads/main
| 2023-08-13T10:38:11.495481
| 2021-10-03T05:05:01
| 2021-10-03T05:05:01
| 300,655,139
| 8
| 96
|
MIT
| 2021-10-03T05:05:02
| 2020-10-02T15:17:34
|
Java
|
UTF-8
|
Python
| false
| false
| 17
|
py
|
print("helloAll")
|
[
"noreply@github.com"
] |
noreply@github.com
|
747403576f24d62c684e4cad16f2b82581d8a8fb
|
bb048e7cc8ffd76a1c0a5b041b2ec5ea23fe95b8
|
/conftest.py
|
2f442eac6695282b90be09a6bf59a08ffce8a8b9
|
[] |
no_license
|
Carling-Kody/pura_demo
|
af68f17fc3b1424cddaf63ede793df064dea3a14
|
4d7870995cc88b34c36db00173c6510dadc69186
|
refs/heads/main
| 2023-08-13T18:46:39.230402
| 2021-07-08T22:40:09
| 2021-07-08T22:40:09
| 381,835,920
| 0
| 0
| null | 2021-07-08T22:40:10
| 2021-06-30T21:22:41
|
Python
|
UTF-8
|
Python
| false
| false
| 9,084
|
py
|
"""
`conftest.py` and `pylenium.json` files should stay at your Workspace Root.
conftest.py
Although this file is editable, you should only change its contents if you know what you are doing.
Instead, you can create your own conftest.py file in the folder where you store your ui_tests.
pylenium.json
You can change the values, but DO NOT touch the keys or you will break the schema.
py
The only fixture you really need from this is `py`. This is the instance of Pylenium for each test.
Just pass py into your test and you're ready to go!
Examples:
def test_go_to_google(py):
py.visit('https://google.com')
assert 'Google' in py.title()
"""
import json
import logging
import os
import shutil
import sys
from pathlib import Path
import pytest
import requests
from faker import Faker
from pytest_reportportal import RPLogger, RPLogHandler
from pylenium.driver import Pylenium
from pylenium.config import PyleniumConfig, TestCase
from pylenium.a11y import PyleniumAxe
@pytest.fixture(scope='function')
def fake() -> Faker:
"""A basic instance of Faker to make test data."""
return Faker()
@pytest.fixture(scope='function')
def api():
"""A basic instance of Requests to make HTTP API calls."""
return requests
@pytest.fixture(scope="session")
def rp_logger(request):
"""Report Portal Logger"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Create handler for Report Portal if the service has been
# configured and started.
if hasattr(request.node.config, 'py_test_service'):
# Import Report Portal logger and handler to the test module.
logging.setLoggerClass(RPLogger)
rp_handler = RPLogHandler(request.node.config.py_test_service)
# Add additional handlers if it is necessary
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
logger.addHandler(console_handler)
else:
rp_handler = logging.StreamHandler(sys.stdout)
# Set INFO level for Report Portal handler.
rp_handler.setLevel(logging.INFO)
return logger
@pytest.fixture(scope='session', autouse=True)
def project_root() -> str:
"""The Project (or Workspace) root as a filepath.
* This conftest.py file should be in the Project Root if not already.
"""
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='session', autouse=True)
def test_run(project_root, request) -> str:
"""Creates the `/test_results` directory to store the results of the Test Run.
Returns:
The `/test_results` directory as a filepath (str).
"""
session = request.node
test_results_dir = f'{project_root}/test_results'
if os.path.exists(test_results_dir):
# delete /test_results from previous Test Run
shutil.rmtree(test_results_dir, ignore_errors=True)
try:
# race condition can occur between checking file existence and
# creating the file when using pytest with multiple workers
Path(test_results_dir).mkdir(parents=True, exist_ok=True)
except FileExistsError:
pass
for test in session.items:
try:
# make the test_result directory for each test
Path(f'{test_results_dir}/{test.name}').mkdir(parents=True, exist_ok=True)
except FileExistsError:
pass
return test_results_dir
@pytest.fixture(scope='session')
def py_config(project_root, request) -> PyleniumConfig:
"""Initialize a PyleniumConfig for each test
1. This starts by deserializing the user-created pylenium.json from the Project Root.
2. If that file is not found, then proceed with Pylenium Defaults.
3. Then any CLI arguments override their respective key/values.
"""
try:
# 1. Load pylenium.json in Project Root, if available
with open(f'{project_root}/pylenium.json') as file:
_json = json.load(file)
config = PyleniumConfig(**_json)
except FileNotFoundError:
# 2. pylenium.json not found, proceed with defaults
config = PyleniumConfig()
# 3. Override with any CLI args/options
# Driver Settings
cli_remote_url = request.config.getoption('--remote_url')
if cli_remote_url:
config.driver.remote_url = cli_remote_url
cli_browser_options = request.config.getoption('--options')
if cli_browser_options:
config.driver.options = [option.strip() for option in cli_browser_options.split(',')]
cli_browser = request.config.getoption('--browser')
if cli_browser:
config.driver.browser = cli_browser
cli_capabilities = request.config.getoption('--caps')
if cli_capabilities:
# --caps must be in '{"name": "value", "boolean": true}' format
# with double quotes around each key. booleans are lowercase.
config.driver.capabilities = json.loads(cli_capabilities)
cli_page_wait_time = request.config.getoption('--page_load_wait_time')
if cli_page_wait_time and cli_page_wait_time.isdigit():
config.driver.page_load_wait_time = int(cli_page_wait_time)
# Logging Settings
cli_pylog_level = request.config.getoption('--pylog_level')
if cli_pylog_level:
config.logging.pylog_level = cli_pylog_level
cli_screenshots_on = request.config.getoption('--screenshots_on')
if cli_screenshots_on:
shots_on = True if cli_screenshots_on.lower() == 'true' else False
config.logging.screenshots_on = shots_on
cli_extensions = request.config.getoption('--extensions')
if cli_extensions:
config.driver.extension_paths = [ext.strip() for ext in cli_extensions.split(',')]
return config
@pytest.fixture(scope='function')
def test_case(test_run, py_config, request) -> TestCase:
"""Manages data pertaining to the currently running Test Function or Case.
* Creates the test-specific logger.
Args:
test_run: The Test Run (or Session) this test is connected to.
Returns:
An instance of TestCase.
"""
test_name = request.node.name
test_result_path = f'{test_run}/{test_name}'
py_config.driver.capabilities.update({'name': test_name})
return TestCase(name=test_name, file_path=test_result_path)
@pytest.fixture(scope='function')
def py(test_case, py_config, request, rp_logger):
"""Initialize a Pylenium driver for each test.
Pass in this `py` fixture into the test function.
Examples:
def test_go_to_google(py):
py.visit('https://google.com')
assert 'Google' in py.title()
"""
py = Pylenium(py_config)
yield py
try:
if request.node.report.failed:
# if the test failed, execute code in this block
if py_config.logging.screenshots_on:
screenshot = py.screenshot(f'{test_case.file_path}/test_failed.png')
with open(screenshot, "rb") as image_file:
rp_logger.info(
"Test Failed - Attaching Screenshot",
attachment={"name": "test_failed.png", "data": image_file, "mime": "image/png"},
)
except AttributeError:
rp_logger.error('Unable to access request.node.report.failed, unable to take screenshot.')
except TypeError:
rp_logger.info('Report Portal is not connected to this test run.')
py.quit()
@pytest.fixture(scope='function')
def axe(py) -> PyleniumAxe:
"""The aXe A11y audit tool as a fixture."""
return PyleniumAxe(py.webdriver)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Yield each test's outcome so we can handle it in other fixtures."""
outcome = yield
report = outcome.get_result()
if report.when == 'call':
setattr(item, "report", report)
return report
def pytest_addoption(parser):
parser.addoption('--browser', action='store', default='', help='The lowercase browser name: chrome | firefox')
parser.addoption('--remote_url', action='store', default='', help='Grid URL to connect ui_tests to.')
parser.addoption('--screenshots_on', action='store', default='', help="Should screenshots be saved? true | false")
parser.addoption('--pylog_level', action='store', default='', help="Set the pylog_level: 'off' | 'info' | 'debug'")
parser.addoption(
'--options',
action='store',
default='',
help='Comma-separated list of Browser Options. Ex. "headless, incognito"',
)
parser.addoption(
'--caps',
action='store',
default='',
help='List of key-value pairs. Ex. \'{"name": "value", "boolean": true}\'',
)
parser.addoption(
'--page_load_wait_time',
action='store',
default='',
help='The amount of time to wait for a page load before raising an error. Default is 0.',
)
parser.addoption(
'--extensions', action='store', default='', help='Comma-separated list of extension paths. Ex. "*.crx, *.crx"'
)
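# Example invocation (illustration, not part of the original file), using the CLI
# options registered above:
# pytest --browser=chrome --options="headless, incognito" --screenshots_on=true --pylog_level=info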
|
[
"kodycarling19@gmail.com"
] |
kodycarling19@gmail.com
|
68d3e5ce7d725d753fe712d6b84fc9b1056b3b77
|
a50badcad45aa17cac0148470a165e89e4d9f352
|
/errors.py
|
56af8f4bb2358b465d314887db56a16ef688abc1
|
[] |
no_license
|
deemoowoor/employee-stats
|
bf53e15e3af7e52a6a6828e8e539a5d945782dee
|
5b29f103c3327fe18ea1998777141b610589e6af
|
refs/heads/master
| 2022-12-12T08:12:28.088128
| 2020-05-20T08:44:47
| 2020-05-20T08:44:47
| 265,500,108
| 0
| 0
| null | 2022-12-08T09:57:35
| 2020-05-20T08:27:27
|
Python
|
UTF-8
|
Python
| false
| false
| 154
|
py
|
class ApiError(BaseException):
def __init__(self, message):
self._message = message
def __str__(self):
return self._message
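# Minimal usage sketch (not part of the original module):
# raise ApiError('upstream request failed')  ->  str(exc) == 'upstream request failed'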
|
[
"andrei.sosnin@gmail.com"
] |
andrei.sosnin@gmail.com
|
75625cd03c5efadbd61429c15127863b35d48b1a
|
e030b7fc33326c6b885255fd08643413ab871797
|
/pass_through_controllers/examples/script/cartesian_trajectory_action_client.py
|
f4bdf836ba6cc305c64c1509b44e866182a176e2
|
[
"BSD-3-Clause"
] |
permissive
|
gavanderhoorn/cartesian_ros_control
|
1963bf10b32fb67d31cf0e27d4e4c68452fe4bd2
|
2d6262c59a725d6030bbf7bab43fe6aa2915fc5a
|
refs/heads/master
| 2023-04-19T19:11:51.796707
| 2021-04-06T14:34:05
| 2021-04-06T14:34:05
| 355,232,781
| 2
| 0
|
NOASSERTION
| 2021-04-06T15:08:27
| 2021-04-06T15:08:26
| null |
UTF-8
|
Python
| false
| false
| 3,186
|
py
|
#!/usr/bin/env python
"""
Simple action client for testing Cartesian-based PassThroughControllers
Use this to fire-off a quick random Cartesian trajectory goal for testing.
The trajectory will last 10 seconds.
"""
from __future__ import print_function
import rospy
import actionlib
import signal
import sys
import os
import numpy as np
from cartesian_control_msgs.msg import FollowCartesianTrajectoryAction, FollowCartesianTrajectoryGoal, CartesianTrajectoryPoint
from urdf_parser_py.urdf import URDF
from kdl_parser_py.urdf import treeFromUrdfModel
import PyKDL
class Client(object):
def __init__(self):
self.client = actionlib.SimpleActionClient(
'/hw_interface/forward_cartesian_trajectories/follow_cartesian_trajectory',
FollowCartesianTrajectoryAction)
self.client.wait_for_server()
# Suppress spam output of urdf parsing.
# urdf_parser_py is unhappy with various visual tags in the robot_description.
tmp = sys.stderr
sys.stderr = open(os.devnull, 'w')
robot = URDF.from_parameter_server()
sys.stderr = tmp
_, tree = treeFromUrdfModel(robot)
self.fk_solver = PyKDL.ChainFkSolverPos_recursive(tree.getChain('base_link', 'tool0'))
def test(self):
""" Follow two-point, random Cartesian trajectory
This samples uniformly in [-pi, +pi] for each joint to compute two
        random poses within the robot's reach. It then traverses these points
within 10 seconds.
"""
def random_point():
p_kdl = PyKDL.Frame()
joints = PyKDL.JntArray(6)
for i in range(6):
joints[i] = (np.random.random_sample() * 2 - 1) * np.pi
self.fk_solver.JntToCart(joints, p_kdl)
p = CartesianTrajectoryPoint()
p.pose.position.x = p_kdl.p[0]
p.pose.position.y = p_kdl.p[1]
p.pose.position.z = p_kdl.p[2]
q = PyKDL.Rotation.GetQuaternion(p_kdl.M)
p.pose.orientation.x = q[0]
p.pose.orientation.y = q[1]
p.pose.orientation.z = q[2]
p.pose.orientation.w = q[3]
return p
# Random 2-point trajectory
duration = 10
p1 = random_point()
p2 = random_point()
p1.time_from_start = rospy.Duration(0.5 * duration)
p2.time_from_start = rospy.Duration(duration)
goal = FollowCartesianTrajectoryGoal()
goal.trajectory.points.append(p1)
goal.trajectory.points.append(p2)
self.client.send_goal(goal)
self.client.wait_for_result()
return self.client.get_result()
def clean_shutdown(self, msg=None):
""" Cancel goal on Ctrl-C """
self.client.cancel_goal()
if msg is not None:
print(msg)
sys.exit(0)
if __name__ == '__main__':
try:
rospy.init_node('action_test_client')
client = Client()
signal.signal(signal.SIGINT, lambda sig, frame: client.clean_shutdown("\nGoal canceled."))
result = client.test()
print("Result: {}".format(result))
except rospy.ROSInterruptException:
pass
|
[
"scherzin@fzi.de"
] |
scherzin@fzi.de
|
b49df1cc1b4948c46ef6cec8398200ea89ae37fa
|
67048c855300ffc1fa192eee1da241d7f8e85682
|
/pizza.py
|
97cdb1c8caa56c039d6815b7062e06cb161ab1b7
|
[] |
no_license
|
JennymarBerroteran/Umbrella
|
07e4f286f46da749c04d59769bcef4cf763f9a95
|
c1271e156bf7657179e0f209353d77babe2a06ff
|
refs/heads/master
| 2020-09-08T08:25:09.823352
| 2019-11-24T22:27:57
| 2019-11-24T22:27:57
| 221,077,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
fav_pizza = ['pepperoni', 'cheeze', 'margarita']
for pizza in fav_pizza:
print(f'I like {pizza} pizza \n')
print('I really love pizza')
|
[
"noreply@github.com"
] |
noreply@github.com
|
88267b9d5edb8a48d3ceb3ce7f9c307f1a46e175
|
55965f592cb7e915cd68bd371ee1a6ad2a6e0247
|
/libmngmtsys.py
|
79288d746d1e8cdb428259f150297c49244931cb
|
[] |
no_license
|
Upasna4/Training
|
2b5b57fc3e5229304860f153db93d912a44472bf
|
33c6eeb565c422e40ea88d50af787f58b9f0da6d
|
refs/heads/master
| 2020-08-05T03:50:36.280910
| 2019-10-02T16:36:09
| 2019-10-02T16:36:09
| 212,383,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,733
|
py
|
memberData = {}
bookData = {}
borrowData = {}
m_id = 101
b_id = 201
print("Library Management System\n"
"1.Add Member\n"
"2.Add Book\n"
"3.Book Borrowing\n"
"4.Book Returning\n"
"5.Member Status\n"
"6.Book Status\n"
"7.Exit")
while True:
choice = int(input("Enter Choice: "))
if choice == 1:
print("Add Member Program")
loop1=True
while(loop1):
name = input("Member Name: ")
memberData.update({m_id: name}) #updates value of key and val
print("Member Added. Member id is: ", m_id)
m_id += 1 #incrementing value of m_id
while (True):
choice = input("Add more member (Y/N): ").lower().strip()
if choice == 'y':
break
elif choice == 'n':
loop1 = False
break
else:
print("invalid choice")
loop1=False
continue
elif choice == 2:
print("Add Book Program")
while True:
name = input("Book Name: ")
qty = int(input("enter quantity"))
            bookData.update({b_id: [name, qty]})  # add the new book id with its [name, quantity] entry
print("Book Added. Book id is: ", b_id)
b_id += 1
            choice = input("Add more books (Y/N): ").lower().strip()
if choice == 'y':
continue
elif choice == 'n':
break
elif choice == 3:
print("Book Borrowing Program")
while True:
m_id = int(input("Member id: "))
            if m_id in memberData:  # member must already exist before borrowing
                b_name = input("Book Name: ")
                for b_id, b_name_qty in bookData.items():  # b_name_qty is a [name, quantity] list
                    if b_name_qty[0] == b_name:  # matched the requested title
                        if b_name_qty[1] > 0:  # copies still in stock
                            borrowData.update({m_id: b_id})  # record the loan against this member
                            bookData[b_id][1] -= 1  # one copy leaves the shelf
                            break
                        else:
                            print("Book out of stock")
                            break
                else:
                    print("Book not present")
            choice = input("Borrow more books (Y/N): ").lower().strip()
if choice == 'y':
continue
elif choice == 'n':
break
elif choice == 4:
print("Book Returning Program")
m_id = int(input("Member Id: "))
name = input("Book Name: ")
        if m_id in borrowData and bookData[borrowData[m_id]][0] == name:  # this member holds that book
            b_id = borrowData.pop(m_id)  # remove the loan record
            bookData[b_id][1] += 1  # the copy goes back into stock
        else:
            print("Book not present")
        choice = input("Return more books (Y/N): ").lower().strip()
if choice == 'y':
continue
elif choice == 'n':
break
elif choice == 5:
print("Member Status Program")
m_id = int(input("Member Id: "))
        if m_id in memberData:  # member must exist before status can be shown
            print("Member Name: ", memberData[m_id])
            if m_id in borrowData:  # look up the book this member currently holds
                b_id = borrowData[m_id]
                print("Allow Book Name: ", bookData[b_id][0])
        else:
            print("Member not present")
elif choice == 6:
print("Book Status Program")
b_id = int(input("Book Id: "))
        for m_id, borrowed_b_id in borrowData.items():  # find the member holding this book
            if borrowed_b_id == b_id:
                print("Member name:", memberData[m_id])
                print("Book name:", bookData[b_id][0])
                print("Book issue to user:", memberData[m_id])
                break
elif choice == 7:
break
else:
print("invalid choice")
|
[
"upasnabhat17@gmail.com"
] |
upasnabhat17@gmail.com
|
c119687b11afe9b22fca389be33ff9b8a804cf22
|
9322c270beaf1019328bf14c836d167145d45946
|
/raoteh/sampler/tests/test_graph_transform.py
|
af315325cddb45fdc81619cf995488fd53736710
|
[] |
no_license
|
argriffing/raoteh
|
13d198665a7a3968aad8d41ddad12c08d36d57b4
|
cdc9cce8fdad0a79dbd90dfcdec6feece8fc931f
|
refs/heads/master
| 2021-01-22T19:41:25.828133
| 2014-03-10T22:25:48
| 2014-03-10T22:25:48
| 10,087,018
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,511
|
py
|
"""Test graph algorithms relevant to Rao-Teh sampling.
"""
from __future__ import division, print_function, absolute_import
import itertools
from collections import defaultdict
import networkx as nx
from numpy.testing import (run_module_suite, TestCase,
assert_equal, assert_allclose, assert_, assert_raises)
from raoteh.sampler._graph_transform import (
get_edge_bisected_graph,
get_node_to_state,
remove_redundant_nodes,
get_redundant_degree_two_nodes,
get_chunk_tree,
add_trajectories,
)
# This is an official itertools recipe.
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s)+1))
class TestGraphTransform(TestCase):
def test_get_edge_bisected_graph(self):
# Create an example from the networkx documentation.
G = nx.Graph()
G.add_weighted_edges_from([
(1, 2, 0.125),
(1, 3, 0.75),
(2, 4, 1.2),
(3, 4, 0.375)])
# Create a new graph by bisecting the edges of the old graph.
H = get_edge_bisected_graph(G)
# The edge-bisected graph has twice as many edges.
assert_equal(len(G.edges()) * 2, len(H.edges()))
assert_equal(G.size()*2, H.size())
# The sum of edge weights is unchanged.
assert_allclose(G.size(weight='weight'), H.size(weight='weight'))
# The node set of the edge-bisected graph includes that of the original.
assert_(set(G) <= set(H))
# The added nodes are each greater than each original node.
assert_(max(G) < min(set(H) - set(G)))
def test_get_chunk_tree(self):
# Define the original tree and its event nodes.
# This is taken from a doodle in my notebook,
# and it is not particularly cleverly chosen.
tree_edges = (
(0, 1),
(1, 2),
(3, 4),
(4, 2),
(2, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(7, 10),
(10, 11),
(11, 12),
(12, 13),
(13, 14),
(13, 15),
(15, 16),
(16, 17),
)
event_nodes = {1, 4, 5, 6, 8, 10, 11, 12, 15, 16}
# Create a tree by specifying the edges.
T = nx.Graph()
T.add_edges_from(tree_edges)
# Run tests, using all possible roots and also a default root.
potential_roots = list(T) + [None]
for root in potential_roots:
# Construct the chunk tree and its associated node maps.
results = get_chunk_tree(T, event_nodes)
chunk_tree, non_event_map, event_map = results
# The nodes pointed to by the non_event_map
# should be nodes in the chunk_tree.
assert_(set(non_event_map.values()) <= set(T))
# The output tree should have 11 nodes and 10 edges.
assert_equal(len(chunk_tree), 11)
assert_equal(len(chunk_tree.edges()), 10)
# The 8 non-event nodes should map to 7 unique chunk nodes.
assert_equal(len(non_event_map), 8)
assert_equal(len(set(non_event_map.values())), 7)
# The non-event nodes 13 and 14 should map to the same chunk.
assert_equal(non_event_map[13], non_event_map[14])
def test_remove_redundant_nodes_short_path(self):
# Define a short path with one redundant
# and one non-redundant internal node.
T = nx.Graph()
T.add_edge(0, 1, state=0, weight=1)
T.add_edge(1, 2, state=0, weight=1)
T.add_edge(2, 3, state=1, weight=1)
# Try removing a redundant node.
redundant_nodes = {1}
T_out = remove_redundant_nodes(T, redundant_nodes)
assert_equal(set(T_out), set(T) - redundant_nodes)
assert_equal(T_out[0][2]['weight'], 2)
# Fail at removing a non-redundant node.
redundant_nodes = {2}
assert_raises(
Exception,
remove_redundant_nodes,
T, redundant_nodes)
def test_remove_redundant_nodes_long_path(self):
# Define a path with multiple consecutive redundant internal nodes.
T = nx.Graph()
T.add_edge(0, 1, state=0, weight=1.1)
T.add_edge(1, 2, state=0, weight=1.2)
T.add_edge(2, 3, state=1, weight=1.3)
T.add_edge(3, 4, state=1, weight=1.4)
T.add_edge(4, 5, state=1, weight=1.5)
T.add_edge(5, 6, state=1, weight=1.6)
T.add_edge(6, 7, state=1, weight=1.7)
# Get the original weighted size.
# This is the sum of weights of all edges.
original_size = T.size(weight='weight')
# Check the set of redundant nodes.
all_redundant_nodes = {1, 3, 4, 5, 6}
obs_nodes = get_redundant_degree_two_nodes(T)
assert_equal(all_redundant_nodes, obs_nodes)
# Try removing all valid combinations of redundant nodes.
for redundant_node_tuple in powerset(all_redundant_nodes):
redundant_nodes = set(redundant_node_tuple)
T_out = remove_redundant_nodes(T, redundant_nodes)
assert_equal(set(T_out), set(T) - redundant_nodes)
assert_allclose(T_out.size(weight='weight'), original_size)
def test_remove_redundant_nodes_small_tree(self):
# Define a short path with one redundant
# and one non-redundant internal node.
T = nx.Graph()
T.add_edge(0, 1, state=0, weight=1)
T.add_edge(0, 2, state=0, weight=1)
T.add_edge(0, 3, state=0, weight=1)
# None of the nodes are considered redundant in the current
# implementation, because each node is of degree 1 or 3.
for redundant_nodes in ({0}, {1}, {2}, {3}):
assert_raises(
Exception,
remove_redundant_nodes,
T, redundant_nodes)
def test_remove_redundant_nodes_medium_tree(self):
# Define a tree.
T = nx.Graph()
T.add_edge(0, 10, state=0, weight=1.1)
T.add_edge(0, 20, state=0, weight=1.2)
T.add_edge(0, 30, state=0, weight=1.3)
T.add_edge(20, 21, state=0, weight=1.4)
T.add_edge(30, 31, state=0, weight=1.5)
T.add_edge(31, 32, state=0, weight=1.6)
# Get the original weighted size.
# This is the sum of weights of all edges.
original_size = T.size(weight='weight')
# Try removing all valid combinations of redundant nodes.
for redundant_node_tuple in powerset((20, 30, 31)):
redundant_nodes = set(redundant_node_tuple)
T_out = remove_redundant_nodes(T, redundant_nodes)
assert_equal(set(T_out), set(T) - redundant_nodes)
assert_allclose(T_out.size(weight='weight'), original_size)
class TestAddTrajectories(TestCase):
def test_compatible_trees(self):
T_base = nx.Graph()
T_base.add_edge(0, 1, weight=0.1)
T_base.add_edge(0, 2, weight=0.1)
T_base.add_edge(0, 3, weight=0.1)
T_traj = nx.Graph()
T_traj.add_edge(0, 1, state=0, weight=0.1)
T_traj.add_edge(0, 20, state=0, weight=0.05)
T_traj.add_edge(20, 2, state=0, weight=0.05)
T_traj.add_edge(0, 3, state=0, weight=0.1)
root = 0
T_merged, dummy_nodes = add_trajectories(T_base, root, [T_traj])
# There should not be any dummy nodes.
assert_equal(dummy_nodes, set())
# The merged tree should have four edges.
assert_equal(T_base.size(), 3)
assert_equal(T_merged.size(), 4)
# The total weight of the merged tree
# should be the same as the total weight of the base tree.
assert_allclose(
T_merged.size(weight='weight'),
T_base.size(weight='weight'))
def test_incompatible_trees(self):
T_base = nx.Graph()
T_base.add_edge(0, 1, weight=0.1)
T_base.add_edge(0, 2, weight=0.1)
T_base.add_edge(0, 3, weight=0.1)
root = 0
# Define a trajectory that is bad because it adds a high degree node.
traj = nx.Graph()
traj.add_edge(0, 4, state=0, weight=0.1)
traj.add_edge(4, 20, state=0, weight=0.05)
traj.add_edge(20, 2, state=0, weight=0.05)
traj.add_edge(4, 3, state=0, weight=0.1)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
# Define a trajectory that is bad because it adds a leaf node.
traj = nx.Graph()
traj.add_edge(0, 1, state=0, weight=0.1)
traj.add_edge(0, 20, state=0, weight=0.05)
traj.add_edge(20, 2, state=0, weight=0.05)
traj.add_edge(0, 3, state=0, weight=0.05)
traj.add_edge(3, 4, state=0, weight=0.05)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
# Define a trajectory that is bad
# because it flips around the nodes in a way that is incompatible
# with the original tree topology.
traj = nx.Graph()
traj.add_edge(1, 0, state=0, weight=0.1)
traj.add_edge(1, 2, state=0, weight=0.1)
traj.add_edge(1, 3, state=0, weight=0.1)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
def test_complicated_incompatible_trees(self):
T_base = nx.Graph()
T_base.add_edge(0, 1, weight=0.1)
T_base.add_edge(0, 2, weight=0.1)
T_base.add_edge(0, 3, weight=0.1)
T_base.add_edge(3, 4, weight=0.1)
T_base.add_edge(3, 5, weight=0.1)
root = 0
# Define a trajectory that is bad
# because the topology is different in a way that cannot be detected
# by checking the degrees of the nodes.
traj = nx.Graph()
traj.add_edge(3, 1, state=0, weight=0.1)
traj.add_edge(3, 2, state=0, weight=0.1)
traj.add_edge(3, 0, state=0, weight=0.1)
traj.add_edge(0, 4, state=0, weight=0.1)
traj.add_edge(0, 5, state=0, weight=0.1)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
def test_edge_to_event_times(self):
# The merged tree will look like the following,
# where 'x' is a node in the original tree,
# and 'a' is a node introduced by trajectory merging,
# and 'o' is an event node.
#
# x
# /|\
# / | \
# | | |
# o o x
# | | |
# x | | (0, 0)
# x |
# x
# /| (0, 0)
# / a
# / | (0, 10)
# | a
# x | (5, 10)
# a
# | (5, 0)
# o
# | (5, 0)
# a
# | (0, 0)
# x
#
T = nx.Graph()
T.add_edge(0, 1, weight=0.1)
T.add_edge(0, 2, weight=0.1)
T.add_edge(0, 3, weight=0.1)
T.add_edge(3, 4, weight=0.1)
T.add_edge(3, 5, weight=0.1)
T.add_edge(4, 6, weight=0.1)
root = 0
# Define a trajectory with an extra segment along one edge.
traj_a = nx.Graph()
traj_a.add_edge(0, 1, weight=0.1, state=0)
traj_a.add_edge(0, 2, weight=0.1, state=0)
traj_a.add_edge(0, 3, weight=0.1, state=0)
traj_a.add_edge(3, 4, weight=0.1, state=0)
traj_a.add_edge(3, 5, weight=0.1, state=0)
traj_a.add_edge(4, 10, weight=0.025, state=0)
traj_a.add_edge(10, 11, weight=0.05, state=5)
traj_a.add_edge(11, 6, weight=0.025, state=0)
# Define a trajectory with an interleaving segment.
traj_b = nx.Graph()
traj_b.add_edge(0, 1, weight=0.1, state=0)
traj_b.add_edge(0, 2, weight=0.1, state=0)
traj_b.add_edge(0, 3, weight=0.1, state=0)
traj_b.add_edge(3, 4, weight=0.1, state=0)
traj_b.add_edge(3, 5, weight=0.1, state=0)
traj_b.add_edge(4, 20, weight=0.02, state=0)
traj_b.add_edge(20, 21, weight=0.02, state=10)
traj_b.add_edge(21, 6, weight=0.06, state=0)
# Define a few event times along directed edges,
# where the edge direction radiates away from the root.
edge_to_event_times = {
(0, 1) : {0.06},
(0, 2) : {0.02},
(4, 6) : {0.045},
}
# Construct the merged tree.
T_merged, event_nodes = add_trajectories(
T, root,
[traj_a, traj_b],
edge_to_event_times=edge_to_event_times)
# After this point are some tests.
# Check the total number of nodes in the merged tree.
assert_equal(len(T_merged.edges()), 13)
# Check the multiset of edge state pairs in the merged tree.
state_pair_to_count = defaultdict(int)
for edge in nx.bfs_edges(T_merged, root):
na, nb = edge
states = T_merged[na][nb]['states']
state_pair = tuple(states)
assert_equal(len(state_pair), 2)
state_pair_to_count[state_pair] += 1
assert_equal(state_pair_to_count[(0, 10)], 1)
assert_equal(state_pair_to_count[(5, 10)], 1)
assert_equal(state_pair_to_count[(5, 0)], 2)
expected_state_pairs = set([(0, 0), (0, 10), (5, 10), (5, 0)])
assert_equal(set(state_pair_to_count), expected_state_pairs)
# Check that the number of event nodes is correct.
assert_equal(len(edge_to_event_times), len(event_nodes))
# The merged tree must contain all of the nodes of the original tree.
missing_nodes = set(T) - set(T_merged)
assert_equal(missing_nodes, set())
# The base tree, the two trajectories, and the merged tree
# should all have the same weighted size.
weighted_size = T.size(weight='weight')
assert_allclose(traj_a.size(weight='weight'), weighted_size)
assert_allclose(traj_b.size(weight='weight'), weighted_size)
assert_allclose(T_merged.size(weight='weight'), weighted_size)
# Each event node must be adjacent to exactly two edges
# in the merged tree, and both of these edges
# must be annotated with the same sequence of state values.
for node in event_nodes:
assert_equal(T_merged.degree(node), 2)
na, nb = T_merged[node]
na_states = T_merged[node][na]['states']
nb_states = T_merged[node][nb]['states']
assert_equal(na_states, nb_states)
# Print the edges of the merged tree.
"""
print()
print('--- add_trajectories test output ---')
print(event_nodes)
for edge in nx.bfs_edges(T_merged, root):
na, nb = edge
weight = T_merged[na][nb]['weight']
states = T_merged[na][nb]['states']
print(na, nb, weight, states)
print()
"""
"""
0 8 0.02 [0, 0]
0 3 0.1 [0, 0]
0 7 0.06 [0, 0]
8 2 0.08 [0, 0]
3 4 0.1 [0, 0]
3 5 0.1 [0, 0]
7 1 0.04 [0, 0]
4 9 0.02 [0, 0]
9 10 0.005 [0, 10]
10 11 0.015 [5, 10]
11 12 0.005 [5, 0]
12 13 0.03 [5, 0]
13 6 0.025 [0, 0]
"""
class TestGetNodeToState(TestCase):
def test_get_node_to_state_simple_tree_identical_states(self):
T = nx.Graph()
T.add_edge(0, 1, state=42)
T.add_edge(1, 2, state=42)
all_query_nodes = {0, 1, 2}
for query_nodes in powerset(all_query_nodes):
nnodes = len(query_nodes)
node_to_state = get_node_to_state(T, query_nodes)
assert_equal(set(node_to_state), set(query_nodes))
assert_equal(set(node_to_state.values()), set([42]*nnodes))
def test_get_node_to_state_simple_tree_different_states(self):
T = nx.Graph()
T.add_edge(0, 1, state=42)
T.add_edge(1, 2, state=42)
T.add_edge(2, 3, state=99)
# Some of the nodes have defined states.
query_nodes = {0, 1, 3}
node_to_state = get_node_to_state(T, query_nodes)
assert_equal(node_to_state, {0:42, 1:42, 3:99})
# But node 2 does not have a defined state
# because it represents a state transition.
query_nodes = {0, 1, 2, 3}
assert_raises(ValueError, get_node_to_state, T, query_nodes)
def test_complicated_tree(self):
T = nx.Graph()
T.add_edge(0, 1, state=2)
T.add_edge(0, 2, state=2)
T.add_edge(0, 3, state=2)
T.add_edge(3, 4, state=10)
T.add_edge(4, 5, state=10)
T.add_edge(4, 6, state=10)
# Most of the nodes have defined states.
query_nodes = {0, 1, 2, 4, 5, 6}
expected_node_to_state = {0:2, 1:2, 2:2, 4:10, 5:10, 6:10}
node_to_state = get_node_to_state(T, query_nodes)
assert_equal(node_to_state, expected_node_to_state)
# One of the nodes is a transition without a defined state.
query_nodes = {0, 1, 2, 3, 4, 5, 6}
assert_raises(ValueError, get_node_to_state, T, query_nodes)
if __name__ == '__main__':
run_module_suite()
|
[
"argriffi@ncsu.edu"
] |
argriffi@ncsu.edu
|
723e222cf53b147363a4bf4988a03938b087787c
|
ee341e95e484a8594f9942c920bf739b11b70658
|
/Job1/MapReduce/mapper.py
|
8be43816fbde16505668fa1db7d9d4d617c8bf12
|
[] |
no_license
|
FedericoCialini/Progetto1BigDataRomaTre
|
acbd7ac78c10448197abb85a59ccc6c73fb03477
|
84313d1d4613763c1276f33186eadbad950ddb19
|
refs/heads/master
| 2022-09-09T13:10:04.974717
| 2020-05-18T20:57:47
| 2020-05-18T20:57:47
| 258,604,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
#!/home/federico/anaconda3/bin/python
import sys
def mapping():
lines = sys.stdin.readlines()
prices = lines[1:]
for line in prices:
Ticker, OpenValue, CloseValue, Adj_close, LowThe, HighThe, Volume, Date = line.strip().split(",")
year = Date.split("-")[0]
if year >= '2008':
print(Ticker, CloseValue, Volume, Date, sep='\t')
if __name__ == '__main__':
mapping()
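# Illustrative I/O sketch (hypothetical data, not part of the original mapper): a stdin row such as
#   AAPL,10.0,10.5,10.4,9.8,10.6,1000000,2009-03-02
# would be emitted on stdout, tab-separated, as
#   AAPL    10.5    1000000    2009-03-02
# The string comparison year >= '2008' keeps only rows dated 2008 or later (valid for 4-digit years).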
|
[
"federicocialini@gmail.com"
] |
federicocialini@gmail.com
|
50ff4a273d3f0c6af02555598bfcf6c504af52f4
|
931221727641ed3e56e9a30b13e3c15055722a85
|
/btre/listings/migrations/0003_auto_20190409_2240.py
|
2e5fba8bea5cace42d1efa490a847837222ef1bf
|
[] |
no_license
|
gowthamseenu/ML_Shopping
|
c4ed9db5db2cf85477add6be6f16b6186e92d004
|
099f9c45df4398647610f5dbf44abce91ac8b562
|
refs/heads/master
| 2021-09-08T20:08:51.768035
| 2021-08-30T09:53:07
| 2021-08-30T09:53:07
| 239,966,290
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
# Generated by Django 2.2 on 2019-04-09 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('listings', '0002_auto_20190409_2235'),
]
operations = [
migrations.AlterField(
model_name='product',
name='brand',
field=models.CharField(choices=[('dell', 'dell'), ('apple', 'apple'), ('onepluse', 'onepluse'), ('hp', 'hp')], max_length=200),
),
migrations.AlterField(
model_name='product',
name='display_size',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=2),
),
migrations.AlterField(
model_name='product',
name='processor_spped',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='product',
name='storage',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='product',
name='sub_type',
field=models.CharField(choices=[('Electronic_product', 'Electronic_product'), ('accessories', 'accessories')], max_length=100),
),
migrations.AlterField(
model_name='product',
name='weight',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=6),
),
]
|
[
"gowthamseenu@biztechnosys.com"
] |
gowthamseenu@biztechnosys.com
|
45bd5115c7a3405823961182633a568318a1d2ef
|
7234e6c72eb3f09c4a66dbe91f00fdf7742f010f
|
/algo/arrays/binarysearch/shiftedBinarySearch.py
|
fc901758206f1662bac912102f0b1b7740f4186f
|
[] |
no_license
|
srinathalla/python
|
718ac603473e7bed060ba66aa3d39a90cf7ef69d
|
b6c546070b1738350303df3939888d1b0e90e89b
|
refs/heads/master
| 2021-06-13T06:11:42.653311
| 2021-02-19T06:01:41
| 2021-02-19T06:01:41
| 150,374,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
#
# Time complexity: O(log n), Space complexity: O(1)
#
def shiftedBinarySearch(array, target):
l = 0
r = len(array)-1
while l < r:
m = (l + r)//2
if array[m] == target:
return m
elif array[m] < array[r]:
if array[m] < target and target <= array[r]:
l = m + 1
else:
r = m - 1
elif array[m] > array[r]:
if array[l] <= target and target < array[m]:
r = m - 1
else:
l = m + 1
return l if array[l] == target else -1
print(shiftedBinarySearch([5, 23, 111, 1], 111))
print(shiftedBinarySearch([45, 61, 71, 72, 73, 0, 1, 21, 33, 45], 33))
|
[
"srinathb10j.ik@gmail.com"
] |
srinathb10j.ik@gmail.com
|
87015919007428f2852be00dba827a3230d85010
|
3abb60c61f1e5aba68620d4c5f9e81700100bbf5
|
/model/SVM/test.py
|
d63d07628926294ee8999f8053402735843f191e
|
[
"Apache-2.0"
] |
permissive
|
chan8616/PoAI
|
249ff39e49b781c9142ea5da5265dd0479c0a7b6
|
9bc4b69f434c8be4215f483cefbf2bd171803219
|
refs/heads/master
| 2023-02-04T17:00:42.750265
| 2020-12-16T08:25:06
| 2020-12-16T08:25:06
| 141,091,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,882
|
py
|
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pandas as pd
import pickle
import numpy as np
import matplotlib.pyplot as plt
import time
import os
def test(config):
data_path = config.data_path
save_directory = config.save_directory
save_figure = config.save_figure
pretrained_file_path = config.pretrained_file_path
data = pd.read_csv(data_path)
x_columns = config.x_columns
x_columns = x_columns.split(',')
X = data[x_columns]
y_column = config.y_column
Y = data[y_column]
X_test = X
Y_test = Y
model = pickle.load(open(pretrained_file_path, 'rb'))
print("load pretrained model")
y_test_predict = model.predict(X_test)
acc = accuracy_score(Y_test, y_test_predict)
print("The model performance for testing set")
print("--------------------------------------")
print('accuracy score is {}'.format(acc))
if save_figure is True:
X_test_a = np.array(X_test)
h = .02 # step size in the mesh
x_min, x_max = X_test_a[:, 0].min() - 1, X_test_a[:, 0].max() + 1
y_min, y_max = X_test_a[:, 1].min() - 1, X_test_a[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X_test_a[:, 0], X_test_a[:, 1], c=Y_test, cmap=plt.cm.Paired, edgecolors='k')
plt.title('classification result')
plt.axis('tight')
time_stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime((time.time())))[2:]
file_name = 'svm_model_' + time_stamp + '.png'
plt.savefig(os.path.join(save_directory, file_name))
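# Usage sketch (hypothetical config; the original project presumably builds it elsewhere,
# e.g. from parsed command-line arguments). Only the attributes read above are required,
# and the plotting branch assumes exactly two feature columns:
#
#   from types import SimpleNamespace
#   cfg = SimpleNamespace(data_path='iris.csv', save_directory='.', save_figure=False,
#                         pretrained_file_path='svm_model.pkl',
#                         x_columns='sepal_length,sepal_width', y_column='species')
#   test(cfg)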
|
[
"dudgus1727@postech.ac.kr"
] |
dudgus1727@postech.ac.kr
|
7b5a81f5531be906c6c75c6ea6ee45ae41407e10
|
188950fb7b1fce4840b41e1e9454f0133a8d75ce
|
/src/Server/Controller/guess_controller.py
|
a2518f5c1fdefce113aeaec0371319b7b16a82fa
|
[] |
no_license
|
cloew/WordGuessAngular
|
3f5c6a1e0e14f6e905ec78a618b606ff3cb3e798
|
0d889cd3bb9cafe35a6e7e2ccba97914a26825b9
|
refs/heads/master
| 2021-01-01T05:53:26.776161
| 2014-09-01T14:55:39
| 2014-09-01T14:55:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
from Server.game_wrapper import GameWrapper
from kao_flask.controllers.json_controller import JSONController
class GuessController(JSONController):
""" Controller to allow a player to guess the word for the current Round """
def performWithJSON(self, gameId):
game = GameWrapper(id=gameId)
results = game.guess(self.json['guesses'])
return game.toJSON()
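# Request-payload sketch (an assumption inferred from the self.json['guesses'] access above):
# the controller presumably expects a JSON body such as {"guesses": ["a", "b", ...]} and
# responds with the updated game state produced by game.toJSON().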
|
[
"cloew123@gmail.com"
] |
cloew123@gmail.com
|
23e10462cf68f0d4848893ca60ea2362f183a88f
|
16da6040330dd1e8f88478b31e958dba88d96cbf
|
/ddpg_agent.py
|
46bb0ccd5e835eb29f8695de83ac4e17026128ab
|
[] |
no_license
|
vgudapati/DRLND_Continuous_Control
|
0c5c5098a167b44f0f2a1f957ab3080e28e55265
|
e55f5df74d4489821b322754570a26e552a2da59
|
refs/heads/master
| 2020-04-16T01:52:14.500904
| 2019-01-12T01:54:21
| 2019-01-12T01:54:21
| 165,188,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,194
|
py
|
import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
'''
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 512 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 1e-4 # learning rate of the actor
LR_CRITIC = 1e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
'''
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class DDPGAgent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size = 4, random_seed = 0,
BUFFER_SIZE = int(1e5),
BATCH_SIZE = 128,
GAMMA = 0.99,
TAU = 1e-3,
LR_ACTOR = 1e-4,
LR_CRITIC = 1e-3,
WEIGHT_DECAY = 0):
"""
Initialize an Agent object.
Params
======
state_size (int) : dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
BUFFER_SIZE (int): replay buffer size
BATCH_SIZE (int): minibatch size
GAMMA (float): discount factor
TAU (float): for soft update of target parameters
            LR_ACTOR (float): learning rate for the actor
LR_CRITIC (float): learning rate for critic
WEIGHT_DECAY (float): L2 weight decay
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
self.batch_size = BATCH_SIZE
self.gamma = GAMMA
self.tau = TAU
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
# Time step
self.timestep = 0
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
def step(self, state, action, reward, next_state, done):
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
for s, a, r, ns, d in zip(state, action, reward, next_state, done):
self.memory.add(s, a, r, ns, d)
#self.memory.add(state, action, reward, next_state, done)
'''
self.timestep = (self.timestep + 1) % 2
# Learn every 2 time steps
if self.timestep == 0:
# if enough samples are available in memory
'''
if len(self.memory) > self.batch_size:
experiences = self.memory.sample()
self.learn(experiences, self.gamma)
def act(self, state, add_noise=True):
"""Returns actions for given state as per current policy."""
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.noise.sample()
return np.clip(action, -1, 1)
def reset(self):
self.noise.reset()
def learn(self, experiences, gamma):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> action
critic_target(state, action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
#print(experiences)
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models
        actions_next = self.actor_target(next_states)  # use the target actor, per the docstring above
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
#print(rewards.shape)
#print(Q_targets_next.shape)
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Compute critic loss
Q_expected = self.critic_local(states, actions)
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.critic_optimizer.zero_grad()
critic_loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
# Compute actor loss
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean()
# Minimize the loss
self.actor_optimizer.zero_grad()
actor_loss.backward()
torch.nn.utils.clip_grad_norm_(self.actor_local.parameters(), 1)
self.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, self.tau)
self.soft_update(self.actor_local, self.actor_target, self.tau)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
"""Initialize parameters and noise process."""
self.size = size
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.seed = random.seed(seed)
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.size)
self.state = x + dx
return self.state
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size) # internal memory (deque)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
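# ----------------------------------------------------------------------------
# Usage sketch (not part of the original file): an outline of how the classes above
# are typically wired together. The `env` object is a hypothetical stand-in that
# returns batched numpy arrays for all agents; in this project it would presumably
# be the Unity Reacher environment, with e.g. state_size=33 and action_size=4.
#
#   agent = DDPGAgent(state_size=33, action_size=4, random_seed=0)
#   for episode in range(200):
#       states = env.reset()                       # shape: (num_agents, state_size)
#       agent.reset()                              # re-initialise the OU noise process
#       while True:
#           actions = agent.act(states)            # actions clipped to [-1, 1]
#           next_states, rewards, dones = env.step(actions)
#           agent.step(states, actions, rewards, next_states, dones)
#           states = next_states
#           if any(dones):
#               break
# ----------------------------------------------------------------------------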
|
[
"noreply@github.com"
] |
noreply@github.com
|