blob_id (string, 40 chars) | directory_id (string, 40) | path (string, 2–616) | content_id (string, 40) | detected_licenses (list, 0–69 items) | license_type (string, 2 classes) | repo_name (string, 5–118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable ⌀) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, 2–10.3M) | authors (list, 1 item) | author_id (string, 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71ebe5d419fded0ab53a1753b8fc2126ccbcff49
|
7c1badc28768370bca5782e3d540984b80aac3ed
|
/newblogs/newblogs/mysqlstore.py
|
9ba20e6ae649b795ac1dd705f0b095d0c9c77fe2
|
[
"Apache-2.0"
] |
permissive
|
luckyTang168/spider_study
|
1b66f712c57e3ddf97d1e08bf5d375f1f79ce469
|
d0534c28f784f553924137d98c60c8a0198f8c8d
|
refs/heads/master
| 2021-05-29T22:30:14.525296
| 2015-04-27T16:27:54
| 2015-04-27T16:27:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
# -*- coding: utf-8 -*-
# __author__ = 'gavin'
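# Persistence helpers for the spider's `newblogs` MySQL database (Python 2 / MySQLdb):
# save_blog() inserts a crawled post, is_exist() checks for a duplicate id, and
# get_all() dumps all titles, newest first, to test.txt.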
import MySQLdb
import sys
def save_blog(blog):
try:
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',port=3306)
cur = conn.cursor()
conn.select_db('newblogs')
# cur.execute('SET NAMES \'utf8\';')
command = 'insert into blog(id,title,url,content,time) value(%s,%s,%s,%s,%s)'
values = [long(blog['id'][0]),blog['title'][0].encode('utf-8'),blog['url'][0],blog['content'][0].encode('utf-8'),blog['time'][0]]
cur.execute(command, values)
conn.commit()
except MySQLdb.Error,e:
with open('error.txt','a') as f:
f.write(str(e.args[1])+'\n')
print "Mysql Error %s" % (e.args[0])
else:
cur.close()
conn.close()
def is_exist(id):
try:
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',port=3306)
cur = conn.cursor()
conn.select_db('newblogs')
command = 'select count(*) from blog where id=%s'
values=[long(id)]
cur.execute(command, values)
result = cur.fetchone()
conn.commit()
if result[0] > 0:
return True
else:
return False
except MySQLdb.Error,e:
with open('error.txt','a') as f:
f.write(str(e.args[0])+'\n')
print "Mysql Error %s" % (e.args[0])
else:
cur.close()
conn.close()
def get_all():
try:
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',port=3306)
cur = conn.cursor()
conn.select_db('newblogs')
command = 'select title from blog ORDER BY time desc'
cur.execute(command)
result = cur.fetchall()
conn.commit()
for row in result:
with open('test.txt', 'a') as f:
f.write(row[0]+'\n')
# print row[0]
except MySQLdb.Error,e:
with open('error.txt','a') as f:
f.write(str(e.args[0])+'\n')
print "Mysql Error %s" % (e.args[0])
else:
cur.close()
conn.close()
if __name__ == "__main__":
get_all()
|
[
"gavin42333@hotmail.com"
] |
gavin42333@hotmail.com
|
bd9d88f2c4b2d02cd056f7cd917cf2dad1473002
|
ae83eeae91ef5cc55f48e1c30bedda8c96f5daec
|
/kaynak/kod3.py
|
03dcc6096eb8288664a03d6055e7b8bb8f473cfd
|
[
"MIT"
] |
permissive
|
MimVavKaf/pgn2gif
|
967d0397b8e6e6be8e0c3b1c2ed81306127fea0b
|
d2b374ea1e918a3c4fa3b550dc32b6bae1e71c6c
|
refs/heads/master
| 2021-01-10T16:24:15.584867
| 2016-03-15T12:16:32
| 2016-03-15T12:16:32
| 53,724,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
#!/usr/bin/python
from PIL import Image, ImageDraw, ImageFont, ImageSequence
from images2gif import writeGif
import os, subprocess, sys, time
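# Capture FRAMES snapshots of `top` output, render each as a text frame with a
# green pie-slice progress indicator in the corner, and assemble them into topmovie.gif.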
FRAMES = 12
FRAME_DELAY = 0.75
WIDTH, HEIGHT = 650, 300
PIE_POS = (WIDTH-50,10, WIDTH-10,50)
FONT = ImageFont.truetype('/usr/share/fonts/truetype/liberation/LiberationMono-Regular.ttf', 12)
def make_frame(txt, count, font=FONT):
image = Image.new("RGBA", (WIDTH, HEIGHT), (255,255,255))
draw = ImageDraw.Draw(image)
fontsize = font.getsize('')[1]
for row, line in enumerate(txt.split('\n')):
draw.text((5, fontsize * row), line, (0,0,0), font=font)
draw.pieslice(PIE_POS, 0, 360, (255,255,204))
draw.pieslice(PIE_POS, 0, int(360.0/FRAMES*(1+count)), (0,128,0))
return image
frames = []
for count in range(FRAMES):
txt = subprocess.Popen('top -c -n 1 -b'.split(), stdout=subprocess.PIPE).stdout.read()
frames.append(make_frame(txt, count))
time.sleep(FRAME_DELAY)
writeGif("topmovie.gif", frames, duration=FRAME_DELAY, repeat=True, dither=False, nq=0, subRectangles=True, dispose=None)
#loops=10, dither=0)
|
[
"m.vahit.kapar@gmail.com"
] |
m.vahit.kapar@gmail.com
|
74be05a906bcc6451b5e7fabb397130bb0a870f7
|
78e336c290ce3d5e5b7ad31694fe4af9092fc562
|
/enfermeras.py
|
ed07f6dd4edb6a726509ccd6bf3a03c0875c6e5c
|
[] |
no_license
|
Ivvnh/BackendPr2
|
7478e5672b65ce247eae0d38ca51fda8a552f2cb
|
e3f532263115b7b16c37718a31191de6f353944c
|
refs/heads/main
| 2023-04-15T18:25:44.895932
| 2021-05-02T04:04:07
| 2021-05-02T04:04:07
| 359,986,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
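# Plain data container for nurse (enfermera) records; attribute names are kept
# in Spanish as written (contraseña = password), with simple getters/setters.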
class enfermeras:
def __init__(self,nombre,apellido,nacimiento,sexo,usuario,contraseña,telefono):
self.nombre = nombre
self.apellido = apellido
self.nacimiento = nacimiento
self.sexo = sexo
self.usuario = usuario
self.contraseña = contraseña
self.telefono = telefono
def getNombre(self):
return self.nombre
def getApellido(self):
return self.apellido
def getNacimiento(self):
return self.nacimiento
def getSexo(self):
return self.sexo
def getUsuario(self):
return self.usuario
def getContraseña(self):
return self.contraseña
def getTelefono(self):
return self.telefono
def setNombre(self, nombre):
self.nombre = nombre
def setApellido(self, apellido):
self.apellido = apellido
def setNacimiento(self,nacimiento):
self.nacimiento = nacimiento
def setSexo(self,sexo):
self.sexo = sexo
def setUsuario(self,usuario):
self.usuario = usuario
def setContraseña(self,contraseña):
self.contraseña = contraseña
def setTelefono(self,telefono):
self.telefono = telefono
|
[
"noreply@github.com"
] |
Ivvnh.noreply@github.com
|
589b652b926f92d384451364fae341e07839509c
|
9ad5d8450027f53f05ea1ec90b7bc2673168ff76
|
/src/decimalConversion.py
|
ebcbdacac574ff4f972ac9aa00196cb16c49a02d
|
[] |
no_license
|
LordNewt/romanNumeralTest
|
615edaa8f7960a3e5a6be50e4a360eb9e19a48ce
|
7f52e4879fe88d9d0f573c8973e67fc9862e2735
|
refs/heads/master
| 2020-04-02T05:25:08.824089
| 2016-07-29T03:09:06
| 2016-07-29T03:09:06
| 64,433,613
| 0
| 0
| null | 2016-07-29T03:09:07
| 2016-07-28T23:00:03
| null |
UTF-8
|
Python
| false
| false
| 3,927
|
py
|
class DecimalConversion():
decimalValues = {
'M': { 'value': 1000, 'repeats': 3, 'reductor': 'C' },
'D': { 'value': 500, 'repeats': 0, 'reductor': 'C' },
'C': { 'value': 100, 'repeats': 3, 'reductor': 'X' },
'L': { 'value': 50, 'repeats': 0, 'reductor': 'X' },
'X': { 'value': 10, 'repeats': 3, 'reductor': 'I' },
'V': { 'value': 5, 'repeats': 0, 'reductor': 'I' },
'I': { 'value': 1, 'repeats': 3 }
}
#
# Convenience method for checking for a valid numeral
#
def isValidNumeral(self, numeral):
if not numeral in self.decimalValues:
# Unknown letter - print it and return False
print('Invalid character: {0}'.format(numeral))
return False
return True
def toDecimalValue(self, input):
# Break the input into a character list that we can parse through
input_list = list(input.upper())
# Set the output value, the decimal value of the numerals, to zero
total = 0
# Set the initial "read" position in the character list to zero
position = 0
# A numeral can't be legal if it has more than 3 of the same character
# in a row, so keep track of how many have been done
repeats = 0
last_numeral = ''
# Iterate over the numerals in the list until no more remain
while position < len(input_list):
# Check validity of the numeral
numeral = input_list[position]
if not self.isValidNumeral(numeral):
return -1
# Make sure the maximum iteration limit hasn't been exceeded
if numeral == last_numeral:
repeats += 1
if repeats >= self.decimalValues[numeral]['repeats']:
print('Too many of {0} numeral in a row, invalid'.format(numeral))
return -1
else:
repeats = 0
last_numeral = numeral
# Next check if this is a "reduce by" numeral (like the I in IV)
if position < len(input_list)-1:
next_numeral = input_list[(position+1)]
if not self.isValidNumeral(next_numeral):
return -1
if self.decimalValues[numeral]['value'] < self.decimalValues[next_numeral]['value']:
# It IS a "reduce by" numeral. Check if it's a valid option
if numeral != self.decimalValues[next_numeral]['reductor']:
print('Invalid reductor numeral {0} for {1}'.format(numeral, next_numeral))
return -1
if repeats:
print('Illegal format, cannot repeat reduction digits')
return -1
if position < len(input_list)-2:
# A little more complex check: having two VALID "reduce by" numerals
# in a row is still invalid (i.e. IXC)
two_ahead_numeral = input_list[(position+2)]
                        if not self.isValidNumeral(two_ahead_numeral):
return -1
if self.decimalValues[next_numeral]['value'] < self.decimalValues[two_ahead_numeral]['value']:
print('Illegal format, cannot have two reduce-by digits in a row')
return -1
total -= self.decimalValues[numeral]['value']
# Then move forward to the next numeral, since this one is done
position += 1
numeral = input_list[position]
# Add the value of the numeral to the total
total += self.decimalValues[numeral]['value']
# Always end an iteration by incrementing position
position += 1
# Return the total
return total
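
# Illustrative usage of the class above (hand-checked against the algorithm):
#   DecimalConversion().toDecimalValue('MCMXCIV')  # -> 1994
#   DecimalConversion().toDecimalValue('IIII')     # -> -1, more than 3 repeats is rejected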
|
[
"sschmit@gmail.com"
] |
sschmit@gmail.com
|
aea29e49c0f2a470d0ba6f4f9b9b6f855fcff52b
|
648a6e11bea6a15bd677ecf152592edd71203734
|
/Lab9/task7/task7_images/task7_background.py
|
463225397c9b59f3a2857c760735db9d25e7d2de
|
[] |
no_license
|
paulkirwan6/AllLabs
|
70975233c2a545e0acf116f9326bc1153fd4d497
|
5c660eb352e61047519e87deaa5a3f0a0155785e
|
refs/heads/master
| 2020-09-20T12:13:17.593871
| 2019-11-27T18:59:30
| 2019-11-27T18:59:30
| 224,473,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,179
|
py
|
# ROSHAN GEORGE 2
# Copyright (c) 2017 Andrey Voroshilov
# Modifications made by Joseph Lemley for use in EE5116 MDT lab. 25/10/2017
import os
import tensorflow as tf
import numpy as np
import scipy.io
import scipy.misc  # imread/imresize used below live here in older SciPy (removed in recent releases)
import time
import sys
import csv
from PIL import Image
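# Pipeline: load SqueezeNet v1.1 weights from sqz_full.mat, then every 2 s re-read
# annotations.txt and print the top-1 ImageNet class for each listed image (TF1 graph API).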
def imread_resize(path):
img_orig = scipy.misc.imread(path)
img = scipy.misc.imresize(img_orig, (227, 227)).astype(np.float)
if len(img.shape) == 2:
# grayscale
img = np.dstack((img, img, img))
return img, img_orig.shape
def imsave(path, img):
img = np.clip(img, 0, 255).astype(np.uint8)
Image.fromarray(img).save(path, quality=95)
def get_dtype_np():
return np.float32
def get_dtype_tf():
return tf.float32
# SqueezeNet v1.1 (signature pool 1/3/5)
########################################
def load_net(data_path):
if not os.path.isfile(data_path):
        # `parser` is not defined in this file; abort with the same message instead
        sys.exit("Network %s does not exist. (Did you forget to download it?)" % data_path)
weights_raw = scipy.io.loadmat(data_path)
# Converting to needed type
conv_time = time.time()
weights = {}
for name in weights_raw:
weights[name] = []
# skipping '__version__', '__header__', '__globals__'
if name[0:2] != '__':
kernels, bias = weights_raw[name][0]
weights[name].append(kernels.astype(get_dtype_np()))
weights[name].append(bias.astype(get_dtype_np()))
print("Converted network data(%s): %fs" % (get_dtype_np(), time.time() - conv_time))
mean_pixel = np.array([104.006, 116.669, 122.679], dtype=get_dtype_np())
return weights, mean_pixel
def preprocess(image, mean_pixel):
swap_img = np.array(image)
img_out = np.array(swap_img)
img_out[:, :, 0] = swap_img[:, :, 2]
img_out[:, :, 2] = swap_img[:, :, 0]
return img_out - mean_pixel
def unprocess(image, mean_pixel):
swap_img = np.array(image + mean_pixel)
img_out = np.array(swap_img)
img_out[:, :, 0] = swap_img[:, :, 2]
img_out[:, :, 2] = swap_img[:, :, 0]
return img_out
def get_weights_biases(preloaded, layer_name):
weights, biases = preloaded[layer_name]
biases = biases.reshape(-1)
return (weights, biases)
def fire_cluster(net, x, preloaded, cluster_name):
# central - squeeze
layer_name = cluster_name + '/squeeze1x1'
weights, biases = get_weights_biases(preloaded, layer_name)
x = _conv_layer(net, layer_name + '_conv', x, weights, biases, padding='VALID')
x = _act_layer(net, layer_name + '_actv', x)
# left - expand 1x1
layer_name = cluster_name + '/expand1x1'
weights, biases = get_weights_biases(preloaded, layer_name)
x_l = _conv_layer(net, layer_name + '_conv', x, weights, biases, padding='VALID')
x_l = _act_layer(net, layer_name + '_actv', x_l)
# right - expand 3x3
layer_name = cluster_name + '/expand3x3'
weights, biases = get_weights_biases(preloaded, layer_name)
x_r = _conv_layer(net, layer_name + '_conv', x, weights, biases, padding='SAME')
x_r = _act_layer(net, layer_name + '_actv', x_r)
# concatenate expand 1x1 (left) and expand 3x3 (right)
x = tf.concat([x_l, x_r], 3)
net[cluster_name + '/concat_conc'] = x
return x
def net_preloaded(preloaded, input_image, pooling, needs_classifier=False, keep_prob=None):
net = {}
cr_time = time.time()
x = tf.cast(input_image, get_dtype_tf())
# Feature extractor
#####################
# conv1 cluster
layer_name = 'conv1'
weights, biases = get_weights_biases(preloaded, layer_name)
x = _conv_layer(net, layer_name + '_conv', x, weights, biases, padding='VALID', stride=(2, 2))
x = _act_layer(net, layer_name + '_actv', x)
x = _pool_layer(net, 'pool1_pool', x, pooling, size=(3, 3), stride=(2, 2), padding='VALID')
# fire2 + fire3 clusters
x = fire_cluster(net, x, preloaded, cluster_name='fire2')
fire2_bypass = x
x = fire_cluster(net, x, preloaded, cluster_name='fire3')
x = _pool_layer(net, 'pool3_pool', x, pooling, size=(3, 3), stride=(2, 2), padding='VALID')
# fire4 + fire5 clusters
x = fire_cluster(net, x, preloaded, cluster_name='fire4')
fire4_bypass = x
x = fire_cluster(net, x, preloaded, cluster_name='fire5')
x = _pool_layer(net, 'pool5_pool', x, pooling, size=(3, 3), stride=(2, 2), padding='VALID')
# remainder (no pooling)
x = fire_cluster(net, x, preloaded, cluster_name='fire6')
fire6_bypass = x
x = fire_cluster(net, x, preloaded, cluster_name='fire7')
x = fire_cluster(net, x, preloaded, cluster_name='fire8')
x = fire_cluster(net, x, preloaded, cluster_name='fire9')
# Classifier
#####################
if needs_classifier == True:
# Dropout [use value of 50% when training]
x = tf.nn.dropout(x, keep_prob)
# Fixed global avg pool/softmax classifier:
# [227, 227, 3] -> 1000 classes
layer_name = 'conv10'
weights, biases = get_weights_biases(preloaded, layer_name)
x = _conv_layer(net, layer_name + '_conv', x, weights, biases)
x = _act_layer(net, layer_name + '_actv', x)
# Global Average Pooling
x = tf.nn.avg_pool(x, ksize=(1, 13, 13, 1), strides=(1, 1, 1, 1), padding='VALID')
net['classifier_pool'] = x
x = tf.nn.softmax(x)
net['classifier_actv'] = x
print("Network instance created: %fs" % (time.time() - cr_time))
return net
def _conv_layer(net, name, input, weights, bias, padding='SAME', stride=(1, 1)):
conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, stride[0], stride[1], 1),
padding=padding)
x = tf.nn.bias_add(conv, bias)
net[name] = x
return x
def _act_layer(net, name, input):
x = tf.nn.relu(input)
net[name] = x
return x
def _pool_layer(net, name, input, pooling, size=(2, 2), stride=(3, 3), padding='SAME'):
if pooling == 'avg':
x = tf.nn.avg_pool(input, ksize=(1, size[0], size[1], 1), strides=(1, stride[0], stride[1], 1),
padding=padding)
else:
x = tf.nn.max_pool(input, ksize=(1, size[0], size[1], 1), strides=(1, stride[0], stride[1], 1),
padding=padding)
net[name] = x
return x
def main():
global timer
global timerold
timerold = 0
while True:
timer = time.time()
if (timer - timerold) > 2 :
timerold = time.time()
with open('annotations.txt', 'rt') as csvfile:
imagereader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in imagereader:
# print(row[1])
predicted = loop(row[0])
data = predicted.split(" ",1)
print(data[0])
def loop(imgname):
# Loading image
img_content, orig_shape = imread_resize(imgname)
img_content_shape = (1,) + img_content.shape
# Loading ImageNet classes info
classes = []
with open('synset_words.txt', 'r') as classes_file:
classes = classes_file.read().splitlines()
# Loading network
data, sqz_mean = load_net('sqz_full.mat')
config = tf.ConfigProto(log_device_placement=False)
config.gpu_options.allow_growth = True
config.gpu_options.allocator_type = 'BFC'
g = tf.Graph()
# 1st pass - simple classification
with g.as_default(), tf.Session(config=config) as sess:
# Building network
image = tf.placeholder(dtype=get_dtype_tf(), shape=img_content_shape, name="image_placeholder")
keep_prob = tf.placeholder(get_dtype_tf())
sqznet = net_preloaded(data, image, 'max', True, keep_prob)
# Classifying
sqznet_results = \
sqznet['classifier_actv'].eval(feed_dict={image: [preprocess(img_content, sqz_mean)], keep_prob: 1.})[0][0][0]
# Outputting result
sqz_class = np.argmax(sqznet_results)
#print(classes[sqz_class])
print(
"\nclass: [%d] '%s' with %5.2f%% confidence" % (sqz_class, classes[sqz_class], sqznet_results[sqz_class] * 100))
return classes[sqz_class]
if __name__ == '__main__':
main()
|
[
"paulkirwan6@gmail.com"
] |
paulkirwan6@gmail.com
|
c382fb6f31fe22160db0400fc69095f40f7bd880
|
d2312dcdb1ce5e823f3a1d10594f33cc99bcdf19
|
/users/admin.py
|
2c0405e6b8d311551036ccac20bed4330d011e2f
|
[] |
no_license
|
danik-tro/Django-dev-Shop
|
6c82cb23c1ed243b81d655d578a613399c241b6a
|
701d0a80a486d97ee9707d2aaed80ad485feda5c
|
refs/heads/master
| 2023-02-24T10:11:51.402482
| 2021-01-22T14:28:55
| 2021-01-22T14:28:55
| 331,242,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User
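# Extend Django's stock UserAdmin with an extra "User" fieldset exposing the custom image field.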
class CustomAdmin(UserAdmin):
fieldsets = (("User", {"fields": ("image",)}),) + UserAdmin.fieldsets
admin.site.register(User, CustomAdmin)
|
[
"s.danik.tro.c@gmail.com"
] |
s.danik.tro.c@gmail.com
|
e4871b3dedd337522eb56878d15c3f912bdf166a
|
63d30d928631494863cd8f40a86c6e51f01d076e
|
/core/__init__.py
|
4417f09f5dd824a8655044a1b32d5f69ab86c751
|
[
"MIT"
] |
permissive
|
akshitgupta2029/Portfolio_CCS
|
9e5430304d408b56a4a44e56eabba0be5668a70d
|
ab5794d14a07e33c5559f125b6cd2e9a789c420a
|
refs/heads/main
| 2023-08-31T06:17:27.849253
| 2021-10-12T08:07:23
| 2021-10-12T08:07:23
| 415,581,330
| 0
| 0
|
MIT
| 2021-10-10T12:36:22
| 2021-10-10T12:36:22
| null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
import os
from flask import Flask
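# Build the page index once at import time: each templates/pages/<rollNo>_<Name>.html
# entry becomes {"name": NAME, "rollNo": rollNo, "url": "pages/<file>"}.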
def get_context():
context = []
for file in os.listdir("templates/pages"):
if file.endswith(".html"):
title = file.split(".")[0].replace("_", " ")
name = title.split(" ")[1:]
name = " ".join(name)
name = name.upper()
rollNo = title.split(" ")[0]
url = "pages/" + file
context.append({"name": name, "rollNo": rollNo, "url": url})
return context
app = Flask(__name__, template_folder="../templates", static_folder="../static")
app.context = get_context()
from core import urls
|
[
"rupanshijain45678@gmail.com"
] |
rupanshijain45678@gmail.com
|
b79d85a95efe3acf2be9f2b266ceb524f9dabe32
|
a16f791e5cf1196d4b1cb47c8098b6f9ae4aba65
|
/cleanStream.py
|
e1bc968774d74210c1b4f3a575d10ed9b0c9a15f
|
[] |
no_license
|
circumlocutory/pytwybot
|
4a1eb648acf21ead18d8464ae0c2382e5d8f2954
|
c4b86a400d3a9c153b537ffcfbf0ad683f20b090
|
refs/heads/master
| 2021-01-11T22:27:08.083304
| 2017-01-15T01:13:03
| 2017-01-15T01:13:03
| 78,964,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,745
|
py
|
#This script follows a user's Twitter stream and messages them when they tweet.
#The interval between tweets can be adjusted using the sleep() function
from twython import TwythonStreamer, Twython
from datetime import date
import random
import time
#auth.py is the second file, containing your dev.twitter.com credentials
from auth import (
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
def timediff():
#timediff() gets the difference between the previous launch date and today
#d0 should be formatted yyyy/m/d
#Example date added
d0 = date(2017, 1, 3)
d1 = date.today()
result = d1 - d0
result = result.days
return result
#Populate this messages array with various openers. A few examples are included for inspiration
messages = [
"Get back to work. ",
"Stop this. ",
"Finish the game. ",
"We're waiting. ",
"Back to development! ",
"You're talking nonsense. ",
"It's all irrelevant. ",
"The time is short. ",
"Focus on the task at hand. "
]
#This block performs initial setup when the script first runs
flavor = random.choice(messages)
result = timediff()
#message must begin with the Twitter handle of whom you wish to tweet
#after flavor, add gameTitle
message = "@someonesTwitterHandle "+ flavor + "gameTitle shipped " + str(result) + " days ago!"
lastMessage = message
twitter = Twython(
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
def buildTweet(messages):
#buildTweet() creates the message for you, and checks it isn't the same as your last message, to avoid flagging as spam
global lastMessage
flavor = random.choice(messages)
result = timediff()
message = "@someonesTwitterHandle "+ flavor + "gameTitle shipped " + str(result) + " days ago!"
#if lastMessage == message, then buildTweet() again
    if lastMessage == message:
        # return the recursive result, otherwise the duplicate message is used anyway
        return buildTweet(messages)
return message
#This is the real focus of the bot's functionality, where the magic happens
class MyStreamer(TwythonStreamer):
def on_success(self, data):
if 'text' in data:
try:
username = data['user']['screen_name']
tweet = data['text']
print("@%s: %s" % (username, tweet))
#Bot only tweets if user has tweeted
#username == 'someonesTwitterHandle'
if username == 'someonesTwitterHandle':
message = buildTweet(messages)
print("Built tweet")
#waits 30 seconds before tweeting, for a more natural cadence
time.sleep(30)
twitter.update_status(status=message)
print("Tweeted: %s" % message)
global lastMessage
lastMessage = message
print("Waiting 6 hours before tweeting again")
#Bot stops looking
self.disconnect()
#Waits 21600 seconds - 6 hours
time.sleep(21600)
#Attempts to re-open the stream
stream.statuses.filter(follow=['6348742'])
except BaseException as e:
print("Threw an exception: " + str(e))
#if an exception is thrown, it will state why, and will wait for the next tweet before trying again
pass
stream = MyStreamer(
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
print("Stream is now running")
#this code searches for tweets from a given userID
#Get the id of the account from here: http://gettwitterid.com/
stream.statuses.filter(follow=['userID'])
|
[
"hello@christopherfloyd.net"
] |
hello@christopherfloyd.net
|
28a0c224413f20e7bf9dc36fdc29fe4cc09204be
|
4a238068e29a1f6871cc049a0486b20b27e781de
|
/Graphcore/benchmarks/bert/implementations/popart/create_submission.py
|
ddf8731745cc7b133b05ae92aeabe36a4686f711
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Pixelz-Inc/training_results_v1.0
|
61b4555ad482b189d1966be3edd127858addd628
|
c507130c4e04c1f274a9ae8b7284aac79f26325a
|
refs/heads/master
| 2023-08-18T22:46:13.316503
| 2021-10-22T04:01:57
| 2021-10-22T04:01:57
| 399,047,712
| 0
| 0
|
NOASSERTION
| 2021-08-23T09:37:25
| 2021-08-23T09:37:25
| null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import argparse
import numpy as np
import subprocess
import os
import re
# Specify which pod to run
parser = argparse.ArgumentParser("Config Parser", add_help=False)
parser.add_argument("--pod", type=int, choices=[16, 64], default=16)
parser.add_argument("--submission-division", type=str, choices=["open", "closed"], default="closed")
parser.add_argument("--start-index", type=int, default=0)
parser.add_argument("--end-index", type=int, default=10)
args = parser.parse_args()
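# e.g. the defaults above (--pod 16, closed division, indices 0..10) launch
# configs/mk2/pod16-closed.json ten times with seeds 42 through 51.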
# Each submission consists of 10 runs
for result_index in range(args.start_index, args.end_index):
command = f"python bert.py --config=configs/mk2/pod{args.pod}-{args.submission_division}.json --seed {result_index + 42}"
options = f"--submission-run-index={result_index}"
# Launch the run
with open(f"internal_log_{result_index}", "w+") as f:
# Clear the cache
# subprocess.call(['sudo sh -c "sync; echo 3 > /proc/sys/vm/drop_caches"'], stdout=f, stderr=f, shell=True)
# Run training
subprocess.call([command + " " + options], stdout=f, stderr=f, shell=True)
|
[
"vsb@fb.com"
] |
vsb@fb.com
|
250aa6a87d1c1c48865dcffcdf97c902d333221a
|
6d47251d3c0238ea6bfe71dc38bf1d303f3fa5ee
|
/lpfhpfbilinear.py
|
b88e12850858d037f9e21f0e02d7360d5fa34bb2
|
[
"Unlicense"
] |
permissive
|
raneavit/signal-processing
|
4de8aa3d274e0ccc73a3bfbc9d4a9f1db6015b85
|
8be6d8a1d53589549c25d7f2f16743bff80bd104
|
refs/heads/main
| 2023-04-16T04:07:13.040292
| 2021-04-04T13:10:13
| 2021-04-04T13:10:13
| 354,471,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
# Low pass filter and High pass filter using Bilinear transform
import numpy as np
import math
from matplotlib import pyplot as plt
R = 10
C = 100e-6
Toe = 10e-3
Ts = 0.1e-3
h = Ts/Toe
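# Bilinear transform s -> (2/Ts)*(z-1)/(z+1) applied to the first-order RC sections
# H_LP(s) = 1/(1 + s*Toe) and H_HP(s) = s*Toe/(1 + s*Toe) gives the recurrences
# used in the loop below, with h = Ts/Toe:
#   y[n+1]  = ((2 - h)*y[n]  + h*(x[n+1] + x[n])) / (h + 2)
#   HP[n+1] = ((2 - h)*HP[n] + 2*(x[n+1] - x[n])) / (h + 2)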
dt = 0.1e-3
f = 100
w = 2*(math.pi)*f
t = list()
t.append(0.0)
u = list()
u.append(0.0)
v = list()
v.append(1.0)
HP = list()
HP.append(0.0)
x = list()
x.append(0.0)
y = list()
y.append(0.0)
for n in range(0, 4000):
t.append(1+t[n])
# Harmonic Oscillator
u.append(u[n]+w*dt*v[n])
v.append(v[n]-w*dt*u[n+1])
x.append(10+u[n+1])
# LPF
y.append((2*y[n]-h*y[n]+h*x[n+1]+h*x[n])/(h+2))
# HPF
HP.append((2*HP[n]-h*HP[n]+2*x[n+1]-2*x[n])/(h+2))
plt.subplot(2, 1, 2)
plt.plot(x, 'b-', label='harmonic oscillator')
plt.plot(y, 'g-', linewidth=1, label='low pass filtered data')
plt.plot(HP, 'r-', linewidth=1, label='high pass filtered data')
plt.xlabel('Time [sec]')
plt.grid()
plt.legend(fontsize='small')
plt.show(block=True)
|
[
"noreply@github.com"
] |
raneavit.noreply@github.com
|
9b9f7826b5bcb857fba59c2c6fdcb94b0b4bc2cb
|
627b497ec17062654ba9c84ddd49d2372f016611
|
/week10/Informatics/1/module3.py
|
39b874b62115ac65382a9e35e96549188e312660
|
[] |
no_license
|
madinamantay/webdev2019
|
d25fcedf4920bc90f6e9309150fb46a4ce005921
|
55cad68fdfde1fbf69d0d792493fc8fee0548037
|
refs/heads/master
| 2022-11-13T21:43:07.563310
| 2019-04-22T22:43:46
| 2019-04-22T22:43:46
| 169,204,888
| 0
| 1
| null | 2022-10-23T22:49:03
| 2019-02-05T07:32:35
|
Python
|
UTF-8
|
Python
| false
| false
| 54
|
py
|
a = int(input())
b = int(input())
c=b//a
print(c)
|
[
"madinamantay@gmail.com"
] |
madinamantay@gmail.com
|
251fe325f06001deee40e6b87d71240c28d5b55d
|
4fbb25aaa1ec149ebc3fd99c57ccdc892890036c
|
/ovmc2.py
|
014495a143d3de0a2c9e33aaaff97082d5876398
|
[] |
no_license
|
hpersh/oovm40
|
705d552dd2a270846ce80f3d417464e77511d234
|
3df78d1712ad8c1593b607fea7a159416ceb8f97
|
refs/heads/master
| 2021-04-07T12:56:12.318473
| 2020-11-20T07:08:28
| 2020-11-20T07:08:28
| 248,677,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,205
|
py
|
#!/usr/bin/python
# Compiler pass 2 - Optimize parse tree
#
# - Flatten operations like add, sub, etc.
# - Collect constants in arithmetic expressions
# - Highlight assignments to local variables
import sys
import copy
import xml.etree.ElementTree as et
outf = None
line_num = 0
def copy_node(nd):
return et.Element(nd.tag, attrib = nd.attrib)
def node_is_num(nd):
return nd.tag in ('int', 'float')
def node_intval(nd):
return int(nd.get('val'))
def node_floatval(nd):
return float(nd.get('val'))
def num_from_node(nd):
if nd.tag == 'int':
return node_intval(nd)
if nd.tag == 'float':
return node_floatval(nd)
assert(False)
def int_node(val):
    # ElementTree attribute values must be strings (line_num starts life as the int 0)
    return et.Element('int', attrib = {'val': str(val), 'line': str(line_num)})
def float_node(val):
    return et.Element('float', attrib = {'val': str(val), 'line': str(line_num)})
def node_from_num(val):
t = type(val)
if t == int:
return int_node(val)
if t == float:
return float_node(val)
assert(False)
def num_node_eq(nd, val):
t = type(val)
if t == int:
return nd.tag == 'int' and node_intval(nd) == val
if t == float:
return nd.tag == 'float' and node_floatval(nd) == val
assert(False)
def num_node_op(op, arg):
return node_from_num(op(num_from_node(arg)))
def num_node_binop(op, arg1, arg2):
return node_from_num(op(num_from_node(arg1), num_from_node(arg2)))
def simp_minus(nd):
ch = simp_node(nd[0])
if node_is_num(ch):
return num_node_op(lambda x: -x, ch)
if ch.tag == 'minus':
return ch[0]
return nd
def flatten(nd):
ch = [simp_node(x) for x in nd]
result = copy_node(nd)
for c in ch:
if c.tag == nd.tag:
for cc in c:
result.append(cc)
else:
result.append(c)
return result
def nums_collect(nd, func_combine, func_test):
num = None
result = copy_node(nd)
for c in nd:
if node_is_num(c):
if num is None:
num = c
else:
num = num_node_binop(func_combine, num, c)
else:
result.append(c)
if num is not None and func_test(num):
result.append(num)
return result
def simp_add(nd):
temp = nums_collect(flatten(nd), lambda x,y: x + y, lambda x: not (num_node_eq(x, 0) or num_node_eq(x, 0.0)))
ch = list(temp)
result = copy_node(temp)
last = ch[-1]
if node_is_num(last):
num = last
ch = ch[:-1]
else:
num = int_node(0)
for c in ch:
if c.tag != 'sub':
result.append(c)
continue
last = c[-1]
if not node_is_num(last):
result.append(c)
continue
num = num_node_binop(lambda x,y: x - y, num, last)
c.remove(last)
if len(c) == 1:
result.append(c[0])
continue
result.append(c)
if not (num_node_eq(num, 0) or num_node_eq(num, 0.0)):
if len(result) == 0:
return num
result.append(num)
return result
def simp_sub(nd):
a = nd[0]
b = nd[1]
if node_is_num(a) and node_is_num(b):
return node_from_num(num_from_node(a) - num_from_node(b))
if num_node_eq(b, 0) or num_node_eq(b, 0.0):
return a
if num_node_eq(a, 0) or num_node_eq(a, 0.0):
if node_is_num(b):
return node_from_num(-num_from_node(b))
        # Element.append() returns None, so build the node first and return it
        x = et.Element('minus', attrib={'line': str(line_num)})
        x.append(b)
        return x
return nd
def simp_mul(nd):
result = nums_collect(flatten(nd), lambda x,y: x * y, lambda x: not (num_node_eq(x, 1) or num_node_eq(x, 1.0)))
last = result[-1]
if num_node_eq(last, 0) or num_node_eq(last, 0.0):
return last
return result
def simp_div(nd):
a = nd[0]
b = nd[1]
if (num_node_eq(a, 0) or num_node_eq(a, 0.0)) and not (num_node_eq(b, 0) or num_node_eq(b, 0.0)):
return a
return nd
def simp_land(nd):
return flatten(nd)
def simp_lor(nd):
return flatten(nd)
def simp_band(nd):
return flatten(nd)
def simp_bor(nd):
return flatten(nd)
def simp_bxor(nd):
return flatten(nd)
def simp_anon(nd):
x = et.Element('anon', attrib = nd.attrib)
x.append(nd[0])
parse_node(x, nd[1])
return x
def simp_func(nd):
x = et.Element('func', attrib = nd.attrib)
x.append(nd[0])
parse_node(x, nd[1])
return x
def simp_default(nd):
return nd
def simp_node(nd):
f = 'simp_' + nd.tag
return (globals().get(f, simp_default))(nd)
def parse_minus(parent, nd):
    parent.append(simp_minus(nd))
def parse_add(parent, nd):
parent.append(simp_add(nd))
def parse_sub(parent, nd):
parent.append(simp_sub(nd))
def parse_mul(parent, nd):
parent.append(simp_mul(nd))
def parse_land(parent, nd):
parent.append(simp_land(nd))
def parse_lor(parent, nd):
parent.append(simp_lor(nd))
def parse_band(parent, nd):
parent.append(simp_band(nd))
def parse_bor(parent, nd):
parent.append(simp_bor(nd))
def parse_bxor(parent, nd):
parent.append(simp_bxor(nd))
def parse_assign(parent, nd):
lhs = nd[0]
rhs = simp_node(nd[1])
if lhs.tag != 'obj1':
parse_node_default(parent, nd)
return
if rhs.tag in ['nil', 'bool', 'int', 'float', 'str']:
t = 'assign1c'
elif rhs.tag == 'obj1':
t = 'assign11'
else:
t = 'assign1'
x = et.Element(t, attrib = nd.attrib)
x.append(lhs)
x.append(rhs)
parent.append(x)
def parse_module(parent, nd):
global outf
outf = copy_node(nd)
for c in nd:
parse_node(outf, c)
def parse_node_default(parent, nd):
nd2 = copy_node(nd)
for c in nd:
parse_node(nd2, c)
parent.append(nd2)
def parse_node(parent, nd):
line_num_ = nd.get('line')
if line_num_ is not None:
global line_num
line_num = line_num_
f = 'parse_' + nd.tag
(globals().get(f, parse_node_default))(parent, nd)
def process_file(infile):
parse_node(None, et.parse(open(infile)).getroot())
et.ElementTree(outf).write(sys.stdout)
def main():
process_file(sys.argv[1])
if __name__ == '__main__':
main()
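# Usage sketch per process_file above: ovmc2.py <parse-tree.xml> > optimized.xml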
|
[
"hpersh@yahoo.com"
] |
hpersh@yahoo.com
|
6d913c96f57de466f1ae308dfc7f3356ce6f2a8f
|
e5325df2e1257bc552f474c61c153a67dc78bbeb
|
/online/section05-1.py
|
5726615c8ca075de91afb243af1cbfd9bdff1268
|
[] |
no_license
|
O-Seok/python_basic
|
2733719ef6942d4f9d90b3c0cfefc397583bcfe3
|
7e0786c5a1d16c464ed0c9c3242cbd16e75030e1
|
refs/heads/master
| 2022-09-12T19:14:02.959532
| 2020-05-27T06:51:53
| 2020-05-27T06:51:53
| 267,207,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,874
|
py
|
# Section05-1
# Python flow control (control statements)
# Conditional statement practice
# boolean
print(type(True), type(False))
print('boolean')
# example 1
if True:
    print('Yes')
# example 2
if False:
    print('No')
# example 3
if False:
    print('No')
else:
    print('Yes')
# Relational operators
# >, >=, <, <=, ==, !=
print()
print('relational operators')
a = 10
b = 0
print(a == b)
print(a != b)
print(a > b)
print(a >= b)
print(a < b)
print(a <= b)
# Truthy and falsy values (True, False)
# Truthy: "text", [items], (items), {items}, 1, True
# Falsy: "", [], (), {}, 0, False
print()
print('truthy and falsy values')
city = ""
if city:
    print("True")
else:
    print("False")
# Logical operators
# and, or, not
print()
print('logical operators')
a = 100
b = 60
c = 15
print('and : ', a > b and b > 3)
print('or : ', a > b or c > b)
print('not : ', not a > b)
print(not False)
print(not True)
# Arithmetic, relational and logical operators
# Precedence: arithmetic > relational > logical
print()
print('operator precedence: arithmetic, relational, logical')
print('ex1 : ', 5 + 10 > 0 and not 7 + 3 == 10)
score1 = 90
score2 = 'A'
if score1 >= 90 and score2 == 'A':
    print('You passed.')
else:
    print('Sorry, you did not pass.')
# Multi-branch conditionals
# When more conditions are needed after an if, elif lets the flow branch over several cases.
print()
print('multi-branch conditional')
num = 70
if num >= 90:
    print('num grade A', num)
elif num >= 80:
    print('num grade B', num)
elif num >= 70:
    print('num grade C', num)
else:
    print('no grade')
# Nested conditionals
print()
print('nested conditional')
age = 27
height = 175
if age >= 20:
    if height >= 170:
        print('Eligible to apply for choice A')
    elif height >= 160:
        print('Eligible to apply for choice B')
    else:
        print('Not eligible')
else:
    print('Must be at least 20 to apply')
|
[
"totkfa789@gmail.com"
] |
totkfa789@gmail.com
|
6f5f440ff15c9ebb1f90e81dfbae8583fd351f0f
|
b8ab7943e6d3daa033f0436574f350c12be08830
|
/SpeechConverter.py
|
808d1d082be2fcb205af421c85f7b7172254ba30
|
[] |
no_license
|
Manan-Rastogi/Speech-Text-Converter
|
bba83841a39d6a67ff1d2e4617a7c3a3beb9351d
|
89559adc7ec408c5b6eff6329377d50eb9d1a46a
|
refs/heads/main
| 2023-01-21T09:06:52.931925
| 2020-12-03T15:12:07
| 2020-12-03T15:12:07
| 318,208,077
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
import speech_recognition as sr
from tkinter import *
r = sr.Recognizer()
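# Convert() listens on the default microphone and writes the Google-recognized
# text into the given Tk label.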
def Convert(label1):
with sr.Microphone() as s2:
label1.config(text="Talk .... ")
r.adjust_for_ambient_noise(s2,duration=0.2)
audio2 = r.listen(s2)
try:
label1.config(text = f"Text: {r.recognize_google(audio2)}")
        except:
            label1.config(text="Sorry, I did not get that")
|
[
"mananras87@gmail.com"
] |
mananras87@gmail.com
|
dd706abd123163b2c7bfa7004b9a46bfd4c21a3b
|
4f6b68af8fdca18ac31d01e131fdc4e9a99b07de
|
/suche/engine/result.py
|
2a05a1dafc8fec4541c1160e8cf4dcb50311ce57
|
[] |
no_license
|
gpsgroup/suche
|
cdda90a16d02e44caa09c58550a7a70d9f3ae5e8
|
adb31db6fdb6913b7e92a3e8164b330bd130bf61
|
refs/heads/master
| 2021-01-21T13:11:57.622249
| 2014-09-07T14:48:10
| 2014-09-07T14:48:10
| 21,489,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
'''
result.py
contains class for search result
'''
from indexer.models import SucheURL
from linguistic.queryhandler import QueryHandler
import string
import re
class SucheResult:
def __init__(self):
self.title = ''
def getHighlightedTitle(self):
pass
def setQuery(self,query):
self.query = ''
validsymbols =' ' + string.ascii_letters +'0123456789'
for char in query:
if char in validsymbols:
self.query += char
else:
self.query += " "
self.query = re.sub(' +',' ',self.query)
self.querylist = self.query.split(' ')
self.querylist = sorted(self.querylist, key = lambda x:len(x), reverse = True) # sort from largest to smallest word
def highlightedtitle(self):
title = self.title
for key in self.querylist:
if len(key) > 2:
title = title.replace(key,"<strong>"+key+"</strong>")
return title
def highlightedurl(self):
url = self.url
for key in self.querylist:
if len(key) > 2:
url = url.replace(key,"<strong>"+key+"</strong>")
return url
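    # Pull a ~250-character excerpt around the first query-term hit (the window
    # starts up to 128 chars before it), snap to word boundaries, and bold the terms.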
def highlightedbody(self):
firstoccur = -1
for key in self.querylist:
if self.body.find(key) > 0:
firstoccur = self.body.find(key)
break
if firstoccur > -1:
exstart = firstoccur - 128
while exstart > 0 and self.body[exstart] != " ":
exstart -= 1
if exstart < 0:
exstart = 0
exend = exstart + 250
while exend < len(self.body)-1 and self.body[exend] != " ":
exend += 1
if exend > len(self.body)-1:
exend = len(self.body)-1
bodyportion = self.body[exstart:exend]
else:
bodyportion = self.body[:128]
for key in self.querylist:
if len(key) > 2:
bodyportion = bodyportion.lower().replace(key,"<strong>"+key+"</strong>")
return " ..."+bodyportion+" ..."
|
[
"virtualanup@gmail.com"
] |
virtualanup@gmail.com
|
0c3c1376b7dde485487ea32f0df347686f32d7bb
|
257802d6f3bd5433aab09c870333de4451bb347e
|
/src/models/optimized_model.py
|
7b6101a6d61803b41faaa2cb1a67812a80b1a5aa
|
[] |
no_license
|
DavidColasRomanos/Minsait_Land_Classification
|
a9971e833500f46360b864efccf5920f122cca75
|
378f04a0bde095225330c62aae90f56d647715a8
|
refs/heads/master
| 2021-06-10T20:12:34.710738
| 2020-04-06T20:47:54
| 2020-04-06T20:47:54
| 254,346,828
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,618
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 11:25:25 2020
@author: javier.moral.hernan1
"""
import pandas as pd
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import balanced_accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from skopt import BayesSearchCV
from lightgbm import LGBMClassifier
from skopt.space import Real, Integer
class OptimizedSingleModel():
def __init__(self, X_train, X_test, y_train, y_test, external_data,
model_selected, search_method):
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.external_data = external_data
self.model_selected = model_selected
self.select_model()
self.fit(search_method)
self.predict()
self.score()
def select_model(self):
'''
        Gets the selected model and its param grid to optimize.
Returns
-------
None.
'''
if self.model_selected == 'randomforest':
self.model = RandomForestClassifier(class_weight='balanced')
self.param_grid = {'bootstrap': [True, False],
'max_depth': [10, 20, 40, 60,
80, 100, None],
'max_features': ['auto', 'sqrt'],
'min_samples_leaf': [1, 2, 4],
'min_samples_split': [2, 5, 10],
'n_estimators': [200, 600, 800,
1200, 2000]}
self.param_grid_bayes = {'max_depth': Integer(10, 100, None),
'min_samples_split': Integer(2, 50, None),
'min_samples_leaf': Integer(2, 50, None),
'n_estimators': Integer(10, 2000, None),
'bootstrap': [True, False]}
if self.model_selected == 'gradientboosting':
self.model = GradientBoostingClassifier()
self.param_grid = {'max_depth': [10, 40, 60, 80, 100, 200, None],
'validation_fraction': [0.1, 0.2, 0.3, None],
'n_iter_no_change': [1, 2, 3, 4, 5],
'min_samples_split': [2, 5, 10, 20, 50],
'n_estimators': [200, 600, 800,
1200, 2000],
'learning_rate': [0.001, 0.01, 0.05, 0.1, 0.15,
0.2, 0.3, 0.6]}
self.param_grid_bayes = {'max_depth': Integer(10, 200, None),
'min_samples_split': Integer(2, 50, None),
'validation_fraction': Real(0.05,
0.3, None),
'n_estimators': Integer(10, 2000, None),
'n_iter_no_change': Integer(1, 5),
'learning_rate': Real(0.001, 0.5, None)}
if self.model_selected == 'xgboost':
self.model = XGBClassifier(n_jobs=-1)
self.target_encoder = LabelEncoder()
self.y_train = self.target_encoder.fit_transform(self.y_train)
self.y_test = self.target_encoder.fit_transform(self.y_test)
self.param_grid = {'min_child_weight': [1, 5, 10],
'gamma': [0.5, 1, 1.5, 2, 5],
'subsample': [0.6, 0.8, 1.0],
'colsample_bytree': [0.6, 0.8, 1.0],
'max_depth': [10, 40, 80, 100],
'n_estimators': [200, 600, 1000],
'learning_rate': [0.001, 0.01, 0.05, 0.15,
0.2, 0.3, 0.6], }
self.param_grid_bayes = {'min_child_weight': Integer(1, 15, None),
'n_estimators': Integer(10, 2000, None),
'learning_rate': Real(0.001, 0.5, None),
'gamma': Real(0.1, 5, None),
'subsample': Real(0.6, 1.0, None),
'colsample_bytree': Real(0.6, 1.0, None),
'max_depth': Integer(10, 200, None), }
if self.model_selected == 'lightgbm':
self.model = LGBMClassifier(n_jobs=-1, class_weight='balanced')
self.param_grid = {'min_child_weight': [1, 5, 10],
'subsample': [0.6, 0.8, 1.0],
'colsample_bytree': [0.6, 0.8, 1.0],
'max_depth': [10, 40, 80, 100],
'n_estimators': [200, 600, 1000],
'learning_rate': [0.001, 0.01, 0.05, 0.15,
0.2, 0.3, 0.6], }
self.param_grid_bayes = {'min_child_weight': Integer(1, 15, None),
'n_estimators': Integer(10, 2000, None),
'learning_rate': Real(0.001, 0.5, None),
'gamma': Real(0.1, 5, None),
'subsample': Real(0.6, 1.0, None),
'colsample_bytree': Real(0.6, 1.0, None),
'max_depth': Integer(10, 200, None), }
if self.model_selected == 'adaboost':
self.model = AdaBoostClassifier()
self.param_grid = {'n_estimators': [200, 400, 600, 800, 1000,
1200, 1800, 2000, 3000],
'learning_rate': [0.001, 0.01, 0.05, 0.1, 0.15,
0.2, 0.3, 0.6]}
self.param_grid_bayes = {'n_estimators': Integer(10, 3000, None),
'learning_rate': Real(0.001, 0.5, None)}
def fit(self, method):
'''
        Fits the selected model using train data and optimizes its
hyperparameters with the selected method.
Returns
-------
None.
'''
if method == 'grid':
search = GridSearchCV(self.model, self.param_grid, cv=3,
scoring='f1_macro', n_jobs=-1,
refit=True, verbose=10)
self.best_model = search.fit(self.X_train, self.y_train)
if method == 'random':
search = RandomizedSearchCV(self.model, self.param_grid, cv=3,
scoring='f1_macro', n_jobs=-1,
refit=True, verbose=10, n_iter=10)
self.best_model = search.fit(self.X_train, self.y_train)
if method == 'bayes':
search = BayesSearchCV(self.model, self.param_grid_bayes,
scoring='f1_macro', cv=3,
refit=True, n_jobs=-1,
iid=True, return_train_score=True,
n_points=10, n_iter=10, verbose=10)
self.best_model = search.fit(self.X_train, self.y_train)
def inverse_encoding(self, data):
'''
Decodes previously encoded data.
Returns
-------
None.
'''
data_uncoded = self.target_encoder.inverse_transform(data)
return data_uncoded
def predict(self):
'''
        Computes predictions for each dataset using the fitted model.
Returns
-------
None.
'''
self.preds_train = self.best_model.predict(self.X_train)
self.preds_test = self.best_model.predict(self.X_test)
self.preds_ext = self.best_model.predict(self.external_data)
if self.model_selected == 'xgboost':
self.y_train = self.inverse_encoding(self.y_train)
self.y_test = self.inverse_encoding(self.y_test)
self.preds_train = self.inverse_encoding(self.preds_train)
self.preds_test = self.inverse_encoding(self.preds_test)
self.preds_ext = self.inverse_encoding(self.preds_ext)
def score(self):
'''
        Computes and stores the accuracy, balanced accuracy, confusion matrix
        and f1_macro metrics for train and test predictions.
Returns
-------
None.
'''
self.acc_train = accuracy_score(self.y_train, self.preds_train)
self.acc_test = accuracy_score(self.y_test, self.preds_test)
self.balanced_acc_train = balanced_accuracy_score(self.y_train,
self.preds_train)
self.balanced_acc_test = balanced_accuracy_score(self.y_test,
self.preds_test)
self.f1_train = f1_score(self.y_train, self.preds_train,
average='macro')
self.f1_test = f1_score(self.y_test, self.preds_test,
average='macro')
self.cf_train = confusion_matrix(self.y_train, self.preds_train)
self.cf_test = confusion_matrix(self.y_test, self.preds_test)
def get_score(self):
'''
Computes the score using some metrics for train and test predictions.
Returns
-------
Scores. dict. Dictionary with all the stored metrics
'''
scores = {'Accuracy_train': self.acc_train,
'Accuracy_test': self.acc_test,
'Balanced_Accuracy_train': self.balanced_acc_train,
'Balanced_Accuracy_test': self.balanced_acc_test,
'F1_train': self.f1_train,
'F1_test': self.f1_test,
'Confussion_Matrix_train': self.cf_train,
'Confussion_Matrix_test': self.cf_test}
return scores
def get_predictions(self, dataset='test'):
'''
        Computes the predictions for the selected dataset using the
fitted model.
Returns
-------
preds_comp. pandas.DataFrame.
'''
if dataset == 'train':
dict_preds = {'y_train': list(self.y_train),
'preds': list(self.preds_train)}
preds_comp = pd.DataFrame(dict_preds)
if dataset == 'test':
dict_preds = {'y_test': list(self.y_test),
'preds': list(self.preds_test)}
preds_comp = pd.DataFrame(dict_preds)
if dataset == 'external':
preds_comp = pd.DataFrame(self.preds_ext)
preds_comp = pd.DataFrame(preds_comp)
return preds_comp
def visualize_predict(self, dataset='test'):
'''
        Plots the confusion matrix of the selected predictions dataset.
Returns
-------
fig.figure_. matplotlib.pyplot.figure.
'''
if self.model_selected == 'xgboost':
y_train_aux = self.target_encoder.fit_transform(self.y_train)
y_test_aux = self.target_encoder.fit_transform(self.y_test)
else:
y_train_aux = self.y_train.copy()
y_test_aux = self.y_test.copy()
if dataset == 'train':
fig = plot_confusion_matrix(self.best_model,
self.X_train,
y_train_aux,
cmap=plt.cm.Blues)
fig.figure_
if dataset == 'test':
fig = plot_confusion_matrix(self.best_model,
self.X_test,
y_test_aux,
cmap=plt.cm.Blues)
fig.figure_
return fig.figure_
def get_var_importance(self):
'''
Shows the variable importance of the trained model.
Returns
-------
fig. matplotlib.pyplot.figure. Barplot with feature importances
'''
features_imp = {}
for column, importance in zip(self.X_train.columns,
(self.best_model
.best_estimator_.feature_importances_)):
features_imp[column] = importance
features_imp_df_train = (pd.DataFrame.from_dict(features_imp,
orient='index')
.reset_index()
.sort_values(by=[0], ascending=False))
features_imp_df_train.columns = ['Variable', 'Importance']
features_imp_df_train = features_imp_df_train.iloc[0:10, :]
fig = plt.figure(figsize=(5, 4))
sns.barplot(y='Variable', x='Importance',
data=features_imp_df_train)
plt.title('Model Variable Importance')
return fig
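
# Illustrative usage (argument order as in __init__ above; the data objects are
# assumed to be pandas/numpy train-test splits):
#   model = OptimizedSingleModel(X_train, X_test, y_train, y_test, external_data,
#                                model_selected='lightgbm', search_method='bayes')
#   print(model.get_score())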
|
[
"javier.moral.hernan1@SCMS.ms.corp"
] |
javier.moral.hernan1@SCMS.ms.corp
|
4ceace8e8c5366914edf2306319b88873fdc68cd
|
ed7636f2e3df24370ce7d05b63b0ded939c18d6a
|
/app/models.py
|
43ece4709c3ab556b059c6d8f44fe84814b816fc
|
[] |
no_license
|
kflavin/stock_screener_web
|
977117f6451ea7b6948b64accf34f070529323d9
|
3d59179c298c243246ed6f66ac555cbabbd39c7e
|
refs/heads/master
| 2021-05-24T03:08:53.310277
| 2017-10-06T06:02:51
| 2017-10-06T06:02:51
| 49,934,242
| 2
| 0
| null | 2017-10-06T06:02:52
| 2016-01-19T07:19:30
|
Python
|
UTF-8
|
Python
| false
| false
| 22,448
|
py
|
import json
import uuid
from calendar import timegm
import jwt
import datetime
from datetime import date
import re
import requests
import sys
from sqlalchemy import UniqueConstraint, desc, func, Float
#from flask.ext.security.utils import verify_password
#from flask.ext.security import UserMixin, RoleMixin
from flask_sqlalchemy import SQLAlchemy
from random import seed, choice
from string import ascii_uppercase
from flask import current_app, abort
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql.expression import bindparam
from sqlalchemy import inspect
# from app.external.companies import get_name_from_symbol
from app import db, bcrypt
from app.utils import DateToJSON, float_or_none
# Define models
from populators.external.companies import get_name_from_symbol
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class Role(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(255))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
last_login_at = db.Column(db.DateTime())
current_login_at = db.Column(db.DateTime())
last_login_ip = db.Column(db.String(255))
current_login_ip = db.Column(db.String(255))
login_count = db.Column(db.Integer)
last_password_change = db.Column(db.DateTime, default=datetime.datetime.utcnow())
registration_code = db.Column(db.String(36))
companies = db.relationship('Company', backref='user', lazy='dynamic')
def __init__(self, email, password, active=False, confirmed_at=datetime.datetime.utcnow):
self.email = email
self.password = bcrypt.generate_password_hash(password, current_app.config.get('BCRYPT_LOG_ROUNDS')).decode()
self.active = active
self.registration_code = str(uuid.uuid4())
if callable(confirmed_at):
self.confirmed_at = confirmed_at()
else:
self.confirmed_at = confirmed_at
def encode_auth_token(self, user_id, exp=86400):
"""
Generate auth token
:param user_id:
:param exp: token expiration in seconds, set in global config per environment
:return: the encoded payload or exception on error
"""
user = User.query.filter_by(id=user_id).first()
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=exp),
'iat': datetime.datetime.utcnow(),
'id': user_id,
# Need this to serialize datetime like exp and iat. In their case, it's handled in the jwt module
'last_password_change': timegm(user.last_password_change.utctimetuple())
}
return jwt.encode(
payload,
current_app.config.get('SECRET_KEY'),
algorithm='HS256'
)
except Exception as e:
current_app.logger.debug(e)
return e
@staticmethod
def decode_auth_token(auth_token):
"""
Decode auth token
:param auth_token:
:return: user id (int) or error string
"""
try:
payload = jwt.decode(auth_token, current_app.config.get('SECRET_KEY'))
user_id = payload.get('id')
if user_id:
user = User.query.filter_by(id=user_id).first()
last_reported_password_change = payload.get('last_password_change')
last_actual_password_change = timegm(user.last_password_change.utctimetuple())
if user and (last_reported_password_change >= last_actual_password_change):
return user_id
return 'Signature expired. Please log in again.'
except jwt.ExpiredSignature:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again'
def set_password(self, password):
"""
Change the password, and update the timestamp so we can verify it against the token
:param password: new password string
:return:
"""
self.password = bcrypt.generate_password_hash(password, current_app.config.get('BCRYPT_LOG_ROUNDS')).decode()
self.last_password_change = func.now()
db.session.add(self)
db.session.commit()
def verify_password(self, password):
return bcrypt.check_password_hash(self.password, password)
def to_json(self):
indicators = {}
for k,v in self.get_attributes_no_fk().iteritems():
if k == "symbol":
indicators[k] = self.company.symbol
else:
indicators[k] = getattr(self, k)
indicators['id'] = self.id
indicators['date'] = self.date.isoformat()
return indicators
#@property
#def password(self):
# raise AttributeError('password not a readable attribute')
#@password.setter
#def password(self, password):
# self.password = encrypt_password(password)
# def verify_password(self, password):
# return verify_password(password, self.password)
strategy = db.relationship('Strategy', backref='user', lazy='dynamic')
def __repr__(self):
return "<Userid: {0}, Email: {1}>".format(self.id, self.email)
class BlacklistToken(db.Model):
id = db.Column(db.Integer, primary_key=True)
token = db.Column(db.String(500), unique=True, nullable=False)
blacklisted_on = db.Column(db.DateTime, nullable=False)
def __init__(self, token):
self.token = token
self.blacklisted_on = datetime.datetime.now()
@staticmethod
def check_blacklist(auth_token):
res = BlacklistToken.query.filter_by(token=str(auth_token)).first()
if res:
return True
else:
return False
def __repr__(self):
        return '<id: token: {}>'.format(self.token)
class Strategy(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), nullable=False)
public = db.Column(db.Boolean, default=True)
filter = db.relationship('Filters', backref='strategy', lazy='dynamic')
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
class Filters(db.Model):
id = db.Column(db.Integer, primary_key=True)
roe = db.Column(db.Float, default=0.15)
fcf = db.Column(db.Float, default=0)
strategy_id = db.Column(db.Integer, db.ForeignKey('strategy.id'))
ExchangeMembership = db.Table('exchange_membership',
db.Column('exchange_id', db.Integer, db.ForeignKey('exchange.id')),
db.Column('company_id', db.Integer, db.ForeignKey('company.id'))
)
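# Many-to-many association between exchanges and the companies listed on them.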
class Exchange(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=True)
companies = db.relationship('Company',
secondary=ExchangeMembership,
backref=db.backref('exchanges', lazy='dynamic'),
lazy='dynamic'
)
@staticmethod
def add_exchange(name):
if name == "NYSE" or name == "NASDAQ":
exchange = Exchange(name=name)
return exchange
else:
return None
@staticmethod
def get_exchange(name):
exchange = Exchange.query.filter(Exchange.name == name).first()
if not exchange:
exchange = Exchange.add_exchange(name)
return exchange
class Company(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(200), unique=True)
symbol = db.Column(db.String(20), nullable=False, unique=True)
sic_code = db.Column(db.Integer, nullable=True)
sector = db.Column(db.String(200), nullable=True)
industry = db.Column(db.String(200), nullable=True)
active = db.Column(db.Boolean, default=True)
indicators = db.relationship('Indicators', backref='company', lazy='dynamic')
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# "Special" attributes that we ignore
ignore_attrs = ['id', 'indicators']
# Define attributes here for lookups.
attributes = {'name': "Name",
'symbol': "Ticker",
"sic_code": "SIC",
"sector": "Sector",
"industry": "Industry",
}
@staticmethod
def generate_symbol():
seed()
symbol = ""
sym_len = int(choice("34")) + 1
for i in range(1,sym_len):
symbol += choice(ascii_uppercase)
return symbol
@classmethod
def get_attributes(cls):
return cls.attributes.keys()
@classmethod
def get_attributes_no_fk(cls):
order_bys = cls.attributes.keys()
order_bys_no_fk = {}
for k,v in cls.attributes.iteritems():
if k.find(".") == -1:
order_bys_no_fk[k] = v
else:
order_bys_no_fk[k.split(".")[1]] = v
return order_bys_no_fk
@staticmethod
def generate_fake(count=20):
import forgery_py
from sqlalchemy.exc import IntegrityError
for i in range(count):
c = Company(name=forgery_py.lorem_ipsum.word(),
symbol=Company.generate_symbol()
)
db.session.add(c)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
@staticmethod
def update(j):
passed_keys = j.keys()
symbol = j.get('symbol')
if not symbol:
current_app.logger.debug("No symbol found in JSON: {}".format(j))
return False
bind_params = {}
realized_params = {}
mapper = inspect(Company)
for col in mapper.attrs.keys():
if col not in Company.ignore_attrs and col in passed_keys:
bind_params[col] = bindparam(col)
realized_params[col] = j.get(col)
if not Company.validate_company_values(realized_params):
current_app.logger.debug("Failed to validate company values: {}".format(j))
return False
company_table = mapper.mapped_table
stmt = company_table.update().where(company_table.c.symbol == symbol).values(**bind_params)
db.session.execute(stmt, realized_params)
db.session.commit()
return Company.query.filter(Company.symbol == symbol).first()
def dates_to_json(self):
"""
Returns: a list object that can be converted to json
"""
indicators = Indicators.query.join(Company).filter_by(id=self.id).all()
for i in indicators:
print i.date
return [indicator.date.strftime("%Y-%m-%d") for indicator in indicators]
def to_json(self):
company = {}
for k,v in self.attributes.iteritems():
company[k] = getattr(self, k)
company['id'] = self.id
return company
@staticmethod
def from_json(j):
company = {}
for k, v in Company.attributes.iteritems():
company[k] = j.get(k)
# name = j.get('name')
# symbol = j.get('symbol')
exchange = j.get('exchange')
if not Company.validate_name(company['name']):
raise ValueError('Invalid name')
if not Company.validate_symbol(company['symbol']):
raise ValueError('Invalid symbol')
        # Resolve the exchange record (created on the fly for known names)
        clean_exchange = Exchange.get_exchange(exchange)
        company['active'] = j.get('active', True)
c = Company(**company)
if clean_exchange:
c.exchanges.append(clean_exchange)
return c
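    # Example input (hypothetical values):
    #   {"name": "Acme Corp", "symbol": "ACME", "exchange": "NYSE"}
    # Unrecognized exchanges resolve to None and are simply not appended.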
@staticmethod
def validate_company_values(values):
"""
Args:
            values: dictionary of Company attributes
        Returns:
            True if valid, False if not
"""
d = values.copy()
symbol = d.get('symbol')
if symbol:
d.pop('symbol')
if not Company.validate_symbol(symbol):
current_app.logger.debug("Failed to validate symbol: {}".format(values))
return False
for key in d.keys():
value = d.get(key)
if not Company.validate_name(value):
current_app.logger.debug("Failed to validate name: {}".format(values))
return False
return True
@staticmethod
def validate_symbol(symbol):
if not symbol:
return False
match = re.match(current_app.config['VALID_COMPANY_SYMBOL'], symbol)
return True if match else False
@staticmethod
def validate_name(name):
if not name:
return False
match = re.match(current_app.config['VALID_COMPANY_NAME'], name)
return True if match else False
@staticmethod
def load_json(data):
companies = json.loads(data).get('company')
for company in companies:
c = Company.from_json(company)
db.session.add(c)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __repr__(self):
return "<{cls}|Symbol: {symbol}, Name: {company}>".format(cls=self.__class__, symbol=self.symbol, company=self.name)
class Indicators(db.Model):
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.Date, default=date.today)
roe = db.Column(db.Float, nullable=True)
fcf = db.Column(db.Float, nullable=True)
ev2ebitda = db.Column(db.Float, nullable=True)
company_id = db.Column(db.Integer, db.ForeignKey('company.id'))
attributes = {
'Company.symbol': "Ticker",
'roe': "ROE (%)",
'fcf': "Free Cash Flow",
'ev2ebitda': "EV/EBITDA",
}
ignore_attrs = ['id', 'company_id']
    # A bare UniqueConstraint in the class body is never attached to the
    # table; it must be declared through __table_args__ to take effect.
    __table_args__ = (UniqueConstraint(date, company_id, name="one_per_company_per_day"),)
@classmethod
def get_attributes(cls, with_symbol=True):
"""
Return all attributes of the class
"""
if with_symbol:
return cls.attributes.keys()
else:
return [i for i in Indicators.get_attributes() if i != 'Company.symbol']
@classmethod
def get_attributes_no_fk(cls):
"""
Get all attributes, excluding the foreign keys prefix (ie: company.symbol).
"""
order_bys = cls.attributes.keys()
order_bys_no_fk = {}
for k,v in cls.attributes.iteritems():
if k.find(".") == -1:
order_bys_no_fk[k] = v
else:
order_bys_no_fk[k.split(".")[1]] = v
return order_bys_no_fk
@staticmethod
def generate_fake(count=10):
import forgery_py
from random import random, seed
from sqlalchemy.exc import IntegrityError
seed()
companies = Company.query.all()
for c in range(1, count):
date = forgery_py.date.date(True, 0, 1500)
for company in companies:
i = Indicators(date=date,
roe="{0:.2f}".format(random()*0.5),
fcf="{0:.2f}".format(random()*0.5),
ev2ebitda="{0:.2f}".format(random()*0.5),
company_id = company.id
)
db.session.add(i)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def is_duplicate_of_last(self):
"""
Check if an indicator is a duplicate of the last collected value
"""
last_date = self.last_indicator_date_by_company(1)
if not last_date:
return False
i = Indicators.query.filter((Indicators.date == last_date) &
(Indicators.company_id == self.company.id)).first()
# return Indicators.equal_values(self, i)
return self == i
# @staticmethod
# def equal_values(i1, i2):
# """
# Check if an indicator has equal values, other than the date.
# """
# print "compare", i1, i2
# attribs = Indicators.get_attributes_no_fk()
# for k, v in attribs.iteritems():
# if k != "symbol" and k != "ev2ebitda" and k != "id":
# if getattr(i1, k) != getattr(i2, k):
# return False
#
# return True
    def __eq__(self, other):
        """
        Check if an indicator has equal values, other than the date.
        """
        if other is None:
            return False
        attribs = Indicators.get_attributes_no_fk()
        for k, v in attribs.iteritems():
            if k != "symbol" and k != "ev2ebitda":
                if getattr(self, k) != getattr(other, k):
                    return False
        return True
@staticmethod
def from_json(json_indicators):
"""
Args:
json_indicators: name, symbol, **attributes
If company does not exist, must provide a name and symbol to create it.
Returns:
An Indicator object, with sanitized values
"""
symbol = json_indicators.get('symbol')
if not symbol:
current_app.logger.debug("Indicator's symbol not found.")
return None
indicators = Indicators()
# Get company if it exists, otherwise create it
if not Company.query.filter_by(symbol=symbol).first():
name = json_indicators.get('name') or get_name_from_symbol(symbol)
if not name:
current_app.logger.debug("Company '{}' does not exist.".format(symbol))
return None
company = Company(symbol=symbol, name=name)
db.session.add(company)
db.session.commit()
else:
company = Company.query.filter_by(symbol=symbol).first()
# Go through each key and assign it, with some exceptions
for key in json_indicators.keys():
if key.find(".") == -1 and \
key != 'name' and \
key != 'symbol' and \
key != "company_id" and \
key != "id":
value = float_or_none(json_indicators.get(key))
column = getattr(Indicators, key)
                if value is not None:
print "setting value ", value
setattr(indicators, key, value)
else:
print "value not set"
# If we didn't get the correct value type for a float, use a placeholder
if isinstance(column.type, Float):
print "didn't get a float for", column, key, value
setattr(indicators, key, -999999999999.99)
else:
setattr(indicators, key, json_indicators.get(key))
indicators.company = company
print "Indicators exiting", indicators, indicators.ev2ebitda
return indicators
@staticmethod
def last_indicator_date():
try:
return db.session.query(Indicators.date).order_by(desc("date")).distinct().limit(2).all()[0].date
except IndexError:
return None
def last_indicator_date_by_company(self, index=0, limit=2):
"""
Args:
            index: which date to return (0 is the most recent, 1 the second most recent, etc.); must be less than limit
limit: maximum number of records to return
Returns:
A date, if it exists
"""
try:
val = Indicators.query.join(Company).filter(Company.symbol == self.company.symbol).with_entities(Indicators.date).order_by(desc("date")).limit(limit).all()[index].date
return val
except AttributeError:
# indicator has no associated company?
current_app.logger.debug("No company associated with Indicator?")
return None
except IndexError:
current_app.logger.debug("Index error looking up Indicator")
return None
except IntegrityError as e:
current_app.logger.debug("Integrity Error {}".format(e))
print "roll it back!"
db.session.rollback()
return False
return None
def to_json(self):
indicators = {}
for k,v in self.get_attributes_no_fk().iteritems():
if k == "symbol":
indicators[k] = self.company.symbol
else:
indicators[k] = getattr(self, k)
indicators['id'] = self.id
indicators['date'] = self.date.isoformat()
return indicators
@staticmethod
def load_json(data):
indicators = json.loads(data).get('indicators')
for indicator in indicators:
i = Indicators.from_json(indicator)
if i:
db.session.add(i)
else:
continue
# db.session.add(i)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __repr__(self):
return "<{cls}|Symbol: {symbol}, Date: {date}>".format(cls=self.__class__,
symbol=self.company.symbol,
date=self.date)
#class Sector(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String(50), unique=True, nullable=False)
# siccode = db.Column(db.Integer, unique=True, nullable=False)
#
# def __repr__(self):
# return "<{cls}|Sector: {name}, SIC code: {siccode}>".format(cls=self.__class__, name=name, siccode=siccode)
#
#
#class Industry(db.Model):
|
[
"Kyle.Flavin@gmail.com"
] |
Kyle.Flavin@gmail.com
|
fa064ee2e57c586c0511f9e1c6c95c722e976f1f
|
c2531698ec9469613a392ed0b3252a7386d06f75
|
/python/raindrops/raindrops.py
|
ebda893e87cf32344e8ad9bb80850a6a88963539
|
[] |
no_license
|
KlimDos/exercism_traning
|
058eb36c90499c7fbf76c5b11dcdf0500dfe3475
|
22c3b1e8c3e5d25f96840cc6283eaf20ddfaeee4
|
refs/heads/master
| 2022-12-15T18:58:55.566046
| 2020-07-08T19:24:37
| 2020-07-08T19:24:37
| 161,189,724
| 0
| 0
| null | 2022-12-11T07:41:40
| 2018-12-10T14:45:16
|
Python
|
UTF-8
|
Python
| false
| false
| 237
|
py
|
def convert(number: int):
result = ""
if number % 3 == 0:
result += "Pling"
if number % 5 == 0:
result += "Plang"
if number % 7 == 0:
result += "Plong"
return result if result else str(number)
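# Quick sanity check of the factor rules above (illustrative):
#   convert(15) -> "PlingPlang"   (both 3 and 5 divide 15)
#   convert(34) -> "34"           (no factor of 3, 5, or 7)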
|
[
"aalimov@wiley.com"
] |
aalimov@wiley.com
|
ea7399fbabd16da51234a2ea1af4e28de0718045
|
5a357e80a49438e68f8a0d6497864e0616d39a0f
|
/mac/google-cloud-sdk/lib/surface/compute/networks/peerings/list_routes.py
|
e025b21fd3d77b96ff6b6678ef664fccd2e8146a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bopopescu/cndw
|
2dc443644a69ee6ef132414916bd14fe5cd7e60d
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
refs/heads/master
| 2022-11-23T16:11:46.077619
| 2019-12-16T04:51:01
| 2019-12-16T04:51:01
| 282,462,882
| 0
| 0
|
Apache-2.0
| 2020-07-25T14:42:23
| 2020-07-25T14:42:23
| null |
UTF-8
|
Python
| false
| false
| 4,161
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing internal IP addresses in a network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
from googlecloudsdk.core.resource import resource_projector
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class ListRoutes(base.ListCommand):
"""List received or advertised routes for a VPC network peering."""
example = """\
List received routes for VPC network peering in us-central1:
$ {command} peering-name \
--network=network-name --region=us-central1 --direction=INCOMING
"""
detailed_help = {
'brief':
'List received or advertised routes for a VPC network peering.',
'DESCRIPTION':
"""\
*{command}* is used to list received or advertised routes for a VPC
network peering. This includes subnetwork routes, static custom routes,
and dynamic custom routes.
""",
'EXAMPLES':
example
}
@staticmethod
def Args(parser):
parser.add_argument('name', help='Name of the peering to list routes for.')
parser.add_argument(
'--network', required=True, help='Network of the peering.')
parser.add_argument(
'--region', required=True, help='Region to list the routes for.')
parser.add_argument(
'--direction',
required=True,
choices={
'INCOMING': 'To list received routes.',
'OUTGOING': 'To list advertised routes.',
},
type=lambda x: x.upper(),
help="""\
Direction of the routes to list. To list received routes, use
`INCOMING`. To list advertised routes, use `OUTGOING`.
""")
parser.display_info.AddFormat("""\
table(
dest_range,
type,
next_hop_region,
priority,
status)
""")
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client.apitools_client
messages = client.MESSAGES_MODULE
project = properties.VALUES.core.project.Get(required=True)
list_request = messages.ComputeNetworksListPeeringRoutesRequest
request = list_request(
project=project,
network=args.network,
peeringName=args.name,
region=args.region)
directions = list_request.DirectionValueValuesEnum
if args.direction == 'INCOMING':
request.direction = directions.INCOMING
else:
request.direction = directions.OUTGOING
items = list_pager.YieldFromList(
client.networks,
request,
method='ListPeeringRoutes',
field='items',
limit=args.limit,
batch_size=None)
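    # list_pager.YieldFromList follows the API's page tokens transparently,
    # so `items` is a lazy generator over every peering route.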
def _TransformStatus(direction, imported):
"""Create customized status field based on direction and imported."""
if imported:
if direction == 'INCOMING':
return 'accepted'
else:
return 'accepted by peer'
else:
if direction == 'INCOMING':
return 'rejected by config'
else:
return 'rejected by peer config'
for item in items:
route = resource_projector.MakeSerializable(item)
# Set "status" to "Imported" or "Imported by peer" based on direction.
route['status'] = _TransformStatus(args.direction, route['imported'])
yield route
|
[
"raphael.carrier@gmail.com"
] |
raphael.carrier@gmail.com
|
43cf537caaa7f052df58fdc82cf6db71f6b4dfa8
|
4ed3a2d59267a8c5acae1364e786856f0fdc12c6
|
/app.py
|
b9b00d4382cb185e3cec188f3674db8be489ebea
|
[] |
no_license
|
brunnoaraujo/folhinha
|
5033ffc1dd079e648de36c63d10122a944b049d7
|
036937862d6e64754bd12d8300b2e90558030b3d
|
refs/heads/master
| 2021-01-13T10:22:14.988715
| 2016-10-31T04:22:56
| 2016-10-31T04:22:56
| 72,214,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
from flask import Flask, request, g, render_template
import datetime
import time
import sqlite3
import json
app = Flask(__name__)
lista = {}
@app.route("/" , methods=['GET', 'POST'])
def index():
global lista
te = datetime.datetime.utcnow()
t = time.mktime(te.timetuple())
temp = request.values.get("temp")
ldr = request.values.get("ldr")
conn = sqlite3.connect('sample.db')
c = conn.cursor()
c.execute("INSERT into dados (temp, ldr, hora) VALUES (?, ?, ?)", (temp, ldr, t))
conn.commit()
conn.close()
lista = {'temperatura': temp, 'ldr': ldr, 'hora': t}
print(ldr)
print(temp)
print(t)
return "ok"
@app.route("/value" , methods=['GET', 'POST'])
def value():
    # Guard: meaningful only after at least one sensor request to "/".
    if not lista:
        return json.dumps([])
    data = [lista['hora'], lista['temperatura']]
    return json.dumps(data).replace('"', '')
@app.route("/graph")
def graph():
return render_template('chart.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9090, debug=True)
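# Illustrative sensor request (hypothetical host and values):
#   curl "http://localhost:9090/?temp=21.5&ldr=340"
# A subsequent GET /value returns something like [1477886400.0, 21.5].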
|
[
"brunnobaraujo@gmail.com"
] |
brunnobaraujo@gmail.com
|
8a790c71e531e5c37c1d97879cb3ef664a329ffd
|
952dbac03b90b23a2f56e7f43ca1fc7a2df31555
|
/.buildkite/images/docker/test_project/test_pipelines/test_pipelines/schedules.py
|
8e8504611192372750f1ec773a939cb4fce48130
|
[
"Apache-2.0"
] |
permissive
|
zzztimbo/dagster
|
2b5c5413d16d4ca726259ed0b9f1e48648f5f7ec
|
5cf8f159183a80d2364e05bb30362e2798a7af37
|
refs/heads/master
| 2020-12-23T07:40:27.230870
| 2020-03-28T19:35:56
| 2020-03-30T22:34:47
| 251,444,191
| 1
| 0
|
Apache-2.0
| 2020-03-30T22:37:49
| 2020-03-30T22:37:49
| null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
import datetime
from dagster import schedules
from dagster.core.definitions.decorators import daily_schedule
from .repo import optional_outputs
@daily_schedule(
pipeline_name=optional_outputs.name, start_date=datetime.datetime(2020, 1, 1),
)
def daily_optional_outputs(_date):
return {}
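# Returning an empty dict runs the pipeline daily with its default run config.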
@schedules
def define_schedules():
return [daily_optional_outputs]
|
[
"nate@elementl.com"
] |
nate@elementl.com
|
e5905f69989260dab29aaa542a08875f02775413
|
88c67aed0f059523f545053286c92ed78f82227c
|
/lib/config.py
|
4c9e728449b76c911f8680937a0bba294de824f8
|
[] |
no_license
|
dravix/pyventa
|
bcc173342d3880fff4a77eb22f115447e6a2f744
|
2080925db7198ce9e799863c261671cef37b05d0
|
refs/heads/master
| 2021-01-01T18:38:11.089306
| 2018-07-17T22:59:59
| 2018-07-17T22:59:59
| 7,473,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,921
|
py
|
# -*- coding: utf-8 -*-
import sys,os, base64, datetime, tarfile, ftplib
from os import listdir
from os.path import isfile, join, expanduser, exists, basename
from PyQt4 import QtCore, QtGui
from ui.ui_config import Ui_Form
from lib.utileria import Respaldo, editorSimple
from lib.librerias.configurador import Configurador
import MySQLdb
import ConfigParser
from ui.ui_editor_ticket import Ui_Dialog as Editor
from lib.buscador_pop import buscadorPop
if sys.platform == 'linux2':
import cups
class Configs(QtGui.QDialog, Ui_Form):
def __init__(self,parent,id=0):
QtGui.QDialog.__init__(self)
self.setupUi(self)
self.stack.setCurrentIndex(0)
self.curser=parent.curser
self.cursor=parent.cursor
self.datos={'nombre':"Configurador",'descripcion':"Configura el funcionamiento del programa, ademas de guardar las personalizaciones .",'version':"0.05",'id':id,'nivel':2}
self.id=id
self.parent=parent
self.action = QtGui.QAction(self)
self.action.setObjectName(self.datos['nombre']+str(id))
self.action.setToolTip("Configuraciones globales de este punto de venta.")
#self.action.setShortcut("F4")
#self.action.setShortcut(QtGui.QApplication.translate("Principal", "F4", None, QtGui.QApplication.UnicodeUTF8))
icono = QtGui.QIcon()
icono.addPixmap(QtGui.QPixmap(":/modulos/images/png/elegant/config.png"), 0, QtGui.QIcon.Off)
self.icono=":/modulos/images/png/elegant/config.png"
icono.addPixmap(QtGui.QPixmap(self.icono), 2, QtGui.QIcon.Off)
self.action.setIcon(icono)
self.action.setIconVisibleInMenu(True)
self.action.setText(self.datos['nombre'])
#self.connect(self.action, QtCore.SIGNAL("triggered()"), lambda: parent.stackMove(self.id) )
self.connect(self.action, QtCore.SIGNAL("triggered()"), self.launch )
#self.connect(self.benter, QtCore.SIGNAL("clicked()"), lambda: self.stack.setCurrentIndex(1) )
#self.connect(self.bdata, QtCore.SIGNAL("clicked()"), lambda: self.stack.setCurrentIndex(0) )
#self.connect(self.bpyventa, QtCore.SIGNAL("clicked()"), lambda: self.stack.setCurrentIndex(2) )
#self.connect(self.tbFormas, QtCore.SIGNAL("clicked()"), lambda: self.stack.setCurrentIndex(3) )
#self.connect(self.bRespaldo, QtCore.SIGNAL("clicked()"), lambda: self.stack.setCurrentIndex(4) )
self.connect(self.cbPath, QtCore.SIGNAL("clicked()"), self.cambiarFolderFacturas )
self.connect(self.ctPrinter, QtCore.SIGNAL("clicked()"), self.cambiarImpresoraTickets )
self.connect(self.pbEditar, QtCore.SIGNAL("clicked()"), self.editar )
#self.connect(parent.stack, QtCore.SIGNAL("currentChanged(int)"),lambda: parent.aut(self.id,2) )
#self.connect(self.tablaImpuestos, QtCore.SIGNAL("currentItemChanged(QTableWidgetItem*,QTableWidgetItem*)"), self.cambiarImp)
#self.connect(parent.stack, QtCore.SIGNAL("currentChanged(int)"), self.inicia)
self.connect(self.rlBackup, QtCore.SIGNAL("clicked()"), self.respaldarLocal)
self.connect(self.cpgrespaldo, QtCore.SIGNAL("clicked()"), self.respaldarRemoto)
self.connect(self.rsServer, QtCore.SIGNAL("editingFinished ()"), lambda: self.setCambio('respaldo','server',self.rsServer.text()))
self.connect(self.rsUser, QtCore.SIGNAL("editingFinished ()"), lambda: self.setCambio('respaldo','user',self.rsUser.text()))
self.connect(self.rsPass, QtCore.SIGNAL("editingFinished ()"), lambda: self.setCambio('respaldo','pass',self.rsPass.text()))
self.connect(self.rsPath, QtCore.SIGNAL("editingFinished ()"), lambda: self.setCambio('respaldo','rpath',self.rsPath.text()))
self.connect(self.rlPeriod, QtCore.SIGNAL("valueChanged ( int )"), lambda: self.setCambio('respaldo','autolocal',self.rlPeriod.value()))
self.connect(self.rsPeriod, QtCore.SIGNAL("valueChanged ( int )"), lambda: self.setCambio('respaldo','autoremoto',self.rsPeriod.value()))
self.connect(self.bprobar, QtCore.SIGNAL("clicked()"), self.conexion )
self.connect(self.bset, QtCore.SIGNAL("clicked()"), self.setDB )
self.connect(self.bclose, QtCore.SIGNAL("clicked()"), self.close )
self.connect(self.bcreate, QtCore.SIGNAL("clicked()"), self.crearDB )
self.connect(self.brecargar, QtCore.SIGNAL("clicked()"),lambda: self.recargar('empresa') )
self.connect(self.cbEstilos,QtCore.SIGNAL("activated(const QString)"),self.cambiarEstilo)
self.connect(self.cbPrinters,QtCore.SIGNAL("activated(const QString)"),self.setPrinter)
self.connect(self.cbDrivers,QtCore.SIGNAL("activated(const QString)"),self.setDriver)
self.connect(self.clrExplorar,QtCore.SIGNAL("clicked()"),self.cambiarFolderRespaldo)
self.connect(self.cbpreview,QtCore.SIGNAL("clicked()"),self.editarTicket)
self.connect(self.pbfEditar,QtCore.SIGNAL("clicked()"),self.editarFactura)
self.connect(self.pbpEditar_,QtCore.SIGNAL("clicked()"),self.editarPresupuesto)
self.connect(self.pbEditarCorte,QtCore.SIGNAL("clicked()"),self.editarCorte)
self.connect(self.rlRestore,QtCore.SIGNAL("clicked()"),self.restaurar)
self.connect(self.rlRestoreDB,QtCore.SIGNAL("clicked()"),lambda:self.restaurar(True,False))
self.connect(self.rlRestoreConf,QtCore.SIGNAL("clicked()"),lambda:self.restaurar(False,True))
self.connect(self.sbCaja,QtCore.SIGNAL("editingFinished ()"),lambda: self.setCambio('pyventa','caja',self.sbCaja.value()))
self.connect(self.tbBuscarCaja,QtCore.SIGNAL("clicked()"),self.buscador)
self.connect(self.tbLogo,QtCore.SIGNAL("clicked()"),self.cambiarLogo)
self.connect(self.tbRecargarEstilo,QtCore.SIGNAL("clicked()"),self.parent.iniciarEstilo)
self.connect(self.chbRecibePagos,QtCore.SIGNAL("stateChanged ( int )"),self.setRecibePagos)
self.connect(self.chbImprimirCopia,QtCore.SIGNAL("stateChanged ( int )"),self.setImprimeCopiaRecibo)
self.connect(self.chbImprimirTicket,QtCore.SIGNAL("stateChanged ( int )"),self.setImprimeTicket)
self.connect(self.dsbTicketTigger,QtCore.SIGNAL("valueChanged ( float )"),self.setTicketTrigger)
self.connect(self.dsbCopia,QtCore.SIGNAL("valueChanged ( float )"),self.setCopiaTrigger)
#self.connect(self.gbTickets,QtCore.SIGNAL("clicked()"),lambda: self.setCambio('ticket','default',self.boolint(self.gbTickets.isChecked())))
#self.connect(self.gbFacturas,QtCore.SIGNAL("clicked()"),lambda: self.setCambio('facturas','default',self.boolint(self.gbFacturas.isChecked()))) #self.connect(self.bfSave, QtCore.SIGNAL("clicked()"), self.cambiarFolderFacturas )
self.mysql={'host':'','user':'','pass':'','db':'tpv'}
self.mysql['db']='tpv'
self.ruta=join(self.parent.home,"config.cfg")
#self.cfg = ConfigParser.ConfigParser()
self.modulos={'empresa':{},'respaldo':{},'mysql':{},'ticket':{},'factura':{},'nota':{},'pyventa':{}}
self.inicia()
self.checkRespaldo()
self.setupMenus()
#self.listarImp()
def launch(self):
if self.parent.aut(self.datos['nivel'])>0:
self.show()
self.activateWindow ()
def inicia(self):
self.kfg=self.parent.cfg
if (self.kfg.cfg!=None):
self.cfg=self.kfg
mysql=['host','user','pass','db']
for key in mysql:
if self.cfg.has_option("mysql", key):
self.mysql[key]=self.kfg.getDato('mysql',key)
self.modulos['empresa']['nombre']=self.lenombre
self.modulos['empresa']['rfc']=self.lerfc
self.modulos['empresa']['slogan']=self.leslogan
self.modulos['empresa']['direccion']=self.ledir
self.modulos['empresa']['ciudad']=self.leciudad
self.modulos['empresa']['estado']=self.leestado
self.modulos['empresa']['cp']=self.lecp
self.modulos['empresa']['email']=self.lemail
self.modulos['empresa']['telefono']=self.letel
self.modulos['empresa']['pagina']=self.leweb
self.modulos['empresa']['logo']=self.leLogo
self.lblLogo.setPixmap(QtGui.QPixmap(self.kfg.getDato('empresa','logo')))
self.modulos['mysql']['host']=self.tserver
self.modulos['mysql']['user']=self.tuser
self.modulos['mysql']['pass']=self.tpass
self.modulos['mysql']['db']=self.tdb
self.modulos['respaldo']['lpath']=self.rlPath
self.modulos['respaldo']['server']=self.rsServer
self.modulos['respaldo']['user']=self.rsUser
self.modulos['respaldo']['pass']=self.rsPass
self.modulos['respaldo']['rpath']=self.rsPath
remoto=self.kfg.getDato('respaldo','remoto')
local=self.kfg.getDato('respaldo','local')
#if (remoto!=1):
#self.gbRemoto.setChecked(False)
#if local!=1:
#self.gbLocal.setChecked(False)
for modulo in self.modulos:
for key in self.modulos[modulo]:
try:
self.modulos[modulo][key].setText(self.kfg.getDato(modulo,key))
except:
pass
self.sbCaja.setValue(float(self.kfg.getDato("pyventa","caja")))
self.chbRecibePagos.setCheckState(int(self.kfg.getDato("pyventa","cobra")))
#self.gbTickets.setChecked(bool(int(self.kfg.getDato("ticket","default"))))
#self.gbFacturas.setChecked(bool(int(self.kfg.getDato("factura","default"))))
self.gbBox.setChecked(bool(int(self.kfg.getDato("pyventa","caja"))))
for files in os.walk(join(self.parent.home,"estilos")):
for i,name in enumerate(files[2]):
tipo=name.split('.')
if tipo[1]=='css':
self.cbEstilos.addItem(str(name))
        #---- Printing settings
self.cbPath.setText(self.kfg.getDato("factura","ruta"))
try:
conn = cups.Connection ()
printers = conn.getPrinters()
self.cbPrinters.addItems([str(p) for p in conn.getPrinters ()])
self.cbPrinters.setCurrentIndex(self.cbPrinters.findText(self.kfg.getDato("ticket","impresora")))
except:
self.cbPrinters.addItem("Predeterminada")
self.dsbCopia.setValue(float(self.kfg.getDato("ticket","copia-trigger")))
self.dsbTicketTigger.setValue(float(self.kfg.getDato("ticket","trigger")))
self.chbImprimirCopia.setCheckState(int(self.kfg.getDato("ticket","copia")))
self.chbImprimirTicket.setCheckState(int(self.kfg.getDato("ticket","default")))
driverpath=join(self.parent.home,'drivers')
self.cbDrivers.addItems([ f[0:-3] for f in os.listdir(driverpath) if isfile(join(driverpath,f)) and f[-1]=='y' ])
self.cbDrivers.setCurrentIndex(self.cbDrivers.findText(self.kfg.getDato("ticket","driver")))
def setupMenus(self):
respaldos=self.parent.menuPyventa.addMenu("Respaldos")
respaldos.addAction("Generar respaldo",self.respaldarLocal)
respaldos.addAction("Restaurar todo",lambda:self.restaurar(True,True))
respaldos.addSeparator()
respaldos.addAction("Restaurar base de datos",lambda:self.restaurar(True,False))
respaldos.addAction("Restaurar configuraciones",lambda:self.restaurar(False,True))
self.parent.menuHerramientas.addAction("Configuraciones",self.launch)
def cambiarLogo(self):
File = QtGui.QFileDialog()
saveFile = str(File.getOpenFileName(self, "Seleccione la imagen",expanduser("~"),self.tr("Imagenes (*.png *.jpg *.jpeg)")))
if (saveFile!=""):
self.lblLogo.setPixmap(QtGui.QPixmap(saveFile))
self.leLogo.setText(saveFile)
self.setCambio('empresa','logo',saveFile)
def recargar(self,modulo):
for key in self.modulos[modulo]:
try:
print self.modulos[modulo][key].text()
self.cfg.set(modulo,key,str(self.modulos[modulo][key].text()))
except:
pass
self.cfg.guardar()
self.kfg=Configurador()
msgBox=QtGui.QMessageBox()
msgBox.setText("Se han guardado las configuraciones")
msgBox.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
msgBox.exec_()
def setCambio(self,modulo,propiedad,valor):
try:
self.cfg.set(str(modulo),str(propiedad),str(valor))
except ConfigParser.Error,e:
print "({0},{1},{2}), No se guardo la configuracion".format(modulo,propiedad,valor),e
else:
#self.cfg.guardar()
self.parent.cfg=self.cfg
def setDB(self):
if self.cfg!=None:
pass
else:
self.cfg.add_section('mysql')
self.cfg.set('mysql','host',self.mysql['host'])
self.cfg.set('mysql','user',self.mysql['user'])
self.cfg.set('mysql','pass',base64.b64encode(self.mysql['pass']))
self.cfg.set('mysql','db',self.mysql['db'])
self.cfg.guardar()
self.parent.conexion()
self.display.setText("<h1>Se ha guardado correctamente.</h1><p>Su conexion ha sido guardada y esta lista para su uso. </p>")
msgBox=QtGui.QMessageBox()
msgBox.setText("Se ha establecido la base de datos.")
msgBox.setInformativeText("Desea usted empezar a trabajar con este punto venta? <br> <i>Si desea continuar configurando pulse cancelar<i>")
msgBox.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
ret=msgBox.exec_()
if ret==QtGui.QMessageBox.Ok:
self.parent.insert()
def crearDB(self):
fi=open('./perfil/db.sql')
tpv=fi.read()
self.mysql['host']=str(self.tserver.text())
self.mysql['user']=str(self.tuser.text())
self.mysql['pass']=str(self.tpass.text())
db = MySQLdb.connect(self.mysql['host'], self.mysql['user'], self.mysql['pass'])
stout=db.query(tpv)
self.display.setText('<p>'+str(stout)+'</p>')
def conexion(self):
host=str(self.tserver.text())
user=str(self.tuser.text())
password=str(self.tpass.text())
db=str(self.tdb.text())
try:
db = MySQLdb.connect(host, user, password,db)
except MySQLdb.Error, e:
if (e.args[0]==1049):
self.bcreate.setEnabled(True)
self.display.setText('<h1>No se encontro ninguna base de datos en el servidor.</h1>\
</br> <ol><li>Puede crear una pulsando el boton de CREAR BASE DE DATOS</li>\
<li>Puede buscar en otro servidor</li></ol>')
if (e.args[0]==1045):
self.display.setText('<h1>Acceso denegado, Usuario y/o contrasena incorrecta.</h1>\
</br> <ol><li>Es posible que solo tenga que intentar con otra contrasena</li>\
<li>O que la contrasena no sea para este usuario</li></ol>')
if (e.args[0]==2005):
self.display.setText('<h1>No se encontro al servidor.</h1>\
</br> <ol><li>Pruebe cambiando el nombre del servidor, puede ser IP, o su nombre DNS en la red local</li>\
<li>Averigue si el servidor esta disponible en red</li></ol>')
else:
self.bset.setEnabled(True)
self.display.setText('<h1>Conectado.</h1><p>Guarde esta configuracion para que Pyventa se conecte usando esta base de datos</p>')
self.mysql['host']=str(self.tserver.text())
self.mysql['user']=str(self.tuser.text())
self.mysql['pass']=str(self.tpass.text())
self.mysql['db']=str(self.tdb.text())
def explorer(self):
File = QtGui.QFileDialog()
return File.getExistingDirectory(self, "Escoga un directorio.",expanduser('~'))
def listarImp(self):
head=('Nombre','Porciento')
col='`'
col+='`,`'.join(head)
col+='`'
sql="SELECT "+col+" FROM impuestos; "
self.parent.cursor.execute(sql)
result = self.parent.cursor.fetchall()
self.tablaImpuestos.setColumnCount(len(head))
self.tablaImpuestos.setRowCount(len(result))
for i,data in enumerate(head):
            item = QtGui.QTableWidgetItem(1)
item.setText(str(data))
self.tablaImpuestos.setHorizontalHeaderItem(i,item)
for i,elem in enumerate(result):
for j,data in enumerate(elem):
                item = QtGui.QTableWidgetItem(1)
item.setText(str(data))
self.tablaImpuestos.setItem(i,j,item)
self.tablaImpuestos.resizeColumnsToContents()
def cambiarImpresoraTickets(self):
printer=QtGui.QPrinter()
dlg=QtGui.QPrintDialog(printer, self)
if dlg.exec_()==QtGui.QDialog.Accepted:
self.setCambio("ticket","impresora",str(printer.printerName()))
def cambiarFolderFacturas(self):
folder=self.explorer()
self.setCambio("factura","ruta",folder)
self.cbPath.setText(folder)
def cambiarEstilo(self,index):
self.setCambio("pyventa","estilo",index)
kcss = open("%s/estilos/%s"%(self.parent.home,index),"r")
styname=index.split('.')[0]
if exists("/usr/share/pyventa/images/png/%s"%styname):
self.setCambio("pyventa","resolucion",styname)
estilo=kcss.read()
self.parent.setStyleSheet(estilo)
kcss.close()
def cambiarFolderRespaldo(self):
folder=self.explorer()
self.setCambio("respaldo","lpath",folder)
self.rlPath.setText(folder)
def respaldarLocal(self):
if self.parent.aut(2)>0:
RES=Respaldo()
out=RES.respaldarLocal()
msgBox=QtGui.QMessageBox(QtGui.QMessageBox.Information,"El respaldo fue generado.","Se ha creado el respaldo %s."%out,QtGui.QMessageBox.Close,self)
msgBox.exec_()
return out
def respaldarRemoto(self):
ftp_servidor = self.kfg.getDato('respaldo','server')
ftp_usuario = self.kfg.getDato('respaldo','user')
ftp_clave = self.kfg.getDato('respaldo','pass')
ftp_raiz = self.kfg.getDato('respaldo','rpath')
        fichero_origen = self.respaldarLocal()  # path of the file we are going to upload
        fichero_destino = basename(fichero_origen)  # name the file will have on the server
        # Connect to the server
try:
s = ftplib.FTP(ftp_servidor, ftp_usuario, ftp_clave)
try:
f = open(fichero_origen, 'rb')
s.cwd(ftp_raiz)
s.storbinary('STOR ' + fichero_destino, f)
f.close()
s.quit()
except:
print "No se ha podido encontrar el fichero " + fichero_origen
except:
print "No se ha podido conectar al servidor " + ftp_servidor
def checkRespaldo(self):
hoy=datetime.date.today()
self.dia=int(hoy.strftime("%d"))
if exists(join(str(self.kfg.getDato('respaldo','lpath')),"respaldo_"+self.kfg.getDato("empresa","nombre")+"-pyventa_"+str(hoy.strftime("%d-%m-%Y"))+".tar.bz2"))==False:
autolocal=int(self.kfg.getDato("respaldo","autolocal"))
autoremoto=int(self.kfg.getDato("respaldo","autoremoto"))
if autoremoto!=0 :
if self.dia%autoremoto==0:
self.respaldarRemoto()
elif autolocal!=0 :
if self.dia%autolocal==0:
self.respaldarLocal()
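    # Backups fire when the day of the month is a multiple of the configured
    # period; a configured remote schedule takes precedence over the local one.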
def restaurar(self,database=True,config=True):
File = QtGui.QFileDialog()
saveFile = File.getOpenFileName(self, "Escoga el archivo de respaldo",self.kfg.getDato('respaldo','lpath'),self.tr("Respaldos (*.tar.gz *.tar.bz2 *.zip)"))
if (saveFile!=""):
rs=Respaldo()
self.setCursor(QtGui.QCursor(3))
if rs.restaurar(saveFile,database,config):
self.setCursor(QtGui.QCursor(0))
msgBox=QtGui.QMessageBox(QtGui.QMessageBox.Information,"El respaldo ha sido restaurado.","La base de datos ha sido restaurada, todos los cambios hechos desde la fecha del respaldo, han sido eliminados.",QtGui.QMessageBox.Close,self)
msgBox.exec_()
def editarTicket(self):
editor=editorSimple(self.parent,join(self.parent.home,"ticket.xml"))
editor.exec_()
self.cfg.recargar()
def editarFactura(self):
editor=editorSimple(self.parent,join(self.parent.home,"formas","factura.cfg"))
editor.exec_()
self.cfg.recargar()
def editarPresupuesto(self):
editor=editorSimple(self.parent,join(self.parent.home,"formas","presupuesto.xml"))
editor.exec_()
self.cfg.recargar()
def editar(self):
editor=editorSimple(self.parent,join(self.parent.home,"config.cfg"))
editor.exec_()
self.cfg.recargar()
def editarCorte(self):
editor=editorSimple(self.parent,join(self.parent.home,"corte.xml"))
editor.exec_()
self.cfg.recargar()
def buscador(self):
sql="SELECT num_caja, caja, maquina from cajas;"
app=buscadorPop(self,'',1,['Num_caja','Nombre','Maquina'],'cajas')
        # Parent widget, search text, column index, list of columns, SQL table, multi-select bool
ret=app.exec_()
if ret>0:
caja=app.selected()
if len(caja)>0 and len(caja[0])>0:
self.sbCaja.setValue(int(caja[0][0]))
self.setCambio('pyventa','caja',self.sbCaja.value())
#print app.getFilas()
#====SETTERS===
def setId(self,ide):
self.id=ide
self.datos['id']=ide
def setNivel(self,nivel):
self.datos['nivel']=nivel
def setRecibePagos(self,bo):
self.setCambio('pyventa','cobra',bo)
def setPrinter(self,st):
self.setCambio('ticket','impresora',st)
def setDriver(self,st):
self.setCambio('ticket','driver',st)
def setTicketTrigger(self,num):
self.setCambio('ticket','trigger',str(num))
def setImprimeCopiaRecibo(self,bo):
self.setCambio('ticket','copia',bo)
if bo:
self.setCambio('ticket','copia-trigger',str(self.dsbCopia.value()))
def setImprimeTicket(self,bo):
self.setCambio('ticket','default',bo)
if bo:
self.setCambio('ticket','trigger',str(self.dsbTicketTigger.value()))
def setCopiaTrigger(self, val):
self.setCambio('ticket','copia-trigger',str(self.dsbCopia.value()))
#====GETTERS====
def datos(self):
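        # Note: the instance attribute self.datos assigned in __init__ shadows
        # this method, so attribute lookups return the dict directly.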
return self.datos
def boolint(self,boo=True):
        # takes a boolean and converts it to 1 or 0
if not boo: return 0
else: return 1
def closeEvent(self,event):
self.cfg.guardar()
if __name__=="__main__":
app = QtGui.QApplication(sys.argv)
aw = modulo(app,1)
aw.show()
sys.exit(app.exec_())
|
[
"dravix@gmail.com"
] |
dravix@gmail.com
|
35bab64ed3684cc4d00f55c9dd9187cd697362c4
|
d6274dbccbb0e414d23f177288d03c2573835ead
|
/pythontest/venv/bin/pytest
|
b44661daa0f68c0b0d0dc845bea49ef5a528e70b
|
[] |
no_license
|
MoTo-LaBo/Python-Guideline
|
79d24aa1326a167b76856c5d4c6a45ee55254388
|
cca5ce5b901c13ba4a3428168bffd192016259ba
|
refs/heads/main
| 2023-09-01T09:55:34.744971
| 2021-10-11T14:36:38
| 2021-10-11T14:36:38
| 374,849,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
#!/Users/moto/Dropbox/udemy/PythonLecture/pythontest/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pytest import console_main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(console_main())
|
[
"moto.labo.desgin@gmail.com"
] |
moto.labo.desgin@gmail.com
|
|
c5d0fe317f2faff7cf4a4bf824c2bdf6f19d51fe
|
9ac42ea56826dfdb8a584dc6dedd2b181dc5cffa
|
/opl/transportation_CPLEX_C_C++_Python/transport.py
|
61f4a468dfd7c9826214e5a330cd519df095ce56
|
[] |
no_license
|
claudiosa/CCS
|
e8f731bf108d1221ae62017f82d088cb49d5c948
|
08bb0f32aba6a1a6b2ea605552d15f9cbc06d2be
|
refs/heads/master
| 2023-09-01T01:28:14.685489
| 2023-08-27T03:48:57
| 2023-08-27T03:48:57
| 44,081,079
| 41
| 23
| null | 2021-12-06T22:11:57
| 2015-10-12T03:03:43
|
TeX
|
UTF-8
|
Python
| false
| false
| 5,150
|
py
|
#!/usr/bin/python
# --------------------------------------------------------------------------
# File: examples/src/python/transport.py
# Version 12.9.0
# --------------------------------------------------------------------------
# Licensed Materials - Property of IBM
# 5725-A06 5725-A29 5724-Y48 5724-Y49 5724-Y54 5724-Y55 5655-Y21
# Copyright IBM Corporation 2008, 2019. All Rights Reserved.
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
# --------------------------------------------------------------------------
"""
Model piecewise linear cost coefficients.
The problem is a simple transportation model. Set the function argument
to 0 for a convex piecewise linear model and to 1 for a concave piecewise
linear model.
The user must choose the model type on the command line:
python transport.py 0
python transport.py 1
"""
from __future__ import print_function
import sys
import cplex
from cplex.exceptions import CplexError
def transport(concave):
supply = [1000.0, 850.0, 1250.0]
nbSupply = len(supply)
demand = [900.0, 1200.0, 600.0, 400.0]
nbDemand = len(demand)
n = nbSupply * nbDemand
    if concave:
        pwl_slope = [120.0, 80.0, 50.0]  # decreasing slopes: concave cost
    else:
        pwl_slope = [30.0, 80.0, 130.0]  # increasing slopes: convex cost
def varindex(m, n):
return m * nbDemand + n
# The x coordinate of the last break point of pwl
k = 0
    # Build independent rows: [[0.0] * 4] * n would alias every row to the
    # same inner list, so the last iteration's breakpoints would win.
    pwl_x = [[0.0] * 4 for _ in range(n)]
    pwl_y = [[0.0] * 4 for _ in range(n)]
for i in range(nbSupply):
for j in range(nbDemand):
if supply[i] < demand[j]:
midval = supply[i]
else:
midval = demand[j]
pwl_x[k][1] = 200.0
pwl_x[k][2] = 400.0
pwl_x[k][3] = midval
pwl_y[k][1] = pwl_x[k][1] * pwl_slope[0]
pwl_y[k][2] = pwl_y[k][1] + \
pwl_slope[1] * (pwl_x[k][2] - pwl_x[k][1])
pwl_y[k][3] = pwl_y[k][2] + \
pwl_slope[2] * (pwl_x[k][3] - pwl_x[k][2])
k = k + 1
# Build model
model = cplex.Cplex()
model.set_problem_name("transport_py")
model.objective.set_sense(model.objective.sense.minimize)
# x(varindex(i, j)) is the amount that is shipped from supplier i to
# recipient j
colname_x = ["x{0}".format(i + 1) for i in range(n)]
model.variables.add(obj=[0.0] * n, lb=[0.0] * n,
ub=[cplex.infinity] * n, names=colname_x)
# y(varindex(i, j)) is used to model the PWL cost associated with
# this shipment.
colname_y = ["y{0}".format(j + 1) for j in range(n)]
model.variables.add(obj=[1.0] * n, lb=[0.0] * n,
ub=[cplex.infinity] * n, names=colname_y)
    # Each supplier must ship out exactly its available supply
for i in range(nbSupply):
ind = [varindex(i, j) for j in range(nbDemand)]
val = [1.0] * nbDemand
row = [[ind, val]]
model.linear_constraints.add(lin_expr=row,
senses="E", rhs=[supply[i]])
    # Each demand must be met exactly
for j in range(nbDemand):
ind = [varindex(i, j) for i in range(nbSupply)]
val = [1.0] * nbSupply
row = [[ind, val]]
model.linear_constraints.add(lin_expr=row,
senses="E", rhs=[demand[j]])
# Add the PWL constraints
for i in range(n):
# preslope is the slope before the first breakpoint. Since the
# first breakpoint is (0, 0) and the lower bound of y is 0, it is
# not meaningful here. To keep things simple, we re-use the
# first item in pwl_slope.
# Similarly, postslope is the slope after the last breakpoint.
# We just use the same slope as in the last segment; we re-use
# the last item in pwl_slope.
model.pwl_constraints.add(vary=n + i,
varx=i,
preslope=pwl_slope[0],
postslope=pwl_slope[-1],
breakx=pwl_x[i],
breaky=pwl_y[i],
name="p{0}".format(i + 1))
# solve model
model.solve()
model.write('transport_py.lp')
# Display solution
print()
print("Solution status :", model.solution.get_status())
print("Cost : {0:.2f}".format(
model.solution.get_objective_value()))
print()
print("Solution values:")
for i in range(nbSupply):
print(" {0}: ".format(i), end='')
for j in range(nbDemand):
print("{0:.2f}\t".format(
model.solution.get_values(varindex(i, j))),
end='')
print()
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Specify an argument to choose between convex and "
"concave problems")
print("Usage: python transport.py <model>")
print(" model = 0 -> convex piecewise linear model")
print(" model = 1 -> concave piecewise linear model")
sys.exit(-1)
    concave = bool(int(sys.argv[1]))
    transport(concave)
|
[
"claudio.sa@udesc.br"
] |
claudio.sa@udesc.br
|
a8e9b3a419e89b46c333a6c66a40139256b425cf
|
28957cec8b6f2a4e9c1f9e9ff794e40a3be44af0
|
/skk-gradient_method -0.py
|
2bae77e298c63fbe91dd8aaa5453b7adebcebe3e
|
[] |
no_license
|
chanmin07/SKK_py
|
892c2f85309177b2833f9d4f97944d3c4b712159
|
c075bfb9d2246d9510a7325e0263f1c9ee68e3fd
|
refs/heads/master
| 2023-09-03T14:45:18.868555
| 2021-10-21T00:25:06
| 2021-10-21T00:25:06
| 414,959,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
# coding: utf-8
import numpy as np
def numerical_gradient(f, x):
h = 1e-4 # 0.0001
    grad = np.zeros_like(x)  # create an array with the same shape as x
for idx in range(x.size):
tmp_val = x[idx]
        # compute f(x+h)
x[idx] = float(tmp_val) + h
fxh1 = f(x)
        # compute f(x-h)
x[idx] = tmp_val - h
fxh2 = f(x)
grad[idx] = (fxh1 - fxh2) / (2*h)
        x[idx] = tmp_val  # restore the original value
return grad
def gradient_descent(f, init_x, lr=0.01, step_num=100):
x = init_x
for i in range(step_num):
grad = numerical_gradient(f, x)
x -= lr * grad
return x
def function_2(x):
return x[0]**2 + x[1]**2
init_x = np.array([-3.0, 4.0])
result=gradient_descent(function_2, init_x, lr=0.1, step_num=100)
print(result)
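# Expected output: a point essentially at the minimum (0, 0) of
# function_2, roughly [-6.1e-10  8.1e-10] up to float rounding.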
|
[
"cksals2589@naver.com"
] |
cksals2589@naver.com
|
66b6863e2c5b5b8478fe87707f387a7bf7465e05
|
cd49a02f2cd3b5fbce7afb035eaf82d613b6d1ca
|
/build/franka_visualization/catkin_generated/generate_cached_setup.py
|
86c2be0d38399384481e0502cb199a73ea072a3b
|
[] |
no_license
|
robwoidi/ws_ur10e_hand
|
f7f1f00f61e0378e893469b3574bc13c72b8beb1
|
96d49c2a925309e478d937bb84c417cf6a3f9f7d
|
refs/heads/master
| 2023-08-20T14:56:04.057416
| 2021-10-28T08:35:58
| 2021-10-28T08:35:58
| 397,485,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,372
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/noetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/noetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/woidi/ws_ur10e_hand/devel;/home/woidi/ws_moveit/devel;/opt/ros/noetic'.split(';'):
python_path = os.path.join(workspace, 'lib/python3/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/woidi/ws_ur10e_hand/devel/.private/franka_visualization/env.sh')
output_filename = '/home/woidi/ws_ur10e_hand/build/franka_visualization/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"woidi@t-online.de"
] |
woidi@t-online.de
|
836715d187fc3691f73986fa79e836102e97c9bf
|
1c390cd4fd3605046914767485b49a929198b470
|
/leetcode/stone-game-ii.py
|
13afc8289559c47c9c0592b37951de3d6573ad25
|
[] |
no_license
|
wwwwodddd/Zukunft
|
f87fe736b53506f69ab18db674311dd60de04a43
|
03ffffee9a76e99f6e00bba6dbae91abc6994a34
|
refs/heads/master
| 2023-01-24T06:14:35.691292
| 2023-01-21T15:42:32
| 2023-01-21T15:42:32
| 163,685,977
| 7
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
from typing import List  # injected by the LeetCode runtime; needed standalone

class Solution:
def stoneGameII(self, a: List[int]) -> int:
n = len(a)
f = [[-9**9 for j in range(n+1)]for i in range(n+1)]
for i in range(n + 1):
f[n][i] = 0
for i in range(n)[::-1]:
for j in range(1, n + 1):
s = 0
for k in range(i, min(n, i + 2 * j)):
s += a[k]
f[i][j] = max(f[i][j], s - f[k + 1][max(j, k - i + 1)])
return (sum(a) + f[0][1]) // 2
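# f[i][j] is the best score differential (mover minus opponent) with piles
# a[i:] remaining and M = j; (sum(a) + f[0][1]) // 2 converts the optimal
# differential back into Alice's absolute stone count.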
|
[
"wwwwodddd@gmail.com"
] |
wwwwodddd@gmail.com
|
00237c737bbd82f3c298d93497fbbacd3a708295
|
0cb8c0b66c3f514ffb028d91eb7c2c3e114ac102
|
/simpleFrameId/main.py
|
c39b8bc1fefc622948c1a50f442e826cb534b5d3
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] |
permissive
|
UKPLab/eacl2017-oodFrameNetSRL
|
5ec373e5976d08c8d28eaad8834796631cddc361
|
d30e23d724c911d001cc5ca8f28bdac86eee1ce4
|
refs/heads/master
| 2023-09-04T04:37:35.600024
| 2017-04-03T14:55:21
| 2017-04-03T14:55:21
| 78,657,081
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,207
|
py
|
from globals import *
from data import get_graphs
from extras import Lexicon, VSM
from representation import DependentsBowMapper, SentenceBowMapper, DummyMapper
from classifier import SharingDNNClassifier, DataMajorityBaseline, LexiconMajorityBaseline, WsabieClassifier
from evaluation import Score
from reporting import ReportManager
from config import Config
from resources import ResourceManager
import time
from numpy import random
HOME = "/home/local/UKP/martin/repos/frameID/" # adjust accordingly
if __name__ == "__main__":
random.seed(4) # fix the random seed
vsms = [EMBEDDINGS_LEVY_DEPS_300] # vector space model to use
lexicons = [LEXICON_FULL_BRACKETS_FIX] # lexicon to use (mind the all_unknown setting!)
multiword_averaging = [False] # treatment of multiword predicates, false - use head embedding, true - use avg
all_unknown = [False, True] # makes the lexicon treat all LU as unknown, corresponds to the no-lex setting
# WSABIE params
num_components = [1500]
max_sampled = [10] # maximum number of negative samples used during WARP fitting 'warp'
num_epochs = [500]
configs = []
for lexicon in lexicons:
for all_unk in all_unknown:
# DummyMapper doesn't do anything
configs += [Config(DataMajorityBaseline, DummyMapper, lexicon, None, False, all_unk, None, None, None)]
configs += [Config(LexiconMajorityBaseline, DummyMapper, lexicon, None, False, all_unk, None, None, None)]
# Add configurations for NN classifiers
for lexicon in lexicons:
for vsm in vsms:
for mwa in multiword_averaging:
for all_unk in all_unknown:
configs += [Config(SharingDNNClassifier, SentenceBowMapper, lexicon, vsm, mwa, all_unk, None, None, None)]
configs += [Config(SharingDNNClassifier, DependentsBowMapper, lexicon, vsm, mwa, all_unk, None, None, None)]
# Add configurations for WSABIE classifiers
for lexicon in lexicons:
for vsm in vsms:
for mwa in multiword_averaging:
for all_unk in all_unknown:
for num_comp in num_components:
for max_sampl in max_sampled:
for num_ep in num_epochs:
configs += [Config(WsabieClassifier, SentenceBowMapper, lexicon, vsm, mwa, all_unk, num_comp, max_sampl, num_ep)]
configs += [Config(WsabieClassifier, DependentsBowMapper, lexicon, vsm, mwa, all_unk, num_comp, max_sampl, num_ep)]
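    # Each Config pairs a classifier with a feature mapper, so every
    # lexicon / embedding / multiword-averaging setting is evaluated with
    # both sentence-level and dependent-level bag-of-words features.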
print "Starting resource manager"
sources = ResourceManager(HOME)
print "Initializing reporters"
reports = ReportManager(sources.out)
print "Running the experiments!"
runs = len(configs)*len(CORPORA_TRAIN)*len(CORPORA_TEST)
print len(configs), "configurations, ", len(CORPORA_TRAIN)*len(CORPORA_TEST), " train-test pairs -> ", \
runs, " runs"
current_train = 0
current_config = 0
current_test = 0
for corpus_train in CORPORA_TRAIN:
current_train += 1
current_config = 0
g_train = get_graphs(*sources.get_corpus(corpus_train))
reports.conll_reporter_train.report(g_train)
for conf in configs:
current_config += 1
start_time = time.time()
lexicon = Lexicon()
# go to configuration, check which lexicon is needed, locate the lexicon in FS, load the lexicon
lexicon.load_from_list(sources.get_lexicon(conf.get_lexicon()))
reports.lexicon_reporter.report(lexicon)
# same for VSM
vsm = VSM(sources.get_vsm(conf.get_vsm()))
mapper = conf.get_feat_extractor()(vsm, lexicon)
# prepare the data
X_train, y_train, lemmapos_train, gid_train = mapper.get_matrix(g_train)
# train the model
clf = conf.get_clf()(lexicon, conf.get_all_unknown(), conf.get_num_components(), conf.get_max_sampled(),
conf.get_num_epochs())
clf.train(X_train, y_train, lemmapos_train)
current_test = 0
for corpus_test in CORPORA_TEST:
score = Score() # storage for scores
score_v = Score() # storage for verb-only scores
score_known = Score() # storage for known lemma-only scores
start_time = time.time()
reports.set_config(conf, corpus_train, corpus_test)
current_test += 1
# prepare test data
g_test = get_graphs(*sources.get_corpus(corpus_test))
reports.conll_reporter_test.report(g_test)
X_test, y_test, lemmapos_test, gid_test = mapper.get_matrix(g_test)
# predict and compare
for x, y_true, lemmapos, gid, g in zip(X_test, y_test, lemmapos_test, gid_test, g_test):
y_predicted = clf.predict(x, lemmapos)
correct = y_true == y_predicted
score.consume(correct, lexicon.is_ambiguous(lemmapos), lexicon.is_unknown(lemmapos), y_true)
if lemmapos.endswith(".v"):
score_v.consume(correct, lexicon.is_ambiguous(lemmapos), lexicon.is_unknown(lemmapos), y_true)
if not lexicon.is_unknown(lemmapos):
score_known.consume(correct, lexicon.is_ambiguous(lemmapos), lexicon.is_unknown(lemmapos), y_true)
reports.result_reporter.report(gid, g, lemmapos, y_predicted, y_true, lexicon)
reports.summary_reporter.report(corpus_train, corpus_test, conf, score, time.time() - start_time)
reports.summary_reporter_v.report(corpus_train, corpus_test, conf, score_v, time.time() - start_time)
reports.summary_reporter_known.report(corpus_train, corpus_test, conf, score_known, time.time() - start_time)
print "============ STATUS: - train", current_train, "/", len(CORPORA_TRAIN), \
"conf", current_config, "/", len(configs),\
"test", current_test, "/", len(CORPORA_TEST)
|
[
"iokuznetsov@gmail.com"
] |
iokuznetsov@gmail.com
|
68d3953ebbdc68af663548b9cb935710c1d4e410
|
1f3f2778e82b49e722cfdec123261760129f1778
|
/python-T3H-baitap/Bai9-wxFormbuilder/baitap_7_5_menu.spec
|
a97bb33750641410cad6346bc8c9e9ca6b4c67df
|
[] |
no_license
|
chuongnh1709/Python-Baitap
|
3d720e53edd34b541f6e0946dc79397dbb4ae6b4
|
cb643b451f711842d16f0cb2e40a4f0abad5c5a5
|
refs/heads/master
| 2022-12-15T00:11:41.983217
| 2018-11-24T11:11:08
| 2018-11-24T11:11:08
| 158,926,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
spec
|
# -*- mode: python -*-
block_cipher = None
a = Analysis(['baitap_7_5_menu.py'],
pathex=['D:\\PYTHON\\Python-T3H\\ThucHanh\\Bai9-wxFormbuilder'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name='baitap_7_5_menu',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
name='baitap_7_5_menu')
|
[
"chuong_ngn@yahoo.com"
] |
chuong_ngn@yahoo.com
|
a72af10701b785c3b85506a80fbf8c8a54581c94
|
334fe3f67d23028fe136d04815513b5cd214bffa
|
/home/migrations/0001_load_initial_data.py
|
ea31e35ea2075d1959a900809a0e92ff75d6ba16
|
[] |
no_license
|
crowdbotics-apps/chaindomains-27569
|
08b77f59ac96e3e388550ee09269009c0f9278a0
|
4940abad1aad9751a78a961ec18314b3847419bc
|
refs/heads/master
| 2023-05-02T16:41:53.135585
| 2021-05-29T19:37:43
| 2021-05-29T19:37:43
| 372,056,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "chaindomains-27569.botics.co"
site_params = {
"name": "ChainDomains",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
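    # update_or_create keyed on id=1 overwrites the default example.com row
    # seeded by django.contrib.sites, keeping SITE_ID=1 pointed at this app.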
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
201ffed9384800043371543a3a0d1d1766fb3c67
|
eaee21263de64696dbfc10e8fb8dfdaad4777e2f
|
/iris-species-engine/marvin_iris_species_engine/data_handler/acquisitor_and_cleaner.py
|
f1c24604001f5ab92c17aea387f617f6e0640077
|
[] |
no_license
|
cctruc/engines
|
8d849113312cc4978243f7a37ae12669dfa3616d
|
50af53ef250b7f980ed7c11f3500b3b3c14bab78
|
refs/heads/master
| 2021-05-08T07:29:45.458697
| 2017-10-10T19:25:21
| 2017-10-10T19:25:21
| 106,857,050
| 0
| 1
| null | 2017-10-13T18:01:37
| 2017-10-13T18:01:37
| null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
#!/usr/bin/env python
# coding=utf-8
"""AcquisitorAndCleaner engine action.
Use this module to add the project main code.
"""
import pandas as pd
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.engine_base import EngineBaseDataHandler
from marvin_python_toolbox.common.data import MarvinData
__all__ = ['AcquisitorAndCleaner']
logger = get_logger('acquisitor_and_cleaner')
class AcquisitorAndCleaner(EngineBaseDataHandler):
def __init__(self, **kwargs):
super(AcquisitorAndCleaner, self).__init__(**kwargs)
def execute(self, **kwargs):
file_path = MarvinData.download_file(url=self.params["data_url"])
iris = pd.read_csv(file_path)
iris.drop('Id', axis=1, inplace=True)
self.initial_dataset = iris
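        # Assumption: MarvinData.download_file caches the file locally, so
        # repeated executions reuse the CSV instead of re-downloading it.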
|
[
"daniel.takabayashi@gmail.com"
] |
daniel.takabayashi@gmail.com
|
b6e58cdb991e28ed538b347209a111b84b668cf4
|
242f9594cf03345e79965fd0c6eff9f5ed3042a9
|
/chapter08 - Functions/exercise8.5_cities.py
|
d73684d842352a09ced167d84df4681a899fc6e7
|
[] |
no_license
|
Eqliphex/python-crash-course
|
f32c9cd864b89ac8d7b1ba0fe612a3a29081ed32
|
952ce3554129a37d0b6ff8a35757f3ddadc44895
|
refs/heads/master
| 2021-04-03T09:05:31.397913
| 2018-08-29T09:14:41
| 2018-08-29T09:14:41
| 124,416,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
def describe_city(city_name, city_country='europa'):
print(city_name.title() + " is in " + city_country.title())
describe_city('reykjavik', 'iceland')
describe_city('paris')
describe_city('copenhagen')
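# Output:
#   Reykjavik is in Iceland
#   Paris is in Europa
#   Copenhagen is in Europa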
|
[
"tbt_paddik@hotmail.com"
] |
tbt_paddik@hotmail.com
|
acfcd4dcb8bb44c5ec44309b14b0f624ae7e5f75
|
0487c30d3d2a26ee62eb9e82c1b1e6edb7cb8b36
|
/tests/mclag/conftest.py
|
9f598fdbba64b9fc824d26f6ac91d4138c23757d
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
gord1306/sonic-mgmt
|
e4047cbcdb600591816215e765c7f30664cc4543
|
05094321ed58270ac06d1a0ef575a4ab9ea3ddd6
|
refs/heads/master
| 2022-12-17T08:05:58.944208
| 2022-06-06T02:34:48
| 2022-06-06T02:34:48
| 195,778,851
| 1
| 0
|
NOASSERTION
| 2019-07-08T09:21:07
| 2019-07-08T09:21:07
| null |
UTF-8
|
Python
| false
| false
| 7,879
|
py
|
import pytest
import ipaddress
import os
from collections import defaultdict
from natsort import natsorted
from tests.common.reboot import reboot
from mclag_helpers import get_dut_routes
from mclag_helpers import get_interconnected_links
from mclag_helpers import get_vm_links
from mclag_helpers import DUT1_INDEX, DUT2_INDEX
from mclag_helpers import PC_NAME_TEMPLATE, SUBNET_CHECK
from mclag_helpers import CONFIG_DB_TEMP, CONFIG_DB_BACKUP, MAX_MCLAG_INTF
from mclag_helpers import TEMPLATE_DIR, PTF_NN_AGENT_TEMPLATE
from mclag_helpers import DEFAULT_SESSION_TIMEOUT, NEW_SESSION_TIMEOUT
from mclag_helpers import MCLAG_DOMAINE_ID
from tests.common.ptf_agent_updater import PtfAgentUpdater
def pytest_addoption(parser):
"""
Adds options to pytest that are used by the mclag test.
"""
parser.addoption(
"--amount_mclag_intf",
action="store",
type=int,
default=6,
help="Amount of mclag interfaces to test, default value is 6",
)
@pytest.fixture(scope='module')
def mclag_intf_num(request):
argument = request.config.getoption("--amount_mclag_intf")
assert(argument <= MAX_MCLAG_INTF)
return argument
@pytest.fixture(scope='module')
def duthost1(duthosts):
return duthosts[DUT1_INDEX]
@pytest.fixture(scope='module')
def duthost2(duthosts):
return duthosts[DUT2_INDEX]
@pytest.fixture(scope='module')
def mg_facts(duthosts, tbinfo):
return {dut.hostname:dut.get_extended_minigraph_facts(tbinfo) for dut in duthosts}
@pytest.fixture(scope='module')
def get_router_macs(duthost1, duthost2):
router_mac1 = duthost1.facts['router_mac']
router_mac2 = duthost2.facts['router_mac']
return router_mac1, router_mac2
@pytest.fixture(scope="module")
def tear_down(duthost1, duthost2, ptfhost, localhost, collect):
"""
Performs tear down of all configuration on PTF and DUTs
Args:
duthost1: DUT host object
duthost2: DUT host object
ptfhost: PTF host object
localhost: localhost object
collect: Fixture which collects main info about link connection
"""
yield
mclag_interfaces = collect[duthost1.hostname]['mclag_interfaces']
cmds_to_del_lags = ['ip link del {}'.format(lag) for lag in mclag_interfaces]
ptfhost.shell_cmds(cmds=cmds_to_del_lags)
ptfhost.remove_ip_addresses()
duthost1.shell("mv {} {}".format(CONFIG_DB_BACKUP, CONFIG_DB_TEMP))
reboot(duthost1, localhost)
duthost2.shell("mv {} {}".format(CONFIG_DB_BACKUP, CONFIG_DB_TEMP))
reboot(duthost2, localhost)
@pytest.fixture(scope="module")
def get_routes(duthost1, duthost2, collect, mg_facts):
"""
Get bgp routes that are advertised to each DUT
Args:
duthost1: DUT host object
duthost2: DUT host object
collect: Fixture which collects main info about link connection
mg_facts: Dict with minigraph facts for each DUT
"""
dut1_routes_all = get_dut_routes(duthost1, collect, mg_facts)
dut2_routes_all = get_dut_routes(duthost2, collect, mg_facts)
dut_1_diff_routes = list(set(dut1_routes_all).difference(set(dut2_routes_all)))
dut_2_diff_routes = list(set(dut2_routes_all).difference(set(dut1_routes_all)))
res1 = natsorted([route for route in dut_1_diff_routes if ipaddress.ip_network(route).subnet_of(ipaddress.ip_network(SUBNET_CHECK))])
res2 = natsorted([route for route in dut_2_diff_routes if ipaddress.ip_network(route).subnet_of(ipaddress.ip_network(SUBNET_CHECK))])
return {duthost1.hostname: res1, duthost2.hostname: res2}
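# A minimal sketch of the subnet filter used above (hypothetical prefixes;
# subnet_of() is available on ipaddress network objects in Python 3.7+):
#   ipaddress.ip_network(u'192.168.1.0/26').subnet_of(ipaddress.ip_network(u'192.168.1.0/24'))  # True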
@pytest.fixture(scope="module")
def collect(duthosts, tbinfo):
"""
Collect main information about link connection from tbinfo
Args:
duthosts: Duthosts fixture
tbinfo: Testbed object
"""
duts_map = tbinfo['duts_map']
res = defaultdict(dict)
for dut in duthosts:
dut_indx = duts_map[dut.hostname]
dut_hostname = dut.hostname
res[dut_hostname]['devices_interconnect_interfaces'] = get_interconnected_links(tbinfo, dut_indx)
res[dut_hostname]['vm_links'] = get_vm_links(tbinfo, dut_indx)
host_interfaces = tbinfo['topo']['ptf_map'][str(dut_indx)]
res[dut_hostname]['vm_link_on_ptf'] = host_interfaces[res[dut_hostname]['vm_links'][0]]
_ = [host_interfaces.pop(vm) for vm in res[dut_hostname]['vm_links'] if vm in host_interfaces.keys()]
res[dut_hostname]['host_interfaces'] = natsorted(host_interfaces)
res[dut_hostname]['ptf_map'] = host_interfaces
res[dut_hostname]['all_links'] = natsorted(res[dut_hostname]['host_interfaces'] + res[dut_hostname]['devices_interconnect_interfaces'] + res[dut_hostname]['vm_links'])
res[dut_hostname]['mclag_interfaces'] = natsorted([PC_NAME_TEMPLATE.format(indx + 1) for indx, _ in enumerate(res[dut_hostname]['host_interfaces'][:-2])])
return res
@pytest.fixture()
def update_and_clean_ptf_agent(duthost1, ptfhost, ptfadapter, collect):
"""
Fixture that will add new interfaces to interfaces map of ptfadapter and remove them
Args:
duthost1: DUT host object
ptfhost: PTF host object
ptfadapter: PTF adapter
collect: Fixture which collects main info about link connection
"""
ptf_agent_updater = PtfAgentUpdater(ptfhost=ptfhost,
ptfadapter=ptfadapter,
ptf_nn_agent_template=os.path.join(TEMPLATE_DIR, PTF_NN_AGENT_TEMPLATE))
mclag_interfaces = collect[duthost1.hostname]['mclag_interfaces']
ptf_agent_updater.configure_ptf_nn_agent(mclag_interfaces)
yield
ptf_agent_updater.cleanup_ptf_nn_agent(mclag_interfaces)
@pytest.fixture()
def change_session_timeout(duthost1, duthost2, keep_and_peer_link_member):
"""
Change default session-timeout and shutdown keepalive link, restore to default setting afterwards
Args:
duthost1: DUT host object
duthost2: DUT host object
        keep_and_peer_link_member: Fixture which holds keepalive and peerlink member for both PEERs
"""
cmd = 'config mclag session-timeout {} {}'
keep_alive_interface = keep_and_peer_link_member[duthost1.hostname]['keepalive']
duthost1.shell(cmd.format(MCLAG_DOMAINE_ID, NEW_SESSION_TIMEOUT))
duthost2.shell(cmd.format(MCLAG_DOMAINE_ID, NEW_SESSION_TIMEOUT))
duthost1.shutdown(keep_alive_interface)
yield
duthost1.shell(cmd.format(MCLAG_DOMAINE_ID, DEFAULT_SESSION_TIMEOUT))
duthost2.shell(cmd.format(MCLAG_DOMAINE_ID, DEFAULT_SESSION_TIMEOUT))
duthost1.no_shutdown(keep_alive_interface)
@pytest.fixture(scope="module")
def keep_and_peer_link_member(duthosts, collect, mg_facts):
"""
Fixture which holds keepalive and peerlink member for both PEERs
Args:
duthosts: Duthosts fixture
collect: Fixture which collects main info about link connection
mg_facts: Dict with minigraph facts for each DUT
"""
res = defaultdict(dict)
for dut in duthosts:
port_indices = {mg_facts[dut.hostname]['minigraph_port_indices'][k]:k for k in mg_facts[dut.hostname]['minigraph_port_indices']}
keep_alive_interface = port_indices[int(collect[dut.hostname]['devices_interconnect_interfaces'][0])]
peer_link_member = port_indices[int(collect[dut.hostname]['devices_interconnect_interfaces'][-1])]
res[dut.hostname]['keepalive'] = keep_alive_interface
res[dut.hostname]['peerlink'] = peer_link_member
return res
@pytest.fixture(scope="module", autouse=True)
def check_topo(tbinfo):
"""
    Fixture that checks if the required t0-mclag topo is set
Args:
tbinfo: Testbed object
"""
if tbinfo['topo']['name'] != 't0-mclag':
pytest.skip("test requires t0-mclag topo to run, current topo - {}".format(tbinfo['topo']['name']))
|
[
"noreply@github.com"
] |
gord1306.noreply@github.com
|
ee4999edd4bfd08c05a04db6537fefa5a7bfa1ed
|
b5b057f010788881e6120bded01acb54f77b2067
|
/api/serializers.py
|
5d08852fcd061a93a70eb51031a273ff9eb346f9
|
[
"MIT"
] |
permissive
|
Fit-Tracker/api
|
b2cda63899e7ce0e60aa511db625be7fb756ae6a
|
417893e7c2b8d8e6f0e03cb97e52c25b59db0437
|
refs/heads/master
| 2021-01-10T17:21:53.861673
| 2015-10-28T18:14:51
| 2015-10-28T18:14:51
| 44,998,478
| 0
| 0
| null | 2015-10-28T18:14:52
| 2015-10-26T21:28:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 983
|
py
|
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Activity, Stat
class StatSerializer(serializers.HyperlinkedModelSerializer):
activity_id = serializers.PrimaryKeyRelatedField(
many=False, read_only=True, source='activity')
timestamp = serializers.DateField()
class Meta:
model = Stat
fields = ('pk', 'activity_id', 'stat', 'timestamp')
class ActivitySerializer(serializers.HyperlinkedModelSerializer):
title = serializers.CharField(max_length=255)
class Meta:
model = Activity
fields = ('pk', 'title', 'user')
class ActivityDetailSerializer(ActivitySerializer):
pass
class UserSerializer(serializers.HyperlinkedModelSerializer):
activities = ActivitySerializer(many=True, read_only=True)
stats = StatSerializer(many=True, read_only=True)
class Meta:
model = User
fields = ('pk', 'username', 'password', 'activities', 'stats')
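# For illustration only (field values are hypothetical), StatSerializer output
# might look like:
#   {"pk": 1, "activity_id": 3, "stat": 42, "timestamp": "2015-10-28"}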
|
[
"rryanburton@gmail.com"
] |
rryanburton@gmail.com
|
dd942f1eee93ca0253b83d71cb58b50e1288383a
|
572f5a0020b6794e20f779c6efd34a9ef28c7cfe
|
/script/fixfa.py
|
29a044335c4b45150272cb0c7edb24082437b7ac
|
[] |
no_license
|
xutaodeng/virushunter
|
43609b9f8cc8d81c5a3e7cfacccadf952057c3d0
|
7370d54ec20ec530be59185a79f8ddc248a25ab8
|
refs/heads/master
| 2022-11-16T16:50:49.176479
| 2020-07-12T07:52:51
| 2020-07-12T07:52:51
| 279,016,293
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
#!/usr/bin/env python
from collections import defaultdict
import operator
import sys
import os
import re
def fixfa(infile, outfile):  # rewrap the FASTA sequence body to 80 characters per line
f = open(infile, 'r')
of = open(outfile, 'w')
of.write(f.readline())#header
seq=[]
for line in f:
seq.append(line.strip())
seq=''.join(seq)
nrow = len(seq)/80
if len(seq)%80 !=0: nrow+=1
for i in xrange(nrow):
start=i*80
end=i*80+80
        of.write(seq[start:end] + '\n')  # slicing past the end just truncates, so no try/except is needed
f.close()
of.close()
if __name__ == '__main__':
infile = sys.argv[1]
outfile =sys.argv[2]
fixfa(infile, outfile)
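# Example invocation (assuming a FASTA file in.fa whose sequence body should be
# rewrapped to 80 columns):
#   python fixfa.py in.fa out.fa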
|
[
"xutaodeng@gmail.com"
] |
xutaodeng@gmail.com
|
30eefc0707aa5dc23ce0de03dd31963a50e58873
|
1850e13cf529af92674c8aad7393d6454cb47742
|
/backend/urls.py
|
35497cc732fbff9dd31afe917a6e64122b9d10c3
|
[] |
no_license
|
Jerodsun/random-walk-backend
|
a6620d33e8ad6525852fdb29468053f7d01aa75e
|
3197bbddf004f3f04be5fd4e62a9481ca6724f4f
|
refs/heads/develop
| 2020-06-29T16:28:14.126842
| 2019-09-13T15:38:29
| 2019-09-13T15:38:29
| 196,791,917
| 0
| 0
| null | 2019-08-05T01:56:49
| 2019-07-14T04:05:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from rest_framework_swagger.views import get_swagger_view
from randomwalk import views
# Routers
# Can register multiple routers
router = routers.DefaultRouter()
router.register(r'randomwalk', views.SampleDataView, 'randomwalk')
router.register(r'blackscholes', views.BlackScholesView, 'blackscholes')
router.register(r'brownianmotion', views.BrownianMotionView, 'brownianmotion')
# Swagger Docs
schema_view = get_swagger_view(title='Random Walk API')
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include(router.urls)),
path('api2/', views.StaticView.as_view()),
path('axios_test/', views.AxiosView.as_view()),
path('swagger/', schema_view)
]
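# With the DefaultRouter registrations above, the include under 'api/' exposes
# endpoints such as /api/randomwalk/ and /api/blackscholes/ (standard DRF router
# behaviour; the exact detail routes depend on the viewsets).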
|
[
"Jerod.sun@live.com"
] |
Jerod.sun@live.com
|
936274e923f6341006ab6fdf3427539c4177ef82
|
32d1171d05a42948410c939f03a03b7c9090d5ef
|
/migrations/0001_initial.py
|
2ea7bc1ac9e75a8377f5fe09ff52a9cc3909cf73
|
[] |
no_license
|
RUTNIX/mainapp
|
9ee275c928bd520abf7e1f609bae0405b3b092fe
|
258c1cb9dc1ac61b7b48c5aa200702e370db6c2a
|
refs/heads/master
| 2023-04-22T22:00:36.534947
| 2021-05-08T09:28:45
| 2021-05-08T09:28:45
| 365,471,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,422
|
py
|
# Generated by Django 3.2.2 on 2021-05-06 08:30
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total_products', models.PositiveIntegerField(default=0)),
('final_price', models.DecimalField(decimal_places=2, default=0, max_digits=9, verbose_name='Общая цена')),
('in_order', models.BooleanField(default=False)),
('for_anonymous_user', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='CartProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('qty', models.PositiveIntegerField(default=1)),
('final_price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Общая цена')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Имя категории')),
('slug', models.SlugField(unique=True)),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.CharField(blank=True, max_length=20, null=True, verbose_name='Номер телефона')),
('address', models.CharField(blank=True, max_length=255, null=True, verbose_name='Адрес')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255, verbose_name='Имя')),
('last_name', models.CharField(max_length=255, verbose_name='Фамилия')),
('phone', models.CharField(max_length=20, verbose_name='Телефон')),
('address', models.CharField(blank=True, max_length=1024, null=True, verbose_name='Адрес')),
('status', models.CharField(choices=[('new', 'Новый заказ'), ('in_progress', 'Заказ в обработке'), ('is_ready', 'Заказ готов'), ('completed', 'Заказ выполнен')], default='new', max_length=100, verbose_name='Статус заказ')),
('buying_type', models.CharField(choices=[('self', 'Самовывоз'), ('delivery', 'Доставка')], default='self', max_length=100, verbose_name='Тип заказа')),
('comment', models.TextField(blank=True, null=True, verbose_name='Комментарий к заказу')),
('created_at', models.DateTimeField(auto_now=True, verbose_name='Дата создания заказа')),
('order_date', models.DateField(default=django.utils.timezone.now, verbose_name='Дата получения заказа')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Наименование')),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(upload_to='', verbose_name='Изображение')),
('description', models.TextField(null=True, verbose_name='Описание')),
('price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Цена')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.category', verbose_name='Категория')),
],
),
]
|
[
"alibejsenbekov@MacBook-Air-Ali.local"
] |
alibejsenbekov@MacBook-Air-Ali.local
|
89bfa7186290914ff768759d71d15559b85d231c
|
de99915c1da74e79db3f06e7b01adac05c0978e1
|
/password_encrypt_test.py
|
25eb278110f70a017b068a9de02e2aa4c3a6e057
|
[] |
no_license
|
Handosonic/Python
|
fdcaef7bf2dcdc3bd9532246d386afc6121f79e2
|
b0808d50ffe574d61dd6b14d8f3a95408943e05d
|
refs/heads/master
| 2020-08-07T19:07:44.307419
| 2019-11-29T16:11:00
| 2019-11-29T16:11:00
| 213,560,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
from password_encrypt import encrypt_pass
pas = "12345"
Aa = encrypt_pass(pas)
name = Aa.enco()
print('1st-stage Encode', name)
name1 = Aa.encrypt_passv2(name)
print('2nd-stage Encode', name1)
#
Aa = encrypt_pass(name1)
de_name = Aa.deco()
print('real word:', de_name)
de2_name = Aa.deco_v2()
print(de2_name)
|
[
"noreply@github.com"
] |
Handosonic.noreply@github.com
|
9f8807d34899ca34601903ded7198753894c9c7f
|
443f527f203742d7de7514e0a296b8a4dbc0e09d
|
/manage.py
|
f8a86f680feec46f4c4dbe3e8d5e5c4570184008
|
[
"Apache-2.0"
] |
permissive
|
joelghill/fu_rss_feeds
|
b9bef3855ee8cb448c67c1e7a02dce617be12307
|
400c5b684f828d034592895f7cfcf6f0c7ec81ed
|
refs/heads/master
| 2021-01-01T07:25:45.051801
| 2020-02-08T17:14:14
| 2020-02-08T17:14:14
| 239,170,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fu_rss_feeds.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"joel.hill.87@gmail.com"
] |
joel.hill.87@gmail.com
|
2e3ebefefe3b64903a6ca5bffb0294fad122990f
|
8143175ba4c0cbd53eeaad407700d685adb16d10
|
/build/sshros/build/lib/sshros/test_ssh.py
|
51d2b076073a9400d5665a24b4d4ba70f250653f
|
[] |
no_license
|
Hessy99/test
|
0007401ccde2fea6532b3f7294684b88a798bb2c
|
7fb44a1bd3e80647e65b74e33babd11c42704b53
|
refs/heads/main
| 2023-06-05T03:42:20.604532
| 2021-06-25T12:26:11
| 2021-06-25T12:26:11
| 379,245,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
import rclpy
from rclpy.node import Node
import paramiko
from blackboard_interfaces.msg import TaskMsg
hostname = "145.93.112.105"
username = "student"
password = "student"
port = 22
class TestSubscriber(Node):
def __init__(self):
super().__init__('test_subscriber')
self.subscription = self.create_subscription(TaskMsg,'newTask',self.listener_callback,10)
        self.subscription  # keep a reference so the subscription is not garbage collected
def listener_callback(self, msg):
print("msg received")
        self.get_logger().info(str(msg))  # TaskMsg instances have no attribute named after their own type; log the whole message
def main(args=None):
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname, port=port, username=username, password=password)
rclpy.init(args=args)
test_subscriber = TestSubscriber()
try:
        rclpy.spin(test_subscriber)  # spin() blocks until shutdown, so no surrounding loop or debug print is needed
except KeyboardInterrupt:
test_subscriber.destroy_node()
        rclpy.shutdown()
if __name__ == '__main__':
main()
|
[
"hessy99@hotmail.com"
] |
hessy99@hotmail.com
|
ff5e1fdb3decc270f83d5692506c228ba3c44406
|
3b9b4049a8e7d38b49e07bb752780b2f1d792851
|
/src/third_party/catapult/telemetry/telemetry/internal/backends/chrome/chrome_browser_backend_unittest.py
|
80ed7ad8d9e5b0b4ab91b06883ab712b52ce4cae
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
webosce/chromium53
|
f8e745e91363586aee9620c609aacf15b3261540
|
9171447efcf0bb393d41d1dc877c7c13c46d8e38
|
refs/heads/webosce
| 2020-03-26T23:08:14.416858
| 2018-08-23T08:35:17
| 2018-09-20T14:25:18
| 145,513,343
| 0
| 2
|
Apache-2.0
| 2019-08-21T22:44:55
| 2018-08-21T05:52:31
| null |
UTF-8
|
Python
| false
| false
| 4,001
|
py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
from telemetry.internal import forwarders
from telemetry.internal.backends.chrome import chrome_browser_backend
from telemetry.internal.browser import browser_options as browser_options_module
from telemetry.util import wpr_modes
class FakePlatformBackend(object):
def __init__(self, is_replay_active, wpr_http_device_port,
wpr_https_device_port, is_host_platform):
self.is_host_platform = is_host_platform
self.forwarder_factory = mock.Mock()
self.network_controller_backend = mock.Mock()
self.network_controller_backend.is_replay_active = is_replay_active
self.network_controller_backend.wpr_device_ports = forwarders.PortSet(
http=wpr_http_device_port, https=wpr_https_device_port, dns=None)
self.network_controller_backend.host_ip = '127.0.0.1'
self.network_controller_backend.is_test_ca_installed = False
class FakeBrowserOptions(browser_options_module.BrowserOptions):
def __init__(self, wpr_mode=wpr_modes.WPR_OFF):
super(FakeBrowserOptions, self).__init__()
self.wpr_mode = wpr_mode
self.browser_type = 'chrome'
self.browser_user_agent_type = 'desktop'
self.disable_background_networking = False
self.disable_component_extensions_with_background_pages = False
self.disable_default_apps = False
class TestChromeBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
# The test does not need to define the abstract methods.
# pylint: disable=abstract-method
def __init__(self, browser_options,
wpr_http_device_port=None, wpr_https_device_port=None,
is_running_locally=False):
browser_options.extensions_to_load = []
browser_options.output_profile_path = None
super(TestChromeBrowserBackend, self).__init__(
platform_backend=FakePlatformBackend(
browser_options.wpr_mode != wpr_modes.WPR_OFF,
wpr_http_device_port, wpr_https_device_port, is_running_locally),
supports_tab_control=False,
supports_extensions=False,
browser_options=browser_options)
class StartupArgsTest(unittest.TestCase):
"""Test expected inputs for GetBrowserStartupArgs."""
def testNoProxyServer(self):
browser_options = FakeBrowserOptions()
browser_options.no_proxy_server = False
browser_options.AppendExtraBrowserArgs('--proxy-server=http=inter.net')
browser_backend = TestChromeBrowserBackend(browser_options)
self.assertNotIn('--no-proxy-server',
browser_backend.GetBrowserStartupArgs())
browser_options.no_proxy_server = True
self.assertIn('--no-proxy-server', browser_backend.GetBrowserStartupArgs())
class ReplayStartupArgsTest(unittest.TestCase):
"""Test expected inputs for GetReplayBrowserStartupArgs."""
def testReplayOffGivesEmptyArgs(self):
browser_options = FakeBrowserOptions()
browser_backend = TestChromeBrowserBackend(browser_options)
self.assertEqual([], browser_backend.GetReplayBrowserStartupArgs())
def BasicArgsHelper(self, is_running_locally):
browser_options = FakeBrowserOptions(wpr_mode=wpr_modes.WPR_REPLAY)
browser_backend = TestChromeBrowserBackend(
browser_options,
wpr_http_device_port=456,
wpr_https_device_port=567,
is_running_locally=is_running_locally)
expected_args = [
'--host-resolver-rules=MAP * 127.0.0.1,EXCLUDE localhost',
'--ignore-certificate-errors',
'--testing-fixed-http-port=456',
'--testing-fixed-https-port=567'
]
self.assertEqual(
expected_args,
sorted(browser_backend.GetReplayBrowserStartupArgs()))
def testBasicArgs(self):
# The result is the same regardless of whether running locally.
self.BasicArgsHelper(is_running_locally=True)
self.BasicArgsHelper(is_running_locally=False)
|
[
"changhyeok.bae@lge.com"
] |
changhyeok.bae@lge.com
|
88964c824ca496d230f8260acabdb714a35f2bee
|
dded486ab2037e2ca4ea6c05439e1b434c6e9de9
|
/setup.py
|
a3b9d26e315c814e95dd9a525cede00d1ff0683f
|
[
"Apache-2.0"
] |
permissive
|
istarion/changelog-helper
|
963eec600c7083986dc7b0e2d39722008184c9f3
|
2c99e7bbc898e98901d0d7209b8a1b68163f6329
|
refs/heads/master
| 2021-01-20T05:48:41.869107
| 2017-09-18T11:27:03
| 2017-09-18T11:27:03
| 89,812,131
| 0
| 1
| null | 2017-07-24T09:48:16
| 2017-04-29T20:49:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,971
|
py
|
import os
from setuptools import setup, find_packages
from setuptools.command.sdist import sdist
from wheel.bdist_wheel import bdist_wheel
from changelog_helper.version import __version__ as app_version
def info(message):
print('\033[92m{0}\033[0m'.format(message))
def error(message):
print('\033[91m{0}\033[0m'.format(message))
class DistWheel(bdist_wheel):
def run(self):
bdist_wheel.run(self)
info('-' * 100)
info('-----Build wheel DONE')
info('-' * 100)
class Sdist(sdist):
def run(self):
sdist.run(self)
info('-' * 100)
info('-----Build sdist DONE')
info('-' * 100)
here = os.path.abspath(os.path.dirname(__file__))
try:
LONG_DESCRIPTION = open(os.path.join(here, "README.rst")).read()
except IOError:
LONG_DESCRIPTION = ""
with open(os.path.join(here, 'requirements.txt')) as f:
requires = f.read()
setup(
name='changelog-helper',
version=app_version,
description='Simple scripts for creating and compiling changelog files.',
long_description=LONG_DESCRIPTION,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Environment :: Console"
],
author='Sergey Zavgorodniy',
author_email='s.zavgorodniy@i-dgtl.ru',
url='https://github.com/istarion/changelog-helper',
download_url='https://github.com/istarion/changelog-helper/archive/{VERSION}.tar.gz'.format(VERSION=app_version),
keywords='git changelog generator',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
entry_points={
'console_scripts': [
'add-changelog = changelog_helper.add_changelog:main',
'release-changelog = changelog_helper.release_changelog:main'
]
},
cmdclass={
'bdist_wheel': DistWheel,
'sdist': Sdist
}
)
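# Example build commands (standard setuptools usage; the custom cmdclass entries
# above only add the "DONE" banners after each build):
#   python setup.py sdist
#   python setup.py bdist_wheel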
|
[
"s.zavgorodniy@i-dgtl.ru"
] |
s.zavgorodniy@i-dgtl.ru
|
cab76a33db11d3b5de6be283ceb925e455d40c0f
|
17ca5bae91148b5e155e18e6d758f77ab402046d
|
/analysis_SWarp/CID206/analysis/cutout.py
|
c896308c2a91b456da25ec34bdd083cd890e458b
|
[] |
no_license
|
dartoon/QSO_decomposition
|
5b645c298825091c072778addfaab5d3fb0b5916
|
a514b9a0ad6ba45dc9c3f83abf569688b9cf3a15
|
refs/heads/master
| 2021-12-22T19:15:53.937019
| 2021-12-16T02:07:18
| 2021-12-16T02:07:18
| 123,425,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 14:04:02 2018
@author: Dartoon
Cut PSF and QSO for CID206
"""
import numpy as np
import sys
sys.path.insert(0,'../../../py_tools')
from cut_image import cut_image, cut_center_bright, save_loc_png, grab_pos
import astropy.io.fits as pyfits
ID = 'CID206'
filename= 'stars_and_QSO.reg'
c_psf_list = grab_pos(filename)
print c_psf_list
fitsFile = pyfits.open('../swarp/coadd.fits')
img = fitsFile[0].data #- (-0.003) # check the background
center_QSO = c_psf_list[-1]
QSO = cut_center_bright(image=img, center=center_QSO, radius=50)
pyfits.PrimaryHDU(QSO).writeto('{0}_cutout.fits'.format(ID),overwrite=True)
count=0
for i in range(len(c_psf_list[:-1])):
PSF = cut_center_bright(image=img, center=c_psf_list[i], radius=30)
pyfits.PrimaryHDU(PSF).writeto('PSF{0}.fits'.format(count),overwrite=True)
count += 1
print count
extra_psfs = np.array([[970,252],[545,1010]])
for i in range(len(extra_psfs)):
PSF = cut_center_bright(image=img, center=extra_psfs[i], radius=30)
pyfits.PrimaryHDU(PSF).writeto('PSF{0}.fits'.format(count),overwrite=True)
count += 1
save_loc_png(img,center_QSO,c_psf_list[:-1],extra_psfs, ID=ID)
##Check and find that the brightest point of PSF1.fits is not at the center.
#PSF = cut_image(image=img, center=(705, 843), radius=20)
#pyfits.PrimaryHDU(PSF).writeto('PSF1.fits'.format(i),overwrite=True)
|
[
"dingxuheng@mail.bnu.edu.cn"
] |
dingxuheng@mail.bnu.edu.cn
|
a016e5c45666df7933632d619a5309801ae01cb5
|
6ec8a9edfba0a2619ede68250d4bda705d0d0893
|
/1/venv-36-mauro/bin/2to3-3.6
|
2eba472557ae2bd18cb3360fbe4e966cdb7e61c7
|
[] |
no_license
|
lgdc-ufpa/predictive-immunogenetic-markers-in-covid-19
|
f7499b8e8fad0f4da79296044697f38babdfb3fc
|
06d58bd9921a758deb4951dfadf388d86c127dc0
|
refs/heads/master
| 2023-02-17T08:43:20.639751
| 2021-01-17T17:14:55
| 2021-01-17T17:14:55
| 330,265,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
6
|
#!/home/bruno/Documents/dev/mauro/venv-36-mauro/bin/python3.6
import sys
from lib2to3.main import main
sys.exit(main("lib2to3.fixes"))
|
[
"brunoconde.ufpa@gmail.com"
] |
brunoconde.ufpa@gmail.com
|
2657edc60d57e8e33385efd223c4450cfd5bd141
|
2ec57e04b5c96eeb2c596d7743b221c0aeb9d109
|
/pyphism/polybench/pb_flow_test.py
|
7dc8d29aed98f496850a557d6df0df6380dc5acd
|
[
"MIT"
] |
permissive
|
fossabot/phism
|
2b0078407bf105ae1f6ddcbdf97de5da0ed723fa
|
e3f2250fdcdbe8bb86c3a422ad99832c67f5cb99
|
refs/heads/main
| 2023-08-20T11:04:36.976218
| 2021-10-07T09:22:02
| 2021-10-07T09:22:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
from pyphism.polybench import pb_flow
|
[
"vincentzhaorz@gmail.com"
] |
vincentzhaorz@gmail.com
|
385c4385d31beef76f811ee26d6b414d50c4b3be
|
e51ed583d29f3d110df082d5378bb0dd49bec65b
|
/models.py
|
fbf2d105d0c1a3d7ce457b976f1739f149e3adcb
|
[
"Apache-2.0"
] |
permissive
|
NithinKumaraNT/DNN_Quantizer
|
d3188232d7c624a79ae4e5b1202a80cc9f39caec
|
3a6885f77aabb9b539e554a34a1c7ad358a39336
|
refs/heads/master
| 2020-05-04T03:37:53.038149
| 2019-04-01T22:00:39
| 2019-04-01T22:00:39
| 178,950,333
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 61,621
|
py
|
import numpy as np
import tensorflow as tf
from itertools import cycle
import bayesian_dnn.stochastic as stochastic
from bayesian_dnn.quantization import quant_convolution as qc
from bayesian_dnn.quantization import quant_tensordot as qt
from bayesian_dnn.quantization import quant_add as qa
from bayesian_dnn.quantization import UniformLinear as UniformLinear
from bayesian_dnn.quantization import ClippedUniformQuantizer as ClippedUniformQuantizer
from bayesian_dnn.quantization import ApproxUniformQuantizer as ApproxUniformQuantizer
from bayesian_dnn.quantization import ClippedApproxUniformQuantizer as ClippedApproxUniformQuantizer
#---------------------------helper functions-----------------------------
def get_normal_dist(name, shape, mean_initializer=tf.zeros_initializer(), scale=1.0, trainable=True):
"""
Function that returns a normal distribution with mean initializer mean_initializer and a specific scale
Args:
name: string, the name of the distribution
shape: tuple, the shape of the distribution
mean_initializer: tf.initializer, how the mean of the distribution is initialized. Default is zeros_initializer.
scale: float, the standard deviation used for each element of the normal distribution
trainable: boolean, are the parameters loc and scale of the distribution trainable. Default is True.
"""
loc = tf.get_variable(name=name+'_loc', shape=shape, initializer=mean_initializer, trainable=trainable, dtype=tf.float32)
scale = tf.get_variable(name=name+'_scale', shape=shape, initializer=tf.constant_initializer(scale * np.ones(shape)), trainable=trainable, dtype=tf.float32)
p = tf.distributions.Normal(name = name,
loc = loc,
scale = scale)
return p
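# Minimal usage sketch (the name and shape are illustrative, not from the original file):
# p_w = get_normal_dist('w_prior', shape=(5, 5, 1, 6), scale=100.0, trainable=False)
# builds the kind of fixed, wide Normal prior used for the weights below.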
#---------------------------base class for models-----------------------------
class Classification_Model(object):
def __init__(self, inp):
"""
Base class for models for classification. All child classes have to implement _get_logits() and _get_q()
"""
self.inp = inp
self.logits = self.get_logits()
self.q = self.get_q(self.logits)
        if self.__class__.__name__ == 'QC_approx_Uni_LeNet':
self.logits_uniform = self.get_logits_uniform()
self.q_uniform = self.get_q_uniform(self.logits_uniform)
def get_logits_uniform(self):
return self._get_logits_uniform()
def get_q_uniform(self, logits):
"""
Returns the probability mass function for given logits.
Args:
logits: tf.tensor, the logits defined by the model
"""
return self._get_q_uniform(logits)
def _get_logits_uniform(self):
"""
        Subclasses that expose a uniform-quantization path override this method
"""
return None
def _get_q_uniform(self, logits):
"""
        Subclasses that expose a uniform-quantization path override this method
Args:
logits: tf.tensor, the logits defined by the model
"""
return None
#___________________________
def get_logits(self):
"""
Returns the logits for given input inp.
"""
return self._get_logits()
def get_q(self, logits):
"""
Returns the probability mass function for given logits.
Args:
logits: tf.tensor, the logits defined by the model
"""
return self._get_q(logits)
def _get_logits(self):
"""
All Classification models have to implement this method
"""
return None
def _get_q(self, logits):
"""
All Classification models have to implement this method
Args:
logits: tf.tensor, the logits defined by the model
"""
return None
def save_trainable(self, path, session):
"""
Saves all the trainable weights in a .ckpt file
Args:
path: string, the path where to save the trainable weights
session: tf.Session, the session to save the weights from
"""
params = tf.trainable_variables()
saver = tf.train.Saver(params)
return saver.save(session, path)
def load_trainable(self, path, session):
"""
Loads all the trainable weights from a .ckpt file
Args:
path: string, the path where to load the trainable weights from
session: tf.Session, the session to restore the weights to
"""
params = tf.trainable_variables()
saver = tf.train.Saver(params)
return saver.restore(session, path)
def set_deterministic(self):
"""
Sets all the stochastic parameters of the model as deterministic ones. Only
the mean of the stochastic parameters is used to perform inference.
"""
        sd = [sp.set_deterministic() for sp in self.sto_params]
return sd
def set_stochastic(self):
"""
Sets all the stochastic parameters of the model as stochastic. New
parameter realizations are drawn from the parameter distribution for inference.
"""
        ss = [sp.set_stochastic() for sp in self.sto_params]
return ss
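# Usage sketch (`model` stands for any Classification_Model subclass below; illustrative only):
# model.set_deterministic()   # evaluate with the posterior means only
# ... run evaluation ...
# model.set_stochastic()      # restore parameter sampling for further training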
#------------------------definition of a LeNet5--------------------------
class LeNet(Classification_Model):
def __init__(self, inp):
#------------------------create the parameters--------------------------
with tf.name_scope('Lenet'):
with tf.name_scope('weights_layer1_init'):
self.W1 = stochastic.Stochastic('W1',
[5,5,1,6],
prior_dist = lambda name: get_normal_dist(name, (5,5,1,6), scale=100.0, trainable=False),
var_dist = lambda name: get_normal_dist(name, (5,5,1,6), scale=0.1, trainable=True, mean_initializer=tf.contrib.layers.xavier_initializer()))
with tf.name_scope('weights_layer2_init'):
self.W2 = stochastic.Stochastic('W2',
[5,5,6,16],
prior_dist = lambda name: get_normal_dist(name, (5,5,6,16), scale=100.0, trainable=False),
var_dist = lambda name: get_normal_dist(name, (5,5,6,16), scale=0.1, trainable=True, mean_initializer=tf.contrib.layers.xavier_initializer()))
with tf.name_scope('weights_layer3_init'):
self.W3 = stochastic.Stochastic('W3',
[784,120],
prior_dist = lambda name: get_normal_dist(name, (784,120), scale=100.0, trainable=False),
var_dist = lambda name: get_normal_dist(name, (784,120), scale=0.1, trainable=True, mean_initializer=tf.contrib.layers.xavier_initializer()))
with tf.name_scope('weights_layer4_init'):
self.W4 = stochastic.Stochastic('W4',
[120,84],
prior_dist = lambda name: get_normal_dist(name, (120,84), scale=100.0, trainable=False),
var_dist = lambda name: get_normal_dist(name, (120,84), scale=0.1, trainable=True, mean_initializer=tf.contrib.layers.xavier_initializer()))
with tf.name_scope('weights_layer5_init'):
self.W5 = stochastic.Stochastic('W5',
[84,10],
prior_dist = lambda name: get_normal_dist(name, (84,10), scale=100.0, trainable=False),
var_dist = lambda name: get_normal_dist(name, (84,10), scale=0.1, trainable=True, mean_initializer=tf.contrib.layers.xavier_initializer()))
self.sto_params = [self.W1, self.W2, self.W3, self.W4, self.W5]
with tf.name_scope('bias_layer1_init'):
self.b1 = tf.get_variable(name='b1', shape=(1,28,28,6), dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=True)
with tf.name_scope('bias_layer2_init'):
self.b2 = tf.get_variable(name='b2', shape=(1,14,14,16), dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=True)
with tf.name_scope('bias_layer3_init'):
self.b3 = tf.get_variable(name='b3', shape=(1,120), dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=True)
with tf.name_scope('bias_layer4_init'):
self.b4 = tf.get_variable(name='b4', shape=(1,84), dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=True)
with tf.name_scope('bias_layer5_init'):
self.b5 = tf.get_variable(name='b5', shape=(1,10), dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=True)
self.det_params = [self.b1, self.b2, self.b3, self.b4, self.b5]
for d in self.det_params:
tf.add_to_collection('DET_PARAMS', d)
super(LeNet, self).__init__(inp=inp)
def _get_logits(self):
#------------------------create the network graph--------------------------
#layer 1:
a_1 = tf.nn.convolution(self.inp, self.W1(), padding="SAME") + self.b1
x_1 = tf.nn.pool(tf.nn.relu(a_1), [2,2], "MAX","SAME", strides=[2,2])
#layer 2:
a_2 = tf.nn.convolution(x_1, self.W2(), padding="SAME") + self.b2
x_2 = tf.contrib.layers.flatten(tf.nn.pool(tf.nn.relu(a_2), [2,2], "MAX","SAME", strides=[2,2]))
#layer 3:
a_3 = tf.tensordot(x_2, self.W3(), axes=1) + self.b3
x_3 = tf.nn.relu(a_3)
#layer 4:
a_4 = tf.tensordot(x_3, self.W4(), axes=1) + self.b4
x_4 = tf.nn.relu(a_4)
#layer 5 returning the logits:
a_5 = tf.tensordot(x_4, self.W5(), axes=1) + self.b5
return a_5
def _get_q(self, logits):
return tf.distributions.Categorical(name='q', logits=logits)
def _get_q_uniform(self, logits):
return tf.distributions.Categorical(name='q', logits=logits)
#------------------------definition of multiple quantized LeNet5--------------------------
#quantized LeNet without clipping
class QLeNet(LeNet):
def __init__(self, inp, m_init, k):
"""
Instantiates a LeNet with quantized parameters and layer inputs. The quantizer does not
clip to a fixed range.
Args:
inp: tf.tensor, the input of the network
m_init: tf.initializer, the initializer for the resolution of the quantizers
k: integer, the approximation order for approximate quantization
"""
self.m_init = m_init
self.k = k
super(QLeNet, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
def quant(self, name):
return ApproxUniformQuantizer(m_init=self.m_init, k=self.k, name=name)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# layer 1:
with tf.name_scope('layer1'):
a_1 = qc(self.inp, self.W1(), self.quant(name="quant_input"), self.quant(name="quant_weights_1"),
padding="SAME") + self.b1
x_1 = tf.nn.pool(tf.nn.relu(a_1), [2, 2], "MAX", "SAME", strides=[2, 2])
# layer 2:
with tf.name_scope('layer2'):
a_2 = qc(x_1, self.W2(), self.quant(name="quant_activation_1"), self.quant(name="quant_weights_2"),
padding="SAME") + self.b2
x_2 = tf.contrib.layers.flatten(tf.nn.pool(tf.nn.relu(a_2), [2, 2], "MAX", "SAME", strides=[2, 2]))
# layer 3:
with tf.name_scope('layer3'):
a_3 = qt(x_2, self.W3(), self.quant(name="quant_activation_2"), self.quant(name="quant_weights_3"),
axes=1) + self.b3
x_3 = tf.nn.relu(a_3)
# layer 4:
with tf.name_scope('layer4'):
a_4 = qt(x_3, self.W4(), self.quant(name="quant_activation_3"), self.quant(name="quant_weights_4"),
axes=1) + self.b4
x_4 = tf.nn.relu(a_4)
# layer 5 returning the logits:
with tf.name_scope('output_layer'):
a_5 = qt(x_4, self.W5(), self.quant(name="quant_activation_4"), self.quant(name="quant_weights_5"),
axes=1) + self.b5
return a_5
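# Instantiation sketch (placeholder shape and initializer value are assumptions,
# not from the original file):
# inp = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
# net = QLeNet(inp, m_init=tf.constant_initializer(0.1), k=3)
# logits, q = net.logits, net.q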
# Uniform quantized LeNet with clipping
class QHardLeNet(LeNet):
    def __init__(self, inp, c_init):
        """
        Instantiates a LeNet with quantized parameters and layer inputs. The quantizer
        clips to a fixed range.
        Args:
            inp: tf.tensor, the input of the network
            c_init: tf.initializer, the initializer for the clipping range c of the quantizers
"""
self.c_init = c_init
super(QHardLeNet, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
def quant(self, name):
return ClippedUniformQuantizer(c_init=self.c_init,name=name)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# layer 1:
with tf.name_scope('layer1'):
a_1 = qc(self.inp, self.W1(), self.quant(name="quant_input"), self.quant(name="quant_weights_1"), padding="SAME")+ self.b1
x_1 = tf.nn.pool(tf.nn.relu(a_1), [2, 2], "MAX", "SAME", strides=[2, 2])
# layer 2:
with tf.name_scope('layer2'):
a_2 = qc(x_1, self.W2(), self.quant(name="quant_activation_1"), self.quant(name="quant_weights_2"), padding="SAME")+ self.b2
x_2 = tf.contrib.layers.flatten(tf.nn.pool(tf.nn.relu(a_2), [2, 2], "MAX", "SAME", strides=[2, 2]))
# layer 3:
with tf.name_scope('layer3'):
a_3 = qt(x_2, self.W3(), self.quant(name="quant_activation_2"), self.quant(name="quant_weights_3"), axes=1)+ self.b3
x_3 = tf.nn.relu(a_3)
# layer 4:
with tf.name_scope('layer4'):
a_4 = qt(x_3, self.W4(), self.quant(name="quant_activation_3"), self.quant(name="quant_weights_4"), axes=1)+ self.b4
x_4 = tf.nn.relu(a_4)
# layer 5 returning the logits:
with tf.name_scope('output_layer'):
a_5 = qt(x_4, self.W5(), self.quant(name="quant_activation_4"), self.quant(name="quant_weights_5"), axes=1)+ self.b5
return a_5
# Approx quantized LeNet with clipping
class QCLeNet(LeNet):
    def __init__(self, inp, c_init, k, n_steps=5):
        """
        Instantiates a LeNet with quantized parameters and layer inputs. The quantizer
        clips to a fixed range.
        Args:
            inp: tf.tensor, the input of the network
            c_init: tf.initializer, the initializer for the clipping range c of the quantizers
            k: integer, the approximation order for approximate quantization
            n_steps: positive odd integer, the number of quantization steps used for training
"""
self.c_init = c_init
self.k = k
self.n_steps = n_steps
super(QCLeNet, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
def quant(self, name):
return ClippedApproxUniformQuantizer(c_init=self.c_init, k=self.k, n_steps=self.n_steps, name=name)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# layer 1:
with tf.name_scope('layer1'):
a_1 = qc(self.inp, self.W1(), self.quant(name="quant_input"), self.quant(name="quant_weights_1"), padding="SAME")+ self.b1
x_1 = tf.nn.pool(tf.nn.relu(a_1), [2, 2], "MAX", "SAME", strides=[2, 2])
# layer 2:
with tf.name_scope('layer2'):
a_2 = qc(x_1, self.W2(), self.quant(name="quant_activation_1"), self.quant(name="quant_weights_2"), padding="SAME")+ self.b2
x_2 = tf.contrib.layers.flatten(tf.nn.pool(tf.nn.relu(a_2), [2, 2], "MAX", "SAME", strides=[2, 2]))
# layer 3:
with tf.name_scope('layer3'):
a_3 = qt(x_2, self.W3(), self.quant(name="quant_activation_2"), self.quant(name="quant_weights_3"), axes=1)+ self.b3
x_3 = tf.nn.relu(a_3)
# layer 4:
with tf.name_scope('layer4'):
a_4 = qt(x_3, self.W4(), self.quant(name="quant_activation_3"), self.quant(name="quant_weights_4"), axes=1)+ self.b4
x_4 = tf.nn.relu(a_4)
# layer 5 returning the logits:
with tf.name_scope('output_layer'):
a_5 = qt(x_4, self.W5(), self.quant(name="quant_activation_4"), self.quant(name="quant_weights_5"), axes=1)+ self.b5
return a_5
#quantized LeNet with clipping both approx and uniform
class QC_approx_Uni_LeNet(LeNet):
def __init__(self, inp, c_init_list, k, n_steps):
"""
        Instantiates a LeNet with quantized parameters and layer inputs. The quantizer
        clips to a fixed range c.
        Args:
            inp: tf.tensor, the input of the network
            c_init_list: list of tf.initializers, the initializers for the ranges c of the quantizers with clipping (cycled per quantizer)
            k: integer, the approximation order for approximate quantization
            n_steps: positive odd integer, the number of quantization steps used for training
"""
self.c_init = cycle(c_init_list)
self.k = k
self.n_steps = n_steps
super(QC_approx_Uni_LeNet, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
def quant(self, name):
with tf.variable_scope("Quant_vars"):
return ClippedApproxUniformQuantizer(c_init=next(self.c_init), k=self.k, n_steps=self.n_steps, name=name)
def quant_uni(self, name):
with tf.variable_scope("Quant_vars",reuse=True):
return ClippedUniformQuantizer(c_init=next(self.c_init),n_steps=self.n_steps, name=name)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# layer 1:
with tf.name_scope('layer1'):
a_1 = qc(self.inp, self.W1(), self.quant(name="quant_input"), self.quant(name="quant_weights_1"), padding="SAME")+ self.b1
x_1 = tf.nn.pool(tf.nn.relu(a_1), [2, 2], "MAX", "SAME", strides=[2, 2])
# layer 2:
with tf.name_scope('layer2'):
a_2 = qc(x_1, self.W2(), self.quant(name="quant_activation_1"), self.quant(name="quant_weights_2"), padding="SAME")+ self.b2
x_2 = tf.contrib.layers.flatten(tf.nn.pool(tf.nn.relu(a_2), [2, 2], "MAX", "SAME", strides=[2, 2]))
# layer 3:
with tf.name_scope('layer3'):
a_3 = qt(x_2, self.W3(), self.quant(name="quant_activation_2"), self.quant(name="quant_weights_3"), axes=1)+ self.b3
x_3 = tf.nn.relu(a_3)
# layer 4:
with tf.name_scope('layer4'):
a_4 = qt(x_3, self.W4(), self.quant(name="quant_activation_3"), self.quant(name="quant_weights_4"), axes=1)+ self.b4
x_4 = tf.nn.relu(a_4)
# layer 5 returning the logits:
with tf.name_scope('output_layer'):
a_5 = qt(x_4, self.W5(), self.quant(name="quant_activation_4"), self.quant(name="quant_weights_5"), axes=1)+ self.b5
return a_5
def _get_logits_uniform(self):
# ------------------------create the network graph--------------------------
# layer 1:
with tf.name_scope('layer1'):
a_1 = qc(self.inp, self.W1(), self.quant_uni(name="quant_input"), self.quant_uni(name="quant_weights_1"),
padding="SAME") + self.b1
x_1 = tf.nn.pool(tf.nn.relu(a_1), [2, 2], "MAX", "SAME", strides=[2, 2])
# layer 2:
with tf.name_scope('layer2'):
a_2 = qc(x_1, self.W2(), self.quant_uni(name="quant_activation_1"), self.quant_uni(name="quant_weights_2"),
padding="SAME") + self.b2
x_2 = tf.contrib.layers.flatten(tf.nn.pool(tf.nn.relu(a_2), [2, 2], "MAX", "SAME", strides=[2, 2]))
# layer 3:
with tf.name_scope('layer3'):
a_3 = qt(x_2, self.W3(), self.quant_uni(name="quant_activation_2"), self.quant_uni(name="quant_weights_3"),
axes=1) + self.b3
x_3 = tf.nn.relu(a_3)
# layer 4:
with tf.name_scope('layer4'):
a_4 = qt(x_3, self.W4(), self.quant_uni(name="quant_activation_3"), self.quant_uni(name="quant_weights_4"),
axes=1) + self.b4
x_4 = tf.nn.relu(a_4)
# layer 5 returning the logits:
with tf.name_scope('output_layer'):
a_5 = qt(x_4, self.W5(), self.quant_uni(name="quant_activation_4"), self.quant_uni(name="quant_weights_5"),
axes=1) + self.b5
return a_5
# Approx quantized LeNet with clipping
class QCLeNet_list_init(LeNet):
def __init__(self, inp, c_init_list, k, n_steps=5):
"""
        Instantiates a LeNet with quantized parameters and layer inputs. The quantizer
        clips to a fixed range, cycling through a list of clipping initializers.
        Args:
            inp: tf.tensor, the input of the network
            c_init_list: list of tf.initializers, the initializers for the clipping ranges of the quantizers
            k: integer, the approximation order for approximate quantization
"""
        self.c_init_list = cycle(c_init_list)  # name must match the attribute read in quant() below
self.k = k
self.n_steps = n_steps
super(QCLeNet_list_init, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
    def quant(self, name, c_init=None):
        # _get_logits below passes an explicit c_init; otherwise cycle through c_init_list
        if c_init is None:
            c_init = next(self.c_init_list)
        return ClippedApproxUniformQuantizer(c_init=c_init, k=self.k, n_steps=self.n_steps, name=name)
def xavier_init(self):
return tf.contrib.layers.xavier_initializer(uniform=True,seed=None,dtype=tf.float32)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# layer 1:
with tf.name_scope('layer1'):
a_1 = qc(self.inp, self.W1(), self.quant(c_init=self.xavier_init(),name="quant_input"),
self.quant(c_init=self.xavier_init(),name="quant_weights_1"), padding="SAME")+ self.b1
x_1 = tf.nn.pool(tf.nn.relu(a_1), [2, 2], "MAX", "SAME", strides=[2, 2])
# layer 2:
with tf.name_scope('layer2'):
a_2 = qc(x_1, self.W2(), self.quant(c_init=self.xavier_init(),name="quant_activation_1"),
self.quant(c_init=self.xavier_init(),name="quant_weights_2"), padding="SAME")+ self.b2
x_2 = tf.contrib.layers.flatten(tf.nn.pool(tf.nn.relu(a_2), [2, 2], "MAX", "SAME", strides=[2, 2]))
# layer 3:
with tf.name_scope('layer3'):
a_3 = qt(x_2, self.W3(), self.quant(c_init=self.xavier_init(),name="quant_activation_2"),
self.quant(c_init=self.xavier_init(),name="quant_weights_3"), axes=1)+ self.b3
x_3 = tf.nn.relu(a_3)
# layer 4:
with tf.name_scope('layer4'):
a_4 = qt(x_3, self.W4(), self.quant(c_init=self.xavier_init(),name="quant_activation_3"),
self.quant(c_init=self.xavier_init(),name="quant_weights_4"), axes=1)+ self.b4
x_4 = tf.nn.relu(a_4)
# layer 5 returning the logits:
with tf.name_scope('output_layer'):
a_5 = qt(x_4, self.W5(), self.quant(c_init=self.xavier_init(),name="quant_activation_4"),
self.quant(c_init=self.xavier_init(),name="quant_weights_5"), axes=1)+ self.b5
return a_5
# Approx quantized LeNet with clipping
class QCLeNet_list_init_try1(LeNet):
def __init__(self, inp, c_init_list, k, n_steps=100):
"""
        Instantiates a LeNet with quantized parameters and layer inputs. The quantizer
        clips to a fixed range, cycling through a list of clipping initializers.
        Args:
            inp: tf.tensor, the input of the network
            c_init_list: list of tf.initializers, the initializers for the clipping ranges of the quantizers
            k: integer, the approximation order for approximate quantization
"""
self.c_init_list = cycle(c_init_list)
self.k = k
self.n_steps = n_steps
super(QCLeNet_list_init_try1, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
def quant(self,name):
        return ClippedApproxUniformQuantizer(c_init=next(self.c_init_list), k=self.k, n_steps=self.n_steps, name=name)  # pass k, which was stored but previously unused
def _get_logits(self):
# ------------------------create the network graph--------------------------
# layer 1:
with tf.name_scope('layer1'):
a_1 = qc(self.inp, self.W1(), self.quant(name="quant_input"), self.quant(name="quant_weights_1"),
padding="SAME") + self.b1
x_1 = tf.nn.pool(tf.nn.relu(a_1), [2, 2], "MAX", "SAME", strides=[2, 2])
# layer 2:
with tf.name_scope('layer2'):
a_2 = qc(x_1, self.W2(), self.quant(name="quant_activation_1"), self.quant(name="quant_weights_2"),
padding="SAME") + self.b2
x_2 = tf.contrib.layers.flatten(tf.nn.pool(tf.nn.relu(a_2), [2, 2], "MAX", "SAME", strides=[2, 2]))
# layer 3:
with tf.name_scope('layer3'):
a_3 = qt(x_2, self.W3(), self.quant(name="quant_activation_2"), self.quant(name="quant_weights_3"),
axes=1) + self.b3
x_3 = tf.nn.relu(a_3)
# layer 4:
with tf.name_scope('layer4'):
a_4 = qt(x_3, self.W4(), self.quant(name="quant_activation_3"), self.quant(name="quant_weights_4"),
axes=1) + self.b4
x_4 = tf.nn.relu(a_4)
# layer 5 returning the logits:
with tf.name_scope('output_layer'):
a_5 = qt(x_4, self.W5(), self.quant(name="quant_activation_4"), self.quant(name="quant_weights_5"),
axes=1) + self.b5
return a_5
# ------------------------definition of a SVHN--------------------------
class SVHN(Classification_Model):
def __init__(self, inp):
# ------------------------create the parameters--------------------------
self.W1 = stochastic.Stochastic('W1',
[5, 5, 3, 48],
prior_dist=lambda name: get_normal_dist(name, (5, 5, 3, 48), scale=100.0,
trainable=False),
var_dist=lambda name: get_normal_dist(name, (5, 5, 3, 48), scale=0.1,
trainable=True,
mean_initializer=tf.contrib.layers.xavier_initializer())) #conv_1
self.W2 = stochastic.Stochastic('W2',
[5, 5, 48, 64],
prior_dist=lambda name: get_normal_dist(name, (5, 5, 48, 64), scale=100.0,
trainable=False),
var_dist=lambda name: get_normal_dist(name, (5, 5, 48, 64), scale=0.1,
trainable=True,
mean_initializer=tf.contrib.layers.xavier_initializer())) #conv_2
self.W3 = stochastic.Stochastic('W3',
[5, 5, 64, 128],
prior_dist=lambda name: get_normal_dist(name, (5, 5, 64, 128), scale=100.0,
trainable=False),
var_dist=lambda name: get_normal_dist(name, (5, 5, 64, 128), scale=0.1,
trainable=True,
mean_initializer=tf.contrib.layers.xavier_initializer())) #conv_3
self.W4 = stochastic.Stochastic('W4',
[2048, 256],
prior_dist=lambda name: get_normal_dist(name, (2048, 256), scale=100.0,
trainable=False),
var_dist=lambda name: get_normal_dist(name, (2048, 256), scale=0.1,
trainable=True,
mean_initializer=tf.contrib.layers.xavier_initializer())) #FC1
self.W5 = stochastic.Stochastic('W5',
[256, 128],
prior_dist=lambda name: get_normal_dist(name, (256, 128), scale=100.0,
trainable=False),
var_dist=lambda name: get_normal_dist(name, (256, 128), scale=0.1, trainable=True,
mean_initializer=tf.contrib.layers.xavier_initializer())) #FC2
self.W6 = stochastic.Stochastic('W6',
[128, 10],
prior_dist=lambda name: get_normal_dist(name, (128, 10), scale=100.0,
trainable=False),
var_dist=lambda name: get_normal_dist(name, (128, 10), scale=0.1,
trainable=True,
mean_initializer=tf.contrib.layers.xavier_initializer())) #output layer
self.sto_params = [self.W1, self.W2, self.W3, self.W4, self.W5, self.W6]
self.b1 = tf.get_variable(name='b1', shape=(1, 32, 32, 48), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True)
self.b2 = tf.get_variable(name='b2', shape=(1, 16, 16, 64), dtype=tf.float32,
initializer=tf.zeros_initializer(), trainable=True)
self.b3 = tf.get_variable(name='b3', shape=(1, 8, 8, 128), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True)
self.b4 = tf.get_variable(name='b4', shape=(1, 256), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True)
self.b5 = tf.get_variable(name='b5', shape=(1, 128), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True)
self.b6 = tf.get_variable(name='b6', shape=(1, 10), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True)
self.det_params = [self.b1, self.b2, self.b3, self.b4, self.b5, self.b6]
for d in self.det_params:
tf.add_to_collection('DET_PARAMS', d)
super(SVHN, self).__init__(inp=inp)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# Conv layer 1:
a_1 = tf.nn.conv2d(self.inp, self.W1(),strides=[1, 1, 1, 1], padding="SAME") + self.b1
x_1 = tf.nn.max_pool(tf.nn.relu(a_1), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 2:
a_2 = tf.nn.conv2d(x_1, self.W2(),[1, 1, 1, 1], padding="SAME") + self.b2
x_2 = tf.nn.max_pool(tf.nn.relu(a_2), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 3:
a_3 = tf.nn.conv2d(x_2, self.W3(), [1, 1, 1, 1], padding="SAME") + self.b3
x_3 =tf.contrib.layers.flatten(tf.nn.max_pool(tf.nn.relu(a_3), [1, 2, 2, 1], [1, 2, 2, 1], "SAME"))
# fc layer 4:
a_4 = tf.tensordot(x_3, self.W4(), axes=1) + self.b4
x_4 = tf.nn.relu(a_4)
# fc layer 5 :
a_5 = tf.tensordot(x_4, self.W5(), axes=1) + self.b5
x_5 = tf.nn.relu(a_5)
# output layer 6 returning the logits:
a_6 = tf.tensordot(x_5, self.W6(), axes=1) + self.b6
return a_6
def _get_q(self, logits):
return tf.distributions.Categorical(name='q', logits=logits)
class Vgg16_Cifar10(Classification_Model):
def add_layer(self,name,param_dim):
return stochastic.Stochastic(name,
param_dim,
prior_dist=lambda name: get_normal_dist(name, param_dim, scale=100.0,
trainable=False),
var_dist=lambda name: get_normal_dist(name, param_dim, scale=0.1,
trainable=True,
mean_initializer=tf.contrib.layers.xavier_initializer()))
def __init__(self, inp):
# ------------------------create the parameters--------------------------
self.W1 = self.add_layer('W1',[3, 3, 3, 64]) #conv1
self.W2 = self.add_layer('W2',[3, 3, 64, 64]) #conv2
# max pool
self.W3 = self.add_layer('W3', [3, 3, 64, 128]) # conv3
self.W4 = self.add_layer('W4', [3, 3, 128, 128]) # conv4
# max pool
self.W5 = self.add_layer('W5', [3, 3, 128, 256]) # conv5
self.W6 = self.add_layer('W6', [3, 3, 256, 256]) # conv6
self.W7 = self.add_layer('W7', [3, 3, 256, 256]) # conv7
# max pool
self.W8 = self.add_layer('W8', [3, 3, 256, 512]) # conv8
self.W9 = self.add_layer('W9', [3, 3, 512, 512]) # conv9
self.W10 = self.add_layer('W10', [3, 3, 512, 512]) # conv10
# max pool
self.W11 = self.add_layer('W11', [3, 3, 512, 512]) # conv11
self.W12 = self.add_layer('W12', [3, 3, 512, 512]) # conv12
self.W13 = self.add_layer('W13', [3, 3, 512, 512]) # conv13
#max pool
self.W14 = self.add_layer('W14', [512, 1024]) # conv14 ( [None,1,1,512] = 1x1x512 = 512 neurons )
self.W15 = self.add_layer('W15', [1024, 512]) # conv15
self.W16 = self.add_layer('W16', [512, 10]) # conv output layer
self.sto_params = [self.W1, self.W2, self.W3, self.W4, self.W5, self.W6, self.W7,self.W8,
self.W9, self.W10, self.W11, self.W12, self.W13, self.W14, self.W15, self.W16]
self.b1 = tf.get_variable(name='b1', shape=(1, 32, 32, 64), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #conv_1
self.b2 = tf.get_variable(name='b2', shape=(1, 32, 32, 64), dtype=tf.float32,
initializer=tf.zeros_initializer(),
trainable=True) #conv_2
#max pool
self.b3 = tf.get_variable(name='b3', shape=(1, 16, 16, 128), dtype=tf.float32,
initializer=tf.zeros_initializer(), trainable=True) #conv_3
self.b4 = tf.get_variable(name='b4', shape=(1, 16, 16, 128), dtype=tf.float32,
initializer=tf.zeros_initializer(), trainable=True) #conv_4
# max pool
self.b5 = tf.get_variable(name='b5', shape=(1, 8, 8, 256), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #conv_5
self.b6 = tf.get_variable(name='b6', shape=(1, 8, 8, 256), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #conv_6
self.b7 = tf.get_variable(name='b7', shape=(1, 8, 8, 256), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #conv_7
# max pool
self.b8 = tf.get_variable(name='b8', shape=(1, 4, 4, 512), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #conv_8
self.b9 = tf.get_variable(name='b9', shape=(1, 4, 4, 512), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #conv_9
self.b10 = tf.get_variable(name='b10', shape=(1, 4, 4, 512), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #conv_10
# max pool
self.b11 = tf.get_variable(name='b11', shape=(1, 2, 2, 512), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #conv_11
self.b12 = tf.get_variable(name='b12', shape=(1, 2, 2, 512), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #conv_12
self.b13 = tf.get_variable(name='b13', shape=(1, 2, 2, 512), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #conv_13
self.b14 = tf.get_variable(name='b14', shape=(1, 1024), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #FC_1
self.b15 = tf.get_variable(name='b15', shape=(1, 512), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) #FC_2
self.b16 = tf.get_variable(name='b16', shape=(1, 10), dtype=tf.float32, initializer=tf.zeros_initializer(),
trainable=True) # output
self.det_params = [self.b1, self.b2, self.b3, self.b4, self.b5, self.b6, self.b7, self.b8, self.b9,
self.b10, self.b11, self.b12, self.b13, self.b14, self.b15, self.b16]
for d in self.det_params:
tf.add_to_collection('DET_PARAMS', d)
super(Vgg16_Cifar10, self).__init__(inp=inp)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# Conv layer 1:
a_1 = tf.nn.conv2d(self.inp, self.W1(),strides=[1, 1, 1, 1], padding="SAME") + self.b1
# a_1 = tf.layers.batch_normalization(a_1, training=self.train_phase)
# Conv layer 2:
a_2 = tf.nn.conv2d(a_1, self.W2(), strides=[1, 1, 1, 1], padding="SAME") + self.b2
# max pooling 1: 16 x 16
x_1 = tf.nn.max_pool(tf.nn.relu(a_2), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 3:
a_3 = tf.nn.conv2d(x_1, self.W3(), strides=[1, 1, 1, 1], padding="SAME") + self.b3
# Conv layer 4:
a_4 = tf.nn.conv2d(a_3, self.W4(), strides=[1, 1, 1, 1], padding="SAME") + self.b4
# max pooling 2: 8 x 8
x_2 = tf.nn.max_pool(tf.nn.relu(a_4), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 5:
a_5 = tf.nn.conv2d(x_2, self.W5(), strides=[1, 1, 1, 1], padding="SAME") + self.b5
# Conv layer 6:
a_6 = tf.nn.conv2d(a_5, self.W6(), strides=[1, 1, 1, 1], padding="SAME") + self.b6
# Conv layer 7:
a_7 = tf.nn.conv2d(a_6, self.W7(), strides=[1, 1, 1, 1], padding="SAME") + self.b7
# max pooling 3: 4 x 4
x_3 = tf.nn.max_pool(tf.nn.relu(a_7), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 8:
a_8 = tf.nn.conv2d(x_3, self.W8(), strides=[1, 1, 1, 1], padding="SAME") + self.b8
# Conv layer 9:
a_9 = tf.nn.conv2d(a_8, self.W9(), strides=[1, 1, 1, 1], padding="SAME") + self.b9
# Conv layer 10:
a_10 = tf.nn.conv2d(a_9, self.W10(), strides=[1, 1, 1, 1], padding="SAME") + self.b10
# max pooling 4: 2 x 2
x_4 = tf.nn.max_pool(tf.nn.relu(a_10), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 11:
a_11 = tf.nn.conv2d(x_4, self.W11(), strides=[1, 1, 1, 1], padding="SAME") + self.b11
# Conv layer 12:
a_12 = tf.nn.conv2d(a_11, self.W12(), strides=[1, 1, 1, 1], padding="SAME") + self.b12
# Conv layer 13:
a_13 = tf.nn.conv2d(a_12, self.W13(), strides=[1, 1, 1, 1], padding="SAME") + self.b13
# max pooling 5: 1 x 1
x_5 =tf.contrib.layers.flatten(tf.nn.max_pool(tf.nn.relu(a_13), [1, 2, 2, 1], [1, 2, 2, 1], "SAME"))
# fc layer 14:
a_14 = tf.tensordot(x_5, self.W14(), axes=1) + self.b14
x_6 = tf.nn.relu(a_14)
# fc layer 15:
a_15 = tf.tensordot(x_6, self.W15(), axes=1) + self.b15
x_7 = tf.nn.relu(a_15)
# output layer 16 returning the logits:
a_16 = tf.tensordot(x_7, self.W16(), axes=1) + self.b16
return a_16
def _get_q(self, logits):
return tf.distributions.Categorical(name='q', logits=logits)
class QC_Vgg16_Cifar10(Vgg16_Cifar10):
def __init__(self, inp,c_init, k, n_steps=5):
"""
Instantiates a VGG16 for CIFAR-10 with quantized parameters and layer inputs. The
quantizer clips to a fixed range c.
Args:
inp: tf.tensor, the input of the network
c_init: tf.initializer, the initializer for the clipping range c of the quantizers
k: integer, the approximation order for approximate quantization
n_steps: positive odd integer, the number of quantization steps used for training
"""
self.c_init = c_init
self.k = k
self.n_steps = n_steps
super(QC_Vgg16_Cifar10, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
def quant(self, name):
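# each call builds a fresh quantizer that clips to a range initialized by c_init and
# quantizes with n_steps uniform steps; k is the approximation order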
return ClippedApproxUniformQuantizer(c_init=self.c_init, k=self.k, n_steps=self.n_steps, name=name)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# Conv layer 1:
a_1 = qc(self.inp, self.W1(),self.quant(name="quant_input"), self.quant(name="quant_weights_1"), padding="SAME") + self.b1
# Conv layer 2:
a_2 = qc(a_1, self.W2(), self.quant(name="quant_a_2"), self.quant(name="quant_weights_2"), padding="SAME") + self.b2
# max pooling 1: 16 x 16
x_1 = tf.nn.max_pool(tf.nn.relu(a_2), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 3:
a_3 = qc(x_1, self.W3(), self.quant(name="quant_x_1"), self.quant(name="quant_weights_3"), padding="SAME") + self.b3
# Conv layer 4:
a_4 = qc(a_3, self.W4(), self.quant(name="quant_a_4"), self.quant(name="quant_weights_4"), padding="SAME") + self.b4
# max pooling 2: 8 x 8
x_2 = tf.nn.max_pool(tf.nn.relu(a_4), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 5:
a_5 = qc(x_2, self.W5(), self.quant(name="quant_x_2"), self.quant(name="quant_weights_5"), padding="SAME") + self.b5
# Conv layer 6:
a_6 = qc(a_5, self.W6(),self.quant(name="quant_a_5"), self.quant(name="quant_weights_6"), padding="SAME") + self.b6
# Conv layer 7:
a_7 = qc(a_6, self.W7(), self.quant(name="quant_a_6"), self.quant(name="quant_weights_7"), padding="SAME") + self.b7
# max pooling 3: 4 x 4
x_3 = tf.nn.max_pool(tf.nn.relu(a_7), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 8:
a_8 = qc(x_3, self.W8(), self.quant(name="quant_x_3"), self.quant(name="quant_weights_8"), padding="SAME") + self.b8
# Conv layer 9:
a_9 = qc(a_8, self.W9(), self.quant(name="quant_a_9"), self.quant(name="quant_weights_9"), padding="SAME") + self.b9
# Conv layer 10:
a_10 = qc(a_9, self.W10(), self.quant(name="quant_a_10"), self.quant(name="quant_weights_10"), padding="SAME") + self.b10
# max pooling 4: 2 x 2
x_4 = tf.nn.max_pool(tf.nn.relu(a_10), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 11:
a_11 = qc(x_4, self.W11(), self.quant(name="quant_x_4"), self.quant(name="quant_weights_11"), padding="SAME") + self.b11
# Conv layer 12:
a_12 = qc(a_11, self.W12(), self.quant(name="quant_a_11"), self.quant(name="quant_weights_12"), padding="SAME") + self.b12
# Conv layer 13:
a_13 = qc(a_12, self.W13(), self.quant(name="quant_a_12"), self.quant(name="quant_weights_13"), padding="SAME") + self.b13
# max pooling 5: 1 x 1
x_5 = tf.contrib.layers.flatten(tf.nn.max_pool(tf.nn.relu(a_13), [1, 2, 2, 1], [1, 2, 2, 1], "SAME"))
# fc layer 14:
a_14 = qt(x_5, self.W14(),self.quant(name="quant_x_5"), self.quant(name="quant_weights_14"), axes=1) + self.b14
x_6 = tf.nn.relu(a_14)
# output layer 15 returning the logits:
a_15 = qt(x_6, self.W15(),self.quant(name="quant_x_6"), self.quant(name="quant_weights_15"), axes=1) + self.b15
x_7 = tf.nn.relu(a_15)
a_16 = qt(x_7, self.W16(), self.quant(name="quant_x_7"), self.quant(name="quant_weights_16"), axes=1) + self.b16
return a_16
class Q_Vgg16_Cifar10(Vgg16_Cifar10):
def __init__(self, inp,m_init, k):
"""
Instantiates a VGG16 for CIFAR-10 with quantized parameters and layer inputs. The
quantizer does not clip to a fixed range.
Args:
inp: tf.tensor, the input of the network
m_init: tf.initializer, the initializer for the resolution of the quantizers
k: integer, the approximation order for approximate quantization
"""
self.m_init = m_init
self.k = k
super(Q_Vgg16_Cifar10, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
def quant(self, name):
return ApproxUniformQuantizer(m_init=self.m_init, k=self.k, name=name)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# Conv layer 1:
a_1 = qc(self.inp, self.W1(),self.quant(name="quant_input"), self.quant(name="quant_weights_1"), padding="SAME") + self.b1
# Conv layer 2:
a_2 = qc(a_1, self.W2(), self.quant(name="quant_a_1"), self.quant(name="quant_weights_2"), padding="SAME") + self.b2
# max pooling 1: 16 x 16
x_1 = tf.nn.max_pool(tf.nn.relu(a_2), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 3:
a_3 = qc(x_1, self.W3(), self.quant(name="quant_x_1"), self.quant(name="quant_weights_3"), padding="SAME") + self.b3
# Conv layer 4:
a_4 = qc(a_3, self.W4(), self.quant(name="quant_a_3"), self.quant(name="quant_weights_4"), padding="SAME") + self.b4
# max pooling 2: 8 x 8
x_2 = tf.nn.max_pool(tf.nn.relu(a_4), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 5:
a_5 = qc(x_2, self.W5(), self.quant(name="quant_x_2"), self.quant(name="quant_weights_5"), padding="SAME") + self.b5
# Conv layer 6:
a_6 = qc(a_5, self.W6(),self.quant(name="quant_a_5"), self.quant(name="quant_weights_6"), padding="SAME") + self.b6
# Conv layer 7:
a_7 = qc(a_6, self.W7(), self.quant(name="quant_a_6"), self.quant(name="quant_weights_7"), padding="SAME") + self.b7
# max pooling 3: 4 x 4
x_3 = tf.nn.max_pool(tf.nn.relu(a_7), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 8:
a_8 = qc(x_3, self.W8(), self.quant(name="quant_x_3"), self.quant(name="quant_weights_8"), padding="SAME") + self.b8
# Conv layer 9:
a_9 = qc(a_8, self.W9(), self.quant(name="quant_a_9"), self.quant(name="quant_weights_9"), padding="SAME") + self.b9
# Conv layer 10:
a_10 = qc(a_9, self.W10(), self.quant(name="quant_a_10"), self.quant(name="quant_weights_10"), padding="SAME") + self.b10
# max pooling 4: 2 x 2
x_4 = tf.nn.max_pool(tf.nn.relu(a_10), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 11:
a_11 = qc(x_4, self.W11(), self.quant(name="quant_x_4"), self.quant(name="quant_weights_11"), padding="SAME") + self.b11
# Conv layer 12:
a_12 = qc(a_11, self.W12(), self.quant(name="quant_a_11"), self.quant(name="quant_weights_12"), padding="SAME") + self.b12
# Conv layer 13:
a_13 = qc(a_12, self.W13(), self.quant(name="quant_a_12"), self.quant(name="quant_weights_13"), padding="SAME") + self.b13
# max pooling 5: 1 x 1
x_5 = tf.contrib.layers.flatten(tf.nn.max_pool(tf.nn.relu(a_13), [1, 2, 2, 1], [1, 2, 2, 1], "SAME"))
# fc layer 14:
a_14 = qt(x_5, self.W14(),self.quant(name="quant_x_5"), self.quant(name="quant_weights_14"), axes=1) + self.b14
x_6 = tf.nn.relu(a_14)
# fc layer 15:
a_15 = qt(x_6, self.W15(),self.quant(name="quant_x_6"), self.quant(name="quant_weights_15"), axes=1) + self.b15
x_7 = tf.nn.relu(a_15)
# output layer 16 returning the logits:
a_16 = qt(x_7, self.W16(), self.quant(name="quant_x_7"), self.quant(name="quant_weights_16"), axes=1) + self.b16
return a_16
class lin_Vgg16_Cifar10(Vgg16_Cifar10):
def __init__(self, inp):
"""
Instantiates a VGG16 for CIFAR-10 without quantization: the parameter and layer-input
transforms are the identity (UniformLinear), giving a full-precision baseline with the
same graph structure as the quantized variants.
Args:
inp: tf.tensor, the input of the network
"""
super(lin_Vgg16_Cifar10, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
def quant(self, name):
return UniformLinear(name=name)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# Conv layer 1:
a_1 = qc(self.inp, self.W1(),self.quant(name="quant_input"), self.quant(name="quant_weights_1"), padding="SAME") + self.b1
# Conv layer 2:
a_2 = qc(a_1, self.W2(), self.quant(name="quant_a_1"), self.quant(name="quant_weights_2"), padding="SAME") + self.b2
# max pooling 1: 16 x 16
x_1 = tf.nn.max_pool(tf.nn.relu(a_2), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 3:
a_3 = qc(x_1, self.W3(), self.quant(name="quant_x_1"), self.quant(name="quant_weights_3"), padding="SAME") + self.b3
# Conv layer 4:
a_4 = qc(a_3, self.W4(), self.quant(name="quant_a_3"), self.quant(name="quant_weights_4"), padding="SAME") + self.b4
# max pooling 2: 8 x 8
x_2 = tf.nn.max_pool(tf.nn.relu(a_4), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 5:
a_5 = qc(x_2, self.W5(), self.quant(name="quant_x_2"), self.quant(name="quant_weights_5"), padding="SAME") + self.b5
# Conv layer 6:
a_6 = qc(a_5, self.W6(),self.quant(name="quant_a_5"), self.quant(name="quant_weights_6"), padding="SAME") + self.b6
# Conv layer 7:
a_7 = qc(a_6, self.W7(), self.quant(name="quant_a_6"), self.quant(name="quant_weights_7"), padding="SAME") + self.b7
# max pooling 3: 4 x 4
x_3 = tf.nn.max_pool(tf.nn.relu(a_7), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 8:
a_8 = qc(x_3, self.W8(), self.quant(name="quant_x_3"), self.quant(name="quant_weights_8"), padding="SAME") + self.b8
# Conv layer 9:
a_9 = qc(a_8, self.W9(), self.quant(name="quant_a_8"), self.quant(name="quant_weights_9"), padding="SAME") + self.b9
# Conv layer 10:
a_10 = qc(a_9, self.W10(), self.quant(name="quant_a_9"), self.quant(name="quant_weights_10"), padding="SAME") + self.b10
# max pooling 4: 2 x 2
x_4 = tf.nn.max_pool(tf.nn.relu(a_10), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 11:
a_11 = qc(x_4, self.W11(), self.quant(name="quant_x_4"), self.quant(name="quant_weights_11"), padding="SAME") + self.b11
# Conv layer 12:
a_12 = qc(a_11, self.W12(), self.quant(name="quant_a_11"), self.quant(name="quant_weights_12"), padding="SAME") + self.b12
# Conv layer 13:
a_13 = qc(a_12, self.W13(), self.quant(name="quant_a_12"), self.quant(name="quant_weights_13"), padding="SAME") + self.b13
# max pooling 5: 1 x 1
x_5 = tf.contrib.layers.flatten(tf.nn.max_pool(tf.nn.relu(a_13), [1, 2, 2, 1], [1, 2, 2, 1], "SAME"))
# fc layer 14:
a_14 = qt(x_5, self.W14(),self.quant(name="quant_x_5"), self.quant(name="quant_weights_14"), axes=1) + self.b14
x_6 = tf.nn.relu(a_14)
# fc layer 15 returning the logits:
a_15 = qt(x_6, self.W15(),self.quant(name="quant_x_6"), self.quant(name="quant_weights_15"), axes=1) + self.b15
x_7 = tf.nn.relu(a_15)
# output layer 16 returning the logits:
a_16 = qt(x_7, self.W16(), self.quant(name="quant_x_7"), self.quant(name="quant_weights_16"), axes=1) + self.b16
return a_16
class QC_list_Vgg16_Cifar10(Vgg16_Cifar10):
def __init__(self, inp, c_init_list, k, n_steps):
"""
Instantiates a VGG16 for CIFAR-10 with quantized parameters and layer inputs. The
quantizer clips to a fixed range c; the range initializers are cycled from a list.
Args:
inp: tf.tensor, the input of the network
c_init_list: list of tf.initializers, cycled to initialize the range c of each quantizer
k: integer, the approximation order for approximate quantization
n_steps: positive odd integer (numpy), the number of quantization steps used for training
"""
self.c_init = cycle(c_init_list)
self.k = k
self.n_steps = n_steps
super(QC_list_Vgg16_Cifar10, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
def quant(self, name, steps=None):
if steps is None:
return ClippedApproxUniformQuantizer(c_init=next(self.c_init), k=self.k, n_steps=self.n_steps, name=name)
else:
return ClippedApproxUniformQuantizer(c_init=next(self.c_init), k=self.k, n_steps=steps, name=name)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# Conv layer 1:
a_1 = qc(self.inp, self.W1(),self.quant(name="quant_input",steps=128), self.quant(name="quant_weights_1"), padding="SAME") + self.b1
# Conv layer 2:
a_2 = qc(a_1, self.W2(), self.quant(name="quant_a_2"), self.quant(name="quant_weights_2"), padding="SAME") + self.b2
# max pooling 1: 16 x 16
x_1 = tf.nn.max_pool(tf.nn.relu(a_2), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 3:
a_3 = qc(x_1, self.W3(), self.quant(name="quant_x_1"), self.quant(name="quant_weights_3"), padding="SAME") + self.b3
# Conv layer 4:
a_4 = qc(a_3, self.W4(), self.quant(name="quant_a_3"), self.quant(name="quant_weights_4"), padding="SAME") + self.b4
# max pooling 2: 8 x 8
x_2 = tf.nn.max_pool(tf.nn.relu(a_4), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 5:
a_5 = qc(x_2, self.W5(), self.quant(name="quant_x_2"), self.quant(name="quant_weights_5"), padding="SAME") + self.b5
# Conv layer 6:
a_6 = qc(a_5, self.W6(),self.quant(name="quant_a_5"), self.quant(name="quant_weights_6"), padding="SAME") + self.b6
# Conv layer 7:
a_7 = qc(a_6, self.W7(), self.quant(name="quant_a_6"), self.quant(name="quant_weights_7"), padding="SAME") + self.b7
# max pooling 3: 4 x 4
x_3 = tf.nn.max_pool(tf.nn.relu(a_7), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 8:
a_8 = qc(x_3, self.W8(), self.quant(name="quant_x_3"), self.quant(name="quant_weights_8"), padding="SAME") + self.b8
# Conv layer 9:
a_9 = qc(a_8, self.W9(), self.quant(name="quant_a_8"), self.quant(name="quant_weights_9"), padding="SAME") + self.b9
# Conv layer 10:
a_10 = qc(a_9, self.W10(), self.quant(name="quant_a_9"), self.quant(name="quant_weights_10"), padding="SAME") + self.b10
# max pooling 4: 2 x 2
x_4 = tf.nn.max_pool(tf.nn.relu(a_10), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 11:
a_11 = qc(x_4, self.W11(), self.quant(name="quant_x_4"), self.quant(name="quant_weights_11"), padding="SAME") + self.b11
# Conv layer 12:
a_12 = qc(a_11, self.W12(), self.quant(name="quant_a_11"), self.quant(name="quant_weights_12"), padding="SAME") + self.b12
# Conv layer 13:
a_13 = qc(a_12, self.W13(), self.quant(name="quant_a_12"), self.quant(name="quant_weights_13"), padding="SAME") + self.b13
# max pooling 5: 1 x 1
x_5 = tf.contrib.layers.flatten(tf.nn.max_pool(tf.nn.relu(a_13), [1, 2, 2, 1], [1, 2, 2, 1], "SAME"))
# fc layer 14:
a_14 = qt(x_5, self.W14(),self.quant(name="quant_x_5"), self.quant(name="quant_weights_14"), axes=1) + self.b14
x_6 = tf.nn.relu(a_14)
# fc layer 15 returning the logits:
a_15 = qt(x_6, self.W15(), self.quant(name="quant_x_6"), self.quant(name="quant_weights_15"), axes=1) + self.b15
x_7 = tf.nn.relu(a_15)
# output layer 16 returning the logits:
a_16 = qt(x_7, self.W16(), self.quant(name="quant_x_7"), self.quant(name="quant_weights_16"), axes=1) + self.b16
return a_16
class PartQC_list_Vgg16_Cifar10(Vgg16_Cifar10):
def __init__(self, inp, c_init_list, k, n_steps):
"""
Instantiates a VGG16 for CIFAR-10 with quantized weights but largely unquantized
layer inputs: only the first layer quantizes its input; all later layers pass
activations through an identity (UniformLinear). The weight quantizer clips to a fixed range c.
Args:
inp: tf.tensor, the input of the network
c_init_list: list of tf.initializers, cycled to initialize the range c of each quantizer
k: integer, the approximation order for approximate quantization
n_steps: positive odd integer (numpy), the number of quantization steps used for training
"""
self.c_init = cycle(c_init_list)
self.k = k
self.n_steps = n_steps
super(PartQC_list_Vgg16_Cifar10, self).__init__(inp)
def linear(self, name):
return UniformLinear(name=name)
def quant(self, name, steps=None):
if steps is None:
return ClippedApproxUniformQuantizer(c_init=next(self.c_init), k=self.k, n_steps=self.n_steps, name=name)
else:
return ClippedApproxUniformQuantizer(c_init=next(self.c_init), k=self.k, n_steps=steps, name=name)
def _get_logits(self):
# ------------------------create the network graph--------------------------
# Conv layer 1:
a_1 = qc(self.inp, self.W1(), self.quant(name="quant_input", steps=256), self.quant(name="quant_weights_1", steps=256), padding="SAME") + self.b1
# Conv layer 2:
a_2 = qc(a_1, self.W2(), self.linear(name="quant_a_2"), self.quant(name="quant_weights_2"), padding="SAME") + self.b2
# max pooling 1: 16 x 16
x_1 = tf.nn.max_pool(tf.nn.relu(a_2), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 3:
a_3 = qc(x_1, self.W3(), self.linear(name="quant_x_1"), self.quant(name="quant_weights_3"), padding="SAME") + self.b3
# Conv layer 4:
a_4 = qc(a_3, self.W4(), self.linear(name="quant_a_3"), self.quant(name="quant_weights_4"), padding="SAME") + self.b4
# max pooling 2: 8 x 8
x_2 = tf.nn.max_pool(tf.nn.relu(a_4), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 5:
a_5 = qc(x_2, self.W5(), self.linear(name="quant_x_2"), self.quant(name="quant_weights_5"), padding="SAME") + self.b5
# Conv layer 6:
a_6 = qc(a_5, self.W6(),self.linear(name="quant_a_5"), self.quant(name="quant_weights_6"), padding="SAME") + self.b6
# Conv layer 7:
a_7 = qc(a_6, self.W7(), self.linear(name="quant_a_6"), self.quant(name="quant_weights_7"), padding="SAME") + self.b7
# max pooling 3: 4 x 4
x_3 = tf.nn.max_pool(tf.nn.relu(a_7), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 8:
a_8 = qc(x_3, self.W8(), self.linear(name="quant_x_3"), self.quant(name="quant_weights_8"), padding="SAME") + self.b8
# Conv layer 9:
a_9 = qc(a_8, self.W9(), self.linear(name="quant_a_8"), self.quant(name="quant_weights_9"), padding="SAME") + self.b9
# Conv layer 10:
a_10 = qc(a_9, self.W10(), self.linear(name="quant_a_9"), self.quant(name="quant_weights_10"), padding="SAME") + self.b10
# max pooling 4: 2 x 2
x_4 = tf.nn.max_pool(tf.nn.relu(a_10), [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
# Conv layer 11:
a_11 = qc(x_4, self.W11(), self.linear(name="quant_x_4"), self.quant(name="quant_weights_11"), padding="SAME") + self.b11
# Conv layer 12:
a_12 = qc(a_11, self.W12(), self.linear(name="quant_a_11"), self.quant(name="quant_weights_12"), padding="SAME") + self.b12
# Conv layer 13:
a_13 = qc(a_12, self.W13(), self.linear(name="quant_a_12"), self.quant(name="quant_weights_13"), padding="SAME") + self.b13
# max pooling 5: 1 x 1
x_5 = tf.contrib.layers.flatten(tf.nn.max_pool(tf.nn.relu(a_13), [1, 2, 2, 1], [1, 2, 2, 1], "SAME"))
# fc layer 14:
a_14 = qt(x_5, self.W14(),self.linear(name="quant_x_5"), self.quant(name="quant_weights_14"), axes=1) + self.b14
x_6 = tf.nn.relu(a_14)
# fc layer 15 returning the logits:
a_15 = qt(x_6, self.W15(), self.linear(name="quant_x_6"), self.quant(name="quant_weights_15"), axes=1) + self.b15
x_7 = tf.nn.relu(a_15)
# output layer 16 returning the logits:
a_16 = qt(x_7, self.W16(), self.linear(name="quant_x_7"), self.quant(name="quant_weights_16"), axes=1) + self.b16
return a_16
|
[
"nt.nithinkumara@gmail.com"
] |
nt.nithinkumara@gmail.com
|
683e22478f7e3482dff2e0a08fef92edb548b1cf
|
863e6c320deedac962c7a27bfdaedf120e0ab7cf
|
/Design and Analysis of Algorithms II/week 1/jobschedule.py
|
6290947bdfce763e540b5c3d724f5dbb86175b61
|
[] |
no_license
|
zhouyuzju/online_course
|
2206b0ced5d8d1ab86d904f509587a97a324db66
|
02e327d51cdf2c5433bcf0bd19bd61269eac5158
|
refs/heads/master
| 2020-06-04T03:24:45.518174
| 2013-05-24T13:36:44
| 2013-05-24T13:36:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
def subcmp(x,y):
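# comparator: larger (weight - length) first; ties broken by larger weight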
if x[0] - x[1] != y[0] - y[1]:
return (y[0] - y[1]) - (x[0] - x[1])
else:
return y[0] - x[0]
file = open('./jobs.txt','r')
num = file.readline()
data = file.readlines()
data = [(int(line.split(' ')[0]),int(line.split(' ')[1])) for line in data]
#data = sorted(data,reverse = True,key = lambda x : x[0] * 1.0 / x[1])
data = sorted(data,cmp = subcmp)
esum = 0
length = 0
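# 'length' tracks the running completion time; esum accumulates weight * completion time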
for(w,l) in data:
length += l
esum += w * length
print esum
|
[
"Administrator@zhouyu-pc.(none)"
] |
Administrator@zhouyu-pc.(none)
|
b46766095c65ea50ef86a2f12db87f3b0da13ea4
|
d05a813a38ce872f9fb8ba09f11e11cb6126664e
|
/CreateView/CreateView/wsgi.py
|
39c5c99bfd308648b1afdb2084428171f0d6efa2
|
[] |
no_license
|
divyadivyaj19/proj6
|
d59aef6086457ccd0006851db93b113c573bc777
|
59b8e255da977ab5a5202b240ab5aa44215a594b
|
refs/heads/master
| 2020-09-21T02:34:58.593609
| 2019-11-28T13:10:33
| 2019-11-28T13:10:33
| 224,655,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for CreateView project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CreateView.settings')
application = get_wsgi_application()
|
[
"57679523+divyadivyaj19@users.noreply.github.com"
] |
57679523+divyadivyaj19@users.noreply.github.com
|
d56e5dad4b73e0fec992181febaa033c0a55963c
|
1c1f7202b014aa71600e50c59539265b647c809e
|
/app/modules/core/service.py
|
4ccbadb085f2fc8cda18985a7031685289580c12
|
[
"MIT"
] |
permissive
|
nguyennp/nhsx-website
|
9ec4dec07fc9f9dab781191ca2c8421f364c9161
|
03d43501a88794e613659b7d3148f7372f6b4754
|
refs/heads/master
| 2022-07-03T20:57:29.170159
| 2020-05-18T11:25:17
| 2020-05-18T11:25:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
from helpers.service import Service
from django.contrib.auth.models import Group
from wagtail.core.models import Collection, Page
from django.utils.functional import cached_property
from modules.core.models.pages import SectionPage, ArticlePage
class GroupService(Service):
__model__ = Group
def ensure(self, name: str) -> Group:
"""Ensures that a grouyp called ``name`` exists, and returns
that group.
Args:
name (str): The name of the collection you want
"""
try:
group = self.get_or_create(name=name)
except Exception:
raise
return group
@cached_property
def authors(self):
return self.ensure('Authors')
_groups = GroupService()
class CollectionService(Service):
__model__ = Collection
def ensure(self, name: str) -> Collection:
"""Ensures that a collection called ``name`` exists, and returns
that collection.
Args:
name (str): The name of the collection you want
"""
try:
coll = Collection.objects.get(name=name)
except Collection.DoesNotExist:
try:
root_collection = Collection.get_first_root_node()
coll = root_collection.add_child(name=name)
except Exception:
raise
return coll
_collections = CollectionService()
class PageService(Service):
__model__ = Page
_pages = PageService()
class SectionPageService(Service):
__model__ = SectionPage
_sections_pages = SectionPageService()
class ArticlePageService(Service):
__model__ = ArticlePage
_article_pages = ArticlePageService()
|
[
"andy@andybeaumont.com"
] |
andy@andybeaumont.com
|
ee336fb20619a84e01df1019401d59013c3ea364
|
8d62f82999f8566678afdafdb9a79352943151d9
|
/scripts/national/nps/merge_sources.py
|
d329e42caeda6b25932bb8f08776921a838b7756
|
[] |
no_license
|
MonumentLab/national-monument-audit
|
693abddd13b8d0c79a2bd83ac61cbb35b7544118
|
6364ad04369e4357f79a651519f50d344c063831
|
refs/heads/main
| 2023-08-15T00:17:04.215549
| 2021-09-29T14:56:28
| 2021-09-29T14:56:28
| 302,441,032
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,739
|
py
|
# -*- coding: utf-8 -*-
import argparse
import inspect
import os
from pprint import pprint
import sys
import time
# add parent directory to sys path to import relative modules
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parentdir = os.path.dirname(parentdir)
parentdir = os.path.dirname(parentdir)
sys.path.insert(0,parentdir)
from lib.collection_utils import *
from lib.io_utils import *
from lib.string_utils import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-out', dest="OUTPUT_FILE", default="data/vendor/national/nps/nps_nrhp_combined.csv", help="Where to store merged data")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just output details and don't write data?")
a = parser.parse_args()
files = [
{
"filename": "data/vendor/national/nps/NPS_-_National_Register_of_Historic_Places_Locations-shp.csv",
"id": "NRIS_Refnu",
"columns": {
"X": "X",
"Y": "Y",
"NRIS_Refnu": "NRIS_Refnu",
"RESNAME": "RESNAME",
"ResType": "ResType",
"Address": "Address",
"City": "City",
"County": "County",
"State": "State",
"Listed_Dat": "Listed_Dat",
"NARA_URL": "NARA_URL"
}
},{
"filename": "data/vendor/national/nps/national_register_listed_20210214.csv",
"id": "Ref#",
"columns": {
"Ref#": "NRIS_Refnu",
"Property Name": "RESNAME",
"Category of Property": "ResType",
"Street & Number": "Address",
"City": "City",
"County": "County",
"State": "State",
"Listed Date": "Listed_Dat",
"External Link": "NARA_URL"
}
},{
"filename": "data/vendor/wv/nationalRegisterOfHistoricPlacesPoints_natoinalPakrService_200404.csv",
"id": "REFNUM",
"columns": {
"REFNUM": "NRIS_Refnu",
"RESNAME": "RESNAME",
"ADDRESS": "Address",
"CITY": "City",
"COUNTY": "County",
"STATE": "State",
"LISTED_DAT": "Listed_Dat"
}
},{
"filename": "data/vendor/mt/Montana National Register of Historic Places.csv",
"id": "NR_Referen",
"State": "MT",
"columns": {
"NR_Referen": "NRIS_Refnu",
"Name": "RESNAME",
"Street_Add": "Address",
"City": "City",
"County": "County",
"X": "X",
"Y": "Y",
"Type": "ResType",
"Nomination": "NARA_URL"
}
},{
"filename": "data/vendor/as/Naval_Postgraduate_School_Cultural_Resources_Buildings.csv",
"id": "NRIS_Refnu",
"State": "AS",
"columns": {
"NRIS_Refnu": "NRIS_Refnu",
"RESNAME": {"to": "Name",},
"Y": {"to": "Y"},
"X": {"to": "X"},
"SRC_DATE": {"to": "Year Listed"}
}
},{
"filename": "data/vendor/as/Naval_Postgraduate_School_Cultural_Resources_Sites.csv",
"id": "NRIS_Refnu",
"State": "AS",
"columns": {
"NRIS_Refnu": "NRIS_Refnu",
"RESNAME": {"to": "Name",},
"Y": {"to": "Y"},
"X": {"to": "X"},
"SRC_DATE": {"to": "Year Listed"}
}
},{
"filename": "data/vendor/as/Naval_Postgraduate_School_Cultural_Resources_Structures.csv",
"id": "NRIS_Refnu",
"State": "AS",
"columns": {
"NRIS_Refnu": "NRIS_Refnu",
"RESNAME": {"to": "Name",},
"Y": {"to": "Y"},
"X": {"to": "X"},
"SRC_DATE": {"to": "Year Listed"}
}
},{
"filename": "data/vendor/gu/National_Registry_Historic_Places_Buildings.csv",
"id": "NRIS_Refnu",
"State": "GU",
"columns": {
"NRIS_Refnu": "NRIS_Refnu",
"RESNAME": {"to": "Name",},
"Y": {"to": "Y"},
"X": {"to": "X"},
"SRC_DATE": {"to": "Year Listed"}
}
},{
"filename": "data/vendor/gu/National_Registry_Historic_Places_Objects.csv",
"id": "NRIS_Refnu",
"State": "GU",
"columns": {
"NRIS_Refnu": "NRIS_Refnu",
"RESNAME": {"to": "Name",},
"Y": {"to": "Y"},
"X": {"to": "X"},
"SRC_DATE": {"to": "Year Listed"}
}
},{
"filename": "data/vendor/gu/National_Registry_Historic_Places_Sites.csv",
"id": "NRIS_Refnu",
"State": "GU",
"columns": {
"NRIS_Refnu": "NRIS_Refnu",
"RESNAME": {"to": "Name",},
"Y": {"to": "Y"},
"X": {"to": "X"},
"SRC_DATE": {"to": "Year Listed"}
}
},{
"filename": "data/vendor/mp/CNMI_historic_places_buildings.csv",
"id": "NRIS_Refnu",
"State": "MP",
"columns": {
"NRIS_Refnu": "NRIS_Refnu",
"RESNAME": {"to": "Name",},
"Y": {"to": "Y"},
"X": {"to": "X"},
"SRC_DATE": {"to": "Year Listed"}
}
},{
"filename": "data/vendor/mp/CNMI_historic_places_buildings.csv",
"id": "NRIS_Refnu",
"State": "MP",
"columns": {
"NRIS_Refnu": "NRIS_Refnu",
"RESNAME": {"to": "Name",},
"Y": {"to": "Y"},
"X": {"to": "X"},
"SRC_DATE": {"to": "Year Listed"}
}
},{
"filename": "data/vendor/mp/CNMI_historic_places_buildings.csv",
"id": "NRIS_Refnu",
"State": "MP",
"columns": {
"NRIS_Refnu": "NRIS_Refnu",
"RESNAME": {"to": "Name",},
"Y": {"to": "Y"},
"X": {"to": "X"},
"SRC_DATE": {"to": "Year Listed"}
}
}
]
ids = set([])
mergedRows = []
fieldsOut = ["Sourcefile"]
for f in files:
fields, rows = readCsv(f["filename"])
newEntries = 0
for row in rows:
id = str(row[f["id"]]).strip()
if len(id) < 1:
continue
if id in ids:
continue
ids.add(id)
newEntries += 1
newRow = {
"Sourcefile": getBasename(f["filename"])
}
# apply the file-level default State (some territory files have no State column)
if "State" in f:
newRow["State"] = f["State"]
for colFrom, colTo in f["columns"].items():
# some sources map to a dict like {"to": "Name"}; unwrap it to the target column name
if isinstance(colTo, dict):
colTo = colTo["to"]
newRow[colTo] = row[colFrom]
if colTo not in fieldsOut:
fieldsOut.append(colTo)
mergedRows.append(newRow)
print(f' {newEntries} new entries found.')
if a.PROBE:
sys.exit()
makeDirectories(a.OUTPUT_FILE)
writeCsv(a.OUTPUT_FILE, mergedRows, headings=fieldsOut)
|
[
"brian@youaremyjoy.org"
] |
brian@youaremyjoy.org
|
907bd681dcc3acfbbac66faa6a98806dd3b45c8c
|
35dcc3c3b24282d5a0246cc6eb919433ed6a43ed
|
/Metodo de Lagrange/Codigo de Metodo de Lagrange.py
|
8475db9877b9d9c68521dbb5a1dd1a94811d7b6b
|
[] |
no_license
|
Azazyro/Metodos-Numericos-
|
73342aee74f34c41bcc37e6b02260953d52e2b33
|
383172608680f1d907631f3ec860b78cd01f93b2
|
refs/heads/master
| 2020-03-25T15:52:29.370153
| 2018-11-25T23:33:07
| 2018-11-25T23:33:07
| 143,904,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
from sympy import *
import matplotlib.pyplot as plt
import numpy as np
x = [1950, 1960, 1970, 1980, 1990, 2000]
y = [123.5, 131.2, 150.7, 141.3, 203.2, 240.5]
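# Build the Lagrange interpolation polynomial symbolically:
#   p(x) = sum_k y_k * prod_{i != k} (x - x_i) / (x_k - x_i)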
pL = ''
for k in range(len(y)):
pL += str(y[k])+'*'
Lxk = 1
for i in range(len(x)):
if (i==k):
continue
pL += '(x - %f)*'%(x[i])
Lxk *= (x[k]-x[i])
pL = pL[:-1]
pL += '/%f+'%(Lxk)
pL = pL[:-1]
expr = sympify(pL)
print(expand(expr))
plt.plot(x,y,'go')
x2 = np.linspace(1950,2000,100)
x = symbols('x')
y2 = [expr.subs(x,xi) for xi in x2]
plt.plot(x2,y2)
plt.grid()
print(' ')
r = expr.subs(x,1965)
print(r)
|
[
"noreply@github.com"
] |
Azazyro.noreply@github.com
|
58b31803381c6be3d170820f7e1603a527a02656
|
e0ef8aeddfdd82fa1333dca854bd664bd910ed2e
|
/columns/lib/authorization/__init__.py
|
f443a541ab2fca94998bf25c098dfe278f2c195d
|
[
"BSD-3-Clause"
] |
permissive
|
yoshrote/Columns
|
afdb8f5b9e053133ca2ae3f81bb097da13023c98
|
c5784a286a8aac5bceb42de39ad3245239b53a7c
|
refs/heads/master
| 2021-01-10T21:08:57.538013
| 2011-07-19T00:34:52
| 2011-07-19T00:34:52
| 2,069,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from columns.lib.authorization.exceptions import *
from columns.lib.authorization.middleware import *
from columns.lib.authorization.predicates import *
#import logging
#log = logging.getLogger(__name__)
|
[
"josh@yoshrote.com"
] |
josh@yoshrote.com
|
69da7b313be8e430bf33a9018289bdb69cf7fe5b
|
0af29dc561a34a8191f456ec24f6a77bea104b89
|
/recurrent-neural-networks/neural-language-models/character-language-model-generator/generate.py
|
c49de28b1a5d4f2ac110f1b51257182cf72eb43b
|
[] |
no_license
|
cheeyeo/Machine_learning_portfolio
|
c4eea8390b2540706d9b8e9df0b491f3f434494b
|
927cc9eb3de394dcaa00a4178d873df9798921e4
|
refs/heads/master
| 2020-05-04T11:16:22.188024
| 2019-05-25T14:18:58
| 2019-05-25T14:18:58
| 179,104,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
from pickle import load
from keras.models import load_model
import numpy as np
from utils import sample, print_sample
# load the model
model = load_model('model-final.h5')
# model.summary()
# load the mapping
char_to_ix = load(open('char_to_ix.pkl', 'rb'))
print(char_to_ix)
ix_to_char = load(open('ix_to_char.pkl', 'rb'))
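# sample a sequence of character indices from the trained model and map them back to characters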
sampled_indices = sample(model, char_to_ix, seq_length=27, n_chars=50)
print(sampled_indices)
print_sample(sampled_indices, ix_to_char)
|
[
"ckyeo.1@gmail.com"
] |
ckyeo.1@gmail.com
|
1235d9367d5c72763a0801847544933032038078
|
e1a2c6ed4a4b93b4697974e3b0a32a4d67daa6f6
|
/venv/Lib/site-packages/pybrain/tools/xml/__init__.py
|
4136b124af01058151d4f10e6b7bd26739fe710f
|
[
"MIT"
] |
permissive
|
ishatserka/MachineLearningAndDataAnalysisCoursera
|
cdf0f23a58617e17d6b938e3a9df17daae8585e4
|
e82e772df2f4aec162cb34ac6127df10d14a625a
|
refs/heads/master
| 2021-09-11T01:39:26.228392
| 2018-04-05T14:33:39
| 2018-04-05T14:33:39
| 117,153,454
| 0
| 0
|
MIT
| 2018-03-27T05:20:37
| 2018-01-11T21:05:33
|
Python
|
UTF-8
|
Python
| false
| false
| 88
|
py
|
from networkreader import NetworkReader
from networkwriter import NetworkWriter
|
[
"shatserka@gmail.com"
] |
shatserka@gmail.com
|
b5349516c8d7066980c07a5272e58b52de0c6b95
|
a857d1911a118b8aa62ffeaa8f154c8325cdc939
|
/toontown/coghq/CountryClubLayout.py
|
83fb00d222c51e6a044eb1391c9493d314910f27
|
[
"MIT"
] |
permissive
|
DioExtreme/TT-CL-Edition
|
761d3463c829ec51f6bd2818a28b667c670c44b6
|
6b85ca8352a57e11f89337e1c381754d45af02ea
|
refs/heads/main
| 2023-06-01T16:37:49.924935
| 2021-06-24T02:25:22
| 2021-06-24T02:25:22
| 379,310,849
| 0
| 0
|
MIT
| 2021-06-22T15:07:31
| 2021-06-22T15:07:30
| null |
UTF-8
|
Python
| false
| false
| 8,370
|
py
|
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import invertDictLossless
from toontown.coghq import CountryClubRoomSpecs
from toontown.toonbase import ToontownGlobals
from direct.showbase.PythonUtil import normalDistrib, lerp
import random
def printAllBossbotInfo():
print 'roomId: roomName'
for roomId, roomName in CountryClubRoomSpecs.BossbotCountryClubRoomId2RoomName.items():
print '%s: %s' % (roomId, roomName)
print '\nroomId: numBattles'
for roomId, numBattles in CountryClubRoomSpecs.roomId2numBattles.items():
print '%s: %s' % (roomId, numBattles)
print '\ncountryClubId floor roomIds'
printCountryClubRoomIds()
print '\ncountryClubId floor numRooms'
printNumRooms()
print '\ncountryClubId floor numForcedBattles'
printNumBattles()
def iterateBossbotCountryClubs(func):
from toontown.toonbase import ToontownGlobals
for countryClubId in [ToontownGlobals.BossbotCountryClubIntA, ToontownGlobals.BossbotCountryClubIntB, ToontownGlobals.BossbotCountryClubIntC]:
for floorNum in xrange(ToontownGlobals.CountryClubNumFloors[countryClubId]):
func(CountryClubLayout(countryClubId, floorNum))
def printCountryClubInfo():
def func(ml):
print ml
iterateBossbotCountryClubs(func)
def printCountryClubRoomIds():
def func(ml):
print ml.getCountryClubId(), ml.getFloorNum(), ml.getRoomIds()
iterateBossbotCountryClubs(func)
def printCountryClubRoomNames():
def func(ml):
print ml.getCountryClubId(), ml.getFloorNum(), ml.getRoomNames()
iterateBossbotCountryClubs(func)
def printNumRooms():
def func(ml):
print ml.getCountryClubId(), ml.getFloorNum(), ml.getNumRooms()
iterateBossbotCountryClubs(func)
def printNumBattles():
def func(ml):
print ml.getCountryClubId(), ml.getFloorNum(), ml.getNumBattles()
iterateBossbotCountryClubs(func)
ClubLayout3_0 = [(0, 2, 5, 9, 17), (0, 2, 4, 9, 17), (0, 2, 5, 9, 18)]
ClubLayout3_1 = [(0, 2, 5, 9, 17), (0, 2, 4, 9, 17), (0, 2, 5, 9, 18)]
ClubLayout3_2 = [(0, 2, 4, 9, 17), (0, 2, 4, 9, 17), (0, 2, 6, 9, 18)]
ClubLayout6_0 = [(0, 22, 4, 29, 17),
(0, 22, 5, 29, 17),
(0, 22, 6, 29, 17),
(0, 22, 5, 29, 17),
(0, 22, 6, 29, 17),
(0, 22, 5, 29, 18)]
ClubLayout6_1 = [(0, 22, 4, 29, 17),
(0, 22, 6, 29, 17),
(0, 22, 4, 29, 17),
(0, 22, 6, 29, 17),
(0, 22, 4, 29, 17),
(0, 22, 6, 29, 18)]
ClubLayout6_2 = [(0, 22, 4, 29, 17),
(0, 22, 6, 29, 17),
(0, 22, 5, 29, 17),
(0, 22, 6, 29, 17),
(0, 22, 5, 29, 17),
(0, 22, 7, 29, 18)]
ClubLayout9_0 = [(0, 32, 4, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 7, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 7, 39, 17),
(0, 32, 7, 39, 17),
(0, 32, 6, 39, 18)]
ClubLayout9_1 = [(0, 32, 4, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 7, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 7, 39, 17),
(0, 32, 7, 39, 17),
(0, 32, 7, 39, 18)]
ClubLayout9_2 = [(0, 32, 5, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 7, 39, 18)]
countryClubLayouts = [ClubLayout3_0,
ClubLayout3_1,
ClubLayout3_2,
ClubLayout6_0,
ClubLayout6_1,
ClubLayout6_2,
ClubLayout9_0,
ClubLayout9_1,
ClubLayout9_2]
testLayout = [ClubLayout3_0,
ClubLayout3_0,
ClubLayout3_0,
ClubLayout6_0,
ClubLayout6_0,
ClubLayout6_0,
ClubLayout9_0,
ClubLayout9_0,
ClubLayout9_0]
countryClubLayouts = testLayout
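# NOTE: the randomized layouts above are discarded here; every club uses the deterministic *_0 test layouts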
class CountryClubLayout:
notify = DirectNotifyGlobal.directNotify.newCategory('CountryClubLayout')
def __init__(self, countryClubId, floorNum, layoutIndex):
self.countryClubId = countryClubId
self.floorNum = floorNum
self.layoutIndex = layoutIndex
self.roomIds = []
self.hallways = []
self.numRooms = 1 + ToontownGlobals.CountryClubNumRooms[self.countryClubId][0]
self.numHallways = self.numRooms - 1 + 1
self.roomIds = countryClubLayouts[layoutIndex][floorNum]
hallwayRng = self.getRng()
connectorRoomNames = CountryClubRoomSpecs.BossbotCountryClubConnectorRooms
for i in xrange(self.numHallways):
self.hallways.append(hallwayRng.choice(connectorRoomNames))
def _genFloorLayout(self):
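# assemble a floor: a random entrance, middle rooms chosen to hit the exact battle quota
# (padded with zero-battle action rooms), and a random final room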
rng = self.getRng()
startingRoomIDs = CountryClubRoomSpecs.BossbotCountryClubEntranceIDs
middleRoomIDs = CountryClubRoomSpecs.BossbotCountryClubMiddleRoomIDs
finalRoomIDs = CountryClubRoomSpecs.BossbotCountryClubFinalRoomIDs
numBattlesLeft = ToontownGlobals.CountryClubNumBattles[self.countryClubId]
finalRoomId = rng.choice(finalRoomIDs)
numBattlesLeft -= CountryClubRoomSpecs.getNumBattles(finalRoomId)
middleRoomIds = []
middleRoomsLeft = self.numRooms - 2
numBattles2middleRoomIds = invertDictLossless(CountryClubRoomSpecs.middleRoomId2numBattles)
allBattleRooms = []
for num, roomIds in numBattles2middleRoomIds.items():
if num > 0:
allBattleRooms.extend(roomIds)
while 1:
allBattleRoomIds = list(allBattleRooms)
rng.shuffle(allBattleRoomIds)
battleRoomIds = self._chooseBattleRooms(numBattlesLeft,
allBattleRoomIds)
if battleRoomIds is not None:
break
CountryClubLayout.notify.info('could not find a valid set of battle rooms, trying again')
middleRoomIds.extend(battleRoomIds)
middleRoomsLeft -= len(battleRoomIds)
if middleRoomsLeft > 0:
actionRoomIds = numBattles2middleRoomIds[0]
for i in xrange(middleRoomsLeft):
roomId = rng.choice(actionRoomIds)
actionRoomIds.remove(roomId)
middleRoomIds.append(roomId)
roomIds = []
roomIds.append(rng.choice(startingRoomIDs))
middleRoomIds.sort()
print 'middleRoomIds=%s' % middleRoomIds
roomIds.extend(middleRoomIds)
roomIds.append(finalRoomId)
return roomIds
def getNumRooms(self):
return len(self.roomIds)
def getRoomId(self, n):
return self.roomIds[n]
def getRoomIds(self):
return self.roomIds[:]
def getRoomNames(self):
names = []
for roomId in self.roomIds:
names.append(CountryClubRoomSpecs.BossbotCountryClubRoomId2RoomName[roomId])
return names
def getNumHallways(self):
return len(self.hallways)
def getHallwayModel(self, n):
return self.hallways[n]
def getNumBattles(self):
numBattles = 0
for roomId in self.getRoomIds():
numBattles += CountryClubRoomSpecs.roomId2numBattles[roomId]
return numBattles
def getCountryClubId(self):
return self.countryClubId
def getFloorNum(self):
return self.floorNum
def getRng(self):
return random.Random(self.countryClubId * self.floorNum)
def _chooseBattleRooms(self, numBattlesLeft, allBattleRoomIds, baseIndex = 0, chosenBattleRooms = None):
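# depth-first backtracking: take rooms whose battle counts fit the remaining budget,
# return the chosen list once the budget reaches zero, or None if no subset works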
if chosenBattleRooms is None:
chosenBattleRooms = []
while baseIndex < len(allBattleRoomIds):
nextRoomId = allBattleRoomIds[baseIndex]
baseIndex += 1
newNumBattlesLeft = numBattlesLeft - CountryClubRoomSpecs.middleRoomId2numBattles[nextRoomId]
if newNumBattlesLeft < 0:
continue
elif newNumBattlesLeft == 0:
chosenBattleRooms.append(nextRoomId)
return chosenBattleRooms
chosenBattleRooms.append(nextRoomId)
result = self._chooseBattleRooms(newNumBattlesLeft, allBattleRoomIds, baseIndex, chosenBattleRooms)
if result is not None:
return result
else:
del chosenBattleRooms[-1:]
else:
return
return
def __str__(self):
return 'CountryClubLayout: id=%s, layoutIndex=%s, floor=%s, numRooms=%s, numBattles=%s' % (self.countryClubId,
self.layoutIndex,
self.floorNum,
self.getNumRooms(),
self.getNumBattles())
def __repr__(self):
return str(self)
|
[
"devinhall4@gmail.com"
] |
devinhall4@gmail.com
|
d0de3f7c1b1efdf289890092a2f58153bb5ff800
|
18465378796d46c3158548d376a55ab47400c388
|
/examples/deep_learning/human_pose_estimation/openpose.py
|
9f7b064e6c370fcc17605f6c16c3818800597050
|
[
"BSD-3-Clause"
] |
permissive
|
chokobole/felicia-examples
|
0b8a9a2a3f1885edad0bd50b01862a47dd467dc6
|
83303cf0f4bf688e9794ee574394d98619a16007
|
refs/heads/master
| 2020-09-05T16:18:38.788277
| 2019-11-30T06:42:14
| 2019-12-05T11:17:11
| 220,153,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
# Copyright (c) 2019 The Felicia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import cv2
import pyopenpose as op
from felicia.python.command_line_interface.flag_parser_delegate import FlagParserDelegate
import felicia_py.command_line_interface as cli
class OpenposeFlag(FlagParserDelegate):
def __init__(self):
super().__init__()
self.model_folder = cli.StringFlagBuilder().set_long_name(
"--model_folder").set_help("path to the model").build()
self.model_pose = cli.StringChoicesFlagBuilder("BODY_25", ["BODY_25", "COCO", "MPI"]).set_long_name(
"--model_pose").set_help("model to be used").build()
self.hand_flag = cli.BoolFlagBuilder().set_long_name(
"--hand").set_help("whether enable to detect hand").build()
self.face_flag = cli.BoolFlagBuilder().set_long_name(
"--face").set_help("whether enable to detect face").build()
def parse(self, flag_parser):
return self.parse_optional_flags(flag_parser)
def validate(self):
return self.model_pose.is_set()
class Openpose(object):
def __init__(self, params):
self.op_wrapper = op.WrapperPython()
self.op_wrapper.configure(params)
self.op_wrapper.start()
def inference(self, image):
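# wrap the image in an OpenPose Datum, run it through the wrapper, and return the populated Datum (None on error)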
try:
datum = op.Datum()
datum.cvInputData = image
self.op_wrapper.emplaceAndPop([datum])
return datum
except Exception as e:
print(e)
|
[
"chokobole33@gmail.com"
] |
chokobole33@gmail.com
|
5c2f0601e40d435b7a245bf5cbd6fe6bc4c0bb4b
|
fdf19f2fffcc0efc92e214b7adf07a1b28e513f2
|
/news/news/asgi.py
|
a714c79dd757e955fa28c1be367966aa0fcb3f84
|
[] |
no_license
|
nsr888/django_news
|
f991d15c906c206f10187a7af808b2302306a43a
|
33924d141a1029a3061865f0a4f395093b6a61b3
|
refs/heads/master
| 2023-08-30T19:59:20.116926
| 2021-09-27T18:31:59
| 2021-09-27T18:31:59
| 399,358,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
import os
import django
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "news.settings")
django.setup()
from chat import routing
application = ProtocolTypeRouter({
"http": get_asgi_application(),
"websocket": AuthMiddlewareStack(
URLRouter(
routing.websocket_urlpatterns
)
),
})
|
[
"sun604@gmail.com"
] |
sun604@gmail.com
|
24fcd3ca7a366ffab5b4baefde01550aceb4ff7e
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/7ZWinyRcC4AoxbqdG_15.py
|
bd2e9e8972e49ccc358f2f1668e443796bebcb70
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
"""
Given a positive integer `n`, compute the nth term in the Fibonacci sequence.
For those of you that have been living under a rock in the mathematical world,
here's the definition:
* The first and second terms are 1.
* nth term is the (n-1)th term + the (n-2)th term. So the 3rd term is the 1st term + the 2nd term, the 4th term is the 3rd term + the 2nd term, etc.
Thus the sequence looks like this: 1, 1, 2, 3, 5, 8, 13, 21, ...
### Examples
fibo(1) ➞ 1
fibo(2) ➞ 1
fibo(3) ➞ 2
fibo(6) ➞ 8
fibo(30) ➞ 832040
### Notes
N/A
"""
def fibo(n):
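# iterative Fibonacci: l and ll hold consecutive terms; after n-1 updates l == F(n)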
l, ll = 1, 1
for _ in range(1, n):
new = l + ll
l, ll = ll, new
return l
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
67e81eb550afba48adaa128eed4fd749a1e9de28
|
49f70d315c4a0824c513a1b093e577dcaa3b76ff
|
/Ejercicios/DesdeCero/10.- Modulos Paquetes Archivos/2.- Modulo 2/inicio.py
|
1108fbf86898492a87307f10e68bd8b9ecbfac28
|
[] |
no_license
|
JavierEsteban/TodoPython
|
acda6c3a9b9ee69255ca5bfe0c2163e886a2f29b
|
ba09e2f8ea336f17880e61a2d3e192e92ffa1d66
|
refs/heads/main
| 2023-03-24T21:27:56.507323
| 2021-03-23T06:00:56
| 2021-03-23T06:00:56
| 350,537,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 84
|
py
|
import datos2
nombre = str(input("Enter your name.."))
datos2.saludar(nombre)
|
[
"jaestior@gmail.com"
] |
jaestior@gmail.com
|
467285dff10e1f7735fe251a800312f3f385587e
|
02c488d803bfea79f51c555e9d32fbfdae5866bc
|
/scripts/procedures/QuickAr.py
|
b5beee2d9a793a99ed2a3ffd9e1680d243c706c4
|
[] |
no_license
|
ANGLPychron/support
|
20f51f44fd71ba1826556056270c977532e20e73
|
5aa55e222e87ff82a135127c05569c17df4eff05
|
refs/heads/master
| 2020-07-24T08:39:08.074359
| 2019-09-30T20:18:37
| 2019-09-30T20:18:37
| 207,869,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
'''
'''
def main():
###########################################
# UA ANGL Thermo Ar cocktail script (gettered air)
info('Argus VI Ar Procedure Script')
#reset valving, pump prep/MS
close('8') # MS inlet
open('1') # Prep ion
open('9') # MS ion
close('2') # Aux 1
close('3') # Aux 2
close('4') # Pipette Ref Out
close('5') # Pipette Ref In
#load the air shot into the pipette
close('6') # for explicit completeness close the other pipette. not strictly necessary
close('7')
close('4')
open('5')
sleep(30)
close('5')
#load pipette into V1
open('4')
sleep(30)
# when doing automated analysis the extraction script should stop here
# since this is a procedure script we need to do the equilibration and post_equilibration
# steps here
# The following should only be used in procedures scripts NOT in extraction scripts
#gas into MS
close('4')
close('9')
open('8')
# equilibrate
sleep(15)
close('8')
open('1')
# reset prep line to default state
# when doing automated analysis this would go in your post_equilibration script
open('1') # Prep ion
close('2') # Aux 1
close('3') # Aux 2
close('4') # Pipette Ref Out
close('5') # Pipette Ref In
# when doing automated analysis the following would go in your post_measurement script
# but you don't want to pump away the gas you just loaded so its commented out here
# open('9') # MS Ion pump
|
[
"ANGL-pychron@ariz.edu"
] |
ANGL-pychron@ariz.edu
|
bcf04cfc3600dc60c912ad51f41c9b6f7ea89ee2
|
a79e7562e573222930ecccaedf4239d108717ba3
|
/sockets.py
|
0367f5fdfdc4c5e86894c57d70fdfb5e739bd0cd
|
[] |
no_license
|
atilasos/pythonlearning
|
f21e2516ffd3135007d48585292c9b35d14bf413
|
cbcbb41184685c49cdcfc96b1b44e1deff51d3ba
|
refs/heads/master
| 2021-01-10T14:01:34.099926
| 2015-11-29T17:30:04
| 2015-11-29T17:30:04
| 47,004,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
import socket
import re
url = raw_input('Enter url - ')
# shortcut: default to the sample data URL when nothing is entered
if len(url) < 1:
url = 'http://www.pythonlearn.com/code/intro-short.txt'
host = (re.findall('http://(.+?)/', url))[0]
print 'Connecting to host:', host
url = 'GET ' + url + ' HTTP/1.0\n\n'
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect((host, 80))
mysock.send(url)
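# read only the first 512 bytes; the regexes below parse just the response headers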
working = mysock.recv(512)
mysock.close()
print re.findall('.*?ied: (.+?)\r', working)
print re.findall('.*?ag: "(.+?)"', working)
print re.findall('.*?ength: (.+?)\r', working)
print re.findall('.*?trol: (.+?)\r', working)
print re.findall('.*?ype: (.+?)\r', working)
|
[
"Igor Almeida"
] |
Igor Almeida
|
9b111038bda66c834b7cebbdc7b6c5728f2a9351
|
138909a17b9f4b82ec91a209443864fbd18c1248
|
/FlippingBits.py
|
504c12daade03190387d36e00544086e1d9dd07e
|
[] |
no_license
|
surbhilakhani/Hackerrank
|
70fc0a7bf85e73dbc6bd1f4695e148f7080a0c59
|
f6cea99c5787c10ea5817bb9c4f3be8da1f6a73c
|
refs/heads/master
| 2021-01-19T03:03:05.435417
| 2016-07-01T13:45:19
| 2016-07-01T13:45:19
| 62,326,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
# flipping all 32 bits of n equals (2**32 - 1) - n
for i in xrange(int(raw_input())):
    print 4294967296 - long(raw_input()) - 1
|
[
"noreply@github.com"
] |
surbhilakhani.noreply@github.com
|
a08f6e963a4605590dcaaa35d2ee5f8f542c6f94
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/l3/lbrtdif.py
|
b71a0b6e545d3f336fa9d52dc80329859f9e8a59
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,696
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class LbRtdIf(Mo):
"""
The routed loopback interface.
"""
meta = ClassMeta("cobra.model.l3.LbRtdIf")
meta.moClassName = "l3LbRtdIf"
meta.rnFormat = "lb-[%(id)s]"
meta.category = MoCategory.REGULAR
meta.label = "Routed Loopback Interface"
meta.writeAccessMask = 0x400401002001
meta.readAccessMask = 0x8528425162001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.l3.RsLbIfToStaticRP")
meta.childClasses.add("cobra.model.ethpm.LbRtdIf")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.l3.RsProtLbIf")
meta.childClasses.add("cobra.model.l3.RsIntersiteLbIfToOutRef")
meta.childClasses.add("cobra.model.l3.RtSpanSrcToL3OutAtt")
meta.childClasses.add("cobra.model.l3.RtPseudoIf")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.l3.RsLbIfToOutRef")
meta.childClasses.add("cobra.model.l3.RtSrcToL3OutAtt")
meta.childClasses.add("cobra.model.nw.RtPathToIf")
meta.childClasses.add("cobra.model.l3.RsL3dot1pRuleAtt")
meta.childClasses.add("cobra.model.l3.RsL3dscpRuleAtt")
meta.childClasses.add("cobra.model.l3.RsLbIfToLocale")
meta.childClasses.add("cobra.model.l3.RsL3If")
meta.childClasses.add("cobra.model.eltm.LbIf")
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RsIntersiteLbIfToOutRef", "rsintersiteLbIfToOutRef-"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RtSpanSrcToL3OutAtt", "rtspanSpanSrcToL3OutAtt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RtSrcToL3OutAtt", "rtoamSrcToL3OutAtt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RsLbIfToStaticRP", "rslbIfToStaticRP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RsL3dot1pRuleAtt", "rsl3dot1pRuleAtt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RsL3dscpRuleAtt", "rsl3dscpRuleAtt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RsLbIfToOutRef", "rslbIfToOutRef-"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RsLbIfToLocale", "rslbIfToLocale-"))
meta.childNamesAndRnPrefix.append(("cobra.model.nw.RtPathToIf", "rtpathToIf-"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RsProtLbIf", "rsprotLbIf"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RtPseudoIf", "rtpseudoIf"))
meta.childNamesAndRnPrefix.append(("cobra.model.eltm.LbIf", "eltmlbif"))
meta.childNamesAndRnPrefix.append(("cobra.model.ethpm.LbRtdIf", "lbrtdif"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3.RsL3If", "rsl3If-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.parentClasses.add("cobra.model.l3.Ctx")
meta.parentClasses.add("cobra.model.l3.Inst")
meta.parentClasses.add("cobra.model.l3.CtxSubstitute")
meta.superClasses.add("cobra.model.nw.ConnEp")
meta.superClasses.add("cobra.model.nw.If")
meta.superClasses.add("cobra.model.nw.Conn")
meta.superClasses.add("cobra.model.nw.LogicalIf")
meta.superClasses.add("cobra.model.nw.Item")
meta.superClasses.add("cobra.model.l3.If")
meta.superClasses.add("cobra.model.nw.Ep")
meta.rnPrefixes = [
('lb-', True),
]
prop = PropMeta("str", "adminSt", "adminSt", 4269, PropCategory.REGULAR)
prop.label = "Admin State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "down"
prop._addConstant("down", "down", 1)
prop._addConstant("up", "up", 2)
meta.props.add("adminSt", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5585, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "id", "id", 6848, PropCategory.REGULAR)
prop.label = "Interface ID"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("id", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "linkLog", "linkLog", 4338, PropCategory.REGULAR)
prop.label = "Administrative Link Logging Enable"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "default"
prop._addConstant("default", "default", 1)
prop._addConstant("disable", "disable", 3)
prop._addConstant("enable", "enable", 2)
meta.props.add("linkLog", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14573, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 16432, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 128)]
meta.props.add("name", prop)
prop = PropMeta("str", "qosPrio", "qosPrio", 42173, PropCategory.REGULAR)
prop.label = "Qos Priority"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 9)]
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("level1", "level1", 3)
prop._addConstant("level2", "level2", 2)
prop._addConstant("level3", "level3-(default)", 1)
prop._addConstant("level4", "level4", 9)
prop._addConstant("level5", "level5", 8)
prop._addConstant("level6", "level6", 7)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("qosPrio", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "rtdOutDefDn", "rtdOutDefDn", 57108, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("rtdOutDefDn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 21968, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("inter-site", "loopback-for-inter-site-connectivity", 2)
prop._addConstant("intra-site", "loopback-for-intra-site-connectivity", 1)
prop._addConstant("intra-site-ext-routable", "loopback-for-intra-site-external-routable-connectivity", 4)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("type", prop)
meta.namingProps.append(getattr(meta.props, "id"))
getattr(meta.props, "id").needDelimiter = True
meta.deploymentCategory = DeploymentCategory("interface", "Interface")
def __init__(self, parentMoOrDn, id, markDirty=True, **creationProps):
namingVals = [id]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
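    # Construction sketch (hedged: the class name and parent DN below are
    # illustrative assumptions, not taken from this file):
    #
    #   mo = <ThisClass>('topology/pod-1/node-101/sys/inst-overlay-1', 'lo0')
    #
    # The naming prop "id" is combined with the 'lb-' prefix declared in
    # meta.rnPrefixes, yielding a relative name such as 'lb-lo0'.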
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
bd3c6b494860eff351aa8ddd54c4b441aaca6f19
|
38a6d81f2a75d147ce82487197c10e15c2266db8
|
/main.py
|
350824f2628e52584aa686c841e8f55b87628c5d
|
[] |
no_license
|
LinarAbdrazakov/Autopilot_ver2
|
9302aba804346191c85d96a742c306e5fbe5023f
|
0e26ee7da0a3fa35bd815656ee92203eef4f7303
|
refs/heads/master
| 2020-04-25T23:35:56.137688
| 2019-02-28T16:57:00
| 2019-02-28T16:57:00
| 173,151,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
import io
import socket
import struct
import time
from picamera.array import PiRGBArray
from picamera import PiCamera
import numpy as np
import cv2
import NeuralNetwork
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 10
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
print "[INFO] camera connect"
start = time.time()
number = 0
try:
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# get image from camera
image = frame.array
# predict angle
angle = NeuralNetwork.predict_angle(image)
print 'Angle:', angle
with open("angle.txt", 'w') as file:
file.write(str(angle))
# FPS
number += 1
if (time.time() - start) > 5:
print "FPS: ", number/(time.time() - start)
number = 0
start = time.time()
rawCapture.truncate(0)
finally:
pass
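
# A minimal stand-in for the local NeuralNetwork module used above (hedged:
# its real interface is not shown here; a predict_angle() that takes a BGR
# numpy array and returns a float steering angle is an assumption, useful
# only for testing this loop without the trained model):
#
#   # NeuralNetwork.py
#   def predict_angle(image):
#       return 0.0  # neutral steering angle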
|
[
"linar200015@gmail.com"
] |
linar200015@gmail.com
|
43b3937f37516322caec140b93e86cb05ce9a0f0
|
ce8bb40bf2b688f19ab8bcc20cfd58994413bc0f
|
/session_and_cookie/session_and_cookie/session_and_cookie/settings.py
|
5f33906bebe357ef0bc4e9f6c12fabc14535a727
|
[] |
no_license
|
Fover21/project1
|
457f452d7f6e7ecbfc81a18512377ebc5457f3f6
|
84d596caf5701d7d76eee8c50f61bcb6150c57f2
|
refs/heads/master
| 2020-03-24T20:01:51.506348
| 2018-12-26T06:07:45
| 2018-12-26T06:07:45
| 142,955,917
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,253
|
py
|
"""
Django settings for session_and_cookie project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '91o0bpyj#l_3aj^dh$2kr&^guay-&auhjc7^xsvr-1yjsjr@7k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app01.apps.App01Config',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'session_and_cookie.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'session_and_cookie.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
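
# Session behaviour in this demo relies on Django defaults (database-backed
# sessions via django.contrib.sessions above). A sketch of the optional
# knobs -- illustrative lines, not part of the original settings:
# SESSION_ENGINE = 'django.contrib.sessions.backends.db'  # the default
# SESSION_COOKIE_AGE = 1209600  # seconds; Django's default of two weeks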
|
[
"850781645@qq.com"
] |
850781645@qq.com
|
b110a452693ee2f5d626be0e4fabfd183f8ffe29
|
8178df63d0d2ff21587d200c6c5b124bc438d140
|
/strategy/Test1.py
|
a348076875dae9a09d50732c45e69d9aecb366e0
|
[] |
no_license
|
CrazyMoney/backtreader_python
|
8c4937a817f3280d0ad8403257dd16b887fbd4ad
|
c085f8f6913b0cc1cbdeb4fd65c5de8f0bc714b0
|
refs/heads/master
| 2023-04-26T12:01:02.555935
| 2021-05-10T08:17:33
| 2021-05-10T08:17:33
| 365,119,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
# import baostock as bs
# import pandas as pd
#
#
# def download_data(date):
# bs.login()
#
# # fetch the index and stock data for the specified date
# stock_rs = bs.query_all_stock(date)
# stock_df = stock_rs.get_data()
# data_df = pd.DataFrame()
# print(stock_df)
# # for code in stock_df["code"]:
# # print("Downloading :" + code)
# # k_rs = bs.query_history_k_data_plus(code, "date,code,open,high,low,close", date, date)
# # data_df = data_df.append(k_rs.get_data())
# # bs.logout()
# # data_df.to_csv("D:\\demo_assignDayData.csv", encoding="gbk", index=False)
# # print(data_df)
#
#
# if __name__ == '__main__':
# # fetch the daily K-line data for all stocks on the specified date
# download_data("2019-02-25")
DB_CONFIG = {
    "host": '127.0.0.1',
    "user": 'root',
    "passwd": '5279888',
    "port": 3306,
    "db": 'china_stock',
    "charset": 'utf8'
}
import pymysql
conn = pymysql.connect(**DB_CONFIG)
cursor = conn.cursor()
cursor.execute(
    " SELECT date FROM {} WHERE code='{}' order by date limit 1".format('china_stock_day', 'sh.000001')
)
print(cursor.fetchall())
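
# The same query with pymysql parameter binding (a sketch; placeholders can
# carry the code value, while the table name must stay a literal):
cursor.execute(
    "SELECT date FROM china_stock_day WHERE code=%s order by date limit 1",
    ('sh.000001',)
)
print(cursor.fetchall())
cursor.close()
conn.close()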
|
[
"1277582508@qq.com"
] |
1277582508@qq.com
|
396556eb2dfaf94d2ba0f13b43c91fd1a0ef439b
|
65c5037c7a554760a8fbf1c5f89e3de8fceab23f
|
/tests/test_cgcnn.py
|
31574c13b3f67331a0997d2f6c25cadc869b82c6
|
[] |
no_license
|
UON-comp-chem/GNNforCatalysis-DGL
|
22dedf04e58cffa43d2a9fbbf82ff050ae0a5dee
|
5175e05a8825003e3d3483b1ab43ebdddc086518
|
refs/heads/main
| 2023-04-17T10:17:10.585720
| 2021-04-30T01:18:50
| 2021-04-30T01:18:50
| 362,988,363
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
#!/usr/bin/env python
# coding: utf-8
# Author Xinyu Li
# Modify the python path so that the catgnn package in the repository root
# can be imported when the tests run directly
import os
import sys
sys.path.append('/home/xinyu/WSL-workspace/Repos/GNNforCatalysis-DGL')
def test_model_cgcnn1():
import dgl
import torch
from catgnn.cgcnn import CGCNN
g = dgl.DGLGraph()
g.add_nodes(4)
g.add_edges([0, 0, 1, 1, 1, 2, 3], [1, 0, 1, 0, 2, 3, 2])
g.edata["distance"] = torch.tensor([1.0, 3.0, 2.0, 4.8, 2.8, 4., 6.]).reshape(-1, 1)
g.ndata["node_type"] = torch.LongTensor([1, 2, 3, 4])
model = CGCNN(embed = 'atom')
atom = model(g)
assert atom.shape == torch.Size([1, 1])
def test_model_cgcnn2():
import dgl
import torch
from catgnn.cgcnn import CGCNN
g = dgl.DGLGraph()
g.add_nodes(4)
g.add_edges([0, 0, 1, 1, 1, 2, 3], [1, 0, 1, 0, 2, 3, 2])
g.edata["distance"] = torch.tensor([1.0, 3.0, 2.0, 4.8, 2.8, 4., 6.]).reshape(-1, 1)
g.ndata["node_type"] = torch.LongTensor([1, 2, 3, 4])
model = CGCNN(embed = 'atom', norm = True)
model.set_mean_std(1.0, 1.0)
atom = model(g)
assert atom.shape == torch.Size([1, 1])
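
# Run with pytest from the repository root (hedged: the hard-coded sys.path
# entry above is machine-specific and may need adjusting first):
#   pytest tests/test_cgcnn.py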
|
[
"lixy52@qq.com"
] |
lixy52@qq.com
|
01f9c6b23a8ab69cd0f6440c3da080c5423e4374
|
698b80c71e859bbdf9743363c4800be0b8ffd8aa
|
/parse_weapons.py
|
0071d3ce95f8da5acb89ccd60f48e3e2abe55673
|
[
"MIT"
] |
permissive
|
mkristofik/starfighter
|
0abcd971ab1bbd96910ee200012252970ac40426
|
1ec4c67a6356995ce624a966567ea4fca8638999
|
refs/heads/master
| 2021-08-30T05:13:02.772347
| 2017-12-16T04:28:06
| 2017-12-16T04:28:06
| 111,362,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,698
|
py
|
"""Convert a weapons data file in VB6 format to JSON."""
import json
import struct
import sys
from vb6_stuff import *
def get_locations(locs):
ret = []
txt_locs = str(locs)
if '1' in txt_locs:
ret.append('cockpit')
if '2' in txt_locs:
ret.append('fuselage')
if '3' in txt_locs:
ret.append('left wing')
if '4' in txt_locs:
ret.append('right wing')
    if not ret:
        raise RuntimeError('no weapon locations decoded from %r' % locs)
return ret
def get_options(opts):
if opts == 0:
return []
elif opts == 2:
return ['weapon']
elif opts == 12:
return ['warhead launcher', 'weapon']
    else:
        raise RuntimeError('unexpected options value: %r' % opts)
def parse_weapons(filename):
with open(filename, 'rb') as f:
count = 0
for record in struct.iter_unpack('<25s6sdh15s6s5h', f.read()):
count += 1
is_deleted = record[10]
if is_deleted:
continue
yield {'id': count,
'name': record[0].decode().strip(),
'damage': record[1].decode().strip(),
'space': record[2],
'criticals': record[3],
'range': record[4].decode().strip(),
'tohit': record[5].decode().strip(),
'maxnum': record[6],
'techbase': get_techbase(record[7]),
'locations': get_locations(record[8]),
'options': get_options(record[9])}
if __name__ == '__main__':
filename = 'weapons.db'
if len(sys.argv) > 1:
filename = sys.argv[1]
print(json.dumps(list(parse_weapons(filename)), indent=4))
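
# Record layout, as read off the struct format above (a recap, not an
# authoritative spec): '<25s6sdh15s6s5h' is little-endian -- a 25-byte name,
# a 6-byte damage string, a double for space, a short for criticals, a
# 15-byte range, a 6-byte to-hit, then five shorts: maxnum, techbase,
# locations, options, and the is-deleted flag.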
|
[
"kristo605@gmail.com"
] |
kristo605@gmail.com
|
6fa914d96fdd4d51fdb541e868b7bcc1c3173c10
|
603d371b0fb34cb71182a5433e35a61112e4442f
|
/voice_recog.py
|
c1974da6b73ffa5570e36706f5dfb5c61e9bc812
|
[] |
no_license
|
Ant2000/FreeCam
|
9f2da50cae6cab3b37bacc80cf1cff29f7417c79
|
c1f9719dc5359003d8400b736f5c2e32443a8945
|
refs/heads/main
| 2023-04-24T17:26:57.040008
| 2021-05-14T06:39:36
| 2021-05-14T06:39:36
| 354,518,104
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,815
|
py
|
"""
This code is for obtaining voice recognition results from google cloud.
"""
import speech_recognition as sr
import sqlite3 as sq3
with open("./key.json", 'r') as f:
GOOGLE_CLOUD_SPEECH_CREDENTIALS = f.read()
connection = sq3.connect("Parameters.db")
cursor = connection.cursor()
cursor.execute("UPDATE parameters SET status = 1 WHERE parameter = 'track'")
cursor.execute("UPDATE parameters SET status = 1 WHERE parameter = 'userView'")
cursor.execute("UPDATE parameters SET status = 1 WHERE parameter = 'autoOff'")
cursor.execute("UPDATE parameters SET status = 0 WHERE parameter = 'default'")
connection.commit()
# cursor.execute("""CREATE TABLE IF NOT EXISTS
# parameters(parameter TEXT PRIMARY KEY, status INTEGER)""")
# cursor.execute("INSERT INTO parameters VALUES ('track', 1)")
# cursor.execute("INSERT INTO parameters VALUES ('autoOff', 1)")
# cursor.execute("INSERT INTO parameters VALUES ('autoOff', 1)")
# cursor.execute("INSERT INTO parameters VALUES ('default', 0)")
r = sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
while True:
print("Say something!")
audio = r.listen(source, phrase_time_limit=2)
try:
text = r.recognize_google_cloud(audio, credentials_json=GOOGLE_CLOUD_SPEECH_CREDENTIALS)
print(text)
if "system" in text:
cursor.execute("SELECT * FROM parameters")
test = cursor.fetchall()
print(test)
while True:
print("Command: ")
audio = r.listen(source, phrase_time_limit=2)
try:
text = r.recognize_google_cloud(audio, credentials_json=GOOGLE_CLOUD_SPEECH_CREDENTIALS)
text = text.lower()
if "track" in text:
if test[0][1] == 0:
cursor.execute("UPDATE parameters SET status = 1 WHERE parameter = 'track'")
else:
cursor.execute("UPDATE parameters SET status = 0 WHERE parameter = 'track'")
connection.commit()
print("Track")
cursor.execute("SELECT * FROM parameters")
print(cursor.fetchall())
break
elif "auto" in text:
if test[1][1] == 0:
cursor.execute("UPDATE parameters SET status = 1 WHERE parameter = 'autoOff'")
else:
cursor.execute("UPDATE parameters SET status = 0 WHERE parameter = 'autoOff'")
connection.commit()
print("autoOff")
cursor.execute("SELECT * FROM parameters")
print(cursor.fetchall())
break
elif "camera" in text:
if test[2][1] == 0:
cursor.execute("UPDATE parameters SET status = 1 WHERE parameter = 'userView'")
else:
cursor.execute("UPDATE parameters SET status = 0 WHERE parameter = 'userView'")
connection.commit()
print("userView")
cursor.execute("SELECT * FROM parameters")
print(cursor.fetchall())
break
elif "default" in text:
if test[3][1] == 0:
cursor.execute("UPDATE parameters SET status = 1 WHERE parameter = 'default'")
else:
cursor.execute("UPDATE parameters SET status = 0 WHERE parameter = 'default'")
connection.commit()
print("default")
cursor.execute("SELECT * FROM parameters")
print(cursor.fetchall())
break
except sr.RequestError as exception:
print("Could not request results from Google Cloud Speech service; {0}".format(exception))
except sr.UnknownValueError:
print("Unable to understand sentence")
except sr.RequestError as exception:
print("Could not request results from Google Cloud Speech service; {0}".format(exception))
except sr.UnknownValueError:
print("Unable to understand sentence")
|
[
"ajosekuruvilla@gmail.com"
] |
ajosekuruvilla@gmail.com
|
a0bc5776b6315165b542ab97826bd7deab9f5951
|
081fa33cad653555a1b9dca4ccb07b8946b31108
|
/Map Filter Reduce/Map.py
|
56686a66eac19181963322521b997651098156e3
|
[] |
no_license
|
ArtheadCourses/AltenGbg
|
fe0e5de8680d42da3cbba69be99888783988186d
|
cbb688b799acc5934c308b1b17da2c9d4f4b3e41
|
refs/heads/master
| 2020-07-07T11:09:03.376063
| 2016-11-17T21:13:55
| 2016-11-17T21:13:55
| 74,036,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
def c2f(t):
return (9.0/5) * t + 32
def f2c(t):
return (5.0/9) * (t-32)
def main():
temp_in_c = [16.3, 21.3, 19.7, 15.4]
#f_temp = list(map(lambda t:(9.0/5) * t + 32, temp_in_c))
f_temp = [(9.0/5) * t + 32 for t in temp_in_c]
c_temp = list(map(f2c, f_temp))
print(f_temp)
print(c_temp)
if __name__ == '__main__':
main()
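
# For symmetry with the folder name, the same data run through filter and
# reduce (a sketch, not part of the original lesson file):
#   from functools import reduce
#   warm = list(filter(lambda t: t > 18.0, temp_in_c))  # [21.3, 19.7]
#   total = reduce(lambda a, b: a + b, temp_in_c)       # 72.7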
|
[
"joakim.wassberg@arthead.se"
] |
joakim.wassberg@arthead.se
|
0cf9ec89e897b2cf2d407a7449bff2e420657f5f
|
01e4f0720c32bc9f935a7d1588ad5e80eb6dc33a
|
/Django/Project1_1_5_4/pineapple/views.py
|
49c74693131e24f22375a9cfaa2c283f767c4f1f
|
[] |
no_license
|
nguyenvu2589/Python
|
b65601e3c1e2d996087bb4fba36f3c30ebf5e4b7
|
9e295266d6aa2d84d9409fa9f9117030aad5fd4b
|
refs/heads/master
| 2020-06-29T07:05:39.260148
| 2017-01-28T19:57:04
| 2017-01-28T19:57:04
| 74,441,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,822
|
py
|
# Create your views here.
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
from pineapple.models import Category, Document
from pineapple.models import Page
from pineapple.form import CategoryForm
from pineapple.form import PageForm, DocumentForm
from pineapple.form import UserForm, UserProfileForm
from django.shortcuts import render
from easy_thumbnails.files import get_thumbnailer
from django.core.files.storage import FileSystemStorage
from datetime import datetime
import csv
def index(request):
category_list = Category.objects.all()
page_list = Page.objects.all()[:5]
feature = Page.objects.filter(feature=True)
top_five = Page.objects.order_by('-views')[:4]
context_dict = { 'categories': category_list , 'pages': page_list, 'top5':top_five, 'ft': feature}
    # request the number of the last visit from the server;
    # if it is not valid, set it to 1
response = render(request, 'pineapple/index.html', context_dict)
return response
def about(request):
return render_to_response('pineapple/about.html')
def menu(request):
context_dict = {}
try:
cat = Category.objects.all()
#context_dict['category_name'] = category
context_dict['category'] = cat
pages = Page.objects.all()
context_dict['page'] = pages
except Category.DoesNotExist:
print " does not exist"
return render(request, 'pineapple/menu.html', context_dict)
def display(request):
context_dict = {}
try:
cat = Category.objects.all()
#context_dict['category_name'] = category
context_dict['category'] = cat
pages = Page.objects.all()
context_dict['page'] = pages
except Category.DoesNotExist:
print " does not exist"
return render(request, 'pineapple/display.html', context_dict)
def order(request):
context_dict = {}
try:
cat = Category.objects.all()
i = 0
#context_dict['category_name'] = category
context_dict['category'] = cat
context_dict['count'] = i
pages = Page.objects.all()
context_dict['page'] = pages
except Category.DoesNotExist:
print " does not exist"
return render(request, 'pineapple/order.html', context_dict)
def contact (request):
context_dict={}
return render(request, 'pineapple/contact.html', context_dict)
def category(request, category_name_slug):
context_dict = {}
try:
category = Category.objects.get(name=category_name_slug)
context_dict['category_name'] = category.name
pages = Page.objects.filter(category=category)
context_dict['page'] = pages
context_dict['category'] = category
context_dict['category_name_slug'] = category_name_slug
except Category.DoesNotExist:
print " does not exist"
return render(request, 'pineapple/category.html', context_dict)
def page(request, page_name_slug):
context_dict ={}
try:
print (page_name_slug)
page =Page.objects.get(slugP = page_name_slug)
context_dict['item'] = page
except Page.DoesNotExist:
print "cant find this page."
return render(request, 'pineapple/page.html', context_dict)
def add_category(request):
if request.method =='POST':
form = CategoryForm(request.POST)
if form.is_valid():
form.save(commit=True)
return index(request)
else:
print form.errors
else:
form = CategoryForm()
return render(request, 'pineapple/add_category.html', {'form' : form})
def add_page(request, category_name_slug):
try :
cat = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
cat = None
if request.method == 'POST':
form =PageForm(request.POST)
if form.is_valid():
page = form.save(commit=False)
page.category = cat
page.views = 0
page.save()
return category(request, category_name_slug)
else:
print form.errors
else:
form = PageForm()
context_dict = {'form': form, 'category': cat }
return render(request, 'pineapple/add_page.html', context_dict)
def upload(request):
try:
docs = Document.objects.order_by('-time')[:5]
except Document.DoesNotExist:
docs = None
context_dict ={}
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
name = request.FILES['document']
doc = form.save(commit = False)
doc.time = datetime.now()
doc.name = name
print doc.name, "this is doc name"
doc.numEntry = parse_csv(name)
doc.save()
context_dict['docs'] = docs
else :
print form.errors
else:
form = DocumentForm()
context_dict = {'form': form, 'docs': docs }
return render(request, 'pineapple/upload.html', context_dict )
# TODO: the description field -- take it out or repurpose it as a name;
# after "choose file" is clicked, change the displayed name to the file name;
# create a history table and a way to delete history entries.
def parse_csv(upload):
count = 0
reader = csv.reader(upload)
for line in reader:
# do something with line...
count += 1
return count
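
# parse_csv usage sketch (counts every row, header line included):
#   with open('orders.csv') as f:
#       print parse_csv(f)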
|
[
"noreply@github.com"
] |
nguyenvu2589.noreply@github.com
|
eb9393b0f5d36abf9af3eb9be421adb79d467f9f
|
07870a9ea2b2354f0b2bf5c336b58d58cbb0969f
|
/Level2Code/lesson7page/HomeworkNeed/firstapp/models.py
|
baf95d8e6f58883e3a5a2f2e762b018cb1895724
|
[] |
no_license
|
Kathylovepdf/Python-Web
|
24cf359323f973e95585bec5dcf89616e07d6946
|
43ae05ff2d4c602fdb8f4285418bdd3f0b26ff7f
|
refs/heads/master
| 2020-03-17T12:50:50.022464
| 2019-06-20T07:57:00
| 2019-06-20T07:57:00
| 133,605,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
from django.db import models
from faker import Factory
# Create your models here.
class Article(models.Model):
title = models.CharField(max_length=500)
img = models.CharField(max_length=250)
content = models.TextField(null=True, blank=True)
views = models.IntegerField(default=0)
likes = models.IntegerField(default=0)
createtime = models.DateField()
def __str__(self):
return self.title
# f = open('C:/Users/xuetangx/Desktop/pictures.txt', 'r')
# fake = Factory.create()
# for url in f.readlines():
# a = Article(
# title=fake.text(max_nb_chars=90),
# img=url,
# content=fake.text(max_nb_chars=3000),
# views=fake.pyint(),
# likes=fake.pyint(),
# createtime=fake.date_time(),
# )
# a.save()
|
[
"287778678@qq.com"
] |
287778678@qq.com
|
e92e7215b424397a8125ce7fa0b0d06457ba4f01
|
bb0b75941b431da605f0aeca334d6c2f0289a779
|
/day11/part1.py
|
fdfd9b0548644c578ae2ef83647c0d7c7f4d4d3c
|
[] |
no_license
|
Danis98/AdventOfCode2017
|
d6a6f0d57e9de12cbffd553afdf9cf0d02992df0
|
df719ae684d62c73cb21aef94fc58b7c7e4410e8
|
refs/heads/master
| 2021-09-01T06:21:16.389957
| 2017-12-25T09:39:59
| 2017-12-25T09:39:59
| 112,966,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
cmds = open('day11.input').read().rstrip().split(',')
off = {
'n': (0, 1),
's': (0, -1),
'se': (1, -.5),
'ne': (1, .5),
'sw': (-1, -.5),
'nw': (-1, .5)
}
pos = (0, 0)
for cmd in cmds:
pos = tuple(map(sum, zip(pos, off[cmd])))
dx, dy = float(abs(pos[0])), float(abs(pos[1]))
dist = dx + ((dy-dx/2) if dy>dx else 0)
print dist
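
# sanity checks (tiny hand-made cases, not puzzle input):
#   'ne,ne,n' ends at (2, 2)  -> 2 + (2 - 1) = 3 steps
#   'ne,sw'   ends at (0, 0)  -> 0 steps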
|
[
"danielevenier1998@gmail.com"
] |
danielevenier1998@gmail.com
|
f75d6c451ee477af383d38927f032c58996f2f34
|
42081a2e76ef711cdd8f0fb01dcf7eab77036e43
|
/py/ForDeepLearning/신경망내적_구현정리.py
|
66c18103b07cc484b2abaafa5d6c31833a0abcff
|
[] |
no_license
|
suyeony0/Junior2ndSemester
|
a9564ba9d3d7bf045635bf5f64e8da4d6f220f91
|
0c1dc5ce151b4e0d35bb882ae0687b3d296dc6e7
|
refs/heads/master
| 2022-01-14T19:57:05.681723
| 2019-06-22T04:34:11
| 2019-06-22T04:34:11
| 193,189,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
import numpy as np
from sigmoidFunc import sigmoid
def init_network():
network={}
network['W1']=np.array([[0.1,0.3,0.5],[0.2,0.4,0.6]])
network['b1']=np.array([0.1,0.2,0.3])
network['W2']=np.array([[0.1,0.4],[0.2,0.5],[0.3,0.6]])
network['b2']=np.array([0.1,0.2])
network['W3']=np.array([[0.1,0.3],[0.2,0.4]])
network['b3']=np.array([0.1,0.2])
return network
def forward(network,x):
W1,W2,W3 = network['W1'],network['W2'],network['W3']
b1,b2,b3 = network['b1'],network['b2'],network['b3']
a1= np.dot(x,W1)+b1
z1=sigmoid(a1)
a2= np.dot(z1,W2)+b2
z2=sigmoid(a2)
a3= np.dot(z2,W3)+b3
y=a3
return y
network = init_network()
x= np.array([1.0,0.5])
y=forward(network,x)
print(y)
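
# Shape walk-through (a sketch): x is (2,), W1 is (2,3) so a1/z1 are (3,),
# W2 is (3,2) so a2/z2 are (2,), and W3 is (2,2) so y is (2,). sigmoidFunc
# is a local helper; its usual definition (an assumption here) is
# 1 / (1 + np.exp(-x)). With the weights above this prints roughly
# [0.31682708 0.69627909].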
|
[
"gjsgud2@gmail.com"
] |
gjsgud2@gmail.com
|
ac09a157c61df12e34aabe40f75dba7a1c21edc9
|
592e97ebecdeba71c6b4782c439077b9a19a790e
|
/lib/python3.7/site-packages/numdifftools/nd_scipy.py
|
b53f6dd857b79d7cc2f3ebcacc27996751cfef12
|
[] |
no_license
|
ChiLi90/LifetimeFit
|
d9d194d1727727c515c3f64c9d48dd19ff0131e0
|
c6d7392bbe910387acf4552db67fdcb09cf01211
|
refs/heads/master
| 2023-03-24T07:44:18.125777
| 2021-03-21T22:31:02
| 2021-03-21T22:31:02
| 350,130,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,477
|
py
|
from __future__ import division, print_function
from scipy.optimize._numdiff import approx_derivative
from scipy.optimize import approx_fprime
import numpy as np
class _Common(object):
def __init__(self, fun, step=None, method='central', order=2,
bounds=(-np.inf, np.inf), sparsity=None):
self.fun = fun
self.step = step
self.method = method
self.bounds = bounds
self.sparsity = sparsity
class Jacobian(_Common):
"""
Calculate Jacobian with finite difference approximation
Parameters
----------
fun : function
function of one array fun(x, `*args`, `**kwds`)
step : float, optional
Stepsize, if None, optimal stepsize is used, i.e.,
x * _EPS for method==`complex`
x * _EPS**(1/2) for method==`forward`
x * _EPS**(1/3) for method==`central`.
method : {'central', 'complex', 'forward'}
defines the method used in the approximation.
Examples
--------
>>> import numdifftools.nd_scipy as nd
#(nonlinear least squares)
>>> xdata = np.arange(0,1,0.1)
>>> ydata = 1+2*np.exp(0.75*xdata)
>>> fun = lambda c: (c[0]+c[1]*np.exp(c[2]*xdata) - ydata)**2
>>> np.allclose(fun([1, 2, 0.75]).shape, (10,))
True
>>> dfun = nd.Jacobian(fun)
>>> np.allclose(dfun([1, 2, 0.75]), np.zeros((10,3)))
True
>>> fun2 = lambda x : x[0]*x[1]*x[2]**2
>>> dfun2 = nd.Jacobian(fun2)
>>> np.allclose(dfun2([1.,2.,3.]), [[18., 9., 12.]])
True
>>> fun3 = lambda x : np.vstack((x[0]*x[1]*x[2]**2, x[0]*x[1]*x[2]))
TODO: The following does not work:
der3 = nd.Jacobian(fun3)([1., 2., 3.])
np.allclose(der3,
... [[18., 9., 12.], [6., 3., 2.]])
True
np.allclose(nd.Jacobian(fun3)([4., 5., 6.]),
... [[180., 144., 240.], [30., 24., 20.]])
True
np.allclose(nd.Jacobian(fun3)(np.array([[1.,2.,3.], [4., 5., 6.]]).T),
... [[[ 18., 180.],
... [ 9., 144.],
... [ 12., 240.]],
... [[ 6., 30.],
... [ 3., 24.],
... [ 2., 20.]]])
True
"""
def __call__(self, x, *args, **kwds):
x = np.atleast_1d(x)
method = dict(complex='cs', central='3-point', forward='2-point',
backward='2-point')[self.method]
options = dict(method=method, rel_step=self.step, args=args,
kwargs=kwds, bounds=self.bounds, sparsity=self.sparsity)
grad = approx_derivative(self.fun, x, **options)
return grad
class Gradient(Jacobian):
"""
Calculate Gradient with finite difference approximation
Parameters
----------
fun : function
function of one array fun(x, `*args`, `**kwds`)
step : float, optional
Stepsize, if None, optimal stepsize is used, i.e.,
x * _EPS for method==`complex`
x * _EPS**(1/2) for method==`forward`
x * _EPS**(1/3) for method==`central`.
method : {'central', 'complex', 'forward'}
defines the method used in the approximation.
Examples
--------
>>> import numpy as np
>>> import numdifftools.nd_scipy as nd
>>> fun = lambda x: np.sum(x**2)
>>> dfun = nd.Gradient(fun)
>>> np.allclose(dfun([1,2,3]), [ 2., 4., 6.])
True
# At [x,y] = [1,1], compute the numerical gradient
# of the function sin(x-y) + y*exp(x)
>>> sin = np.sin; exp = np.exp
>>> z = lambda xy: sin(xy[0]-xy[1]) + xy[1]*exp(xy[0])
>>> dz = nd.Gradient(z)
>>> grad2 = dz([1, 1])
>>> np.allclose(grad2, [ 3.71828183, 1.71828183])
True
# At the global minimizer (1,1) of the Rosenbrock function,
# compute the gradient. It should be essentially zero.
>>> rosen = lambda x : (1-x[0])**2 + 105.*(x[1]-x[0]**2)**2
>>> rd = nd.Gradient(rosen)
>>> grad3 = rd([1,1])
>>> np.allclose(grad3,[0, 0], atol=1e-7)
True
See also
--------
Hessian, Jacobian
"""
def __call__(self, x, *args, **kwds):
return super(Gradient, self).__call__(np.atleast_1d(x).ravel(),
*args, **kwds).squeeze()
if __name__ == '__main__':
from numdifftools.testing import test_docstrings
test_docstrings(__file__)
|
[
"chili@Chis-iMac.local"
] |
chili@Chis-iMac.local
|
4085071c89dee09290ed07614c62dadf4a3a40e3
|
1390903cd5308a8c88efaf369160d78d6230ef58
|
/xunlei-lixian/lixian.py
|
c3490432b5b7d4a45bc629b29e4884f75bf787c8
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
twotreeszf/AriaThunder
|
119fdddce951cf3d21aa6b7badea40d7c8fcb441
|
c6e052b0e10e9d4c18b4cf996b1d79fd9fb1eb22
|
refs/heads/master
| 2021-01-10T21:20:43.229629
| 2013-12-06T05:17:05
| 2013-12-06T05:17:05
| 14,971,878
| 19
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42,934
|
py
|
__all__ = ['XunleiClient']
import urllib
import urllib2
import cookielib
import re
import time
import os.path
import json
from ast import literal_eval
def retry(f_or_arg, *args):
#retry_sleeps = [1, 1, 1]
retry_sleeps = [1, 2, 3, 5, 10, 20, 30, 60] + [60] * 60
def decorator(f):
def withretry(*args, **kwargs):
for second in retry_sleeps:
try:
return f(*args, **kwargs)
except:
import traceback
logger.debug("Exception happened. Retrying...")
logger.debug(traceback.format_exc())
time.sleep(second)
raise
return withretry
if callable(f_or_arg) and not args:
return decorator(f_or_arg)
else:
a = f_or_arg
assert type(a) == int
assert not args
retry_sleeps = [1] * a
return decorator
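# retry works both bare and with an explicit attempt count (illustrative
# usage only; fetch() is a placeholder name):
#
#   @retry
#   def fetch(): ...      # long backoff schedule defined above
#
#   @retry(10)
#   def fetch10(): ...    # ten one-second retries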
class Logger:
def stdout(self, message):
print message
def info(self, message):
print message
def debug(self, message):
pass
def trace(self, message):
pass
logger = Logger()
class WithAttrSnapshot:
def __init__(self, object, **attrs):
self.object = object
self.attrs = attrs
def __enter__(self):
self.old_attrs = []
for k in self.attrs:
if hasattr(self.object, k):
self.old_attrs.append((k, True, getattr(self.object, k)))
else:
self.old_attrs.append((k, False, None))
for k in self.attrs:
setattr(self.object, k, self.attrs[k])
def __exit__(self, exc_type, exc_val, exc_tb):
for k, has_old_attr, v in self.old_attrs:
if has_old_attr:
setattr(self.object, k, v)
else:
delattr(self.object, k)
class WithAttr:
def __init__(self, object):
self.object = object
def __call__(self, **kwargs):
return WithAttrSnapshot(self.object, **kwargs)
def __getattr__(self, k):
return lambda (v): WithAttrSnapshot(self.object, **{k:v})
# TODO: write unit test
class OnDemandTaskList:
def __init__(self, fetch_page, page_size, limit):
self.fetch_page = fetch_page
if limit and page_size > limit:
page_size = limit
self.page_size = page_size
self.limit = limit
self.pages = {}
self.max_task_number = None
self.real_total_task_number = None
self.total_pages = None
def is_out_of_range(self, n):
if self.limit:
if n >= self.limit:
return True
if self.max_task_number:
if n >= self.max_task_number:
return True
if self.real_total_task_number:
if n >= self.real_total_task_number:
return True
def check_out_of_range(self, n):
if self.is_out_of_range(n):
raise IndexError('task index out of range')
def is_out_of_page(self, page):
raise NotImplementedError()
def get_nth_task(self, n):
self.check_out_of_range(n)
page = n / self.page_size
n_in_page = n - page * self.page_size
return self.hit_page(page)[n_in_page]
def touch(self):
self.hit_page(0)
def hit_page(self, page):
if page in self.pages:
return self.pages[page]
info = self.fetch_page(page, self.page_size)
tasks = info['tasks']
if self.max_task_number is None:
self.max_task_number = info['total_task_number']
if self.limit and self.max_task_number > self.limit:
self.max_task_number = self.limit
self.total_pages = self.max_task_number / self.page_size
if self.max_task_number % self.page_size != 0:
self.total_pages += 1
if self.max_task_number == 0:
self.real_total_task_number = 0
if page >= self.total_pages:
tasks = []
elif page == self.total_pages - 1:
if self.page_size * page + len(tasks) > self.max_task_number:
tasks = tasks[0:self.max_task_number - self.page_size * page]
if len(tasks) > 0:
self.real_total_task_number = self.page_size * page + len(tasks)
else:
self.max_task_number -= self.page_size
self.total_pages -= 1
if len(self.pages.get(page-1, [])) == self.page_size:
self.real_total_task_number = self.max_task_number
else:
if len(tasks) == 0:
self.max_task_number = self.page_size * page
self.total_pages = page
if len(self.pages.get(page-1, [])) == self.page_size:
self.real_total_task_number = self.max_task_number
elif len(tasks) < self.page_size:
self.real_total_task_number = self.page_size * page + len(tasks)
self.max_task_number = self.real_total_task_number
self.total_pages = page
else:
pass
for i, t in enumerate(tasks):
t['#'] = self.page_size * page + i
self.pages[page] = tasks
return tasks
def __getitem__(self, n):
return self.get_nth_task(n)
def __iter__(self):
class Iterator:
def __init__(self, container):
self.container = container
self.current = 0
def next(self):
self.container.touch()
assert type(self.container.max_task_number) == int
if self.container.real_total_task_number is None:
if self.current < self.container.max_task_number:
try:
task = self.container[self.current]
except IndexError:
raise StopIteration()
else:
raise StopIteration()
else:
if self.current < self.container.real_total_task_number:
task = self.container[self.current]
else:
raise StopIteration()
self.current += 1
return task
return Iterator(self)
def __len__(self):
if self.real_total_task_number:
return self.real_total_task_number
self.touch()
self.hit_page(self.total_pages-1)
if self.real_total_task_number:
return self.real_total_task_number
count = 0
for t in self:
count += 1
return count
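# Usage sketch for OnDemandTaskList (hedged: the fetch_page lambda below is
# a stand-in, not the real client callback):
#
#   tasks = OnDemandTaskList(
#       lambda page, size: {'tasks': [], 'total_task_number': 0},
#       page_size=100, limit=None)
#   for t in tasks:
#       print t['#'], t.get('name')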
class XunleiClient(object):
default_page_size = 100
default_bt_page_size = 9999
def __init__(self, username=None, password=None, cookie_path=None, login=True, verification_code_reader=None):
self.attr = WithAttr(self)
self.username = username
self.password = password
self.cookie_path = cookie_path
if cookie_path:
self.cookiejar = cookielib.LWPCookieJar()
if os.path.exists(cookie_path):
self.load_cookies()
else:
self.cookiejar = cookielib.CookieJar()
self.page_size = self.default_page_size
self.bt_page_size = self.default_bt_page_size
self.limit = None
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
self.verification_code_reader = verification_code_reader
self.login_time = None
if login:
self.id = self.get_userid_or_none()
if not self.id:
self.login()
self.id = self.get_userid()
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, size):
self._page_size = size
self.set_page_size(size)
@retry
def urlopen(self, url, **args):
logger.debug(url)
# import traceback
# for line in traceback.format_stack():
# print line.strip()
if 'data' in args and type(args['data']) == dict:
args['data'] = urlencode(args['data'])
return self.opener.open(urllib2.Request(url, **args), timeout=60)
def urlread1(self, url, **args):
args.setdefault('headers', {})
headers = args['headers']
headers.setdefault('Accept-Encoding', 'gzip, deflate')
# headers.setdefault('Referer', 'http://lixian.vip.xunlei.com/task.html')
# headers.setdefault('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:11.0) Gecko/20100101 Firefox/11.0')
# headers.setdefault('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
# headers.setdefault('Accept-Language', 'zh-cn,zh;q=0.7,en-us;q=0.3')
response = self.urlopen(url, **args)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
return data
def urlread(self, url, **args):
data = self.urlread1(url, **args)
if self.is_session_timeout(data):
logger.debug('session timed out')
self.login()
data = self.urlread1(url, **args)
return data
def load_cookies(self):
try:
self.cookiejar.load(self.cookie_path, ignore_discard=True, ignore_expires=True)
except IOError:
pass
def save_cookies(self):
if self.cookie_path:
try:
self.cookiejar.save(self.cookie_path, ignore_discard=True)
except IOError:
pass
def get_cookie(self, domain, k):
if self.has_cookie(domain, k):
return self.cookiejar._cookies[domain]['/'][k].value
def has_cookie(self, domain, k):
return domain in self.cookiejar._cookies and k in self.cookiejar._cookies[domain]['/']
def get_userid(self):
if self.has_cookie('.xunlei.com', 'userid'):
return self.get_cookie('.xunlei.com', 'userid')
else:
raise Exception('Probably login failed')
def get_userid_or_none(self):
return self.get_cookie('.xunlei.com', 'userid')
def get_username(self):
return self.get_cookie('.xunlei.com', 'usernewno')
def get_gdriveid(self):
return self.get_cookie('.vip.xunlei.com', 'gdriveid')
def has_gdriveid(self):
return self.has_cookie('.vip.xunlei.com', 'gdriveid')
def get_referer(self):
return 'http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s' % self.id
def set_cookie(self, domain, k, v):
c = cookielib.Cookie(version=0, name=k, value=v, port=None, port_specified=False, domain=domain, domain_specified=True, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={}, rfc2109=False)
self.cookiejar.set_cookie(c)
def del_cookie(self, domain, k):
if self.has_cookie(domain, k):
self.cookiejar.clear(domain=domain, path="/", name=k)
def set_gdriveid(self, id):
self.set_cookie('.vip.xunlei.com', 'gdriveid', id)
def set_page_size(self, n):
self.set_cookie('.vip.xunlei.com', 'pagenum', str(n))
def get_cookie_header(self):
def domain_header(domain):
root = self.cookiejar._cookies[domain]['/']
return '; '.join(k+'='+root[k].value for k in root)
return domain_header('.xunlei.com') + '; ' + domain_header('.vip.xunlei.com')
def is_login_ok(self, html):
return len(html) > 512
def has_logged_in(self):
id = self.get_userid_or_none()
if not id:
return False
#print self.urlopen('http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=0' % id).read().decode('utf-8')
with self.attr(page_size=1):
url = 'http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=0' % id
#url = 'http://dynamic.lixian.vip.xunlei.com/login?cachetime=%d' % current_timestamp()
r = self.is_login_ok(self.urlread(url))
return r
def is_session_timeout(self, html):
is_timeout = html == '''<script>document.cookie ="sessionid=; path=/; domain=xunlei.com"; document.cookie ="lx_sessionid=; path=/; domain=vip.xunlei.com";top.location='http://cloud.vip.xunlei.com/task.html?error=1'</script>''' or html == '''<script>document.cookie ="sessionid=; path=/; domain=xunlei.com"; document.cookie ="lsessionid=; path=/; domain=xunlei.com"; document.cookie ="lx_sessionid=; path=/; domain=vip.xunlei.com";top.location='http://cloud.vip.xunlei.com/task.html?error=2'</script>''' or html == '''<script>document.cookie ="sessionid=; path=/; domain=xunlei.com"; document.cookie ="lsessionid=; path=/; domain=xunlei.com"; document.cookie ="lx_sessionid=; path=/; domain=vip.xunlei.com";document.cookie ="lx_login=; path=/; domain=vip.xunlei.com";top.location='http://cloud.vip.xunlei.com/task.html?error=1'</script>'''
if is_timeout:
logger.trace(html)
return True
maybe_timeout = html == '''rebuild({"rtcode":-1,"list":[]})'''
if maybe_timeout:
if self.login_time and time.time() - self.login_time < 60 * 10: # 10 minutes
return False
else:
logger.trace(html)
return True
return is_timeout
def login(self, username=None, password=None):
username = self.username
password = self.password
if not username and self.has_cookie('.xunlei.com', 'usernewno'):
username = self.get_username()
if not username:
# TODO: don't depend on lixian_config
import lixian_config
username = lixian_config.get_config('username')
# if not username:
# raise NotImplementedError('user is not logged in')
if not password:
raise NotImplementedError('user is not logged in')
logger.debug('login')
cachetime = current_timestamp()
check_url = 'http://login.xunlei.com/check?u=%s&cachetime=%d' % (username, cachetime)
login_page = self.urlopen(check_url).read()
verification_code = self.get_cookie('.xunlei.com', 'check_result')[2:].upper()
if not verification_code:
if not self.verification_code_reader:
raise NotImplementedError('Verification code required')
else:
verification_code_url = 'http://verify2.xunlei.com/image?cachetime=%s' % current_timestamp()
image = self.urlopen(verification_code_url).read()
verification_code = self.verification_code_reader(image)
if verification_code:
verification_code = verification_code.upper()
assert verification_code
password = encypt_password(password)
password = md5(password+verification_code)
login_page = self.urlopen('http://login.xunlei.com/sec2login/', data={'u': username, 'p': password, 'verifycode': verification_code})
self.id = self.get_userid()
with self.attr(page_size=1):
login_page = self.urlopen('http://dynamic.lixian.vip.xunlei.com/login?cachetime=%d&from=0'%current_timestamp()).read()
if not self.is_login_ok(login_page):
logger.trace(login_page)
raise RuntimeError('login failed')
self.save_cookies()
self.login_time = time.time()
def logout(self):
logger.debug('logout')
#session_id = self.get_cookie('.xunlei.com', 'sessionid')
#timestamp = current_timestamp()
#url = 'http://login.xunlei.com/unregister?sessionid=%s&cachetime=%s&noCacheIE=%s' % (session_id, timestamp, timestamp)
#self.urlopen(url).read()
#self.urlopen('http://dynamic.vip.xunlei.com/login/indexlogin_contr/logout/').read()
ckeys = ["vip_isvip","lx_sessionid","vip_level","lx_login","dl_enable","in_xl","ucid","lixian_section"]
ckeys1 = ["sessionid","usrname","nickname","usernewno","userid"]
self.del_cookie('.vip.xunlei.com', 'gdriveid')
for k in ckeys:
self.set_cookie('.vip.xunlei.com', k, '')
for k in ckeys1:
self.set_cookie('.xunlei.com', k, '')
self.save_cookies()
self.login_time = None
def to_page_url(self, type_id, page_index, page_size):
# type_id: 1 for downloading, 2 for completed, 4 for downloading+completed+expired, 11 for deleted, 13 for expired
if type_id == 0:
type_id = 4
page = page_index + 1
p = 1 # XXX: what is it?
# jsonp = 'jsonp%s' % current_timestamp()
# url = 'http://dynamic.cloud.vip.xunlei.com/interface/showtask_unfresh?type_id=%s&page=%s&tasknum=%s&p=%s&interfrom=task&callback=%s' % (type_id, page, page_size, p, jsonp)
url = 'http://dynamic.cloud.vip.xunlei.com/interface/showtask_unfresh?type_id=%s&page=%s&tasknum=%s&p=%s&interfrom=task' % (type_id, page, page_size, p)
return url
@retry(10)
def read_task_page_info_by_url(self, url):
page = self.urlread(url).decode('utf-8', 'ignore')
data = parse_json_response(page)
if not self.has_gdriveid():
gdriveid = data['info']['user']['cookie']
self.set_gdriveid(gdriveid)
self.save_cookies()
# tasks = parse_json_tasks(data)
tasks = [t for t in parse_json_tasks(data) if not t['expired']]
for t in tasks:
t['client'] = self
# current_page = int(re.search(r'page=(\d+)', url).group(1))
total_tasks = int(data['info']['total_num'])
# assert total_pages >= data['global_new']['page'].count('<li><a')
return {'tasks': tasks, 'total_task_number': total_tasks}
def read_task_page_info_by_page_index(self, type_id, page_index, page_size):
return self.read_task_page_info_by_url(self.to_page_url(type_id, page_index, page_size))
def read_tasks(self, type_id=0):
'''read one page'''
page_size = self.page_size
limit = self.limit
if limit and limit < page_size:
page_size = limit
first_page = self.read_task_page_info_by_page_index(type_id, 0, page_size)
tasks = first_page['tasks']
for i, task in enumerate(tasks):
task['#'] = i
return tasks
def read_all_tasks_immediately(self, type_id):
'''read all pages'''
all_tasks = []
page_size = self.page_size
limit = self.limit
if limit and limit < page_size:
page_size = limit
first_page = self.read_task_page_info_by_page_index(type_id, 0, page_size)
all_tasks.extend(first_page['tasks'])
total_tasks = first_page['total_task_number']
if limit and limit < total_tasks:
total_tasks = limit
total_pages = total_tasks / page_size
if total_tasks % page_size != 0:
total_pages += 1
if total_pages == 0:
total_pages = 1
for page_index in range(1, total_pages):
            current_page = self.read_task_page_info_by_page_index(type_id, page_index, page_size)
all_tasks.extend(current_page['tasks'])
if limit:
all_tasks = all_tasks[0:limit]
for i, task in enumerate(all_tasks):
task['#'] = i
return all_tasks
def read_all_tasks_on_demand(self, type_id):
'''read all pages, lazily'''
fetch_page = lambda page_index, page_size: self.read_task_page_info_by_page_index(type_id, page_index, page_size)
return OnDemandTaskList(fetch_page, self.page_size, self.limit)
def read_all_tasks(self, type_id=0):
'''read all pages'''
return self.read_all_tasks_on_demand(type_id)
def read_completed(self):
'''read first page of completed tasks'''
return self.read_tasks(2)
def read_all_completed(self):
'''read all pages of completed tasks'''
return self.read_all_tasks(2)
@retry(10)
def read_categories(self):
# url = 'http://dynamic.cloud.vip.xunlei.com/interface/menu_get?callback=jsonp%s&interfrom=task' % current_timestamp()
url = 'http://dynamic.cloud.vip.xunlei.com/interface/menu_get'
html = self.urlread(url).decode('utf-8', 'ignore')
result = parse_json_response(html)
return dict((x['name'], int(x['id'])) for x in result['info'])
def get_category_id(self, category):
return self.read_categories()[category]
def read_all_tasks_by_category(self, category):
category_id = self.get_category_id(category)
jsonp = 'jsonp%s' % current_timestamp()
url = 'http://dynamic.cloud.vip.xunlei.com/interface/show_class?callback=%s&type_id=%d' % (jsonp, category_id)
html = self.urlread(url)
response = json.loads(re.match(r'^%s\((.+)\)$' % jsonp, html).group(1))
assert response['rtcode'] == '0', response['rtcode']
info = response['info']
tasks = map(convert_task, info['tasks'])
for i, task in enumerate(tasks):
task['client'] = self
task['#'] = i
return tasks
def read_history_page_url(self, url):
self.set_cookie('.vip.xunlei.com', 'lx_nf_all', urllib.quote('page_check_all=history&fltask_all_guoqi=1&class_check=0&page_check=task&fl_page_id=0&class_check_new=0&set_tab_status=11'))
page = self.urlread(url).decode('utf-8', 'ignore')
if not self.has_gdriveid():
gdriveid = re.search(r'id="cok" value="([^"]+)"', page).group(1)
self.set_gdriveid(gdriveid)
self.save_cookies()
tasks = parse_history(page)
for t in tasks:
t['client'] = self
pginfo = re.search(r'<div class="pginfo">.*?</div>', page)
match_next_page = re.search(r'<li class="next"><a href="([^"]+)">[^<>]*</a></li>', page)
return tasks, match_next_page and 'http://dynamic.cloud.vip.xunlei.com'+match_next_page.group(1)
def read_history_page(self, type=0, pg=None):
if pg is None:
url = 'http://dynamic.cloud.vip.xunlei.com/user_history?userid=%s&type=%d' % (self.id, type)
else:
url = 'http://dynamic.cloud.vip.xunlei.com/user_history?userid=%s&p=%d&type=%d' % (self.id, pg, type)
return self.read_history_page_url(url)
def read_history(self, type=0):
'''read one page'''
tasks = self.read_history_page(type)[0]
for i, task in enumerate(tasks):
task['#'] = i
return tasks
def read_all_history(self, type=0):
'''read all pages of deleted/expired tasks'''
all_tasks = []
tasks, next_link = self.read_history_page(type)
all_tasks.extend(tasks)
while next_link:
if self.limit and len(all_tasks) > self.limit:
break
tasks, next_link = self.read_history_page_url(next_link)
all_tasks.extend(tasks)
if self.limit:
all_tasks = all_tasks[0:self.limit]
for i, task in enumerate(all_tasks):
task['#'] = i
return all_tasks
def read_deleted(self):
return self.read_history()
def read_all_deleted(self):
return self.read_all_history()
def read_expired(self):
return self.read_history(1)
def read_all_expired(self):
return self.read_all_history(1)
def list_bt(self, task):
assert task['type'] == 'bt'
url = 'http://dynamic.cloud.vip.xunlei.com/interface/fill_bt_list?callback=fill_bt_list&tid=%s&infoid=%s&g_net=1&p=1&uid=%s&noCacheIE=%s' % (task['id'], task['bt_hash'], self.id, current_timestamp())
with self.attr(page_size=self.bt_page_size):
html = remove_bom(self.urlread(url)).decode('utf-8')
sub_tasks = parse_bt_list(html)
for t in sub_tasks:
t['date'] = task['date']
return sub_tasks
def get_torrent_file_by_info_hash(self, info_hash):
url = 'http://dynamic.cloud.vip.xunlei.com/interface/get_torrent?userid=%s&infoid=%s' % (self.id, info_hash.upper())
response = self.urlopen(url)
torrent = response.read()
if torrent == "<meta http-equiv='Content-Type' content='text/html; charset=utf-8' /><script>alert('\xe5\xaf\xb9\xe4\xb8\x8d\xe8\xb5\xb7\xef\xbc\x8c\xe6\xb2\xa1\xe6\x9c\x89\xe6\x89\xbe\xe5\x88\xb0\xe5\xaf\xb9\xe5\xba\x94\xe7\x9a\x84\xe7\xa7\x8d\xe5\xad\x90\xe6\x96\x87\xe4\xbb\xb6!');</script>":
raise Exception('Torrent file not found on xunlei cloud: '+info_hash)
assert response.headers['content-type'] == 'application/octet-stream'
return torrent
def get_torrent_file(self, task):
return self.get_torrent_file_by_info_hash(task['bt_hash'])
def add_task(self, url):
protocol = parse_url_protocol(url)
        assert protocol in ('ed2k', 'http', 'https', 'ftp', 'thunder', 'Flashget', 'qqdl', 'bt', 'magnet'), 'protocol "%s" is not supported' % protocol
from lixian_url import url_unmask
url = url_unmask(url)
protocol = parse_url_protocol(url)
        assert protocol in ('ed2k', 'http', 'https', 'ftp', 'bt', 'magnet'), 'protocol "%s" is not supported' % protocol
if protocol == 'bt':
return self.add_torrent_task_by_info_hash(url[5:])
elif protocol == 'magnet':
return self.add_magnet_task(url)
random = current_random()
check_url = 'http://dynamic.cloud.vip.xunlei.com/interface/task_check?callback=queryCid&url=%s&random=%s&tcache=%s' % (urllib.quote(url), random, current_timestamp())
js = self.urlread(check_url).decode('utf-8')
qcid = re.match(r'^queryCid(\(.+\))\s*$', js).group(1)
qcid = literal_eval(qcid)
if len(qcid) == 8:
cid, gcid, size_required, filename, goldbean_need, silverbean_need, is_full, random = qcid
elif len(qcid) == 9:
cid, gcid, size_required, filename, goldbean_need, silverbean_need, is_full, random, ext = qcid
elif len(qcid) == 10:
cid, gcid, size_required, some_key, filename, goldbean_need, silverbean_need, is_full, random, ext = qcid
else:
raise NotImplementedError(qcid)
assert goldbean_need == 0
assert silverbean_need == 0
if url.startswith('http://') or url.startswith('ftp://'):
task_type = 0
elif url.startswith('ed2k://'):
task_type = 2
else:
raise NotImplementedError()
task_url = 'http://dynamic.cloud.vip.xunlei.com/interface/task_commit?'+urlencode(
{'callback': 'ret_task',
'uid': self.id,
'cid': cid,
'gcid': gcid,
'size': size_required,
'goldbean': goldbean_need,
'silverbean': silverbean_need,
't': filename,
'url': url,
'type': task_type,
'o_page': 'task',
'o_taskid': '0',
})
response = self.urlread(task_url)
assert response == 'ret_task(Array)', response
def add_batch_tasks(self, urls, old_task_ids=None):
assert urls
urls = list(urls)
for url in urls:
if parse_url_protocol(url) not in ('http', 'https', 'ftp', 'ed2k', 'bt', 'thunder', 'magnet'):
raise NotImplementedError('Unsupported: '+url)
urls = filter(lambda u: parse_url_protocol(u) in ('http', 'https', 'ftp', 'ed2k', 'thunder'), urls)
if not urls:
return
#self.urlopen('http://dynamic.cloud.vip.xunlei.com/interface/batch_task_check', data={'url':'\r\n'.join(urls), 'random':current_random()})
jsonp = 'jsonp%s' % current_timestamp()
url = 'http://dynamic.cloud.vip.xunlei.com/interface/batch_task_commit?callback=%s' % jsonp
if old_task_ids:
batch_old_taskid = ','.join(old_task_ids)
else:
batch_old_taskid = '0' + ',' * (len(urls) - 1) # XXX: what is it?
data = {}
for i in range(len(urls)):
data['cid[%d]' % i] = ''
data['url[%d]' % i] = urllib.quote(to_utf_8(urls[i])) # fix per request #98
data['batch_old_taskid'] = batch_old_taskid
response = self.urlread(url, data=data)
assert_response(response, jsonp, len(urls))
def add_torrent_task_by_content(self, content, path='attachment.torrent'):
assert re.match(r'd\d+:', content), 'Probably not a valid content file [%s...]' % repr(content[:17])
upload_url = 'http://dynamic.cloud.vip.xunlei.com/interface/torrent_upload'
jsonp = 'jsonp%s' % current_timestamp()
commit_url = 'http://dynamic.cloud.vip.xunlei.com/interface/bt_task_commit?callback=%s' % jsonp
content_type, body = encode_multipart_formdata([], [('filepath', path, content)])
response = self.urlread(upload_url, data=body, headers={'Content-Type': content_type}).decode('utf-8')
upload_success = re.search(r'<script>document\.domain="xunlei\.com";var btResult =(\{.*\});</script>', response, flags=re.S)
if upload_success:
bt = json.loads(upload_success.group(1))
bt_hash = bt['infoid']
bt_name = bt['ftitle']
bt_size = bt['btsize']
data = {'uid':self.id, 'btname':bt_name, 'cid':bt_hash, 'tsize':bt_size,
'findex':''.join(f['id']+'_' for f in bt['filelist']),
'size':''.join(f['subsize']+'_' for f in bt['filelist']),
'from':'0'}
response = self.urlread(commit_url, data=data)
#assert_response(response, jsonp)
# skip response check
# assert re.match(r'%s\({"id":"\d+","avail_space":"\d+","progress":1}\)' % jsonp, response), repr(response)
return bt_hash
already_exists = re.search(r"parent\.edit_bt_list\((\{.*\}),'','0'\)", response, flags=re.S)
if already_exists:
bt = json.loads(already_exists.group(1))
bt_hash = bt['infoid']
return bt_hash
raise NotImplementedError(response)
def add_torrent_task_by_info_hash(self, sha1):
return self.add_torrent_task_by_content(self.get_torrent_file_by_info_hash(sha1), sha1.upper()+'.torrent')
def add_torrent_task(self, path):
with open(path, 'rb') as x:
return self.add_torrent_task_by_content(x.read(), os.path.basename(path))
def add_torrent_task_by_info_hash2(self, sha1, old_task_id=None):
'''similar to add_torrent_task_by_info_hash, but faster. I may delete current add_torrent_task_by_info_hash completely in future'''
link = 'http://dynamic.cloud.vip.xunlei.com/interface/get_torrent?userid=%s&infoid=%s' % (self.id, sha1.upper())
return self.add_torrent_task_by_link(link, old_task_id=old_task_id)
def add_magnet_task(self, link):
return self.add_torrent_task_by_link(link)
def add_torrent_task_by_link(self, link, old_task_id=None):
url = 'http://dynamic.cloud.vip.xunlei.com/interface/url_query?callback=queryUrl&u=%s&random=%s' % (urllib.quote(link), current_timestamp())
response = self.urlread(url)
success = re.search(r'queryUrl(\(1,.*\))\s*$', response, flags=re.S)
if not success:
already_exists = re.search(r"queryUrl\(-1,'([^']{40})", response, flags=re.S)
if already_exists:
return already_exists.group(1)
raise NotImplementedError(repr(response))
args = success.group(1).decode('utf-8')
args = literal_eval(args.replace('new Array', ''))
_, cid, tsize, btname, _, names, sizes_, sizes, _, types, findexes, timestamp, _ = args
def toList(x):
if type(x) in (list, tuple):
return x
else:
return [x]
data = {'uid':self.id, 'btname':btname, 'cid':cid, 'tsize':tsize,
'findex':''.join(x+'_' for x in toList(findexes)),
'size':''.join(x+'_' for x in toList(sizes)),
'from':'0'}
if old_task_id:
data['o_taskid'] = old_task_id
data['o_page'] = 'history'
jsonp = 'jsonp%s' % current_timestamp()
commit_url = 'http://dynamic.cloud.vip.xunlei.com/interface/bt_task_commit?callback=%s' % jsonp
response = self.urlread(commit_url, data=data)
#assert_response(response, jsonp)
# skip response check
# assert re.match(r'%s\({"id":"\d+","avail_space":"\d+","progress":1}\)' % jsonp, response), repr(response)
return cid
def readd_all_expired_tasks(self):
url = 'http://dynamic.cloud.vip.xunlei.com/interface/delay_once?callback=anything'
response = self.urlread(url)
def delete_tasks_by_id(self, ids):
jsonp = 'jsonp%s' % current_timestamp()
data = {'taskids': ','.join(ids)+',', 'databases': '0,'}
url = 'http://dynamic.cloud.vip.xunlei.com/interface/task_delete?callback=%s&type=%s&noCacheIE=%s' % (jsonp, 2, current_timestamp()) # XXX: what is 'type'?
response = self.urlread(url, data=data)
response = remove_bom(response)
assert_response(response, jsonp, '{"result":1,"type":2}')
def delete_task_by_id(self, id):
self.delete_tasks_by_id([id])
def delete_task(self, task):
self.delete_task_by_id(task['id'])
def delete_tasks(self, tasks):
self.delete_tasks_by_id([t['id'] for t in tasks])
def pause_tasks_by_id(self, ids):
url = 'http://dynamic.cloud.vip.xunlei.com/interface/task_pause?tid=%s&uid=%s&noCacheIE=%s' % (','.join(ids)+',', self.id, current_timestamp())
assert self.urlread(url) == 'pause_task_resp()'
def pause_task_by_id(self, id):
self.pause_tasks_by_id([id])
def pause_task(self, task):
self.pause_task_by_id(task['id'])
def pause_tasks(self, tasks):
self.pause_tasks_by_id(t['id'] for t in tasks)
def restart_tasks(self, tasks):
jsonp = 'jsonp%s' % current_timestamp()
url = 'http://dynamic.cloud.vip.xunlei.com/interface/redownload?callback=%s' % jsonp
form = []
for task in tasks:
            assert task['type'] in ('ed2k', 'http', 'https', 'ftp', 'bt'), "'%s' is not tested" % task['type']
data = {'id[]': task['id'],
'cid[]': '', # XXX: should I set this?
'url[]': task['original_url'],
'download_status[]': task['status']}
if task['type'] == 'ed2k':
data['taskname[]'] = task['name'].encode('utf-8') # XXX: shouldn't I set this for other task types?
form.append(urlencode(data))
form.append(urlencode({'type':1}))
data = '&'.join(form)
response = self.urlread(url, data=data)
assert_response(response, jsonp)
def rename_task(self, task, new_name):
assert type(new_name) == unicode
url = 'http://dynamic.cloud.vip.xunlei.com/interface/rename'
taskid = task['id']
bt = '1' if task['type'] == 'bt' else '0'
url = url+'?'+urlencode({'taskid':taskid, 'bt':bt, 'filename':new_name.encode('utf-8')})
response = self.urlread(url)
assert '"result":0' in response, response
def restart_task(self, task):
self.restart_tasks([task])
def get_task_by_id(self, id):
tasks = self.read_all_tasks(0)
for x in tasks:
if x['id'] == id:
return x
raise Exception('No task found for id '+id)
def current_timestamp():
return int(time.time()*1000)
def current_random():
from random import randint
return '%s%06d.%s' % (current_timestamp(), randint(0, 999999), randint(100000000, 9999999999))
def convert_task(data):
expired = {'0':False, '4': True}[data['flag']]
task = {'id': data['id'],
'type': re.match(r'[^:]+', data['url']).group().lower(),
'name': unescape_html(data['taskname']),
'status': int(data['download_status']),
'status_text': {'0':'waiting', '1':'downloading', '2':'completed', '3':'failed', '5':'pending'}[data['download_status']],
'expired': expired,
'size': int(data['ysfilesize']),
'original_url': unescape_html(data['url']),
'xunlei_url': data['lixian_url'] or None,
'bt_hash': data['cid'],
'dcid': data['cid'],
'gcid': data['gcid'],
'date': data['dt_committed'][:10].replace('-', '.'),
'progress': '%s%%' % data['progress'],
'speed': '%s' % data['speed'],
}
return task
def parse_json_response(html):
m = re.match(ur'^\ufeff?rebuild\((\{.*\})\)$', html)
if not m:
logger.trace(html)
raise RuntimeError('Invalid response')
return json.loads(m.group(1))
def parse_json_tasks(result):
tasks = result['info']['tasks']
return map(convert_task, tasks)
def parse_task(html):
inputs = re.findall(r'<input[^<>]+/>', html)
def parse_attrs(html):
return dict((k, v1 or v2) for k, v1, v2 in re.findall(r'''\b(\w+)=(?:'([^']*)'|"([^"]*)")''', html))
info = dict((x['id'], unescape_html(x['value'])) for x in map(parse_attrs, inputs))
mini_info = {}
mini_map = {}
#mini_info = dict((re.sub(r'\d+$', '', k), info[k]) for k in info)
for k in info:
mini_key = re.sub(r'\d+$', '', k)
mini_info[mini_key] = info[k]
mini_map[mini_key] = k
taskid = mini_map['taskname'][8:]
url = mini_info['f_url']
task_type = re.match(r'[^:]+', url).group().lower()
task = {'id': taskid,
'type': task_type,
'name': mini_info['taskname'],
'status': int(mini_info['d_status']),
'status_text': {'0':'waiting', '1':'downloading', '2':'completed', '3':'failed', '5':'pending'}[mini_info['d_status']],
'size': int(mini_info.get('ysfilesize', 0)),
'original_url': mini_info['f_url'],
'xunlei_url': mini_info.get('dl_url', None),
'bt_hash': mini_info['dcid'],
'dcid': mini_info['dcid'],
'gcid': parse_gcid(mini_info.get('dl_url', None)),
}
m = re.search(r'<em class="loadnum"[^<>]*>([^<>]*)</em>', html)
task['progress'] = m and m.group(1) or ''
m = re.search(r'<em [^<>]*id="speed\d+">([^<>]*)</em>', html)
task['speed'] = m and m.group(1).replace(' ', '') or ''
m = re.search(r'<span class="c_addtime">([^<>]*)</span>', html)
task['date'] = m and m.group(1) or ''
return task
def parse_history(html):
rwbox = re.search(r'<div class="rwbox" id="rowbox_list".*?<!--rwbox-->', html, re.S).group()
rw_lists = re.findall(r'<div class="rw_list".*?<input id="d_tasktype\d+"[^<>]*/>', rwbox, re.S)
return map(parse_task, rw_lists)
def parse_bt_list(js):
result = json.loads(re.match(r'^fill_bt_list\((.+)\)\s*$', js).group(1))['Result']
files = []
for record in result['Record']:
files.append({
'id': record['taskid'],
'index': record['id'],
'type': 'bt',
'name': record['title'], # TODO: support folder
'status': int(record['download_status']),
'status_text': {'0':'waiting', '1':'downloading', '2':'completed', '3':'failed', '5':'pending'}[record['download_status']],
'size': int(record['filesize']),
'original_url': record['url'],
'xunlei_url': record['downurl'],
'dcid': record['cid'],
'gcid': parse_gcid(record['downurl']),
'speed': '',
'progress': '%s%%' % record['percent'],
'date': '',
})
return files
def parse_gcid(url):
if not url:
return
m = re.search(r'&g=([A-F0-9]{40})&', url)
if not m:
return
return m.group(1)
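# Example (hypothetical URL; the gcid is the 40 uppercase-hex chars after '&g='):
# parse_gcid('http://example/down?a=1&g=' + 'A'*40 + '&b=2')  # -> 'AAAA...AAAA'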
def urlencode(x):
def unif8(u):
if type(u) == unicode:
u = u.encode('utf-8')
return u
return urllib.urlencode([(unif8(k), unif8(v)) for k, v in x.items()])
def encode_multipart_formdata(fields, files):
#http://code.activestate.com/recipes/146306/
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
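# Usage sketch (hypothetical field/file values, not from the original client):
# fields = [('userid', '12345')]
# files = [('filepath', 'example.torrent', torrent_bytes)]
# content_type, body = encode_multipart_formdata(fields, files)
# The request is then sent with a 'Content-Type: <content_type>' header.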
def get_content_type(filename):
import mimetypes
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def assert_default_page(response, id):
#assert response == "<script>top.location='http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=0'</script>" % id
assert re.match(r"^<script>top\.location='http://dynamic\.cloud\.vip\.xunlei\.com/user_task\?userid=%s&st=0(&cache=\d+)?'</script>$" % id, response), response
def remove_bom(response):
if response.startswith('\xef\xbb\xbf'):
response = response[3:]
return response
def assert_response(response, jsonp, value=1):
response = remove_bom(response)
assert response == '%s(%s)' % (jsonp, value), repr(response)
def parse_url_protocol(url):
m = re.match(r'([^:]+)://', url)
if m:
return m.group(1)
elif url.startswith('magnet:'):
return 'magnet'
else:
return url
def unescape_html(html):
import xml.sax.saxutils
return xml.sax.saxutils.unescape(html)
def to_utf_8(s):
if type(s) == unicode:
return s.encode('utf-8')
else:
return s
def md5(s):
import hashlib
return hashlib.md5(s).hexdigest().lower()
def encypt_password(password):
if not re.match(r'^[0-9a-f]{32}$', password):
password = md5(md5(password))
return password
def ungzip(s):
from StringIO import StringIO
import gzip
buffer = StringIO(s)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(s):
import zlib
return zlib.decompress(s, -zlib.MAX_WBITS)
|
[
"zhangfan@xiaomi.com"
] |
zhangfan@xiaomi.com
|
40a43128f37987e7fb6d5d88c9328c2e06f19768
|
85185d1f8151d2c9cc8ab14bdf41ced54bf22a81
|
/Python 2.7/Windows/File Handling Projects/FileCopy.py
|
a47c4ad29d214a13810ffbf7b5c8efcd03802af7
|
[] |
no_license
|
giefko/Python
|
a3ec7df9d67f1c5befbe9adb0ffaddb5fcdf65e5
|
ab0a4dbda45a0da315056be3eecb59a24bb70f00
|
refs/heads/master
| 2023-06-07T15:52:36.333622
| 2023-06-02T10:44:54
| 2023-06-02T10:44:54
| 60,038,281
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
import shutil
shutil.copy2('C:\\path\\filetocopy.file', 'C:\\destinationpath\\filetocopy2.file')
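# A sketch with hypothetical paths: raw strings sidestep backslash-escape bugs
# such as '\f' being read as a form feed, and shutil.copy2 preserves metadata
# (timestamps) where shutil.copy only copies contents and permissions.
# shutil.copy2(r'C:\path\filetocopy.file', r'C:\destinationpath\filetocopy2.file')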
|
[
"noreply@github.com"
] |
giefko.noreply@github.com
|
ba76db70515883d184b8a3e17bb109a334de04b3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03088/s073225556.py
|
2e6dc49100efed193992418a7fe03b5008474337
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
n = int(input())
memo = [{} for i in range(n+1)]
mod = 10**9+7
# Transcription of the intended solution
def ok(last4):
    # Check whether swapping any two adjacent characters produces 'AGC'
for i in range(4):
t = list(last4)
if i >= 1:
t[i], t[i-1] = t[i-1], t[i]
if "".join(t).count('AGC') >= 1:
return False
return True
def dfs(now, last3):
if last3 in memo[now]:
return memo[now][last3]
if now == n:
return 1
ret = 0
for i in 'AGCT':
        # If the 4 chars formed by appending the next char never hit 'AGC', even after a swap
if ok(last3 + i):
            # Count every valid continuation when the current character is i
ret = (ret + dfs(now+1, last3[1:] + i)) % mod
memo[now][last3] = ret
return ret
print(dfs(0,'TTT'))
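# Worked check (not part of the submission): for n = 3 only 'AGC', 'GAC' and
# 'ACG' are forbidden, since each is 'AGC' or one adjacent swap away from it,
# so the count is 4**3 - 3 = 61.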
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2bcba6fe8c0b61474bdd88ac3bca8d433864894c
|
eb38517d24bb32cd8a33206d4588c3e80f51132d
|
/def_filter2.py
|
f9fe2bfdae95b7b81052c6604ab9bcd876040533
|
[] |
no_license
|
Fernando23296/l_proy
|
2c6e209892112ceafa00c3584883880c856b6983
|
b7fdf99b9bd833ca1c957d106b2429cbd378abd3
|
refs/heads/master
| 2020-04-01T18:01:41.333302
| 2018-12-04T23:45:53
| 2018-12-04T23:45:53
| 153,466,681
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,537
|
py
|
import numpy as np
from matplotlib import pyplot as plt
from scipy import interpolate
import cv2
import imutils
from operator import is_not
from functools import partial
from pylab import *
from random import *
img = cv2.imread('ex2_ppp.png', cv2.IMREAD_COLOR)
dimensions = img.shape
altura = img.shape[0]
width = img.shape[1]
print(width)
ancho = int(width)
altura2 = int(altura)
alfa = int(altura/12)
cons = 0
a = np.empty((13, 50), dtype=object)
for i in range(0, 13):
cons1 = cons
cons2 = cons1+alfa
print("____")
image = img[cons1:cons2, 0:ancho]
    # converting to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # applying Gaussian blur
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#threshold?
thresh = cv2.threshold(blurred, 60, 200, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(
thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
count = 1
for c in cnts:
M = cv2.moments(c)
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
else:
cX, cY = 0, 0
xx = str(cX)+","+str(cY)
print(i)
a[i][count] = [cX, cY]
print(xx)
cv2.drawContours(image, [c], -1, (0, 0, 255), 2)
        # center circle
cv2.circle(image, (cX, cY), 7, (0, 0, 255), -1)
        # coordinates
cv2.putText(image, xx, (cX - 50, cY - 50),
        # font type, color?
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
count = count+1
cons = cons2
i = i+1
cero = [0, 0]
b = [[cero if x is None else x for x in c] for c in a]
b = b[::-1]
def igualador(l):
contador = 1
for i in range(2, 13):
for ii in range(0, 50):
l[i][ii][1] += alfa*contador
contador = contador+1
return l
b = igualador(b)
def reemplazador(l):
for u in range(1, 13):
for uu in range(0, 50):
if (l[u][uu][0] == 0):
l[u][uu] = None
else:
pass
return l
b = reemplazador(b)
print("altura", altura2)
print("ancho", ancho)
ax = np.zeros(shape=(13, 1), dtype=object)
contador = 0
def limpio(l):
tam = len(l)
a = []
for i in range(0, tam):
if(l[i] != None):
a.append(l[i])
return a
def rellenador(l, ancho, largo):
tam = len(l)
largo = int(largo/12)
ancho = int(ancho/2)
for i in range(1, tam):
if (l[i] == []):
relleno = [ancho, largo*i]
l[i].append(relleno)
else:
pass
return l
def seleccionador(l):
a = []
tam = len(l)
for i in range(1, tam):
tam1 = (len(l[i])-1)
a.append(l[i][randint(0, tam1)])
return a
lis_2 = []
for i in range(1, 13):
a = limpio(b[i])
lis_2.append(a)
print("*"*20)
print (lis_2)
print("*"*20)
lis_3 = []
lis_3 = rellenador(lis_2, ancho, altura2)
ax = seleccionador(lis_3)
print(ax)
ancho2 = int(ancho/2)
axx = np.asarray(ax)
bx = np.array([[ancho2, altura2]])
cx = np.concatenate((axx, bx), axis=0)
dx = np.array([[ancho2, 0]])
axx = np.concatenate((dx, cx), axis=0)
axx = np.array(axx.T)
tck, u = interpolate.splprep(axx, s=0)
unew = np.arange(0, 1.01, 0.01)
out = interpolate.splev(unew, tck)
img = plt.imread("ex2.jpg")
fig, ax = plt.subplots()
ax.imshow(img)
plt.plot(out[0], out[1], color='orange')
plt.plot(axx[0, :], axx[1, :], 'ob')
plt.show()
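# Minimal spline sketch on hypothetical points, mirroring the splprep/splev calls above:
# pts = np.array([[0.0, 1.0, 2.0, 3.0], [0.0, 1.0, 0.0, 1.0]])  # row 0 holds x, row 1 holds y
# tck, u = interpolate.splprep(pts, s=0)
# xs, ys = interpolate.splev(np.arange(0, 1.01, 0.01), tck)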
|
[
"fernando23296@gmail.com"
] |
fernando23296@gmail.com
|
4e4c1fe5871fcd7f67396dca52b856ccb920e14d
|
c958b2dcb95ec9c57ba5142723adee0a1db6a4cf
|
/belajarpython1.py
|
f3a31760eef083633f12f20bb98927881fbf5c06
|
[] |
no_license
|
romstay/belajarpython
|
f2591a9a85d545e0e50a51e46f63b80f030b6d46
|
03995b3549784f5251c9a3fba97632492ed10039
|
refs/heads/main
| 2023-08-24T10:50:14.930735
| 2021-10-25T08:10:10
| 2021-10-25T08:10:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
nama = input (" nama lengkap : ")
panggilan = input (' nama panggilan : ')
ttl = input (' tempat dan tanggal lahir: ')
umur = input (' umur: ')
alamat = input (' alamat: ')
namauniversitas = input (' nama universitas: ')
nim = input (' nim: ')
prodi = input (' prodi: ')
nohp = input (' no hp: ')
print (" jadi nama lengkap saya adalah ",nama," nama panggilan saya ",panggilan,"tempat dan tanggal lahir saya ",ttl)
print (" umur saya ",umur," alamat saya ada di ",alamat," saya berkuliah di ",namauniversitas)
print (" nim saya ",nim," prodi saya ",prodi," no hp saya ",nohp)
|
[
"romikhoiril@gmail.com"
] |
romikhoiril@gmail.com
|
dbde8a8adcdc8fc7e35a9f9a01feccb0711f4d86
|
5dea4c0e1edae0d7b1fe18382d163994381acd09
|
/fivetimes.py
|
c7b9fdca0da649765a7981b01c69e73502a6b5a4
|
[] |
no_license
|
cesarcamarena/automate-the-boring-stuff-with-python
|
8bf8df0801a0255423d9d121c7f3f9e5800f1ae0
|
5334bf41f484c215a15b943a501d009aea4c24ab
|
refs/heads/main
| 2023-08-24T17:25:09.895110
| 2021-10-26T22:13:39
| 2021-10-26T22:13:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
print('My name is')
for i in range(5, 0, -1):  # count down from 5 to 1, five prints in total
print('Jimmy Five Times ' + str(i))
|
[
"itscesarcamarena@gmail.com"
] |
itscesarcamarena@gmail.com
|
771e67da9791e83017cf0282bf8a2f46b78ff598
|
210c034c4903fd4254632e4b2975044188ef4c0d
|
/poke/views.py
|
24384c166d4ee942189d649e482df71da9f14002
|
[] |
no_license
|
EwgenyKuzmenko/Poke_test
|
b8ddcf22224b2f161dbe30a38e059a040d07355a
|
e5d89f2e9e4175a1a365ee6a820ecb111ae4639c
|
refs/heads/master
| 2023-06-24T16:02:49.048818
| 2021-07-26T07:32:17
| 2021-07-26T07:32:17
| 389,429,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,739
|
py
|
from rest_framework import generics as apigeneric
from .forms import RegisterFormUser
from django.contrib.auth.views import LoginView, LogoutView
from django.views import generic
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import PokemonModel
import requests
import json
from django.shortcuts import HttpResponseRedirect
from django.urls import reverse_lazy
from .serializers import PokeModelSerializer
class UserLogin(LoginView):
template_name = 'login.html'
class UserLogout(LogoutView):
template_name = 'login.html'
class UserRegister(generic.CreateView):
template_name = 'register.html'
form_class = RegisterFormUser
success_url = '/'
class Choice(LoginRequiredMixin, generic.ListView):
"""pokemon status logic implementation and pagination
as well as displaying a table of selected pokemon
"""
login_url = '/'
model = PokemonModel
template_name = 'choice.html'
def get(self, request, *args, **kwargs):
super().get(request, *args, **kwargs)
offset = request.GET.get('offset')
r = requests.get(f'https://pokeapi.co/api/v2/pokemon?offset={offset}&limit=10')
pokemons = json.loads(r.content.decode('utf-8'))['results']
for item in pokemons:
if PokemonModel.objects.filter(pokemon=item['name']).count() >= 1:
item['state'] = 'BUZY'
else:
item['state'] = 'FREE'
next = json.loads(r.content.decode('utf-8'))['next'].split('?')[-1]
prew = None
if json.loads(r.content.decode('utf-8'))['previous']:
prew = json.loads(r.content.decode('utf-8'))['previous'].split('?')[-1]
return self.render_to_response({'pokes': pokemons,
'my_pokes': PokemonModel.objects.filter(custumer=request.user),
'next': next,
'previous': prew})
class Chosen(LoginRequiredMixin, generic.View):
"""The same Pokemon cannot be selected by
different players or multiple times
I Use try: and except: constructions"""
login_url = 'poke:login'
def get(self, request, *args, **kwargs):
try:
PokemonModel.objects.get_or_create(custumer=self.request.user,
pokemon=self.kwargs['name'],
pokemon_url=self.kwargs['name'])
        except Exception:
            # ignore failed duplicate selections instead of surfacing an error
            pass
return HttpResponseRedirect(reverse_lazy('poke:choice'))
class UsersAllApi(apigeneric.ListAPIView):
""" API output of all players and their Pokémons"""
queryset = PokemonModel.objects.all()
serializer_class = PokeModelSerializer
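# Illustrative note (not from the repo): get_or_create returns an (instance, created)
# tuple, so the guarded call in Chosen.get could also capture its result, e.g.
# obj, created = PokemonModel.objects.get_or_create(
#     custumer=request.user, pokemon=name, pokemon_url=name)
# where `name` stands for self.kwargs['name'].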
|
[
"jekajeka63@MacBook-Pro-AnyMac.local"
] |
jekajeka63@MacBook-Pro-AnyMac.local
|
7070f85803c6e3000a291224b57c0c5e5c857558
|
9daa81d37546145eddd039d9d409e1e6f217ebf7
|
/noise2noise/trainer.py
|
c8c0567a96239095fbe204cf7bfa7a851be51f35
|
[] |
no_license
|
ver228/vesicle_contours
|
e4850ffc571ac515850378b9ec5c5621a76bc085
|
d6619673d8804bb4752c9dbd083dc299ae68304c
|
refs/heads/master
| 2020-03-15T22:18:06.765956
| 2019-01-04T11:15:18
| 2019-01-04T11:15:18
| 132,370,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,493
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 16:46:42 2018
@author: avelinojaver
"""
from .flow import CroppedFlow, _root_dir
from .models import UNet
from tensorboardX import SummaryWriter
import torch
from torch import nn
from torch.utils.data import DataLoader
import os
import datetime
import shutil
import tqdm
log_dir_root = _root_dir.parent / 'results' / 'logs'
def save_checkpoint(state, is_best, save_dir, filename='checkpoint.pth.tar'):
checkpoint_path = os.path.join(save_dir, filename)
torch.save(state, checkpoint_path)
if is_best:
best_path = os.path.join(save_dir, 'model_best.pth.tar')
shutil.copyfile(checkpoint_path, best_path)
def get_loss(loss_type):
if loss_type == 'l1':
criterion = nn.L1Loss()
elif loss_type == 'l1smooth':
criterion = nn.SmoothL1Loss()
elif loss_type == 'l2':
criterion = nn.MSELoss()
else:
raise ValueError(loss_type)
return criterion
def get_model(model_name):
if model_name == 'unet':
model = UNet(n_channels = 1, n_classes = 1)
else:
raise ValueError(model_name)
return model
def train(
loss_type = 'l1',
cuda_id = 0,
batch_size = 8,
model_name = 'unet',
lr = 1e-4,
weight_decay = 0.0,
n_epochs = 2000,
num_workers = 1
):
if torch.cuda.is_available():
print("THIS IS CUDA!!!!")
dev_str = "cuda:" + str(cuda_id)
else:
dev_str = 'cpu'
device = torch.device(dev_str)
gen = CroppedFlow()
loader = DataLoader(gen, batch_size=batch_size, shuffle=True, num_workers=num_workers)
model = get_model(model_name)
model = model.to(device)
criterion = get_loss(loss_type)
model_params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adam(model_params, lr = lr, weight_decay=weight_decay)
now = datetime.datetime.now()
bn = now.strftime('%Y%m%d_%H%M%S') + '_' + model_name
bn = '{}_{}_{}_lr{}_wd{}_batch{}'.format(loss_type, bn, 'adam', lr, weight_decay, batch_size)
log_dir = log_dir_root / bn
logger = SummaryWriter(log_dir = str(log_dir))
#%%
best_loss = 1e10
pbar_epoch = tqdm.trange(n_epochs)
for epoch in pbar_epoch:
#train
model.train()
gen.train()
pbar = tqdm.tqdm(loader)
avg_loss = 0
for X, target in pbar:
X = X.to(device)
target = target.to(device)
pred = model(X)
loss = criterion(pred, target)
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step()
avg_loss += loss.item()
avg_loss /= len(loader)
tb = [('train_epoch_loss', avg_loss)]
for tt, val in tb:
logger.add_scalar(tt, val, epoch)
state = {
'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
}
        is_best = avg_loss < best_loss
        if is_best:
            best_loss = avg_loss
save_checkpoint(state, is_best, save_dir = str(log_dir))
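# Hypothetical invocation (argument values are illustrative; keyword names match the signature above):
# if __name__ == '__main__':
#     train(loss_type='l2', batch_size=16, n_epochs=10)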
|
[
"ver228@gmail.com"
] |
ver228@gmail.com
|
616fcb5f71f73126b13f14583f8a2a4ecebc3c9f
|
93408557fe012551095256108f20390e874a2077
|
/find_smallest_largest_num_p3.py
|
20592b30537ed61b4e1b7f2afcff111eef584eb7
|
[] |
no_license
|
mamonraab/python-scripts
|
2ed279fb28afae548b0070a6daa0b9f1e967788e
|
7f4b02e9cbf01c163dfda95e476765cf1c4f095f
|
refs/heads/master
| 2021-05-08T15:30:03.366827
| 2016-06-13T08:05:30
| 2016-06-13T08:05:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
# Find the smallest and largest numbers
# This allows the user to enter a list of numbers until the user types done or press enter then the prompt would stop
# Author: Ritchie Ng
largest = None
smallest = None
while True:
num = input('Enter a number: ')
# Handle edge cases
if num == 'done':
break
# Allows user to press enter to complete
if len(num) < 1:
break
# Try and Except to catch input errors
try:
num = float(num)
except:
print('Invalid Input')
# Jumps to the start of the loop without running the code below
continue
# This will be permanently false after the first iteration
if smallest is None:
smallest = num
# Replaces the iteration variable with smaller input num
if num < smallest:
smallest = num
# This will be permanently false after the first iteration
if largest is None:
largest = num
# Replaces the iteration variable with larger input num
elif num > largest:
largest = num
print("Maximum number:", largest)
print("Smallest number:", smallest)
|
[
"ritchieng@u.nus.edu"
] |
ritchieng@u.nus.edu
|
73acb3c61758d2f0f0d38d86153818d8982d72a0
|
a7bdc804b393ee93c5008b7ff54f1fe764c42d5c
|
/test.py
|
89bb3514936efa1b6925057fed06e21c8db552bc
|
[] |
no_license
|
dbdmsdn10/python
|
800edf08e883a0702493bfd492a6193cf86cac05
|
7e50c25e8c252886f2665b4dbf8a8c2c727ca8ea
|
refs/heads/master
| 2022-12-27T09:37:35.610351
| 2020-10-13T11:09:41
| 2020-10-13T11:09:41
| 299,592,844
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 46
|
py
|
print("korean")
print("52")
print("ÇѱÛ")
|
[
"dbdmsdn10@gmail.com"
] |
dbdmsdn10@gmail.com
|
845b58817b301ff27abb99723bdaa9be18f325ff
|
a5afef3c1e71baeae348a02f8a75479faa6f1dff
|
/Serial_output/Host/lidar_visualizer.py
|
b627e0dcc59b7d81bbf7c19f2b9c83210b3cfdee
|
[] |
no_license
|
xinanhuang/SCOUT_LiDAR
|
73df397706ed95b1ae4f7daaa6711e64e25885bb
|
46eceee7ee2eef5d78a961ba494e0adf218b310a
|
refs/heads/master
| 2022-12-04T00:03:51.458505
| 2020-08-04T02:53:32
| 2020-08-04T02:53:32
| 284,855,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
##!/usr/bin/env python
from PyQt5 import QtCore, QtWidgets
import pyqtgraph as pg
import numpy as np
import serial
com_port = "/dev/cu.usbmodem38A2377D30391" # This is on mac os x. For Windows/Linux: 5 == "COM6" == "/dev/tty5"
baudrate = 1440000
class MyWidget(pg.GraphicsWindow):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.mainLayout = QtWidgets.QVBoxLayout()
self.setLayout(self.mainLayout)
self.timer = QtCore.QTimer(self)
self.timer.start()
self.timer.timeout.connect(self.onNewData)
self.raw = serial.Serial(com_port, baudrate)
self.plotItem = self.addPlot(title="Lidar points")
self.plotItem.enableAutoRange(pg.ViewBox.XYAxes,False)
self.plotItem.setXRange(-1000,1000)
self.plotItem.setYRange(-1000,1000)
self.plotItem.setAspectLocked(True,1)
self.plotItem.showGrid(x=True,y=True)
self.plotDataItem = self.plotItem.plot([], pen=None,
symbolBrush=(255,0,0), symbolSize=5, symbolPen=None,)
self.f = open("demofile2.xyz", "a")
def setData(self, x, y):
self.plotDataItem.setData(x, y)
def onNewData(self):
        ## read lidar data and update the plot
        ## read 7400 points per refresh and redraw
        ## could be optimized further by parsing the bytes directly (plain Python is hardly optimal here)
x = np.empty(7400)
y = np.empty(7400)
for idx in range (7400):
line = self.raw.readline()
line_split = line.decode().split(' ')
x[idx] = int(line_split[0])
y[idx] = int(line_split[1])
z = int(line_split[2])
##self.f.write(str(line))
## parse input with space and carriage return
self.setData(x, y)
def main():
app = QtWidgets.QApplication([])
pg.setConfigOptions(antialias=False) # True seems to work as well
win = MyWidget()
win.show()
win.resize(800,600)
win.raise_()
app.exec_()
if __name__ == "__main__":
main()
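# Expected serial line format (assumption inferred from the parsing in onNewData):
# each line carries three space-separated integers, e.g.
# line = b'123 456 7\n'
# x, y, z = (int(v) for v in line.decode().split(' '))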
|
[
"noreply@github.com"
] |
xinanhuang.noreply@github.com
|
fb48a6638a09ad3cfd2c1102328953beea4067f7
|
061d89229db5a2aef1c033c64df0864fb921dc2e
|
/vaccine_solution_01.py
|
f8a093ace935a30f6a671fefbb26427f55eb680c
|
[] |
no_license
|
abribanez/public_stuff
|
a837abc69b2b1863860a9925de3d0357cb8a47fe
|
ceb9c48770e8734168516754bb28994beb29325a
|
refs/heads/main
| 2023-03-13T16:28:35.820723
| 2021-03-04T03:52:11
| 2021-03-04T03:52:11
| 344,341,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
# 1. Check for any vaccine.py and delete if any.
# 2. Verify that the userSetup.py is not infected, if so rename to userSetup.backup and notify user (email)
# 3. Change ~/maya/script/ folder permissions to read only using os.chmod ?
# 4. Create scriptJob:
import maya.cmds as cmds  # import needed for the cmds calls below

def remove_unwanted_script_nodes():
    # Delete unwanted scriptNodes
    # Restore ~/maya/script/ folder permissions using os.chmod?
    # remove the script node -> cmds.scriptJob(kill=temp_script_job)
    pass  # placeholder so the comment-only body is valid Python

temp_script_job = cmds.scriptJob(e=("SceneOpened", remove_unwanted_script_nodes))
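# Sketch of the step-4 cleanup (hypothetical node-name filter; cmds.ls and
# cmds.delete are standard Maya commands):
# for node in cmds.ls(type='script') or []:
#     if 'vaccine' in node:
#         cmds.delete(node)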
|
[
"noreply@github.com"
] |
abribanez.noreply@github.com
|
64cc4841d5467f994155c4141a61d3a96dbad42d
|
fa0349f061e07e5b0d060568ac393a00b7ae88cb
|
/models/syntaxsql/modules/having_predictor.py
|
7c7dd8f76f4a392b98f6aef3b0f57b780bc75afe
|
[] |
no_license
|
inyukwo1/text-to-sql-models
|
ea99b1d43c2a26f5f2170f4682d4580998a8c23b
|
fea45ae250531ea60a29c8fe23e2562a0188d7b8
|
refs/heads/master
| 2020-06-13T12:54:12.785392
| 2019-09-09T07:35:22
| 2019-09-09T07:35:22
| 194,660,177
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,891
|
py
|
import os
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from commons.utils import run_lstm, col_tab_name_encode, encode_question, SIZE_CHECK, plain_conditional_weighted_num
from models.syntaxsql.net_utils import to_batch_seq, to_batch_tables
class HavingPredictor(nn.Module):
def __init__(self, H_PARAM, embed_layer, bert=None):
super(HavingPredictor, self).__init__()
self.N_word = H_PARAM['N_WORD']
self.N_depth = H_PARAM['N_depth']
self.N_h = H_PARAM['N_h']
self.gpu = H_PARAM['gpu']
self.use_hs = H_PARAM['use_hs']
self.table_type = H_PARAM['table_type']
self.acc_num = 1
self.embed_layer = embed_layer
self.use_bert = True if bert else False
if bert:
self.q_bert = bert
encoded_num = 768
else:
self.q_lstm = nn.LSTM(input_size=self.N_word, hidden_size=self.N_h//2,
num_layers=self.N_depth, batch_first=True,
dropout=0.3, bidirectional=True)
encoded_num = self.N_h
self.hs_lstm = nn.LSTM(input_size=self.N_word, hidden_size=self.N_h//2,
num_layers=self.N_depth, batch_first=True,
dropout=0.3, bidirectional=True)
self.col_lstm = nn.LSTM(input_size=self.N_word, hidden_size=self.N_h//2,
num_layers=self.N_depth, batch_first=True,
dropout=0.3, bidirectional=True)
self.q_att = nn.Linear(encoded_num, self.N_h)
self.hs_att = nn.Linear(self.N_h, self.N_h)
self.hv_out_q = nn.Linear(encoded_num, self.N_h)
self.hv_out_hs = nn.Linear(self.N_h, self.N_h)
self.hv_out_c = nn.Linear(self.N_h, self.N_h)
self.hv_out = nn.Sequential(nn.Tanh(), nn.Linear(self.N_h, 2)) #for having/none
        self.softmax = nn.Softmax(dim=1)
self.CE = nn.CrossEntropyLoss()
        self.log_softmax = nn.LogSoftmax(dim=1)
self.mlsml = nn.MultiLabelSoftMarginLoss()
self.bce_logit = nn.BCEWithLogitsLoss()
self.sigm = nn.Sigmoid()
if self.gpu:
self.cuda()
def forward(self, input_data):
q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, gt_col = input_data
B = len(q_len)
if self.use_bert:
q_enc = self.q_bert(q_emb_var, q_len)
else:
q_enc, _ = run_lstm(self.q_lstm, q_emb_var, q_len)
hs_enc, _ = run_lstm(self.hs_lstm, hs_emb_var, hs_len)
col_enc, _ = col_tab_name_encode(col_emb_var, col_name_len, col_len, self.col_lstm)
# get target/predicted column's embedding
# col_emb: (B, hid_dim)
col_emb = []
for b in range(B):
col_emb.append(col_enc[b, gt_col[b]])
col_emb = torch.stack(col_emb)
q_weighted = plain_conditional_weighted_num(self.q_att, q_enc, q_len, col_emb)
hs_weighted = plain_conditional_weighted_num(self.hs_att, hs_enc, hs_len, col_emb)
hv_score = self.hv_out(self.hv_out_q(q_weighted) + int(self.use_hs)* self.hv_out_hs(hs_weighted) + self.hv_out_c(col_emb))
SIZE_CHECK(hv_score, [B, 2])
return hv_score
def loss(self, score, truth):
data = torch.from_numpy(np.array(truth))
if self.gpu:
data = data.cuda()
truth_var = Variable(data)
loss = self.CE(score, truth_var)
return loss
def evaluate(self, score, gt_data):
return self.check_acc(score, gt_data)
def check_acc(self, score, truth):
err = 0
B = len(score)
pred = []
for b in range(B):
if self.gpu:
argmax_score = np.argmax(score[b].data.cpu().numpy())
else:
argmax_score = np.argmax(score[b].data.numpy())
pred.append(argmax_score)
for b, (p, t) in enumerate(zip(pred, truth)):
if p != t:
err += 1
return err
def preprocess(self, batch):
q_seq, history, label = to_batch_seq(batch)
q_emb_var, q_len = self.embed_layer.gen_x_q_batch(q_seq)
hs_emb_var, hs_len = self.embed_layer.gen_x_history_batch(history)
col_seq, tab_seq, par_tab_nums, foreign_keys = to_batch_tables(batch, self.table_type)
col_emb_var, col_name_len, col_len = self.embed_layer.gen_col_batch(col_seq)
gt_col = np.zeros(q_len.shape, dtype=np.int64)
index = 0
for item in batch:
gt_col[index] = item["gt_col"]
index += 1
input_data = (q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, gt_col)
gt_data = label
return input_data, gt_data
def save_model(self, save_dir):
print('Saving model...')
torch.save(self.state_dict(), os.path.join(save_dir, "having_models.dump"))
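# Hypothetical construction (key names are exactly those read in __init__; values illustrative):
# H_PARAM = {'N_WORD': 300, 'N_depth': 2, 'N_h': 300, 'gpu': False,
#            'use_hs': True, 'table_type': 'std'}
# predictor = HavingPredictor(H_PARAM, embed_layer=my_embed_layer)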
|
[
"hkkang@dblab.postech.ac.kr"
] |
hkkang@dblab.postech.ac.kr
|
ef605e1ca662f2873971f6027708de8746a86c63
|
8f84abe87489cf4d054097d3101603bf63768b32
|
/ICP-5/Source/LinearRegression.py
|
c55dde00daff992bb8494aec103e24c2b9f6136d
|
[] |
no_license
|
SASLEENREZA/Python_DeepLearning
|
4777a8b474a9e96e4f075c369979085428ddbc52
|
b8d61989b52dc9af22cdea43e0ab273e998e26ee
|
refs/heads/master
| 2020-03-27T05:41:26.218544
| 2018-12-08T04:52:46
| 2018-12-08T04:52:46
| 146,040,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
import numpy as num
import matplotlib.pyplot as mat
a=num.array([2.9,6.7,4.9,7.9,9.8,6.9,6.1,6.2,6,5.1,4.7,4.4,5.8])
b=num.array([4,7.4,5,7.2,7.9,6.1,6,5.8,5.2,4.2,4,4.4,5.2])
#calc mean for two lists
mean_a=num.mean(a)
mean_b=num.mean(b)
#calc deviations for slope
x=num.sum((a-mean_a)*(b-mean_b))
y=num.sum(pow(a-mean_a,2))
slope=x/y
intercept_y=mean_b-(slope*mean_a)
print("slope is {}".format(slope))
print("Y Intercept is {}".format(intercept_y))
#calc linear regression
values=(slope*a)+intercept_y
#plot linear regression graph
mat.scatter(a,b)
mat.plot(a,values, color='red')
#give the labels
mat.xlabel("males")
mat.ylabel("females")
#show the graph
mat.show()
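# The script computes ordinary least squares in closed form:
# slope = sum((a - mean_a) * (b - mean_b)) / sum((a - mean_a)**2)
# intercept = mean_b - slope * mean_a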
|
[
"35543680+SASLEENREZA@users.noreply.github.com"
] |
35543680+SASLEENREZA@users.noreply.github.com
|
12074642230d4b54034d58f55c5f371df6cb2997
|
876cfcdd0eb947b90ca990694efd5a4d3a92a970
|
/Python_stack/python/OOP/users _bankaccts.py
|
3b7d912111cd992471493c4f946a9c9b698ffe81
|
[] |
no_license
|
lindseyvaughn/Dojo-Assignments
|
1786b13a6258469a2fd923df72c0641ce60ccbb2
|
3b37284cdd813b6702f5843c113f7bc7137a56c0
|
refs/heads/master
| 2023-01-13T20:19:50.152115
| 2019-12-13T23:16:48
| 2019-12-13T23:16:48
| 209,396,128
| 0
| 0
| null | 2023-01-07T11:56:19
| 2019-09-18T20:15:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,764
|
py
|
class BankAccount:
def __init__(self, int_rate=0.01, balance=0):
self.int_rate = int_rate
self.balance = balance
def make_deposit(self,amount):
self.balance += amount
return self
def make_withdrawl(self,amount):
self.balance -= amount
return self
def display_account_info(self):
print(f"int_rate{self.int_rate}info:{self.balance}")
return self
def yield_interest(self):
if self.balance > 0:
self.balance = self.balance + self.balance * self.int_rate
return self
class User:
def __init__ (self,name,email):
self.name = name
self.email = email
self.account = BankAccount(int_rate=0.02, balance=0)
def make_deposit(self,amount):
self.account.make_deposit (amount)
return self
def make_withdrawl(self,amount):
self.account.make_withdrawl (amount)
return self
def display_account_info(self):
print(f"user:{self.name}, balance:{self.balance}")
return self
def transfer_money(self,other_user,amount):
self.account.balance -= amount
other_user.account.balance += amount
return self
lin= User("Lindsey Vaughn","lindsey.l.vaughn@gmail.com")
tae= User("Dante", "tae.d.d@gmail.com")
lo=User("lauren", "lo.d.d@gmail.com")
lin.make_deposit(100).make_deposit(100).make_deposit(103).make_withdrawl(60).yield_interest().display_account_info()
tae.make_deposit(200).make_deposit(200).make_withdrawl(50).make_withdrawl(50).make_withdrawl(50).make_withdrawl(50).display_account_info()
lo.make_deposit(50).make_deposit(20).make_withdrawl(20).make_withdrawl(5).make_withdrawl(20).make_withdrawl(40).display_account_info()
|
[
"lindsey.l.vaughn@gmail.com"
] |
lindsey.l.vaughn@gmail.com
|
53447d8c889bf04c409aed89efe7dfc477f7aa3f
|
40e4b8e883af056979536e703edd8ee503dd35ca
|
/main_w.py
|
1091593b8702db5395d686ba115c568041b6dc94
|
[] |
no_license
|
bjzhh/zhh
|
1d84ef97a28d37913e1962fc2f56baa140889b8e
|
fdc70c821e7717863dc93006f89a6ae779dacca3
|
refs/heads/master
| 2020-08-04T18:27:06.476637
| 2019-10-02T02:03:07
| 2019-10-02T02:03:07
| 212,236,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,542
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_w.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23))
self.menubar.setObjectName("menubar")
self.menu = QtWidgets.QMenu(self.menubar)
self.menu.setObjectName("menu")
self.menu_2 = QtWidgets.QMenu(self.menubar)
self.menu_2.setObjectName("menu_2")
self.menuOffice_tools = QtWidgets.QMenu(self.menubar)
self.menuOffice_tools.setObjectName("menuOffice_tools")
self.menu_3 = QtWidgets.QMenu(self.menubar)
self.menu_3.setObjectName("menu_3")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.actionIFS = QtWidgets.QAction(MainWindow)
self.actionIFS.setObjectName("actionIFS")
self.actionCRM = QtWidgets.QAction(MainWindow)
self.actionCRM.setObjectName("actionCRM")
self.actionPAS = QtWidgets.QAction(MainWindow)
self.actionPAS.setObjectName("actionPAS")
self.actionESS = QtWidgets.QAction(MainWindow)
self.actionESS.setObjectName("actionESS")
self.actionOMS = QtWidgets.QAction(MainWindow)
self.actionOMS.setObjectName("actionOMS")
self.actionIMC = QtWidgets.QAction(MainWindow)
self.actionIMC.setObjectName("actionIMC")
self.actionSave = QtWidgets.QAction(MainWindow)
self.actionSave.setObjectName("actionSave")
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.actionExcel = QtWidgets.QAction(MainWindow)
self.actionExcel.setObjectName("actionExcel")
self.actionword = QtWidgets.QAction(MainWindow)
self.actionword.setObjectName("actionword")
self.menu.addAction(self.actionSave)
self.menu.addAction(self.actionQuit)
self.menu_2.addAction(self.actionIFS)
self.menu_2.addAction(self.actionCRM)
self.menu_2.addAction(self.actionPAS)
self.menu_2.addAction(self.actionESS)
self.menu_2.addAction(self.actionOMS)
self.menu_2.addAction(self.actionIMC)
self.menuOffice_tools.addAction(self.actionExcel)
self.menuOffice_tools.addAction(self.actionword)
self.menu_3.addAction(self.actionAbout)
self.menubar.addAction(self.menu.menuAction())
self.menubar.addAction(self.menu_2.menuAction())
self.menubar.addAction(self.menuOffice_tools.menuAction())
self.menubar.addAction(self.menu_3.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.menu.setTitle(_translate("MainWindow", "文件"))
self.menu_2.setTitle(_translate("MainWindow", "快速入口"))
self.menuOffice_tools.setTitle(_translate("MainWindow", "工具集"))
self.menu_3.setTitle(_translate("MainWindow", "帮助"))
self.actionAbout.setText(_translate("MainWindow", "About"))
self.actionIFS.setText(_translate("MainWindow", "IFS"))
self.actionCRM.setText(_translate("MainWindow", "CRM"))
self.actionPAS.setText(_translate("MainWindow", "PAS"))
self.actionESS.setText(_translate("MainWindow", "ESS"))
self.actionOMS.setText(_translate("MainWindow", "OMS"))
self.actionIMC.setText(_translate("MainWindow", "IMC"))
self.actionSave.setText(_translate("MainWindow", "Save"))
self.actionQuit.setText(_translate("MainWindow", "Quit"))
self.actionExcel.setText(_translate("MainWindow", "Excel拆分"))
self.actionword.setText(_translate("MainWindow", "word"))
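# Typical driver for a pyuic5-generated class (standard PyQt5 pattern, not part of this file):
# import sys
# app = QtWidgets.QApplication(sys.argv)
# window = QtWidgets.QMainWindow()
# ui = Ui_MainWindow()
# ui.setupUi(window)
# window.show()
# sys.exit(app.exec_())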
|
[
"510809889@qq.com"
] |
510809889@qq.com
|
8ef7a09f3a19933bcc30991ae03e1390bdadf03b
|
8728634f466d92d1ff5b80fb360f3b169ba7f4ce
|
/dictionary_exm2.py
|
b9127b4b1b201c2fa1d794336c46dc68faf14b24
|
[] |
no_license
|
raj13aug/python-refresher-nataraj
|
b385a402eed8ce07895b2bc44875e3e7d85969be
|
e3da2574e7b412c34435ac3586e5f791250e6237
|
refs/heads/master
| 2023-04-26T00:24:13.033546
| 2021-06-07T13:51:33
| 2021-06-07T13:51:33
| 365,724,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
my_friend = {
"jose": {"last_seen": 6},
"anne":6
}
print(my_friend["jose"]["last_seen"])
|
[
"noreply@github.com"
] |
raj13aug.noreply@github.com
|
fbb0912e91ce99d03fdd24a6b432f951b473b3a7
|
740ec70e2374d0743e92e0149cbca79e02e0f221
|
/powcoin/data.py
|
0bd6613335f23ac8183c79cd5ff34e38782531c7
|
[] |
no_license
|
jason-me/BUIDL-Week1
|
b0402d1b4299047fc4b7435df82e71f0986934e5
|
c4358c052b4e5a920a4f49a596aa9ae2ebe885bf
|
refs/heads/master
| 2022-11-26T03:29:34.664295
| 2019-09-15T00:06:51
| 2019-09-15T00:06:51
| 156,129,889
| 1
| 0
| null | 2022-11-22T03:17:56
| 2018-11-04T22:08:43
|
Python
|
UTF-8
|
Python
| false
| false
| 6,412
|
py
|
from powcoin import *
from pprint import pprint
import identities as ids
from copy import deepcopy
node = Node("x")
alice_node = Node("x")
bob_node = Node("x")
def send_tx(n, sender_private_key, recipient_public_key, amount):
utxos = n.fetch_utxos(sender_private_key.get_verifying_key())
return prepare_simple_tx(utxos, sender_private_key, recipient_public_key, amount)
#################################
# Alice mines the genesis block #
#################################
mined_genesis_block = None
# FIXME HACK
print("Genesis mined")
for n in [node, alice_node, bob_node]:
mined_genesis_block = mine_genesis_block(n, ids.alice_public_key)
print(mined_genesis_block)
print()
########################
# Bob mines next block #
########################
print("Bob mines first real block")
coinbase = prepare_coinbase(ids.bob_public_key, height=1)
alice_to_bob = send_tx(bob_node, ids.alice_private_key, ids.bob_public_key, 10)
unmined_block = Block(txns=[coinbase, alice_to_bob],
prev_id=mined_genesis_block.id)
first_mined_block = mine_block(unmined_block)
node.handle_block(first_mined_block)
alice_node.handle_block(first_mined_block)
bob_node.handle_block(first_mined_block)
print(first_mined_block)
print()
###################################################################
# Bob and Alice both mine next block. Node discovers Bob's first  #
###################################################################
# Bob's
print("Bob's first fork block:")
coinbase = prepare_coinbase(ids.bob_public_key, height=2)
alice_to_bob = send_tx(bob_node, ids.alice_private_key, ids.bob_public_key, 10)
unmined_block = Block(txns=[coinbase, alice_to_bob],
prev_id=first_mined_block.id)
bob_fork_block = mine_block(unmined_block)
node.handle_block(bob_fork_block)
bob_node.handle_block(bob_fork_block)
print(bob_fork_block)
print()
# Alice's
print("Alice's first fork block:")
coinbase = prepare_coinbase(ids.alice_public_key, height=2)
bob_to_alice = send_tx(alice_node, ids.bob_private_key, ids.alice_public_key, 10)
unmined_block = Block(txns=[coinbase, bob_to_alice],
prev_id=first_mined_block.id)
alice_fork_block = mine_block(unmined_block)
node.handle_block(alice_fork_block)
alice_node.handle_block(alice_fork_block)
print(alice_fork_block)
print()
assert node.chain == [
mined_genesis_block,
first_mined_block,
bob_fork_block,
]
#################################################
# Alice triggers reorg attempt with a bad block #
#################################################
old_chain = deepcopy(node.chain)
old_branches = deepcopy(node.branches)
print("Alice's bad second fork block:")
coinbase = prepare_coinbase(ids.alice_public_key, height=3)
bob_to_alice = send_tx(alice_node, ids.bob_private_key, ids.alice_public_key, 10)
# FIXME: badsignatureerror is bad example b/c this can be checked w/o utxo db
bob_to_alice.tx_outs[0].amount = 100000
unmined_block = Block(txns=[coinbase, bob_to_alice],
prev_id=alice_fork_block.id)
alice_second_fork_block = mine_block(unmined_block)
node.handle_block(alice_second_fork_block)
print(alice_second_fork_block)
print()
# Assert chain is unchanged
assert node.chain == [
mined_genesis_block,
first_mined_block,
bob_fork_block,
]
assert node.chain == old_chain
assert node.branches == old_branches
###################################################################
# Again, they both mine next block. Node discovers Alice's first  #
###################################################################
# Alice's
print("Alice's second fork block:")
coinbase = prepare_coinbase(ids.alice_public_key, height=3)
bob_to_alice = send_tx(alice_node, ids.bob_private_key, ids.alice_public_key, 10)
unmined_block = Block(txns=[coinbase, bob_to_alice],
prev_id=alice_fork_block.id)
alice_second_fork_block = mine_block(unmined_block)
node.handle_block(alice_second_fork_block)
alice_node.handle_block(alice_second_fork_block)
print(alice_second_fork_block)
print()
expected = [
mined_genesis_block,
first_mined_block,
alice_fork_block,
alice_second_fork_block,
]
assert node.chain == expected
# Bob's
print("Bob's second fork block:")
coinbase = prepare_coinbase(ids.bob_public_key, height=3)
alice_to_bob = send_tx(bob_node, ids.alice_private_key, ids.bob_public_key, 10)
unmined_block = Block(txns=[coinbase, alice_to_bob],
prev_id=bob_fork_block.id)
bob_second_fork_block = mine_block(unmined_block)
node.handle_block(bob_second_fork_block)
bob_node.handle_block(bob_second_fork_block)
print(bob_second_fork_block)
print()
expected = [
mined_genesis_block,
first_mined_block,
alice_fork_block,
alice_second_fork_block,
]
assert node.chain == expected
#################################
# Alice attempts a double-spend #
#################################
print("Alice's double-spend:")
# Collect initial data
alice_starting_balance = node.fetch_balance(ids.alice_public_key)
starting_chain_height = len(node.chain) - 1
# Attempt the double-spend
coinbase = prepare_coinbase(ids.alice_public_key, height=4)
# `alice_to_bob` has already been mined!
unmined_block = Block(txns=[coinbase, alice_to_bob],
prev_id=alice_second_fork_block.id)
alice_double_spend_block = mine_block(unmined_block)
try:
node.handle_block(alice_double_spend_block)
except Exception as e:
    print("error raised attempting double spend:", e)
# Collect final data
alice_ending_balance = node.fetch_balance(ids.alice_public_key)
ending_chain_height = len(node.chain) - 1
# Assert that the block wasn't accepted, Alice's balance didn't change
assert alice_starting_balance == alice_ending_balance
assert starting_chain_height == ending_chain_height
####################
# Test the mempool #
####################
print()
print("Testing mempool")
print()
alice_to_bob = send_tx(node, ids.alice_private_key, ids.bob_public_key, 20)
node.handle_tx(alice_to_bob)
assert alice_to_bob in node.mempool
node.handle_tx(alice_to_bob)
assert alice_to_bob in node.mempool
coinbase = prepare_coinbase(ids.bob_public_key, height=4)
unmined_block = Block(txns=[coinbase, alice_to_bob],
prev_id=alice_second_fork_block.id)
alice_third_block = mine_block(unmined_block)
node.handle_block(alice_third_block)
assert alice_to_bob not in node.mempool
|
[
"jamoen7@gmail.com"
] |
jamoen7@gmail.com
|
d0ac9a0df121380d9572471ed8141c2ebd36aa74
|
beb72fa812b9751bf4a1ae748dcb710fefb622da
|
/python_work/venv/Scripts/pip3-script.py
|
123f6da8d04f4cc12fa378809362a9a6a13313d5
|
[] |
no_license
|
lustudent/Python
|
9c5745557c31679f8d5dcfa237d11847b950a011
|
a368766adb04650f3d07a39f62476f841f569889
|
refs/heads/master
| 2020-05-17T21:35:29.386159
| 2018-05-02T08:42:47
| 2018-05-02T10:06:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
#!G:\python_work\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
|
[
"542487083@qq.com"
] |
542487083@qq.com
|
bbc6b93f4afd37cade8251c3dc9fdba0cc7d8962
|
01d24f7494981b278de61e0f76e9893c15680052
|
/manage.py
|
da660dfdbf25c0cb5fa9fcee293944744dc7dcca
|
[] |
no_license
|
mamurjon/mydj_bot
|
66cc34defa30f4963d0bef5412af4e25a01c35c4
|
bf2de591492274950761c8ffa219f1cfd766f2b1
|
refs/heads/master
| 2022-12-21T14:33:20.394960
| 2017-05-27T12:56:35
| 2017-05-27T12:56:35
| 92,585,790
| 0
| 0
| null | 2022-12-07T23:56:18
| 2017-05-27T08:52:16
|
Python
|
UTF-8
|
Python
| false
| false
| 806
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mydj_bot.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"m.kodirov@student.inha.uz"
] |
m.kodirov@student.inha.uz
|
48319a3066b2839a18b404a5c7367571de80acd0
|
8bf20553b6b4bac94fb43057bc271ab7b0a6d8cd
|
/confidence_classifier/models/gan.py
|
ae30a9eb51b82557eb7c993ea89692499fd945d5
|
[] |
no_license
|
hrvojebusic/ms_thesis
|
10bfd9fe625a1d645d5176e3d2f743a1bd84baf0
|
3d58276942e929efb1bd7397aaf0935474954632
|
refs/heads/master
| 2020-06-19T13:31:14.285160
| 2019-07-16T19:13:55
| 2019-07-16T19:13:55
| 196,726,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,853
|
py
|
# Reference code is https://github.com/pytorch/examples/blob/master/dcgan/main.py
import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class _netD(nn.Module):
def __init__(self, ngpu, nc, ndf):
super(_netD, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input size. (nc) x 32 x 32
nn.Conv2d(nc, ndf * 2, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1)
class _netG(nn.Module):
def __init__(self, ngpu, nz, ngf, nc):
super(_netG, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, nc, 4, 2, 1, bias=False),
nn.Sigmoid()
# state size. (nc) x 32 x 32
)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
def Generator(n_gpu, nz, ngf, nc):
model = _netG(n_gpu, nz, ngf, nc)
model.apply(weights_init)
return model
def Discriminator(n_gpu, nc, ndf):
model = _netD(n_gpu, nc, ndf)
model.apply(weights_init)
return model
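# Hypothetical smoke test (sizes follow the 32x32 comments above; values illustrative):
# netG = Generator(n_gpu=1, nz=100, ngf=64, nc=3)
# netD = Discriminator(n_gpu=1, nc=3, ndf=64)
# z = torch.randn(8, 100, 1, 1)
# fake = netG(z)       # shape (8, 3, 32, 32)
# score = netD(fake)   # shape (8, 1)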
|
[
"hrv.busic@gmail.com"
] |
hrv.busic@gmail.com
|
7e1cdc31b9825a8badb2b995852760e6fc516f62
|
b3f5c9941447b958ea1cfa69eb84f1866791dc52
|
/2-convolutional-neural-networks/intro-to-tensorflow/mini-batch/batches.py
|
317f950a989e3fe26f4694c21da62b4c8eaf7034
|
[
"MIT"
] |
permissive
|
ivan-magda/deep-learning-foundation
|
b8704941601cd578b1b0589a219ecc85df0107f2
|
05a0df56c8223547bd7e8b62653a67f265c8e5ca
|
refs/heads/master
| 2023-03-27T09:56:50.785186
| 2021-03-30T23:28:11
| 2021-03-30T23:28:11
| 82,394,012
| 3
| 0
|
MIT
| 2021-03-30T23:28:12
| 2017-02-18T14:57:52
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
from pprint import pprint
import math
def batches(batch_size, features, labels):
"""
Create batches of features and labels
:param batch_size: The batch size
:param features: List of features
:param labels: List of labels
:return: Batches of (Features, Labels)
"""
assert len(features) == len(labels)
    output_batches = []
sample_size = len(features)
for start_i in range(0, sample_size, batch_size):
end_i = start_i + batch_size
batch = [features[start_i:end_i], labels[start_i:end_i]]
        output_batches.append(batch)
    return output_batches
# 4 Samples of features
example_features = [
['F11','F12','F13','F14'],
['F21','F22','F23','F24'],
['F31','F32','F33','F34'],
['F41','F42','F43','F44']]
# 4 Samples of labels
example_labels = [
['L11','L12'],
['L21','L22'],
['L31','L32'],
['L41','L42']]
# PPrint prints data structures like 2d arrays, so they are easier to read
pprint(batches(3, example_features, example_labels))
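# With batch_size=3 and 4 samples, the call above yields two batches: the first
# holds samples 0-2 and the second holds the single remaining sample, because the
# final list slice is simply shorter when sample_size is not divisible by batch_size.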
|
[
"imagda15@gmail.com"
] |
imagda15@gmail.com
|
587bbd0c59a5546f193f2c2687af0a168361196a
|
cb8338aa7144633caefa4a5cab5846e6edb75948
|
/Entrega 7/MatmulD.py
|
a804ad271613e5cd319e06e39a4a66781c359e86
|
[] |
no_license
|
jtcastellani/MCOC2020-P0
|
58fcb35ad6f8b70b9027ca3d7303cc94e27adb60
|
6a6d0f6dbef075cbd9cca3c1a84401bea44f75c4
|
refs/heads/master
| 2022-12-06T14:17:58.867090
| 2020-08-22T03:47:19
| 2020-08-22T03:47:19
| 284,736,070
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
from time import perf_counter
import numpy as np
from numpy import double
from scipy.linalg import solve as spsolve, inv as spinv
from scipy.sparse.linalg import inv as SparseInv, spsolve as SparseSolve
from scipy.sparse import csr_matrix as disp
def mlp(N, dtype=double): # builds the Laplacian matrix, taken from the "entrega GOLF" course forum
matriz = np.zeros((N,N),dtype=dtype)
np.fill_diagonal(matriz,2)
for i in range(N):
for j in range(N):
if i+1 == j or i-1 == j:
matriz[i][j] = -1
return(matriz)
Ns = [ # matrix sizes
2, 5, 10,
20, 40, 60,
100,
160, 250,
350, 500, 1000, 2000,
3000, 5000, 8000,
12000]
for e in range(5):
Te = []
Ts = []
name = (f"MatmulD{e}.txt")
fid = open(name,"w")
for i in Ns:
print(f"i = {i}")
t1 = perf_counter()
A = disp(mlp(i))
B = disp(mlp(i))
t2 = perf_counter()
C = A@B
t3 = perf_counter()
ens = t2 - t1
sol = t3 - t2
Te.append(ens)
Ts.append(sol)
fid.write(f"{i} {ens} {sol}\n")
fid.flush()
fid.close()
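# For reference, mlp(4) builds the tridiagonal Laplacian:
# [[ 2., -1.,  0.,  0.],
#  [-1.,  2., -1.,  0.],
#  [ 0., -1.,  2., -1.],
#  [ 0.,  0., -1.,  2.]]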
|
[
"noreply@github.com"
] |
jtcastellani.noreply@github.com
|
2a8f5e13d4ba4e54f3d64ea74a7efc6e1b8f2818
|
d383e171a136d0999551684fbfa8092c61c066f4
|
/curriculum design/CommandLine.py
|
6034cfa369bd7f6e3b95847fb65cc5ad0a67a866
|
[] |
no_license
|
EdSP29/OperatingSystem
|
54e253b1c94b56ad285f24b93c3f32692767bee8
|
33fbfab3f56841c6de1ab5d1284dba8f657a9c5b
|
refs/heads/master
| 2020-06-11T03:40:43.489614
| 2019-06-25T07:22:01
| 2019-06-25T07:22:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
from os import system
from Kernel import my_kernel
class CommandLine:
"""
    Command-line interface for user interaction
"""
def __init__(self):
self.user = CommandLine.login_in()
        # todo: initialize the current working directory
self._current_directory = '/'
def parse_user_input(self, user_input):
"""
        Handle user input
"""
        # split the command into tokens
command_list = user_input.split(' ')
        # dispatch on the first token of the command
if command_list[0] == 'ls':
try:
                # two cases: the user supplied a path argument, or omitted it (current path)
print(my_kernel.read_directory_or_file(
command_list[1] if len(command_list) > 1 else self._current_directory))
except FileNotFoundError:
print('路径错误')
@staticmethod
def login_in():
"""
        Log in
"""
        # todo: access the /etc/users file through the kernel here; if it is missing, a root account must be created (skipped for now)
tem_login_test = {'root': "123456"}
while True:
user = input('账户\n')
psw = input('密码\n')
try:
if tem_login_test[user] == psw:
system('cls')
return user
            except KeyError:  # unknown account name
print('账户或密码错误')
def get_user_input():
system('cls')
ui = CommandLine()
start_of_line = ui.user + ':$ '
if ui.user == 'root':
start_of_line = ui.user + ':# '
while True:
user_input = input(start_of_line)
ui.parse_user_input(user_input)
        # when the user exits with the exit command, consider whether the kernel needs to shut down
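# Entry-point sketch (assumption: the module is meant to be launched directly):
# if __name__ == '__main__':
#     get_user_input()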
|
[
"757320383@qq.com"
] |
757320383@qq.com
|
fdb341f2db343d61ad77718b2804d85d91577ffa
|
af5d36407ac920a6ac1e826c70fe3acafd53948b
|
/tests/python/unittest/test_auto_scheduler_common.py
|
078e1ae8e854db6bbbe38427288866d5e91283a8
|
[
"Apache-2.0",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
ZihengJiang/tvm-hago
|
e91f2ac46c9f34e981af6d443cc66459d6715fe9
|
6e71860aeb502de2366ff6c39f513f16dbaf0924
|
refs/heads/hago
| 2023-08-12T19:20:32.444311
| 2021-09-28T17:42:21
| 2021-09-28T17:42:21
| 288,336,264
| 4
| 1
|
Apache-2.0
| 2021-09-28T17:42:22
| 2020-08-18T02:36:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,513
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common functions for auto_scheduler test cases"""
import threading
from tvm import te, auto_scheduler
import topi
@auto_scheduler.register_workload
def matmul_auto_scheduler_test(N, M, K):
A = te.placeholder((N, K), name='A')
B = te.placeholder((K, M), name='B')
k = te.reduce_axis((0, K), name='k')
C = te.compute((N, M), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name='C')
return [A, B, C]
@auto_scheduler.register_workload("matmul_auto_scheduler_test_rename_1")
def matmul_auto_scheduler_test_rename_0(N, M, K):
A = te.placeholder((N, K), name='A')
B = te.placeholder((K, M), name='B')
k = te.reduce_axis((0, K), name='k')
C = te.compute((N, M), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name='C')
return [A, B, C]
def conv2d_nchw_bn_relu(N, H, W, CI, CO, kernel_size, strides, padding, dilation=1):
data = te.placeholder((N, CI, H, W), name='Data')
kernel = te.placeholder((CO, CI, kernel_size, kernel_size), name='Kernel')
bias = te.placeholder((CO, 1, 1), name='Bias')
bn_scale = te.placeholder((CO, 1, 1), name='Bn_scale')
bn_offset = te.placeholder((CO, 1, 1), name='Bn_offset')
OH = (H + 2 * padding - (kernel_size - 1) * dilation - 1) // strides + 1
OW = (W + 2 * padding - (kernel_size - 1) * dilation - 1) // strides + 1
conv = topi.nn.conv2d_nchw(data, kernel, strides, padding, dilation)
conv = te.compute((N, CO, OH, OW),
lambda i, j, k, l: conv[i, j, k, l] + bias[j, 0, 0],
name='Bias_add')
conv = te.compute((N, CO, OH, OW),
lambda i, j, k, l: conv[i, j, k, l] * bn_scale[j, 0, 0],
name='Bn_mul')
conv = te.compute((N, CO, OH, OW),
lambda i, j, k, l: conv[i, j, k, l] + bn_offset[j, 0, 0],
name='Bn_add')
out = topi.nn.relu(conv)
return [data, kernel, bias, bn_offset, bn_scale, out]
def get_tiled_matmul():
A, B, C = matmul_auto_scheduler_test(512, 512, 512)
dag = auto_scheduler.ComputeDAG([A, B, C])
s0 = dag.get_init_state()
its0 = s0.split(C, s0[C].iters[0], [4, 8, 8])
its1 = s0.split(C, s0[C].iters[4], [8, 4, 4])
s0.reorder(C, [its0[0], its1[0], its0[1], its1[1], its0[2], its1[2], its0[3], its1[3],
s0[C].iters[8]])
return dag, s0
class PropagatingThread(threading.Thread):
def run(self):
self.exc = None
try:
self.ret = self._target(*self._args, **self._kwargs)
except BaseException as e:
self.exc = e
def join(self):
super(PropagatingThread, self).join()
if self.exc:
raise self.exc
return self.ret
|
[
"noreply@github.com"
] |
ZihengJiang.noreply@github.com
|