Dataset columns (name: type, observed range):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2–616)
content_id: string (length 40)
detected_licenses: list (length 0–69)
license_type: string (2 classes)
repo_name: string (length 5–118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4–63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k–686M, contains nulls)
star_events_count: int64 (0–209k)
fork_events_count: int64 (0–110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (2–10.3M)
extension: string (246 classes)
content: string (length 2–10.3M)
authors: list (length 1)
author_id: string (length 0–212)
The rows below list these fields in order, separated by | markers.
32dd6e795e4e8c7656c208ec826cb9d894bcef01
|
ad03fdcabdd4ab333bdfd12f6c41c3d13353df63
|
/newsfeed/migrations/0004_configuration_total_votes.py
|
718f3e7e358c74413b36cbe896ab0ef1c9aee695
|
[] |
no_license
|
ReEnTrust/mediationtool
|
109aefa8354af07568d4b5ab6251c88eab27f925
|
77207ed30c054456e904d1a1ecd3d81baf718b36
|
refs/heads/main
| 2023-04-19T10:08:50.629107
| 2021-03-14T15:10:19
| 2021-03-14T15:10:19
| 364,901,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# Generated by Django 3.0.4 on 2021-01-08 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsfeed', '0003_auto_20210108_1426'),
]
operations = [
migrations.AddField(
model_name='configuration',
name='total_votes',
field=models.IntegerField(default=0),
),
]
|
[
"blegaste@ed.ac.uk"
] |
blegaste@ed.ac.uk
|
b86f82fa64afbad4e2ec93ee5e6511bee3b549c7
|
8819b3e55756bf6e53bab13714e1e25c887e93bb
|
/pysonic.py
|
3624f1037ac25598b61741de10d628f026b0c544
|
[] |
no_license
|
saibotd/couchpytato
|
c8f049ed9936d3eda537c45642cb972d57176cf4
|
f7146d9cacdb93426e65bbe9a30cb9ec50088320
|
refs/heads/master
| 2021-01-22T04:33:42.223304
| 2013-07-17T16:47:26
| 2013-07-17T16:47:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
import pySonic, time
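# A thin wrapper around a pySonic.Source that tracks playing/paused state.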
class Music:
def __init__(self):
self.music = pySonic.Source()
self.playing = False
self.paused = False
def isplaying(self):
return self.music.IsPlaying()
def play(self, name=None):
ok = True
if self.paused:
self.music.Play()
self.playing = True
self.paused = False
elif name:
try:
self.music.Sound = pySonic.FileStream(name)
            except Exception:
ok = False
if ok:
self.music.Play()
self.playing = True
self.paused = False
else:
ok = False
return ok
def pause(self):
if self.isplaying():
self.music.Pause()
self.playing = False
self.paused = True
def time(self, what=0):
if self.isplaying():
secs = int(self.music.CurrentTime)
tim = time.localtime(secs)
min = str(tim[4])
sec = str(tim[5])
if len(min) == 1:
min = '0' + min
if len(sec) == 1:
sec = '0' + sec
return min + ':' + sec
else:
return None
def stop(self):
if self.isplaying():
self.music.Stop()
            self.playing = False
|
[
"tobi@saibotd.com"
] |
tobi@saibotd.com
|
c3a0d221d0881ea417f3e5b03fd1a8fe558c52c1
|
632d58b9f7ae470d9ec2b0e88af0aa8054dfa40e
|
/src/ryzom_django/management/commands/ryzom_bundle.py
|
48c255b344ea621534b03d56660dbf76563dd28f
|
[] |
no_license
|
yourlabs/ryzom
|
8d06bf829ee9d31d33fa9353fdf187241c82b6ef
|
425859e2de30c3b939756a23a064fb1affe04b02
|
refs/heads/master
| 2023-05-13T10:27:09.766272
| 2023-05-02T14:49:25
| 2023-05-02T14:49:25
| 192,992,635
| 5
| 1
| null | 2022-10-11T20:19:52
| 2019-06-20T22:03:37
|
Python
|
UTF-8
|
Python
| false
| false
| 873
|
py
|
import os
from django.core.management.base import BaseCommand, CommandError
from ryzom_django import bundle
class Command(BaseCommand):
help = 'Write JS & CSS bundles to ryzom_django/static/bundle.*'
def handle(self, *args, **options):
static_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'..',
'static',
)
)
if not os.path.exists(static_path):
os.makedirs(static_path)
with open(f'{static_path}/bundle.js', 'w+') as f:
f.write(bundle.js())
with open(f'{static_path}/bundle.css', 'w+') as f:
f.write(bundle.css())
self.stdout.write(self.style.SUCCESS(f'Successfully wrote {static_path}/bundle.*'))
self.stdout.write('Do not forget to collectstatic!')
|
[
"jpic@yourlabs.org"
] |
jpic@yourlabs.org
|
69479901b7cfdb541375dc320f6e72740a4e772b
|
e7a0ed2c4752253a87ff74bad6761165a37e834b
|
/BellmanFord.py
|
f861af60616a1e7b182af0abd57db1815c5610c6
|
[] |
no_license
|
ilius/Algorithms-Python
|
09ac02ff6010e882775d22824940f22185a768c5
|
1d7d44859650ab9d36e6316c39a5d1400b3d3bc1
|
refs/heads/master
| 2021-01-17T14:27:55.846495
| 2013-11-19T20:18:30
| 2013-11-19T20:18:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
import GraphLib
import DirectedEdge, ShortestPath
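# Input file format: first line = vertex count V, second line = edge count E,
# then one "v w weight" edge per line.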
# negative cycle
with open('tinyEWDnc.txt', 'r') as f:
V = int(f.readline().strip())
E = int(f.readline().strip())
text = f.read()
Gnc = GraphLib.EdgeWeightedDigraph(V)
lines = text.split('\n')
for line in lines[:-1]: # last line is empty
l = line.split()
v = int(l[0])
w = int(l[1])
weight = float(l[2])
Gnc.addEdge(DirectedEdge.DEdge(v, w, weight))
# negative weight
with open('tinyEWDn.txt', 'r') as f:
V = int(f.readline().strip())
E = int(f.readline().strip())
text = f.read()
Gn = GraphLib.EdgeWeightedDigraph(V)
lines = text.split('\n')
for line in lines[:-1]: # last line is empty
l = line.split()
v = int(l[0])
w = int(l[1])
weight = float(l[2])
Gn.addEdge(DirectedEdge.DEdge(v, w, weight))
bn = ShortestPath.BellmanFord(Gn, 0)
print('has negative cycle:', bn.hasNegativeCycle())
print('has path to 6:', bn.hasPathTo(6))
print('path to 6:', bn.pathTo(6))
print('dist to 6:', bn.distTo(6))
|
[
"leslieklein@comcast.net"
] |
leslieklein@comcast.net
|
4dd83c3d16e32174d0bbaec60784760de259507b
|
f969727f9bfce4984fde818b69129542b03f4bb0
|
/intro/53_validTime.py
|
e04dd241853e23a35c1c594d5e226a3d5359837b
|
[] |
no_license
|
big-ssk/CodeSignalPython
|
dc83476c2fa9c0f43a3fa22fb3507467a705cbeb
|
a9b3e8c66fd4dc6a595e058b1928153d466ecd66
|
refs/heads/master
| 2023-01-07T22:44:41.056283
| 2020-11-10T15:52:06
| 2020-11-10T15:52:06
| 297,742,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
def validTime(time):
hours, minutes = time.split(':')
return 0 <= int(hours) < 24 and 0 <= int(minutes) < 60
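# e.g. validTime('23:59') -> True, validTime('24:00') -> False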
|
[
"noreply@github.com"
] |
big-ssk.noreply@github.com
|
9af0f22d01f8392e47d62ef608e0396880a6a4dc
|
9d7905e71a4abda02000fc363fb82a16fb1e3bd0
|
/control/actuator_properties.py
|
2ed9690ff6428c5352b73dabe662fe570d823403
|
[] |
no_license
|
gaffarelj/DSE-Mars-SRV
|
088a5cb111f6036be540b7e8defd8e95e9a3fd5a
|
6a70d239e4cba091e24d4423e2c10db1ffe033af
|
refs/heads/master
| 2023-01-30T08:45:16.244930
| 2020-12-16T21:33:56
| 2020-12-16T21:33:56
| 257,545,459
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,970
|
py
|
import numpy as np
from matplotlib import pyplot as plt
#vehicle constants
length_body = 14.01
length = 14.01 + 3.6
body_radius = 7.61 / 2
capsule_radius_bottom = 2.3
capsule_radius_top = 1.4
x_cg = 0.
z_cg = 0.
z_cg_full = 5.276 #m
z_cg_empty = 9.238 #m
z_cg_orbit = 9.007
z_cg_end_ascent = 6.120
x_body_side = body_radius
x_capsule_bottomside = capsule_radius_bottom
x_capsule_topside = capsule_radius_top
y_body_side = body_radius
y_capsule_bottomside = capsule_radius_bottom
y_capsule_topside = capsule_radius_top
z_body_top = length_body
z_capsule_bottom = z_body_top + 1.2
z_capsule_top = z_capsule_bottom + 2.7
Ix = 2875350.278 #kg*m^2
Iz = 306700.3372 #kg*m^2
Iy = 2875350.278 #kg*m^2
#RCS propellant properties
Isp = 140 #bi liquid, LCH4
Isp_mono = 140 #mono liquid H2O2
g = 9.80665
#engines
nx = 6
ny = 6
nz = 4
def thruster_arms(z_cg):
#vehicle constants
length_body = 14.01
length = 14.01 + 3.6
body_radius = 7.61 / 2
capsule_radius_bottom = 2.3
capsule_radius_top = 1.4
x_cg = 0.
y_cg = 0.
y_cg_full = 7.7085 #m
y_cg_empty = 10.0344 #m
y_cg_orbit = 6.62390
x_body_side = body_radius
x_capsule_bottomside = capsule_radius_bottom
x_capsule_topside = capsule_radius_top
z_body_side = body_radius
z_capsule_bottomside = capsule_radius_bottom
z_capsule_topside = capsule_radius_top
y_body_top = length_body
y_capsule_bottom = y_body_top + 1.2
y_capsule_top = y_capsule_bottom + 2.7
lx_bottom = x_body_side
lx_top = x_capsule_bottomside
lz_bottom = z_body_side
lz_top = z_capsule_bottomside
ly_bottom = y_cg - 1.
ly_top = y_cg_orbit * 2 - y_cg
return lx_bottom, lx_top, ly_bottom, ly_top, lz_bottom, lz_top
def RCS_torque_to_thrust(T,axis,cg,scenario):
lx_bottom, lx_top, ly_bottom, ly_top, lz_bottom, lz_top = thruster_arms(cg)
# n: number of thrusters to provide torque
if axis == 'x' or axis == 'z':
if scenario == 'normal':
n_bottom = 2
n_top = 2
thrust = T / (ly_bottom * n_bottom + ly_top * n_top)
elif scenario == 'error_bottom' or scenario == 'error_top':
n = 1
thrust = T / (lz_bottom * n)
elif scenario == 'failure':
n_bottom = 1
n_top = 1
thrust = T / (lz_bottom * n_bottom + lz_top * n_top)
elif axis == "y":
if scenario == 'normal':
n_bottom = 2
n_top = 2
thrust = T / (n_top * ly_top + n_bottom * ly_bottom)
elif scenario == 'error_bottom':
n = 1
thrust = T / (lx_bottom)
elif scenario == 'error_top':
n = 1
thrust = T / (lx_top)
elif scenario == 'failure':
n_bottom = 1
n_top = 1
thrust = T / (lz_bottom * n_bottom + lz_top * n_top)
return thrust
def RCS_displacement_to_thrust(F,axis,scenario):
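    # n: number of thrusters sharing the translational force F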
if axis == "x" or axis == 'z':
if scenario == 'normal':
n_bottom = 1
n_top = 1
n = n_bottom + n_top
elif scenario == 'failure':
n_bottom = 1
n_top = 1
n = n_bottom + n_top
if axis == "y":
if scenario == 'normal':
n_bottom = 4
n_top = 0
n = n_bottom + n_top
if scenario == 'failure':
n_bottom = 2
n_top = 0
n = n_bottom + n_top
f = F / n
return f
def RCS_thrust_to_torque(f,axis,cg):
lx_bottom, lx_top, ly_bottom, ly_top, lz_bottom, lz_top = thruster_arms(cg)
if axis == 'x' or axis == 'z':
n_bottom = 2
n_top = 2
T = f * (lz_bottom * n_bottom + lz_top * n_top)
elif axis == 'y':
#bottom RCS
n_bottom = 2
n_top = 2
T = f * (n_top * ly_top + n_bottom * ly_bottom)
return T
def slew(thrust,tburn,slew_angle,I):
torque = RCS_thrust_to_torque(thrust,'z','normal')
spin_acc = torque / I
spin_rate = spin_acc * tburn
slew_time = slew_angle / spin_rate
return slew_time,torque
def thrust_error(f,cg,angle):
lx_bottom, lx_top, ly_bottom, ly_top, lz_bottom, lz_top = thruster_arms(cg)
T_error_x = np.sin(angle*np.pi/180) * ly_bottom * f
T_error_z = T_error_x
T_error_y = np.sin(angle*np.pi/180) * lx_bottom * f
return T_error_x, T_error_y, T_error_z
def RCSpropellant(f,t,Isp):
g = 9.80665
impulse = f * t
Mp = impulse / (Isp * g)
return Mp
|
[
"wiegerhelsdingen@gmail.com"
] |
wiegerhelsdingen@gmail.com
|
1f62074c0c85f84ac88700f413546240cba19622
|
ec78979fd8479e884ab93d723360744db5152134
|
/wechat_stat.py
|
e05254f8304d487894b38f59d8004251e12e30bd
|
[] |
no_license
|
xushubo/learn-python
|
49c5f4fab1ac0e06c91eaa6bd54159fd661de0b9
|
8cb6f0cc23d37011442a56f1c5a11f99b1179ce6
|
refs/heads/master
| 2021-01-19T17:00:05.247958
| 2017-09-03T03:22:28
| 2017-09-03T03:22:28
| 101,032,298
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
import itchat
from echarts import Echart, Legend, Pie
itchat.login()  # log in
friends = itchat.get_friends(update=True)[0:]  # fetch the friend list
male = female = other = 0  # initialize counters: male, female, unspecified
for i in friends[1:]:  # skip the first entry (yourself); Sex: 1 = male, 2 = female
sex = i['Sex']
if sex == 1:
male +=1
elif sex == 2:
female += 1
else:
other += 1
total = len(friends[1:])
print('Total WeChat friends: %d' % total)
print('Male friends: %.2f%%' % (float(male)/total*100))
print('Female friends: %.2f%%' % (float(female)/total*100))
print('Other: %.2f%%' % (float(other)/total*100))
'''
chart = Echart("%s's WeChat friend gender ratio" % (friends[0]['NickName']), 'from WeChat')
chart.use(Pie('WeChat', [{'value': male, 'name': 'Male %.2f%%' % (float(male) / total * 100)}, {'value': female, 'name': 'Female %.2f%%' % (float(female) / total * 100)}, {'value': other, 'name': 'Other %.2f%%' % (float(other) / total * 100)}], radius=["50%", "70%"]))
chart.use(Legend(['male', 'female', 'other']))
del chart.json['xAxis']
del chart.json['yAxis']
chart.plot()
'''
|
[
"tmac523@163.com"
] |
tmac523@163.com
|
4ff054f06535f914bf1194cc99ced72fe1853799
|
36191115c3f91a1dadb675ba4f46611423c0e9d7
|
/telegram_unvoicer_bot/telegram/const.py
|
ac79ca8b37c47a7f9b691907dc78cd0019b54933
|
[
"Apache-2.0"
] |
permissive
|
nabokihms/telegram_unvoicer_bot
|
00805b9e89135fbbc77bbd7b5e28696bb3f34cb5
|
bdc75d8d4bd25d5914523e984c22f2ac05f022e1
|
refs/heads/master
| 2023-02-22T19:25:22.964360
| 2022-10-14T10:35:22
| 2022-10-14T10:35:22
| 133,100,125
| 5
| 0
|
Apache-2.0
| 2023-02-13T02:41:49
| 2018-05-12T00:16:01
|
Python
|
UTF-8
|
Python
| false
| false
| 474
|
py
|
from os import environ
from typing import FrozenSet
TELEGRAM_BOT_API_KEY: str = environ['TELEGRAM_API_KEY']
TELEGRAM_BOT_API_URL_PREFIX: str = 'https://api.telegram.org'
TELEGRAM_BOT_API_URL: str = \
f'{TELEGRAM_BOT_API_URL_PREFIX}/bot{TELEGRAM_BOT_API_KEY}/'
TELEGRAM_BOT_FILE_PATH_API_URL: str = \
f'{TELEGRAM_BOT_API_URL_PREFIX}/file/bot{TELEGRAM_BOT_API_KEY}/'
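# message keys under which Telegram may deliver an audio payload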
TELEGRAM_MESSAGE_AUDIO_KEYS: FrozenSet[str] = frozenset(
('voice', 'audio', 'document')
)
|
[
"noreply@github.com"
] |
nabokihms.noreply@github.com
|
f9f9bdbea20f756b687e0c829b73e3f40f91236b
|
92dbbd758ec9c8ce0bfa2275cb1e3f0db5938f7d
|
/scripts/delta_scan-join_five.py
|
675afb5bc3c0131a7ca5d58a6fa11be53da061b0
|
[] |
no_license
|
RoslawSzaybo/bosons_on_lattice
|
cb5bdd0ded6d08f93faf1da410bc37939904ba4d
|
3c82c5fbd2b27d806526bd88d23b603a6b26dbc4
|
refs/heads/master
| 2020-03-25T11:39:41.477564
| 2018-11-29T18:26:23
| 2018-11-29T18:26:23
| 143,741,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,839
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 12:24:12 2018
"""
import numpy as np
import pickle
import sys
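# each pickled record is a pair whose second element is a dict with a 'delta' entry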
def delta(pkl):
return pkl[1]['delta']
zero = 1e-14
def the_same(a,b):
delta_a = delta(a)
delta_b = delta(b)
diff = abs(delta_b-delta_a)
return (diff < zero)
def repetitions(c):
    repeated_idxes = []
    for idx, elem in enumerate(c):
        if idx == 0:
            continue
        if the_same(c[idx-1], elem):
            repeated_idxes += [idx]
    return repeated_idxes
def clean_repetitions(c):
to_del = repetitions(c)
print("Initial number of data points\t\t {dp}".format(dp=len(c)))
print("Number of elements to be removed\t {td}".format(td=len(to_del)))
for idx in reversed(to_del):
c.pop(idx)
print("Final number of data points\t\t {dp}".format(dp=len(c)))
return c
def main():
    if len(sys.argv) != 6:
        print("This script joins four pickle files into one!")
        print("I need five file names as command line arguments, like this:")
        print("$ python join.py A.pkl B.pkl C.pkl D.pkl output.pkl")
        sys.exit()
in_a = sys.argv[1]
in_b = sys.argv[2]
in_c = sys.argv[3]
in_d = sys.argv[4]
out = sys.argv[5]
print("="*80)
print("Join")
print("="*80)
print("Reading files in progress")
with open(in_a, 'rb') as g:
a = pickle.load(g)
with open(in_b, 'rb') as g:
b = pickle.load(g)
with open(in_c, 'rb') as g:
x = pickle.load(g)
with open(in_d, 'rb') as g:
y = pickle.load(g)
print("Files are open.")
c = sorted(a+b+x+y, key = delta)
    print("Data is now merged into one sorted list.")
d = clean_repetitions(c)
print("All repetitions are removed.")
with open(out, 'wb') as g:
pickle.dump(d,g)
print("Data is saved to {}".format(out))
return 0
if __name__ == '__main__':
main()
|
[
"pawel.wojcik5@gmail.com"
] |
pawel.wojcik5@gmail.com
|
f5d2f37e9340e9e1de69ab7c207e279391fe5bf5
|
5343fd379fc858be81c8993019201fe16e44599f
|
/Assignment4/data_loader.py
|
ad3559ba867e89fbf7ac5607be7721c36c73f772
|
[] |
no_license
|
FreddieSun/CS520_IntroToAI
|
d39a7d842108c189655b500365f4a480632bd6ee
|
f53a0fbb7a4431a7667c4894d6f36d324c1b79b4
|
refs/heads/master
| 2021-10-09T02:57:17.464117
| 2018-12-18T07:19:23
| 2018-12-18T07:19:23
| 150,920,609
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,705
|
py
|
import scipy.misc
from glob import glob
import numpy as np
class DataLoader():
def __init__(self, img_res=(128, 128)):
self.img_res = img_res
def load_train_batch(self, batch_size=1, is_testing=False):
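        # Each training file holds image A and image B side by side; split at the horizontal midpoint.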
path = glob('./datasets/train/*')
self.n_batches = int(len(path) / batch_size)
for i in range(self.n_batches):
batch = path[i*batch_size:(i+1)*batch_size]
imgs_A, imgs_B = [], []
for img in batch:
img = self.imread(img)
h, w, _ = img.shape
half_w = int(w/2)
img_A = img[:, :half_w, :]
img_B = img[:, half_w:, :]
img_A = scipy.misc.imresize(img_A, self.img_res)
img_B = scipy.misc.imresize(img_B, self.img_res)
if not is_testing and np.random.random() > 0.5:
img_A = np.fliplr(img_A)
img_B = np.fliplr(img_B)
imgs_A.append(img_A)
imgs_B.append(img_B)
imgs_A = np.array(imgs_A)/127.5 - 1.
imgs_B = np.array(imgs_B)/127.5 - 1.
yield imgs_A, imgs_B
def load_test_batch(self, batch_size):
path = glob('./datasets/test/*' )
self.n_batches = int(len(path) / batch_size)
for i in range(self.n_batches):
batch = path[i*batch_size:(i+1)*batch_size]
imgs_A, imgs_B = [], []
for img in batch:
img = self.imread(img)
h, w, _ = img.shape
half_w = int(w/2)
img_A = img[:, :half_w, :]
img_B = img[:, half_w:, :]
img_A = scipy.misc.imresize(img_A, self.img_res)
img_B = scipy.misc.imresize(img_B, self.img_res)
imgs_A.append(img_A)
imgs_B.append(img_B)
imgs_A = np.array(imgs_A)/127.5 - 1.
imgs_B = np.array(imgs_B)/127.5 - 1.
yield imgs_A, imgs_B
def load_self_test_batch(self, batch_size):
path = glob('./self_test/*')
self.n_batches = int(len(path) / batch_size)
for i in range(self.n_batches):
batch = path[i * batch_size:(i + 1) * batch_size]
imgs_A, imgs_B = [], []
for img in batch:
img = self.imread(img)
h, w, _ = img.shape
img_A = img[:, :w, :]
img_A = scipy.misc.imresize(img_A, self.img_res)
imgs_A.append(img_A)
imgs_A = np.array(imgs_A) / 127.5 - 1.
yield imgs_A
def imread(self, path):
        return scipy.misc.imread(path, mode='RGB').astype(float)
|
[
"xl422@scarletmail.rutgers.edu"
] |
xl422@scarletmail.rutgers.edu
|
db68fe190b85a166a45fd651494dca56572f4e4d
|
a2793557adc64285f9965d25cefc4cea3cff8333
|
/env.py
|
322886ce58433d697bb194cf6602a6d1c3384a75
|
[] |
no_license
|
liuzhonghaolpp/H-DQN
|
bab3b6c4ea44640b473d2ddf2a7dbcc60e56b894
|
1e70d74424fc7679982db0372cce9bd9446970bb
|
refs/heads/master
| 2023-03-18T06:58:00.697140
| 2021-02-28T13:30:51
| 2021-02-28T13:30:51
| 336,790,849
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,735
|
py
|
import numpy as np
import torch
from gym import spaces, core
from gym.envs.classic_control import rendering
import entity
from utils import uav_mobility
import configparser
import time
config = configparser.RawConfigParser()
config.read('./paramaters.ini')
# parameters
MAX_AoI = 100
DURATION = int(config.get('simulation parameters', 'duration'))
IoT_COMMUNICATION_RANGE = int(config.get('simulation parameters', 'iot_communication_range'))
BS_COMMUNICATION_RAGE = int(config.get('simulation parameters', 'BS_communication_rage'))
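# Gym environment: a UAV collects age-of-information (AoI) data from six sensors
# and delivers it to a base station (BS).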
class MyEnv(core.Env):
def __init__(self):
self.action_space = spaces.Box(low=-1, high=1, shape=(2,))
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(14,))
self.num_sensors = 6
self.sensors = []
        self.side_length = 1000  # side length of the target area
self.time = 0
self.get_reward = False
self.reward = 0
self.viewer = None
for i in range(self.num_sensors):
sensor = entity.Sensor(i)
self.sensors.append(sensor)
self.uav = entity.UAV()
self.BS = entity.BS()
def reset(self):
        # initialize sensor positions (fixed, not random)
self.sensors[0].pos = np.array([250, 550])
self.sensors[1].pos = np.array([550, 150])
self.sensors[2].pos = np.array([750, 250])
self.sensors[3].pos = np.array([350, 850])
self.sensors[4].pos = np.array([550, 750])
self.sensors[5].pos = np.array([750, 950])
        # initialize the UAV position and the AoI of the data it carries
self.uav.pos = np.array([500, 500])
self.uav.aoi = np.array([MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI])
        # initialize the AoI at the base station
self.BS.pos = np.array([500, 500])
self.BS.aoi = np.array([MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI])
        # reset the episode bookkeeping variables
self.time = 0
self.get_reward = False
self.reward = 0
obs = self._get_observation()
return obs
def step(self, action):
self.time += 1
self.get_reward = False
self.reward = 0
self.uav.action = action
        # update the UAV position
uav_mobility.get_next_pos(self.uav)
        # if the UAV is within a sensor's communication range and has not yet collected its data, update uav_aoi
self._update_uav_aoi()
        # if the UAV is within the BS communication range and carries not-yet-uploaded data, update bs_aoi
self._update_bs_aoi()
done = self._get_done()
reward = self._get_reward()
obs = self._get_observation()
info = {}
return obs, reward, done, info
def render(self, mode='human'):
        screen_width = 500  # scaled down proportionally at a 1:2 ratio
screen_height = 500
        # on the first call, create the viewer, the UAV and the landmarks
if self.viewer is None:
self.viewer = rendering.Viewer(screen_height, screen_width)
self.viewer.set_bounds(0, 500, 0, 500)
self.viewer.geoms.clear()
for sensor in self.sensors:
geom = rendering.make_circle(sensor.size)
geom.set_color(1, 0, 0)
geom_form = rendering.Transform(translation=(sensor.pos[0]/2, sensor.pos[1]/2))
geom.add_attr(geom_form)
self.viewer.add_geom(geom)
geom = rendering.make_circle(self.BS.size)
geom.set_color(0, 0, 1)
geom_form = rendering.Transform(translation=(self.BS.pos[0]/2, self.BS.pos[1]/2))
geom.add_attr(geom_form)
self.viewer.add_geom(geom)
geom = rendering.make_circle(self.uav.size)
geom.set_color(0, 1, 0)
geom_form = rendering.Transform(translation=(self.uav.pos[0]/2, self.uav.pos[1]/2))
geom.add_attr(geom_form)
self.viewer.add_geom(geom)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def _get_observation(self):
obs_uav = np.concatenate((self.uav.pos, self.uav.aoi), axis=0)
obs_bs = self.BS.aoi
obs = np.concatenate((obs_uav, obs_bs), axis=0)
return obs
def _get_done(self):
done = False
        if self.uav.pos[0] < 0 or self.uav.pos[0] > 1000 or self.uav.pos[1] < 0 or self.uav.pos[1] > 1000:
done = True
return done
def _get_reward(self):
if self.get_reward:
return self.reward
else:
return 0
def _update_uav_aoi(self):
for i, sensor in enumerate(self.sensors):
distance = np.sqrt(np.sum((sensor.pos - self.uav.pos)**2))
if distance <= IoT_COMMUNICATION_RANGE:
self.uav.aoi[i] = 1
else:
self.uav.aoi[i] += 1
def _update_bs_aoi(self):
distance = np.sqrt(np.sum((self.uav.pos - self.BS.pos)**2))
if distance <= BS_COMMUNICATION_RAGE:
self.get_reward = True
for i in range(len(self.BS.aoi)):
if self.BS.aoi[i] > self.uav.aoi[i]:
self.reward += self.BS.aoi[i] - self.uav.aoi[i]
self.BS.aoi[i] = min(self.BS.aoi[i], self.uav.aoi[i])
else:
for i in range(len(self.BS.aoi)):
self.BS.aoi[i] += 1
if __name__ == '__main__':
env = MyEnv()
obs = env.reset()
env.render()
while True:
action = env.action_space.sample()
obs, reward, done, _ = env.step(action)
print(obs)
print(reward)
print('---------------')
env.render()
time.sleep(4)
    # Delivery may happen before all sensors have been collected, or only once after everything is collected.
    # Dynamic environment: the QoS function's parameters change over time, i.e. users' latency requirements vary dynamically.
|
[
"384037404@qq.com"
] |
384037404@qq.com
|
c16aac2e875043d857d88fc4d33e2dd6def2bc57
|
0a24ca351b483e769c44c1651f839fe3fbf4c3e7
|
/vurapy/config/production_env.py
|
ccb6f823e7fe5e59ffc77e485828fca246c8d7f2
|
[
"MIT"
] |
permissive
|
crisboleda/vurapy
|
f7be49f2681f8b47f634a0bc27042ed451e6839b
|
12e35fb9373131181b1b8d4d5701fbbf4231dab8
|
refs/heads/master
| 2022-12-02T20:54:53.375384
| 2020-08-22T17:17:15
| 2020-08-22T17:17:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
from .base import *
from decouple import config
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', default=['*'], cast=list)
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"cristhian.2002.0@gmail.com"
] |
cristhian.2002.0@gmail.com
|
b9dac58212d011f1c76f030f0631c55f20b3f02f
|
77ab593ed55a6d46b1778f6d41bc70ced3f8cd46
|
/face_ID_net/face_1024s/face_1024_vals.py
|
2b929cc2b67254c5a37f697a6093fc0d6f3d68f1
|
[] |
no_license
|
wosxcc/bot
|
e93b92fbca79a915feb186160f3f72c99218ffcb
|
c097f5455bc6264c9f778fb72900475963836153
|
refs/heads/master
| 2021-06-12T12:43:47.314071
| 2018-12-14T08:51:43
| 2018-12-14T08:51:43
| 128,619,488
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
import os
import cv2 as cv
import numpy as np
import random
import tensorflow as tf
from face_ID_net.face_1024s.ID_pb_net1024s import face_net
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
IMG_H=64
IMG_W =64
N_CLASSES =1024
learning_rate =0.001
def face_val(image_arr,run_train):
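    # run_train=True: evaluate a 3-image (anchor, negative, positive) batch and return the d_pos/d_neg distances;
    # run_train=False: return the embedding of a single image.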
    print('What the heck')
log_dir = './face72/face_big1024/'
with tf.Graph().as_default():
graph = face_net(1, IMG_H,IMG_W, N_CLASSES,learning_rate,2,run_train)
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(log_dir)
if ckpt and ckpt.model_checkpoint_path:
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
saver.restore(sess, ckpt.model_checkpoint_path)
else:
                print('No saved model found')
if run_train ==True:
pos_d,neg_d = sess.run([graph['d_pos'],graph['d_neg']],feed_dict={graph['x']: np.reshape(image_arr, (3, 64, 64, 3))})
return pos_d, neg_d
elif run_train ==False:
                print('error below', len(image_arr), image_arr[0].shape)
anchor_data = sess.run(graph['anchor_out'],feed_dict={graph['x']: np.reshape(image_arr, ( 1, 64, 64, 3))})
                print('error above')
return anchor_data
path = 'E:/faceID'
for i in range(10):
    file = random.sample(os.listdir(path), 1)[0]
    while 1:
        negative_file = random.sample(os.listdir(path), 1)[0]
        if negative_file != file:
            break
    print(file, negative_file)
    anchor_img = random.sample(os.listdir(path + '/' + file), 1)[0]
    while 1:
        positive_img = random.sample(os.listdir(path + '/' + file), 1)[0]
        if anchor_img != positive_img:
            break
    negative_img = random.sample(os.listdir(path + '/' + negative_file), 1)[0]
    img_anchor = cv.imread(path + '/' + file + '/' + anchor_img)
    img_positive = cv.imread(path + '/' + file + '/' + positive_img)
    img_negative = cv.imread(path + '/' + negative_file + '/' + negative_img)
sh_anchor=cv.resize(img_anchor,(240,240),interpolation=cv.INTER_CUBIC)
sh_positive=cv.resize(img_positive,(240,240),interpolation=cv.INTER_CUBIC)
sh_negative=cv.resize(img_negative,(240,240),interpolation=cv.INTER_CUBIC)
image_data=[]
image_data.append(cv.resize(img_anchor,(64,64),interpolation=cv.INTER_CUBIC))
image_data.append(cv.resize(img_negative,(64,64),interpolation=cv.INTER_CUBIC))
image_data.append(cv.resize(img_positive,(64,64),interpolation=cv.INTER_CUBIC))
image_data =np.array(image_data,dtype='float32')
image_data =(image_data-128.0)/256.0
anchor_score = face_val(image_data[0],False)
print(anchor_score)
pos_d,neg_d =face_val(image_data,True)
print(pos_d,neg_d)
cv.imshow('anchor', sh_anchor)
cv.imshow('positive', sh_positive)
cv.imshow('negative', sh_negative)
cv.waitKey()
cv.destroyAllWindows()
|
[
"821022156@qq.com"
] |
821022156@qq.com
|
fd2e4f64b8d23dc7435ca8f180adc95a3899a98b
|
3a37b6ce2c1c481f6aded64b2d0c4421f7db1210
|
/hpc_bin/wrf_postproc
|
eb26ac737814efe9bbbab6838f79c0a64670559b
|
[] |
no_license
|
dbreusch/pythonExamples
|
99c24dc1d28b8c3be3b4fadd30a05d8ae317b0c0
|
7b8c06208fefcadf918cb9517ac313535c4df010
|
refs/heads/master
| 2022-10-13T12:21:52.182752
| 2020-06-14T23:10:50
| 2020-06-14T23:10:50
| 265,911,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,217
|
#!/usr/bin/env python
# wrf_postproc
# run wrf_postproc.ncl on a specific file
# output goes to a directory matching yy/mm/dd of input file
# 03/30/17, dbr, removed hardcoded dependence on home directory name
import pdb, sys, os
nargin = len(sys.argv)
if nargin < 6:
    print("Syntax: wrf_postproc input_fn ensemble expname model branch")
    print("  input_fn = name of wrfout file to be processed")
    print("  ens = ensemble")
    print("  expname = region name (gis, ant)")
    print("  model = model name (erai, cesmle)")
    print("  branch = time period (e.g., historical)")
    sys.exit()
# get command line args
input_fn = sys.argv[1]
enss = sys.argv[2]
ens = int( enss )
expname = sys.argv[3]
model = sys.argv[4]
branch = sys.argv[5]
# get home directory
home = os.path.expanduser("~")
# ncl command
ncl_cmd = home+"/wrf/ncl/wrfout_postproc.ncl"
# split filename and gather yy/mm/dd info
dir_tmp = os.path.dirname( input_fn )
file_in = os.path.basename( input_fn )
file_yy = file_in[ 11:15 ]
file_mm = file_in[ 16:18 ]
file_dd = file_in[ 19:21 ]
# convert input dir_in to working dir_in
a = dir_tmp.split('/')
if len(a) < 3:
    print("Input file name too short, missing parent directories")
sys.exit()
dir1 = a[-2]
dir2 = a[-1]
if ens > 0:
base = home+"/scratch/wrf/"+expname+"_"+model+"/"+branch+"/wrf/run/"+enss+"/"
else:
base = home+"/scratch/wrf/"+expname+"_"+model+"/"+branch+"/wrf/run/"
dir_in = base+dir1+"/"+dir2+"/"
# output names
if ens > 0:
dir_out = home+"/scratch/wrf/%s_%s/%s/wrf/postproc/%s/%s/%s%s%s/" % (expname, model, branch, enss, file_yy, file_yy, file_mm, file_dd)
else:
dir_out = home+"/scratch/wrf/%s_%s/%s/wrf/postproc/%s/%s%s%s/" % (expname, model, branch, file_yy, file_yy, file_mm, file_dd)
file_out = file_in+".nc"
ofn = dir_out+file_out
if os.path.exists( ofn ):
os.unlink( ofn )
if not os.path.exists( dir_out ):
try:
os.makedirs( dir_out )
except OSError:
pass
# print "Caught makedir of existing dir "+dir_out
arg1 = "'dir_in=\""+dir_in+"\"'"
arg2 = "'file_in=\""+file_in+"\"'"
arg3 = "'dir_out=\""+dir_out+"\"'"
arg4 = "'file_out=\""+file_out+"\"'"
cmd = "ncl "+arg1+" "+arg2+" "+arg3+" "+arg4+" "+ncl_cmd
#pdb.set_trace()
os.system( cmd )
|
[
"dbreusch@me.com"
] |
dbreusch@me.com
|
|
602f15f787ddf6f4abf25caf955202c918bb21a8
|
0bde172fb37280fa96758144dcec767990a89565
|
/run.py
|
6b7ebbdfe73be630d7affd8741dcdd2f91792392
|
[] |
no_license
|
JonatanMariscal/P2_E1
|
a809dfaf11e2dedf008b1e0264e596d4786a4f4d
|
04d4de2c91ac83859e7cce3d745024350c82db58
|
refs/heads/master
| 2022-06-17T02:14:45.405225
| 2020-05-10T19:47:10
| 2020-05-10T19:47:10
| 262,860,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,151
|
py
|
import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
import SimpleITK as sitk
def resample_image(image, reference):
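    # Resample `image` onto the grid size of `reference`, rescaling voxel spacing
    # so the physical extent is preserved.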
pixel_spacing = image.GetSpacing()
new_spacing = [old_sz * old_spc / new_sz for old_sz, old_spc, new_sz in
zip(image.GetSize(), pixel_spacing, reference.GetSize())]
image_resampled = sitk.Resample(image, reference.GetSize(), sitk.Transform(), sitk.sitkNearestNeighbor,
image.GetOrigin(), new_spacing,
image.GetDirection(), 0.0, image.GetPixelIDValue())
return image_resampled
# Register two images with same shape.
def register_images(image, reference):
initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(reference, image.GetPixelID()),
image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=250)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkNearestNeighbor)
registration_method.SetOptimizerAsGradientDescent(learningRate=3.0, numberOfIterations=10000,
convergenceMinimumValue=1e-6, convergenceWindowSize=10)
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform, inPlace=False)
final_transform = registration_method.Execute(sitk.Cast(reference, sitk.sitkFloat32),
sitk.Cast(image, sitk.sitkFloat32))
register = sitk.ResampleImageFilter()
register.SetReferenceImage(reference)
register.SetInterpolator(sitk.sitkNearestNeighbor)
register.SetTransform(final_transform)
ds_register = register.Execute(image)
return ds_register
def main():
def mode_selector():
status = selector.get()
if not status:
frame_alpha.tkraise()
selector.set(True)
else:
frame.tkraise()
selector.set(False)
    def update_slice(value):  # tk.Scale callback; the slider values are read back from the widgets
pos = slice_selector.get()
alpha = alpha_selector.get()
status = selector.get()
if not status:
axs[0].imshow(ds_array[pos,:,:], cmap=plt.cm.get_cmap(colormap.get()))
axs[1].imshow(phantom_array[pos,:,:], cmap=plt.cm.get_cmap(colormap.get()))
fig.canvas.draw_idle()
else:
ax.imshow(ds_array[pos, :, :], cmap=plt.cm.get_cmap(colormap.get()))
ax.imshow(phantom_array[pos, :, :], cmap=plt.cm.get_cmap("prism"), alpha=alpha/100)
fig2.canvas.draw_idle()
slice_pos = "Nº Slice: " + str(pos)
label_slice.config(text=slice_pos)
#Reading RM_Brain_3D-SPGR DICOM
path_dcm = "data/RM_Brain_3D-SPGR"
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(path_dcm)
reader.SetFileNames(dicom_names)
ds = reader.Execute()
#Reading phantom DICOM
ds_phantom = sitk.ReadImage('data/icbm_avg_152_t1_tal_nlin_symmetric_VI.dcm')
phantom_array = sitk.GetArrayFromImage(ds_phantom) # z, y, x
#Reading atlas DICOM
ds_atlas = sitk.ReadImage('data/AAL3_1mm.dcm')
# Resample Brain DICOM and atlas DICOM to phantom shape
ds_resample = resample_image(ds, ds_phantom)
ds_atlas_resample = resample_image(ds_atlas, ds_phantom)
# Register Brain DICOM and atlas DICOM with phantom
ds_atlas_register = register_images(ds_atlas_resample, ds_phantom)
atlas_array = sitk.GetArrayFromImage(ds_atlas_register) # z, y, x
ds_register = register_images(ds_resample, ds_phantom)
ds_array = sitk.GetArrayFromImage(ds_register) # z, y, x
# Creating window and frames
root = tk.Tk()
root.title("DICOM Image Display")
top_frame = tk.Frame() # frame with buttons and sliders
    frame = tk.Frame()  # frame with the side-by-side (synchronized) viewer
    frame_alpha = tk.Frame()  # frame with the alpha-blend viewer
top_frame.grid(row = 0, column = 0, sticky = tk.W, columnspan=6)
frame.grid(row = 1,sticky="nsew", column = 0, columnspan=6)
frame_alpha.grid(row = 1,sticky="nsew", column = 0, columnspan=6)
frame.tkraise()
selector = tk.BooleanVar()
    # Displaying images on the synchronized viewer
fig, axs = plt.subplots(1,2, figsize=(15, 6), dpi=100, sharex=True, sharey=True)
axs = axs.ravel()
colormap = tk.StringVar()
colormap.set("bone")
axs[0].imshow(ds_array[0,:,:], cmap=plt.cm.get_cmap(colormap.get()))
axs[1].imshow(phantom_array[0,:,:], cmap=plt.cm.get_cmap(colormap.get()))
canvas = FigureCanvasTkAgg(fig, master=frame)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.TOP, expand=1)
toolbar = NavigationToolbar2Tk(canvas, frame)
toolbar.update()
canvas.get_tk_widget().pack(side=tk.TOP, expand=1)
    # Displaying images on the alpha-blend viewer
fig2, ax = plt.subplots(1, figsize=(15, 6), dpi=100, sharex=True, sharey=True)
alpha = 0
ax.imshow(ds_array[0, :, :], cmap=plt.cm.get_cmap(colormap.get()))
ax.imshow(phantom_array[0, :, :], cmap=plt.cm.get_cmap("prism"), alpha=alpha/100)
canvas_alpha = FigureCanvasTkAgg(fig2, master=frame_alpha)
canvas_alpha.draw()
canvas_alpha.get_tk_widget().pack(side=tk.TOP, expand=1)
toolbar_alpha = NavigationToolbar2Tk(canvas_alpha, frame_alpha)
toolbar_alpha.update()
canvas_alpha.get_tk_widget().pack(side=tk.TOP, expand=1)
# Selecting slices
pos = 0
slice_selector = tk.Scale(top_frame, label="Slice selector", from_=0, to=ds_array.shape[0] - 1,
orient=tk.HORIZONTAL, length=400,
command=update_slice, tickinterval=20)
slice_selector.pack(side=tk.LEFT, anchor=tk.NW)
    # Showing the current slice number
label_slice = tk.Label(top_frame)
label_slice.pack(side=tk.TOP, anchor=tk.NW, before=slice_selector)
slice_pos = "Nº Slice: " + str(pos)
label_slice.config(text=slice_pos)
    # Switching between synchronized and alpha visualization
b = tk.Button(top_frame, text="Mode selector", command=mode_selector, width=10)
b.pack(side=tk.TOP)
    # Selecting the alpha percentage for the alpha visualization
alpha_selector = tk.Scale(top_frame, label="alpha value", from_=0, to=100,
orient=tk.HORIZONTAL, length=400,
command=update_slice, tickinterval=5)
alpha_selector.pack(side=tk.TOP)
root.mainloop()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
JonatanMariscal.noreply@github.com
|
b9504f50d0a006b0e32c812812e12c7e725481ff
|
50ec136d757fbd6033748ea0d0b084e1663fb4ff
|
/train.py
|
dd7a40588b03e4b8bea191ca677398465c207191
|
[] |
no_license
|
Tyler-Shamsuddoha/python-image-classifier-keras
|
2b67cc196f8f692e22b65f6505ad779c57db53bc
|
2e734bbb08290ff70a35e566114014762ff4bd06
|
refs/heads/master
| 2023-06-12T19:55:50.369444
| 2021-07-07T20:20:21
| 2021-07-07T20:20:21
| 353,856,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,729
|
py
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D
from tensorflow.keras.preprocessing.image import img_to_array, ImageDataGenerator
from imutils import paths
from tensorflow.keras.losses import categorical_crossentropy #cosine_proximity,
from tensorflow.keras.optimizers import Nadam, Adam
from tensorflow.keras.utils import plot_model
from sklearn.neural_network import MLPClassifier
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.applications import InceptionResNetV2
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import cv2
import numpy as np
import tensorflow as tf
import random
import os
import matplotlib.pyplot as plt
import sklearn
# Set random seeds to ensure the reproducible results
SEED = 309
np.random.seed(SEED)
random.seed(SEED)
tf.random.set_seed(SEED)
IMG_HEIGHT = 64
IMG_WIDTH = 64
INPUT_SHAPE = (64, 64)
# conv_base = InceptionResNetV2(weights = 'imagenet', include_top = False, input_shape = (IMG_HEIGHT,IMG_WIDTH,3))
train_dir = './Train_data'
test_dir = './test_data'
validation_images_dir = './Validation_Sets'
batch_size = 32
epoch = 100
def construct_model():
"""
Construct the CNN model.
***
Please add your model implementation here, and don't forget compile the model
E.g., model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
    NOTE: you must include 'accuracy' as one of your metrics, which will be used for marking later.
***
:return: model: the initial CNN model
"""
model = Sequential([
Conv2D(16, kernel_size=3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
MaxPooling2D(2, 2),
Conv2D(32, kernel_size=3, padding='same', activation='relu'),
MaxPooling2D(2, 2),
Conv2D(64, 3, padding='same', activation='relu'),
MaxPooling2D(2, 2),
Conv2D(128, 3, padding='same', activation='relu'),
Flatten(),
Dense(64, 'relu'),
Dropout(0.5),
Dense(3, activation='softmax')
])
model.compile(loss=categorical_crossentropy, optimizer= Nadam(), metrics=['accuracy'])
return model
# *************************************
# Building Model Using Transfer Learning
# model = Sequential([
# conv_base,
# Flatten(),
# Dense(512, 'relu'),
# Dense(3, activation='softmax')
# ])
# model.compile(loss=categorical_crossentropy, optimizer= Nadam(), metrics=['accuracy'])
# return model
def train_model(model):
"""
Train the CNN model
***
Please add your training implementation here, including pre-processing and training
***
:param model: the initial CNN model
:return:model: the trained CNN model
"""
print("Loading Images...")
validation_image_generator = ImageDataGenerator(rescale=1./255, rotation_range=40)
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
directory=validation_images_dir,
target_size=INPUT_SHAPE,
class_mode='categorical')
print("Loaded Validation Set Images Successfully\n")
train_image_generator = ImageDataGenerator(rescale=1./255, zoom_range=0.2, rotation_range=40)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=INPUT_SHAPE,
class_mode='categorical')
print("Loaded Training Images Successfully\n")
print("Starting training....\n")
model = construct_model()
filepath = 'model/newmodel111.h5'
model_checkpoint = ModelCheckpoint(filepath, monitor='val_loss', save_best_only= True, verbose=1, mode = 'min')
# early_stop = EarlyStopping(filepath, monitor='val_acc', mode='max', patience=5)
history = model.fit_generator(
train_data_gen,
steps_per_epoch= 3748/batch_size,
epochs= epoch,
validation_data= val_data_gen,
validation_steps= 562/batch_size,
callbacks = [model_checkpoint],
)
visualise_results(history)
return model
def save_model(model):
"""
Save the keras model for later evaluation
:param model: the trained CNN model
:return:
"""
# ***
# Please remove the comment to enable model save.
# However, it will overwrite the baseline model we provided.
# ***
model.save("model/new_model.h5")
print("Model Saved Successfully.")
def visualise_results(history):
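    # Plot training & validation accuracy values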
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
if __name__ == '__main__':
    model = train_model(construct_model())
save_model(model)
|
[
"tylershamsuddoha@gmail.com"
] |
tylershamsuddoha@gmail.com
|
1bc7d1b8360e79b34496216b5177441ca38e9763
|
22a8a20397148dd4aef2fbd529121df8b91c918f
|
/sklearn/iris_classification.py
|
c1cf8b64148764486906cd0a30d043cd5a016d06
|
[] |
no_license
|
greenchapter/playground
|
87313d6926e21ee79c45114b0ff922948e35c601
|
033f300ccb8ef55dcbebb8197d5d1a8709fb258e
|
refs/heads/main
| 2022-06-01T08:57:16.896340
| 2022-05-13T18:09:45
| 2022-05-13T18:09:45
| 96,816,180
| 0
| 0
| null | 2022-05-13T18:09:46
| 2017-07-10T19:59:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
#
# Example: classifying iris flowers
#
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
# Load the iris dataset
data_train = pd.read_csv('./iris.csv')
# Map the 3 target classes to the numeric values 0, 1 and 2.
data_train.loc[data_train['species']=='Iris-setosa', 'species']=0
data_train.loc[data_train['species']=='Iris-versicolor', 'species']=1
data_train.loc[data_train['species']=='Iris-virginica', 'species']=2
data_train = data_train.apply(pd.to_numeric)
# Represent the loaded dataset as a matrix
data_train_array = data_train.to_numpy()
# Split the dataset into two separate parts: training data and test data.
# 80% of the data is used for training the model and 20% for testing it.
# Since the input is a vector, the capital letter X is used;
# the output, by contrast, is a single value, hence the lowercase y.
X_train, X_test, y_train, y_test = train_test_split(data_train_array[:,:4],
data_train_array[:,4],
test_size=0.2)
# VERSION 1
# A neural network classifier (multilayer perceptron) is built with the following properties:
# an input layer with 4 neurons representing the features of the iris plant;
# a hidden layer with 10 neurons;
# an output layer with 3 neurons representing the classes to be recognized.
# relu is used as the activation function and adam as the optimizer.
mlp = MLPClassifier(hidden_layer_sizes=(10,),activation='relu', solver='adam', max_iter=350, batch_size=10, verbose=True)
# VERSION 2
# The second variant uses 2 hidden layers with 5 and 3 neurons respectively.
# tanh is used as the activation function and adam as the optimizer.
# mlp = MLPClassifier(hidden_layer_sizes=(5,3),activation='tanh', solver='adam', max_iter=500, batch_size=10, verbose=True)
# Train the neural network on the training data
mlp.fit(X_train, y_train)
# Print the training result
print("Training score: %5.3f" % mlp.score(X_train, y_train))
# Evaluate the model on the test data
predictions = mlp.predict(X_test)
# and print the confusion matrix
print(confusion_matrix(y_test,predictions))
# Compute precision, recall and f1-score from the confusion matrix and print them
print(classification_report(y_test,predictions))
# Test the model and print the result
print("Test score: %5.3f" % mlp.score(X_test, y_test))
# Print the weight values for each layer
print("WEIGHTS:", mlp.coefs_)
print("BIASES:", mlp.intercepts_)
# As an example, the model predicts the following samples from the test set,
# with the features [sepal-length, sepal-width, petal-length, petal-width]
print(mlp.predict([[5.1,3.5,1.4,0.2], [5.9,3.,5.1,1.8], [4.9,3.,1.4,0.2], [5.8,2.7,4.1,1.]]))
# Visualize the loss curve and save it as Plot_of_loss_values.png (PNG format)
loss_values = mlp.loss_curve_
plt.plot(loss_values)
plt.savefig("./Plot_of_loss_values.png")
plt.show()
|
[
"noreply@github.com"
] |
greenchapter.noreply@github.com
|
77e79f9ef67d7b2a99e8a1c2d037a274848b9c17
|
ea3272d707f3a6e5d097301d300a0ea97ddd82b5
|
/psm/oop1/oop1_2/info_hiding_property.py
|
b12f77b06d357a78fd4c81646ba553fa9c6dce8c
|
[] |
no_license
|
gambler1541/BootCamp
|
d05850f256ed7a8baa02545551176959a66a9bb3
|
b025dd07a8fedd58366f96c9b516f134a95138f1
|
refs/heads/master
| 2020-04-07T07:21:51.363439
| 2019-06-08T11:10:27
| 2019-06-08T11:10:27
| 158,173,790
| 1
| 0
| null | 2018-11-19T06:38:36
| 2018-11-19T06:38:36
| null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
class Account:
def __init__(self, name, money):
self.user = name
        # not an instance member assignment: this calls the setter method below
self.balance = money
@property
def balance(self):
return self._balance
@balance.setter
def balance(self, money):
if money < 0:
return
        # this is where the instance member is actually created
self._balance = money
if __name__ == '__main__':
my_acnt = Account('greg', 5000)
    # the change attempt goes through the setter, so the _balance member is never set to a negative value
    # since it was not changed, the result printed below is 5000
    my_acnt.balance = -3000
    # access the _balance member by calling the getter method balance()
print(my_acnt.balance)
|
[
"sungmin3231@gmail.com"
] |
sungmin3231@gmail.com
|
7a5460a7b68fee36094e0189412d4ec7e108cebe
|
c11666437e45f6b771f4004e919dccfdab6c4640
|
/pet_cats.py
|
cfadb02fc56200f91fb9545b7a676ebdd094354a
|
[] |
no_license
|
mesare11/pet_market
|
30ba4b06161550e2abdd1e2fac073d9b1c906c91
|
c8e4006e578898c381be6f76cf6e05af39b94862
|
refs/heads/master
| 2021-01-02T19:02:34.933362
| 2020-07-23T12:16:01
| 2020-07-23T12:16:01
| 239,755,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
from pet_goods import pet_goods
from pet_animals import pet_animals
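# pet_cats extends pet_animals with a cat-breed attribute.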
class pet_cats(pet_animals):
def __init__(self, name, price, kind_pet, age, cats_breed):
pet_animals.__init__(self, name, price, kind_pet, age)
self.cats_breed = cats_breed
def display_cats(self):
print(self.name, self.price, self.kind_pet, self.age, self.cats_breed)
def add_cat():
goods_array=[]
    name = input("Please input goods name: ")
    price = int(input("Please input goods price: "))
    kind_pet = input("Please input kind of pet: ")
    age = input("Please input pet age: ")
    cats_breed = input("Please input cat's breed: ")
    goods_array.append(name)
    goods_array.append(price)
    goods_array.append(kind_pet)
    goods_array.append(age)
    goods_array.append(cats_breed)
    return goods_array
|
[
"mr.mesare@gmail.com"
] |
mr.mesare@gmail.com
|
a291c7bfaadb64ce0f0f8fe7ff044a54344a7ba5
|
77c8d29caad199fb239133e6267d1b75bd2dfe48
|
/packages/pyright-internal/typeshed-fallback/stdlib/builtins.pyi
|
78b29c5aa7e794980ce9212e92a26b6fbb0c2072
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
simpoir/pyright
|
9c80e596f99dfd1341a55373a96d8795cb72fb56
|
320a0a2fd31e4ffc69d4bd96d7202bbe8d8cb410
|
refs/heads/master
| 2023-04-18T06:42:16.194352
| 2021-04-29T15:20:19
| 2021-04-29T15:20:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54,998
|
pyi
|
import sys
import types
from _typeshed import (
AnyPath,
OpenBinaryMode,
OpenBinaryModeReading,
OpenBinaryModeUpdating,
OpenBinaryModeWriting,
OpenTextMode,
ReadableBuffer,
SupportsDivMod,
SupportsKeysAndGetItem,
SupportsLessThan,
SupportsLessThanT,
SupportsRDivMod,
SupportsWrite,
)
from ast import AST, mod
from io import BufferedRandom, BufferedReader, BufferedWriter, FileIO, TextIOWrapper
from types import CodeType, TracebackType
from typing import (
IO,
AbstractSet,
Any,
BinaryIO,
ByteString,
Callable,
Container,
Dict,
FrozenSet,
Generic,
ItemsView,
Iterable,
Iterator,
KeysView,
List,
Mapping,
MutableMapping,
MutableSequence,
MutableSet,
NoReturn,
Optional,
Protocol,
Reversible,
Sequence,
Set,
Sized,
SupportsAbs,
SupportsBytes,
SupportsComplex,
SupportsFloat,
SupportsInt,
SupportsRound,
Tuple,
Type,
TypeVar,
Union,
ValuesView,
overload,
runtime_checkable,
)
from typing_extensions import Literal
if sys.version_info >= (3, 9):
from types import GenericAlias
class _SupportsIndex(Protocol):
def __index__(self) -> int: ...
class _SupportsTrunc(Protocol):
def __trunc__(self) -> int: ...
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
_T_contra = TypeVar("_T_contra", contravariant=True)
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
_S = TypeVar("_S")
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_T3 = TypeVar("_T3")
_T4 = TypeVar("_T4")
_T5 = TypeVar("_T5")
_TT = TypeVar("_TT", bound="type")
_TBE = TypeVar("_TBE", bound="BaseException")
class object:
__doc__: Optional[str]
__dict__: Dict[str, Any]
__slots__: Union[str, Iterable[str]]
__module__: str
__annotations__: Dict[str, Any]
@property
def __class__(self: _T) -> Type[_T]: ...
# Ignore errors about type mismatch between property getter and setter
@__class__.setter
def __class__(self, __type: Type[object]) -> None: ... # type: ignore # noqa: F811
def __init__(self) -> None: ...
def __new__(cls) -> Any: ...
def __setattr__(self, name: str, value: Any) -> None: ...
def __eq__(self, o: object) -> bool: ...
def __ne__(self, o: object) -> bool: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __hash__(self) -> int: ...
def __format__(self, format_spec: str) -> str: ...
def __getattribute__(self, name: str) -> Any: ...
def __delattr__(self, name: str) -> None: ...
def __sizeof__(self) -> int: ...
def __reduce__(self) -> Union[str, Tuple[Any, ...]]: ...
def __reduce_ex__(self, protocol: int) -> Union[str, Tuple[Any, ...]]: ...
def __dir__(self) -> Iterable[str]: ...
def __init_subclass__(cls) -> None: ...
class staticmethod(object): # Special, only valid as a decorator.
__func__: Callable[..., Any]
__isabstractmethod__: bool
def __init__(self, f: Callable[..., Any]) -> None: ...
def __new__(cls: Type[_T], *args: Any, **kwargs: Any) -> _T: ...
def __get__(self, obj: _T, type: Optional[Type[_T]] = ...) -> Callable[..., Any]: ...
class classmethod(object): # Special, only valid as a decorator.
__func__: Callable[..., Any]
__isabstractmethod__: bool
def __init__(self, f: Callable[..., Any]) -> None: ...
def __new__(cls: Type[_T], *args: Any, **kwargs: Any) -> _T: ...
def __get__(self, obj: _T, type: Optional[Type[_T]] = ...) -> Callable[..., Any]: ...
class type(object):
__base__: type
__bases__: Tuple[type, ...]
__basicsize__: int
__dict__: Dict[str, Any]
__dictoffset__: int
__flags__: int
__itemsize__: int
__module__: str
__mro__: Tuple[type, ...]
__name__: str
__qualname__: str
__text_signature__: Optional[str]
__weakrefoffset__: int
@overload
def __init__(self, o: object) -> None: ...
@overload
def __init__(self, name: str, bases: Tuple[type, ...], dict: Dict[str, Any], **kwds: Any) -> None: ...
@overload
def __new__(cls, o: object) -> type: ...
@overload
def __new__(cls, name: str, bases: Tuple[type, ...], namespace: Dict[str, Any], **kwds: Any) -> type: ...
def __call__(self, *args: Any, **kwds: Any) -> Any: ...
def __subclasses__(self: _TT) -> List[_TT]: ...
    # Note: the documentation doesn't specify what the return type is; the standard
# implementation seems to be returning a list.
def mro(self) -> List[type]: ...
def __instancecheck__(self, instance: Any) -> bool: ...
def __subclasscheck__(self, subclass: type) -> bool: ...
@classmethod
def __prepare__(metacls, __name: str, __bases: Tuple[type, ...], **kwds: Any) -> Mapping[str, Any]: ...
if sys.version_info >= (3, 10):
def __or__(self, t: Any) -> types.Union: ...
class super(object):
@overload
def __init__(self, t: Any, obj: Any) -> None: ...
@overload
def __init__(self, t: Any) -> None: ...
@overload
def __init__(self) -> None: ...
class int:
@overload
def __new__(cls: Type[_T], x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc] = ...) -> _T: ...
@overload
def __new__(cls: Type[_T], x: Union[str, bytes, bytearray], base: int) -> _T: ...
if sys.version_info >= (3, 8):
def as_integer_ratio(self) -> Tuple[int, Literal[1]]: ...
@property
def real(self) -> int: ...
@property
def imag(self) -> int: ...
@property
def numerator(self) -> int: ...
@property
def denominator(self) -> int: ...
def conjugate(self) -> int: ...
def bit_length(self) -> int: ...
def to_bytes(self, length: int, byteorder: str, *, signed: bool = ...) -> bytes: ...
@classmethod
def from_bytes(
cls, bytes: Union[Iterable[int], SupportsBytes], byteorder: str, *, signed: bool = ...
) -> int: ... # TODO buffer object argument
def __add__(self, x: int) -> int: ...
def __sub__(self, x: int) -> int: ...
def __mul__(self, x: int) -> int: ...
def __floordiv__(self, x: int) -> int: ...
def __truediv__(self, x: int) -> float: ...
def __mod__(self, x: int) -> int: ...
def __divmod__(self, x: int) -> Tuple[int, int]: ...
def __radd__(self, x: int) -> int: ...
def __rsub__(self, x: int) -> int: ...
def __rmul__(self, x: int) -> int: ...
def __rfloordiv__(self, x: int) -> int: ...
def __rtruediv__(self, x: int) -> float: ...
def __rmod__(self, x: int) -> int: ...
def __rdivmod__(self, x: int) -> Tuple[int, int]: ...
@overload
def __pow__(self, __x: Literal[2], __modulo: Optional[int] = ...) -> int: ...
@overload
def __pow__(self, __x: int, __modulo: Optional[int] = ...) -> Any: ... # Return type can be int or float, depending on x.
def __rpow__(self, x: int, mod: Optional[int] = ...) -> Any: ...
def __and__(self, n: int) -> int: ...
def __or__(self, n: int) -> int: ...
def __xor__(self, n: int) -> int: ...
def __lshift__(self, n: int) -> int: ...
def __rshift__(self, n: int) -> int: ...
def __rand__(self, n: int) -> int: ...
def __ror__(self, n: int) -> int: ...
def __rxor__(self, n: int) -> int: ...
def __rlshift__(self, n: int) -> int: ...
def __rrshift__(self, n: int) -> int: ...
def __neg__(self) -> int: ...
def __pos__(self) -> int: ...
def __invert__(self) -> int: ...
def __trunc__(self) -> int: ...
def __ceil__(self) -> int: ...
def __floor__(self) -> int: ...
def __round__(self, ndigits: Optional[int] = ...) -> int: ...
def __getnewargs__(self) -> Tuple[int]: ...
def __eq__(self, x: object) -> bool: ...
def __ne__(self, x: object) -> bool: ...
def __lt__(self, x: int) -> bool: ...
def __le__(self, x: int) -> bool: ...
def __gt__(self, x: int) -> bool: ...
def __ge__(self, x: int) -> bool: ...
def __str__(self) -> str: ...
def __float__(self) -> float: ...
def __int__(self) -> int: ...
def __abs__(self) -> int: ...
def __hash__(self) -> int: ...
def __bool__(self) -> bool: ...
def __index__(self) -> int: ...
class float:
def __new__(cls: Type[_T], x: Union[SupportsFloat, _SupportsIndex, str, bytes, bytearray] = ...) -> _T: ...
def as_integer_ratio(self) -> Tuple[int, int]: ...
def hex(self) -> str: ...
def is_integer(self) -> bool: ...
@classmethod
def fromhex(cls, __s: str) -> float: ...
@property
def real(self) -> float: ...
@property
def imag(self) -> float: ...
def conjugate(self) -> float: ...
def __add__(self, x: float) -> float: ...
def __sub__(self, x: float) -> float: ...
def __mul__(self, x: float) -> float: ...
def __floordiv__(self, x: float) -> float: ...
def __truediv__(self, x: float) -> float: ...
def __mod__(self, x: float) -> float: ...
def __divmod__(self, x: float) -> Tuple[float, float]: ...
def __pow__(
self, x: float, mod: None = ...
) -> float: ... # In Python 3, returns complex if self is negative and x is not whole
def __radd__(self, x: float) -> float: ...
def __rsub__(self, x: float) -> float: ...
def __rmul__(self, x: float) -> float: ...
def __rfloordiv__(self, x: float) -> float: ...
def __rtruediv__(self, x: float) -> float: ...
def __rmod__(self, x: float) -> float: ...
def __rdivmod__(self, x: float) -> Tuple[float, float]: ...
def __rpow__(self, x: float, mod: None = ...) -> float: ...
def __getnewargs__(self) -> Tuple[float]: ...
def __trunc__(self) -> int: ...
if sys.version_info >= (3, 9):
def __ceil__(self) -> int: ...
def __floor__(self) -> int: ...
@overload
def __round__(self, ndigits: None = ...) -> int: ...
@overload
def __round__(self, ndigits: int) -> float: ...
def __eq__(self, x: object) -> bool: ...
def __ne__(self, x: object) -> bool: ...
def __lt__(self, x: float) -> bool: ...
def __le__(self, x: float) -> bool: ...
def __gt__(self, x: float) -> bool: ...
def __ge__(self, x: float) -> bool: ...
def __neg__(self) -> float: ...
def __pos__(self) -> float: ...
def __str__(self) -> str: ...
def __int__(self) -> int: ...
def __float__(self) -> float: ...
def __abs__(self) -> float: ...
def __hash__(self) -> int: ...
def __bool__(self) -> bool: ...
class complex:
@overload
def __new__(cls: Type[_T], real: float = ..., imag: float = ...) -> _T: ...
@overload
def __new__(cls: Type[_T], real: Union[str, SupportsComplex, _SupportsIndex]) -> _T: ...
@property
def real(self) -> float: ...
@property
def imag(self) -> float: ...
def conjugate(self) -> complex: ...
def __add__(self, x: complex) -> complex: ...
def __sub__(self, x: complex) -> complex: ...
def __mul__(self, x: complex) -> complex: ...
def __pow__(self, x: complex, mod: None = ...) -> complex: ...
def __truediv__(self, x: complex) -> complex: ...
def __radd__(self, x: complex) -> complex: ...
def __rsub__(self, x: complex) -> complex: ...
def __rmul__(self, x: complex) -> complex: ...
def __rpow__(self, x: complex, mod: None = ...) -> complex: ...
def __rtruediv__(self, x: complex) -> complex: ...
def __eq__(self, x: object) -> bool: ...
def __ne__(self, x: object) -> bool: ...
def __neg__(self) -> complex: ...
def __pos__(self) -> complex: ...
def __str__(self) -> str: ...
def __abs__(self) -> float: ...
def __hash__(self) -> int: ...
def __bool__(self) -> bool: ...
class _FormatMapMapping(Protocol):
def __getitem__(self, __key: str) -> Any: ...
class str(Sequence[str]):
@overload
def __new__(cls: Type[_T], o: object = ...) -> _T: ...
@overload
def __new__(cls: Type[_T], o: bytes, encoding: str = ..., errors: str = ...) -> _T: ...
def capitalize(self) -> str: ...
def casefold(self) -> str: ...
def center(self, __width: int, __fillchar: str = ...) -> str: ...
def count(self, x: str, __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def encode(self, encoding: str = ..., errors: str = ...) -> bytes: ...
def endswith(
self, __suffix: Union[str, Tuple[str, ...]], __start: Optional[int] = ..., __end: Optional[int] = ...
) -> bool: ...
def expandtabs(self, tabsize: int = ...) -> str: ...
def find(self, __sub: str, __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def format(self, *args: object, **kwargs: object) -> str: ...
def format_map(self, map: _FormatMapMapping) -> str: ...
def index(self, __sub: str, __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def isalnum(self) -> bool: ...
def isalpha(self) -> bool: ...
if sys.version_info >= (3, 7):
def isascii(self) -> bool: ...
def isdecimal(self) -> bool: ...
def isdigit(self) -> bool: ...
def isidentifier(self) -> bool: ...
def islower(self) -> bool: ...
def isnumeric(self) -> bool: ...
def isprintable(self) -> bool: ...
def isspace(self) -> bool: ...
def istitle(self) -> bool: ...
def isupper(self) -> bool: ...
def join(self, __iterable: Iterable[str]) -> str: ...
def ljust(self, __width: int, __fillchar: str = ...) -> str: ...
def lower(self) -> str: ...
def lstrip(self, __chars: Optional[str] = ...) -> str: ...
def partition(self, __sep: str) -> Tuple[str, str, str]: ...
def replace(self, __old: str, __new: str, __count: int = ...) -> str: ...
if sys.version_info >= (3, 9):
def removeprefix(self, __prefix: str) -> str: ...
def removesuffix(self, __suffix: str) -> str: ...
def rfind(self, __sub: str, __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def rindex(self, __sub: str, __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def rjust(self, __width: int, __fillchar: str = ...) -> str: ...
def rpartition(self, __sep: str) -> Tuple[str, str, str]: ...
def rsplit(self, sep: Optional[str] = ..., maxsplit: int = ...) -> List[str]: ...
def rstrip(self, __chars: Optional[str] = ...) -> str: ...
def split(self, sep: Optional[str] = ..., maxsplit: int = ...) -> List[str]: ...
def splitlines(self, keepends: bool = ...) -> List[str]: ...
def startswith(
self, __prefix: Union[str, Tuple[str, ...]], __start: Optional[int] = ..., __end: Optional[int] = ...
) -> bool: ...
def strip(self, __chars: Optional[str] = ...) -> str: ...
def swapcase(self) -> str: ...
def title(self) -> str: ...
def translate(self, __table: Union[Mapping[int, Union[int, str, None]], Sequence[Union[int, str, None]]]) -> str: ...
def upper(self) -> str: ...
def zfill(self, __width: int) -> str: ...
@staticmethod
@overload
def maketrans(__x: Union[Dict[int, _T], Dict[str, _T], Dict[Union[str, int], _T]]) -> Dict[int, _T]: ...
@staticmethod
@overload
def maketrans(__x: str, __y: str, __z: Optional[str] = ...) -> Dict[int, Union[int, None]]: ...
def __add__(self, s: str) -> str: ...
# Incompatible with Sequence.__contains__
def __contains__(self, o: str) -> bool: ... # type: ignore
def __eq__(self, x: object) -> bool: ...
def __ge__(self, x: str) -> bool: ...
def __getitem__(self, i: Union[int, slice]) -> str: ...
def __gt__(self, x: str) -> bool: ...
def __hash__(self) -> int: ...
def __iter__(self) -> Iterator[str]: ...
def __le__(self, x: str) -> bool: ...
def __len__(self) -> int: ...
def __lt__(self, x: str) -> bool: ...
def __mod__(self, x: Any) -> str: ...
def __mul__(self, n: int) -> str: ...
def __ne__(self, x: object) -> bool: ...
def __repr__(self) -> str: ...
def __rmul__(self, n: int) -> str: ...
def __str__(self) -> str: ...
def __getnewargs__(self) -> Tuple[str]: ...
class bytes(ByteString):
@overload
def __new__(cls: Type[_T], ints: Iterable[int]) -> _T: ...
@overload
def __new__(cls: Type[_T], string: str, encoding: str, errors: str = ...) -> _T: ...
@overload
def __new__(cls: Type[_T], length: int) -> _T: ...
@overload
def __new__(cls: Type[_T]) -> _T: ...
@overload
def __new__(cls: Type[_T], o: SupportsBytes) -> _T: ...
def capitalize(self) -> bytes: ...
def center(self, __width: int, __fillchar: bytes = ...) -> bytes: ...
def count(self, __sub: Union[bytes, int], __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def decode(self, encoding: str = ..., errors: str = ...) -> str: ...
def endswith(
self, __suffix: Union[bytes, Tuple[bytes, ...]], __start: Optional[int] = ..., __end: Optional[int] = ...
) -> bool: ...
def expandtabs(self, tabsize: int = ...) -> bytes: ...
def find(self, __sub: Union[bytes, int], __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
if sys.version_info >= (3, 8):
def hex(self, sep: Union[str, bytes] = ..., bytes_per_sep: int = ...) -> str: ...
else:
def hex(self) -> str: ...
def index(self, __sub: Union[bytes, int], __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def isalnum(self) -> bool: ...
def isalpha(self) -> bool: ...
if sys.version_info >= (3, 7):
def isascii(self) -> bool: ...
def isdigit(self) -> bool: ...
def islower(self) -> bool: ...
def isspace(self) -> bool: ...
def istitle(self) -> bool: ...
def isupper(self) -> bool: ...
def join(self, __iterable_of_bytes: Iterable[Union[ByteString, memoryview]]) -> bytes: ...
def ljust(self, __width: int, __fillchar: bytes = ...) -> bytes: ...
def lower(self) -> bytes: ...
def lstrip(self, __bytes: Optional[bytes] = ...) -> bytes: ...
def partition(self, __sep: bytes) -> Tuple[bytes, bytes, bytes]: ...
def replace(self, __old: bytes, __new: bytes, __count: int = ...) -> bytes: ...
if sys.version_info >= (3, 9):
def removeprefix(self, __prefix: bytes) -> bytes: ...
def removesuffix(self, __suffix: bytes) -> bytes: ...
def rfind(self, __sub: Union[bytes, int], __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def rindex(self, __sub: Union[bytes, int], __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def rjust(self, __width: int, __fillchar: bytes = ...) -> bytes: ...
def rpartition(self, __sep: bytes) -> Tuple[bytes, bytes, bytes]: ...
def rsplit(self, sep: Optional[bytes] = ..., maxsplit: int = ...) -> List[bytes]: ...
def rstrip(self, __bytes: Optional[bytes] = ...) -> bytes: ...
def split(self, sep: Optional[bytes] = ..., maxsplit: int = ...) -> List[bytes]: ...
def splitlines(self, keepends: bool = ...) -> List[bytes]: ...
def startswith(
self, __prefix: Union[bytes, Tuple[bytes, ...]], __start: Optional[int] = ..., __end: Optional[int] = ...
) -> bool: ...
def strip(self, __bytes: Optional[bytes] = ...) -> bytes: ...
def swapcase(self) -> bytes: ...
def title(self) -> bytes: ...
def translate(self, __table: Optional[bytes], delete: bytes = ...) -> bytes: ...
def upper(self) -> bytes: ...
def zfill(self, __width: int) -> bytes: ...
@classmethod
def fromhex(cls, __s: str) -> bytes: ...
@classmethod
def maketrans(cls, frm: bytes, to: bytes) -> bytes: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[int]: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __int__(self) -> int: ...
def __float__(self) -> float: ...
def __hash__(self) -> int: ...
@overload
def __getitem__(self, i: int) -> int: ...
@overload
def __getitem__(self, s: slice) -> bytes: ...
def __add__(self, s: bytes) -> bytes: ...
def __mul__(self, n: int) -> bytes: ...
def __rmul__(self, n: int) -> bytes: ...
def __mod__(self, value: Any) -> bytes: ...
# Incompatible with Sequence.__contains__
def __contains__(self, o: Union[int, bytes]) -> bool: ... # type: ignore
def __eq__(self, x: object) -> bool: ...
def __ne__(self, x: object) -> bool: ...
def __lt__(self, x: bytes) -> bool: ...
def __le__(self, x: bytes) -> bool: ...
def __gt__(self, x: bytes) -> bool: ...
def __ge__(self, x: bytes) -> bool: ...
def __getnewargs__(self) -> Tuple[bytes]: ...
class bytearray(MutableSequence[int], ByteString):
@overload
def __init__(self) -> None: ...
@overload
def __init__(self, ints: Iterable[int]) -> None: ...
@overload
def __init__(self, string: str, encoding: str, errors: str = ...) -> None: ...
@overload
def __init__(self, length: int) -> None: ...
def capitalize(self) -> bytearray: ...
def center(self, __width: int, __fillchar: bytes = ...) -> bytearray: ...
def count(self, __sub: Union[bytes, int], __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def copy(self) -> bytearray: ...
def decode(self, encoding: str = ..., errors: str = ...) -> str: ...
def endswith(
self, __suffix: Union[bytes, Tuple[bytes, ...]], __start: Optional[int] = ..., __end: Optional[int] = ...
) -> bool: ...
def expandtabs(self, tabsize: int = ...) -> bytearray: ...
def find(self, __sub: Union[bytes, int], __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
if sys.version_info >= (3, 8):
def hex(self, sep: Union[str, bytes] = ..., bytes_per_sep: int = ...) -> str: ...
else:
def hex(self) -> str: ...
def index(self, __sub: Union[bytes, int], __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def insert(self, __index: int, __item: int) -> None: ...
def isalnum(self) -> bool: ...
def isalpha(self) -> bool: ...
if sys.version_info >= (3, 7):
def isascii(self) -> bool: ...
def isdigit(self) -> bool: ...
def islower(self) -> bool: ...
def isspace(self) -> bool: ...
def istitle(self) -> bool: ...
def isupper(self) -> bool: ...
def join(self, __iterable_of_bytes: Iterable[Union[ByteString, memoryview]]) -> bytearray: ...
def ljust(self, __width: int, __fillchar: bytes = ...) -> bytearray: ...
def lower(self) -> bytearray: ...
def lstrip(self, __bytes: Optional[bytes] = ...) -> bytearray: ...
def partition(self, __sep: bytes) -> Tuple[bytearray, bytearray, bytearray]: ...
if sys.version_info >= (3, 9):
def removeprefix(self, __prefix: bytes) -> bytearray: ...
def removesuffix(self, __suffix: bytes) -> bytearray: ...
def replace(self, __old: bytes, __new: bytes, __count: int = ...) -> bytearray: ...
def rfind(self, __sub: Union[bytes, int], __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def rindex(self, __sub: Union[bytes, int], __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ...
def rjust(self, __width: int, __fillchar: bytes = ...) -> bytearray: ...
def rpartition(self, __sep: bytes) -> Tuple[bytearray, bytearray, bytearray]: ...
def rsplit(self, sep: Optional[bytes] = ..., maxsplit: int = ...) -> List[bytearray]: ...
def rstrip(self, __bytes: Optional[bytes] = ...) -> bytearray: ...
def split(self, sep: Optional[bytes] = ..., maxsplit: int = ...) -> List[bytearray]: ...
def splitlines(self, keepends: bool = ...) -> List[bytearray]: ...
def startswith(
self, __prefix: Union[bytes, Tuple[bytes, ...]], __start: Optional[int] = ..., __end: Optional[int] = ...
) -> bool: ...
def strip(self, __bytes: Optional[bytes] = ...) -> bytearray: ...
def swapcase(self) -> bytearray: ...
def title(self) -> bytearray: ...
def translate(self, __table: Optional[bytes], delete: bytes = ...) -> bytearray: ...
def upper(self) -> bytearray: ...
def zfill(self, __width: int) -> bytearray: ...
@classmethod
def fromhex(cls, __string: str) -> bytearray: ...
@classmethod
def maketrans(cls, __frm: bytes, __to: bytes) -> bytes: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[int]: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __int__(self) -> int: ...
def __float__(self) -> float: ...
__hash__: None # type: ignore
@overload
def __getitem__(self, i: int) -> int: ...
@overload
def __getitem__(self, s: slice) -> bytearray: ...
@overload
def __setitem__(self, i: int, x: int) -> None: ...
@overload
def __setitem__(self, s: slice, x: Union[Iterable[int], bytes]) -> None: ...
def __delitem__(self, i: Union[int, slice]) -> None: ...
def __add__(self, s: bytes) -> bytearray: ...
def __iadd__(self, s: Iterable[int]) -> bytearray: ...
def __mul__(self, n: int) -> bytearray: ...
def __rmul__(self, n: int) -> bytearray: ...
def __imul__(self, n: int) -> bytearray: ...
def __mod__(self, value: Any) -> bytes: ...
# Incompatible with Sequence.__contains__
def __contains__(self, o: Union[int, bytes]) -> bool: ... # type: ignore
def __eq__(self, x: object) -> bool: ...
def __ne__(self, x: object) -> bool: ...
def __lt__(self, x: bytes) -> bool: ...
def __le__(self, x: bytes) -> bool: ...
def __gt__(self, x: bytes) -> bool: ...
def __ge__(self, x: bytes) -> bool: ...
class memoryview(Sized, Container[int]):
format: str
itemsize: int
shape: Optional[Tuple[int, ...]]
strides: Optional[Tuple[int, ...]]
suboffsets: Optional[Tuple[int, ...]]
readonly: bool
ndim: int
obj: Union[bytes, bytearray]
c_contiguous: bool
f_contiguous: bool
contiguous: bool
nbytes: int
def __init__(self, obj: ReadableBuffer) -> None: ...
def __enter__(self) -> memoryview: ...
def __exit__(
self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
) -> None: ...
def cast(self, format: str, shape: Union[List[int], Tuple[int]] = ...) -> memoryview: ...
@overload
def __getitem__(self, i: int) -> int: ...
@overload
def __getitem__(self, s: slice) -> memoryview: ...
def __contains__(self, x: object) -> bool: ...
def __iter__(self) -> Iterator[int]: ...
def __len__(self) -> int: ...
@overload
def __setitem__(self, s: slice, o: bytes) -> None: ...
@overload
def __setitem__(self, i: int, o: int) -> None: ...
if sys.version_info >= (3, 8):
def tobytes(self, order: Optional[Literal["C", "F", "A"]] = ...) -> bytes: ...
else:
def tobytes(self) -> bytes: ...
def tolist(self) -> List[int]: ...
if sys.version_info >= (3, 8):
def toreadonly(self) -> memoryview: ...
def release(self) -> None: ...
if sys.version_info >= (3, 8):
def hex(self, sep: Union[str, bytes] = ..., bytes_per_sep: int = ...) -> str: ...
else:
def hex(self) -> str: ...
class bool(int):
def __new__(cls: Type[_T], __o: object = ...) -> _T: ...
@overload
def __and__(self, x: bool) -> bool: ...
@overload
def __and__(self, x: int) -> int: ...
@overload
def __or__(self, x: bool) -> bool: ...
@overload
def __or__(self, x: int) -> int: ...
@overload
def __xor__(self, x: bool) -> bool: ...
@overload
def __xor__(self, x: int) -> int: ...
@overload
def __rand__(self, x: bool) -> bool: ...
@overload
def __rand__(self, x: int) -> int: ...
@overload
def __ror__(self, x: bool) -> bool: ...
@overload
def __ror__(self, x: int) -> int: ...
@overload
def __rxor__(self, x: bool) -> bool: ...
@overload
def __rxor__(self, x: int) -> int: ...
def __getnewargs__(self) -> Tuple[int]: ...
class slice(object):
start: Any
step: Any
stop: Any
@overload
def __init__(self, stop: Any) -> None: ...
@overload
def __init__(self, start: Any, stop: Any, step: Any = ...) -> None: ...
__hash__: None # type: ignore
def indices(self, len: int) -> Tuple[int, int, int]: ...
class tuple(Sequence[_T_co], Generic[_T_co]):
def __new__(cls: Type[_T], iterable: Iterable[_T_co] = ...) -> _T: ...
def __len__(self) -> int: ...
def __contains__(self, x: object) -> bool: ...
@overload
def __getitem__(self, x: int) -> _T_co: ...
@overload
def __getitem__(self, x: slice) -> Tuple[_T_co, ...]: ...
def __iter__(self) -> Iterator[_T_co]: ...
def __lt__(self, x: Tuple[_T_co, ...]) -> bool: ...
def __le__(self, x: Tuple[_T_co, ...]) -> bool: ...
def __gt__(self, x: Tuple[_T_co, ...]) -> bool: ...
def __ge__(self, x: Tuple[_T_co, ...]) -> bool: ...
@overload
def __add__(self, x: Tuple[_T_co, ...]) -> Tuple[_T_co, ...]: ...
@overload
def __add__(self, x: Tuple[Any, ...]) -> Tuple[Any, ...]: ...
def __mul__(self, n: int) -> Tuple[_T_co, ...]: ...
def __rmul__(self, n: int) -> Tuple[_T_co, ...]: ...
def count(self, __value: Any) -> int: ...
def index(self, __value: Any, __start: int = ..., __stop: int = ...) -> int: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
class function:
# TODO not defined in builtins!
__name__: str
__module__: str
__code__: CodeType
__qualname__: str
__annotations__: Dict[str, Any]
class list(MutableSequence[_T], Generic[_T]):
@overload
def __init__(self) -> None: ...
@overload
def __init__(self, iterable: Iterable[_T]) -> None: ...
def clear(self) -> None: ...
def copy(self) -> List[_T]: ...
def append(self, __object: _T) -> None: ...
def extend(self, __iterable: Iterable[_T]) -> None: ...
def pop(self, __index: int = ...) -> _T: ...
def index(self, __value: _T, __start: int = ..., __stop: int = ...) -> int: ...
def count(self, __value: _T) -> int: ...
def insert(self, __index: int, __object: _T) -> None: ...
def remove(self, __value: _T) -> None: ...
def reverse(self) -> None: ...
@overload
def sort(self: List[SupportsLessThanT], *, key: None = ..., reverse: bool = ...) -> None: ...
@overload
def sort(self, *, key: Callable[[_T], SupportsLessThan], reverse: bool = ...) -> None: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[_T]: ...
def __str__(self) -> str: ...
__hash__: None # type: ignore
@overload
def __getitem__(self, i: _SupportsIndex) -> _T: ...
@overload
def __getitem__(self, s: slice) -> List[_T]: ...
@overload
def __setitem__(self, i: _SupportsIndex, o: _T) -> None: ...
@overload
def __setitem__(self, s: slice, o: Iterable[_T]) -> None: ...
def __delitem__(self, i: Union[_SupportsIndex, slice]) -> None: ...
def __add__(self, x: List[_T]) -> List[_T]: ...
def __iadd__(self: _S, x: Iterable[_T]) -> _S: ...
def __mul__(self, n: int) -> List[_T]: ...
def __rmul__(self, n: int) -> List[_T]: ...
def __imul__(self: _S, n: int) -> _S: ...
def __contains__(self, o: object) -> bool: ...
def __reversed__(self) -> Iterator[_T]: ...
def __gt__(self, x: List[_T]) -> bool: ...
def __ge__(self, x: List[_T]) -> bool: ...
def __lt__(self, x: List[_T]) -> bool: ...
def __le__(self, x: List[_T]) -> bool: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
class dict(MutableMapping[_KT, _VT], Generic[_KT, _VT]):
@overload
def __init__(self: Dict[_KT, _VT]) -> None: ...
@overload
def __init__(self: Dict[str, _VT], **kwargs: _VT) -> None: ...
@overload
def __init__(self, map: SupportsKeysAndGetItem[_KT, _VT], **kwargs: _VT) -> None: ...
@overload
def __init__(self, iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
def __new__(cls: Type[_T1], *args: Any, **kwargs: Any) -> _T1: ...
def clear(self) -> None: ...
def copy(self) -> Dict[_KT, _VT]: ...
def popitem(self) -> Tuple[_KT, _VT]: ...
def setdefault(self, __key: _KT, __default: _VT = ...) -> _VT: ...
@overload
def update(self, __m: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
@overload
def update(self, __m: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
@overload
def update(self, **kwargs: _VT) -> None: ...
def keys(self) -> KeysView[_KT]: ...
def values(self) -> ValuesView[_VT]: ...
def items(self) -> ItemsView[_KT, _VT]: ...
@classmethod
@overload
def fromkeys(cls, __iterable: Iterable[_T]) -> Dict[_T, Any]: ...
@classmethod
@overload
def fromkeys(cls, __iterable: Iterable[_T], __value: _S) -> Dict[_T, _S]: ...
def __len__(self) -> int: ...
def __getitem__(self, k: _KT) -> _VT: ...
def __setitem__(self, k: _KT, v: _VT) -> None: ...
def __delitem__(self, v: _KT) -> None: ...
def __iter__(self) -> Iterator[_KT]: ...
if sys.version_info >= (3, 8):
def __reversed__(self) -> Iterator[_KT]: ...
def __str__(self) -> str: ...
__hash__: None # type: ignore
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
def __or__(self, __value: Mapping[_KT, _VT]) -> Dict[_KT, _VT]: ...
def __ior__(self, __value: Mapping[_KT, _VT]) -> Dict[_KT, _VT]: ...
class set(MutableSet[_T], Generic[_T]):
def __init__(self, iterable: Iterable[_T] = ...) -> None: ...
def add(self, element: _T) -> None: ...
def clear(self) -> None: ...
def copy(self) -> Set[_T]: ...
def difference(self, *s: Iterable[Any]) -> Set[_T]: ...
def difference_update(self, *s: Iterable[Any]) -> None: ...
def discard(self, element: _T) -> None: ...
def intersection(self, *s: Iterable[Any]) -> Set[_T]: ...
def intersection_update(self, *s: Iterable[Any]) -> None: ...
def isdisjoint(self, s: Iterable[Any]) -> bool: ...
def issubset(self, s: Iterable[Any]) -> bool: ...
def issuperset(self, s: Iterable[Any]) -> bool: ...
def pop(self) -> _T: ...
def remove(self, element: _T) -> None: ...
def symmetric_difference(self, s: Iterable[_T]) -> Set[_T]: ...
def symmetric_difference_update(self, s: Iterable[_T]) -> None: ...
def union(self, *s: Iterable[_T]) -> Set[_T]: ...
def update(self, *s: Iterable[_T]) -> None: ...
def __len__(self) -> int: ...
def __contains__(self, o: object) -> bool: ...
def __iter__(self) -> Iterator[_T]: ...
def __str__(self) -> str: ...
def __and__(self, s: AbstractSet[object]) -> Set[_T]: ...
def __iand__(self, s: AbstractSet[object]) -> Set[_T]: ...
def __or__(self, s: AbstractSet[_S]) -> Set[Union[_T, _S]]: ...
def __ior__(self, s: AbstractSet[_S]) -> Set[Union[_T, _S]]: ...
def __sub__(self, s: AbstractSet[Optional[_T]]) -> Set[_T]: ...
def __isub__(self, s: AbstractSet[Optional[_T]]) -> Set[_T]: ...
def __xor__(self, s: AbstractSet[_S]) -> Set[Union[_T, _S]]: ...
def __ixor__(self, s: AbstractSet[_S]) -> Set[Union[_T, _S]]: ...
def __le__(self, s: AbstractSet[object]) -> bool: ...
def __lt__(self, s: AbstractSet[object]) -> bool: ...
def __ge__(self, s: AbstractSet[object]) -> bool: ...
def __gt__(self, s: AbstractSet[object]) -> bool: ...
__hash__: None # type: ignore
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
class frozenset(AbstractSet[_T_co], Generic[_T_co]):
def __init__(self, iterable: Iterable[_T_co] = ...) -> None: ...
def copy(self) -> FrozenSet[_T_co]: ...
def difference(self, *s: Iterable[object]) -> FrozenSet[_T_co]: ...
def intersection(self, *s: Iterable[object]) -> FrozenSet[_T_co]: ...
def isdisjoint(self, s: Iterable[_T_co]) -> bool: ...
def issubset(self, s: Iterable[object]) -> bool: ...
def issuperset(self, s: Iterable[object]) -> bool: ...
def symmetric_difference(self, s: Iterable[_T_co]) -> FrozenSet[_T_co]: ...
def union(self, *s: Iterable[_T_co]) -> FrozenSet[_T_co]: ...
def __len__(self) -> int: ...
def __contains__(self, o: object) -> bool: ...
def __iter__(self) -> Iterator[_T_co]: ...
def __str__(self) -> str: ...
def __and__(self, s: AbstractSet[_T_co]) -> FrozenSet[_T_co]: ...
def __or__(self, s: AbstractSet[_S]) -> FrozenSet[Union[_T_co, _S]]: ...
def __sub__(self, s: AbstractSet[_T_co]) -> FrozenSet[_T_co]: ...
def __xor__(self, s: AbstractSet[_S]) -> FrozenSet[Union[_T_co, _S]]: ...
def __le__(self, s: AbstractSet[object]) -> bool: ...
def __lt__(self, s: AbstractSet[object]) -> bool: ...
def __ge__(self, s: AbstractSet[object]) -> bool: ...
def __gt__(self, s: AbstractSet[object]) -> bool: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
class enumerate(Iterator[Tuple[int, _T]], Generic[_T]):
def __init__(self, iterable: Iterable[_T], start: int = ...) -> None: ...
def __iter__(self) -> Iterator[Tuple[int, _T]]: ...
def __next__(self) -> Tuple[int, _T]: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
class range(Sequence[int]):
start: int
stop: int
step: int
@overload
def __init__(self, stop: _SupportsIndex) -> None: ...
@overload
def __init__(self, start: _SupportsIndex, stop: _SupportsIndex, step: _SupportsIndex = ...) -> None: ...
def count(self, value: int) -> int: ...
def index(self, value: int) -> int: ... # type: ignore
def __len__(self) -> int: ...
def __contains__(self, o: object) -> bool: ...
def __iter__(self) -> Iterator[int]: ...
@overload
def __getitem__(self, i: _SupportsIndex) -> int: ...
@overload
def __getitem__(self, s: slice) -> range: ...
def __repr__(self) -> str: ...
def __reversed__(self) -> Iterator[int]: ...
class property(object):
def __init__(
self,
fget: Optional[Callable[[Any], Any]] = ...,
fset: Optional[Callable[[Any, Any], None]] = ...,
fdel: Optional[Callable[[Any], None]] = ...,
doc: Optional[str] = ...,
) -> None: ...
def getter(self, fget: Callable[[Any], Any]) -> property: ...
def setter(self, fset: Callable[[Any, Any], None]) -> property: ...
def deleter(self, fdel: Callable[[Any], None]) -> property: ...
def __get__(self, obj: Any, type: Optional[type] = ...) -> Any: ...
def __set__(self, obj: Any, value: Any) -> None: ...
def __delete__(self, obj: Any) -> None: ...
def fget(self) -> Any: ...
def fset(self, value: Any) -> None: ...
def fdel(self) -> None: ...
class _NotImplementedType(Any): # type: ignore
# A little weird, but typing the __call__ as NotImplemented makes the error message
# for NotImplemented() much better
__call__: NotImplemented # type: ignore
NotImplemented: _NotImplementedType
def abs(__x: SupportsAbs[_T]) -> _T: ...
def all(__iterable: Iterable[object]) -> bool: ...
def any(__iterable: Iterable[object]) -> bool: ...
def ascii(__obj: object) -> str: ...
def bin(__number: Union[int, _SupportsIndex]) -> str: ...
if sys.version_info >= (3, 7):
def breakpoint(*args: Any, **kws: Any) -> None: ...
def callable(__obj: object) -> bool: ...
def chr(__i: int) -> str: ...
# This class is to be exported as PathLike from os,
# but we define it here as _PathLike to avoid import cycle issues.
# See https://github.com/python/typeshed/pull/991#issuecomment-288160993
_AnyStr_co = TypeVar("_AnyStr_co", str, bytes, covariant=True)
@runtime_checkable
class _PathLike(Protocol[_AnyStr_co]):
def __fspath__(self) -> _AnyStr_co: ...
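# Minimal sketch of a class satisfying this protocol (illustrative comment,
# not part of the stub): any object exposing __fspath__ is accepted where a
# _PathLike is expected, e.g.
#   class Repo:
#       def __fspath__(self) -> str: return "/tmp/repo"
#   open(Repo())  # accepted, since Repo structurally matches _PathLike[str]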
if sys.version_info >= (3, 8):
def compile(
source: Union[str, bytes, mod, AST],
filename: Union[str, bytes, _PathLike[Any]],
mode: str,
flags: int = ...,
dont_inherit: int = ...,
optimize: int = ...,
*,
_feature_version: int = ...,
) -> Any: ...
else:
def compile(
source: Union[str, bytes, mod, AST],
filename: Union[str, bytes, _PathLike[Any]],
mode: str,
flags: int = ...,
dont_inherit: int = ...,
optimize: int = ...,
) -> Any: ...
def copyright() -> None: ...
def credits() -> None: ...
def delattr(__obj: Any, __name: str) -> None: ...
def dir(__o: object = ...) -> List[str]: ...
@overload
def divmod(__x: SupportsDivMod[_T_contra, _T_co], __y: _T_contra) -> _T_co: ...
@overload
def divmod(__x: _T_contra, __y: SupportsRDivMod[_T_contra, _T_co]) -> _T_co: ...
def eval(
__source: Union[str, bytes, CodeType], __globals: Optional[Dict[str, Any]] = ..., __locals: Optional[Mapping[str, Any]] = ...
) -> Any: ...
def exec(
__source: Union[str, bytes, CodeType],
__globals: Optional[Dict[str, Any]] = ...,
__locals: Optional[Mapping[str, Any]] = ...,
) -> Any: ...
def exit(code: object = ...) -> NoReturn: ...
@overload
def filter(__function: None, __iterable: Iterable[Optional[_T]]) -> Iterator[_T]: ...
@overload
def filter(__function: Callable[[_T], Any], __iterable: Iterable[_T]) -> Iterator[_T]: ...
def format(__value: object, __format_spec: str = ...) -> str: ... # TODO unicode
def getattr(__o: Any, name: str, __default: Any = ...) -> Any: ...
def globals() -> Dict[str, Any]: ...
def hasattr(__obj: Any, __name: str) -> bool: ...
def hash(__obj: object) -> int: ...
def help(*args: Any, **kwds: Any) -> None: ...
def hex(__number: Union[int, _SupportsIndex]) -> str: ...
def id(__obj: object) -> int: ...
def input(__prompt: Any = ...) -> str: ...
@overload
def iter(__iterable: Iterable[_T]) -> Iterator[_T]: ...
@overload
def iter(__function: Callable[[], Optional[_T]], __sentinel: None) -> Iterator[_T]: ...
@overload
def iter(__function: Callable[[], _T], __sentinel: Any) -> Iterator[_T]: ...
def isinstance(__obj: object, __class_or_tuple: Union[type, Tuple[Union[type, Tuple[Any, ...]], ...]]) -> bool: ...
def issubclass(__cls: type, __class_or_tuple: Union[type, Tuple[Union[type, Tuple[Any, ...]], ...]]) -> bool: ...
def len(__obj: Sized) -> int: ...
def license() -> None: ...
def locals() -> Dict[str, Any]: ...
@overload
def map(__func: Callable[[_T1], _S], __iter1: Iterable[_T1]) -> Iterator[_S]: ...
@overload
def map(__func: Callable[[_T1, _T2], _S], __iter1: Iterable[_T1], __iter2: Iterable[_T2]) -> Iterator[_S]: ...
@overload
def map(
__func: Callable[[_T1, _T2, _T3], _S], __iter1: Iterable[_T1], __iter2: Iterable[_T2], __iter3: Iterable[_T3]
) -> Iterator[_S]: ...
@overload
def map(
__func: Callable[[_T1, _T2, _T3, _T4], _S],
__iter1: Iterable[_T1],
__iter2: Iterable[_T2],
__iter3: Iterable[_T3],
__iter4: Iterable[_T4],
) -> Iterator[_S]: ...
@overload
def map(
__func: Callable[[_T1, _T2, _T3, _T4, _T5], _S],
__iter1: Iterable[_T1],
__iter2: Iterable[_T2],
__iter3: Iterable[_T3],
__iter4: Iterable[_T4],
__iter5: Iterable[_T5],
) -> Iterator[_S]: ...
@overload
def map(
__func: Callable[..., _S],
__iter1: Iterable[Any],
__iter2: Iterable[Any],
__iter3: Iterable[Any],
__iter4: Iterable[Any],
__iter5: Iterable[Any],
__iter6: Iterable[Any],
*iterables: Iterable[Any],
) -> Iterator[_S]: ...
@overload
def max(
__arg1: SupportsLessThanT, __arg2: SupportsLessThanT, *_args: SupportsLessThanT, key: None = ...
) -> SupportsLessThanT: ...
@overload
def max(__arg1: _T, __arg2: _T, *_args: _T, key: Callable[[_T], SupportsLessThanT]) -> _T: ...
@overload
def max(__iterable: Iterable[SupportsLessThanT], *, key: None = ...) -> SupportsLessThanT: ...
@overload
def max(__iterable: Iterable[_T], *, key: Callable[[_T], SupportsLessThanT]) -> _T: ...
@overload
def max(__iterable: Iterable[SupportsLessThanT], *, key: None = ..., default: _T) -> Union[SupportsLessThanT, _T]: ...
@overload
def max(__iterable: Iterable[_T1], *, key: Callable[[_T1], SupportsLessThanT], default: _T2) -> Union[_T1, _T2]: ...
@overload
def min(
__arg1: SupportsLessThanT, __arg2: SupportsLessThanT, *_args: SupportsLessThanT, key: None = ...
) -> SupportsLessThanT: ...
@overload
def min(__arg1: _T, __arg2: _T, *_args: _T, key: Callable[[_T], SupportsLessThanT]) -> _T: ...
@overload
def min(__iterable: Iterable[SupportsLessThanT], *, key: None = ...) -> SupportsLessThanT: ...
@overload
def min(__iterable: Iterable[_T], *, key: Callable[[_T], SupportsLessThanT]) -> _T: ...
@overload
def min(__iterable: Iterable[SupportsLessThanT], *, key: None = ..., default: _T) -> Union[SupportsLessThanT, _T]: ...
@overload
def min(__iterable: Iterable[_T1], *, key: Callable[[_T1], SupportsLessThanT], default: _T2) -> Union[_T1, _T2]: ...
@overload
def next(__i: Iterator[_T]) -> _T: ...
@overload
def next(__i: Iterator[_T], default: _VT) -> Union[_T, _VT]: ...
def oct(__number: Union[int, _SupportsIndex]) -> str: ...
_OpenFile = Union[AnyPath, int]
_Opener = Callable[[str, int], int]
# Text mode: always returns a TextIOWrapper
@overload
def open(
file: _OpenFile,
mode: OpenTextMode = ...,
buffering: int = ...,
encoding: Optional[str] = ...,
errors: Optional[str] = ...,
newline: Optional[str] = ...,
closefd: bool = ...,
opener: Optional[_Opener] = ...,
) -> TextIOWrapper: ...
# Unbuffered binary mode: returns a FileIO
@overload
def open(
file: _OpenFile,
mode: OpenBinaryMode,
buffering: Literal[0],
encoding: None = ...,
errors: None = ...,
newline: None = ...,
closefd: bool = ...,
opener: Optional[_Opener] = ...,
) -> FileIO: ...
# Buffering is on: return BufferedRandom, BufferedReader, or BufferedWriter
@overload
def open(
file: _OpenFile,
mode: OpenBinaryModeUpdating,
buffering: Literal[-1, 1] = ...,
encoding: None = ...,
errors: None = ...,
newline: None = ...,
closefd: bool = ...,
opener: Optional[_Opener] = ...,
) -> BufferedRandom: ...
@overload
def open(
file: _OpenFile,
mode: OpenBinaryModeWriting,
buffering: Literal[-1, 1] = ...,
encoding: None = ...,
errors: None = ...,
newline: None = ...,
closefd: bool = ...,
opener: Optional[_Opener] = ...,
) -> BufferedWriter: ...
@overload
def open(
file: _OpenFile,
mode: OpenBinaryModeReading,
buffering: Literal[-1, 1] = ...,
encoding: None = ...,
errors: None = ...,
newline: None = ...,
closefd: bool = ...,
opener: Optional[_Opener] = ...,
) -> BufferedReader: ...
# Buffering cannot be determined: fall back to BinaryIO
@overload
def open(
file: _OpenFile,
mode: OpenBinaryMode,
buffering: int,
encoding: None = ...,
errors: None = ...,
newline: None = ...,
closefd: bool = ...,
opener: Optional[_Opener] = ...,
) -> BinaryIO: ...
# Fallback if mode is not specified
@overload
def open(
file: _OpenFile,
mode: str,
buffering: int = ...,
encoding: Optional[str] = ...,
errors: Optional[str] = ...,
newline: Optional[str] = ...,
closefd: bool = ...,
opener: Optional[_Opener] = ...,
) -> IO[Any]: ...
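# Quick map of how the open() overloads above resolve (illustrative comments):
#   open(p)             -> TextIOWrapper   (default text mode)
#   open(p, "rb", 0)    -> FileIO          (unbuffered binary)
#   open(p, "rb")       -> BufferedReader
#   open(p, "wb")       -> BufferedWriter
#   open(p, "r+b")      -> BufferedRandom
#   open(p, mode) with a non-literal mode string falls back to IO[Any]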
def ord(__c: Union[str, bytes]) -> int: ...
def print(
*values: object,
sep: Optional[str] = ...,
end: Optional[str] = ...,
file: Optional[SupportsWrite[str]] = ...,
flush: bool = ...,
) -> None: ...
_E = TypeVar("_E", contravariant=True)
_M = TypeVar("_M", contravariant=True)
class _SupportsPow2(Protocol[_E, _T_co]):
def __pow__(self, __other: _E) -> _T_co: ...
class _SupportsPow3(Protocol[_E, _M, _T_co]):
def __pow__(self, __other: _E, __modulo: _M) -> _T_co: ...
if sys.version_info >= (3, 8):
@overload
def pow(base: int, exp: int, mod: None = ...) -> Any: ... # returns int or float depending on whether exp is non-negative
@overload
def pow(base: int, exp: int, mod: int) -> int: ...
@overload
def pow(base: float, exp: float, mod: None = ...) -> float: ...
@overload
def pow(base: _SupportsPow2[_E, _T_co], exp: _E) -> _T_co: ...
@overload
def pow(base: _SupportsPow3[_E, _M, _T_co], exp: _E, mod: _M) -> _T_co: ...
else:
@overload
def pow(
__base: int, __exp: int, __mod: None = ...
) -> Any: ... # returns int or float depending on whether exp is non-negative
@overload
def pow(__base: int, __exp: int, __mod: int) -> int: ...
@overload
def pow(__base: float, __exp: float, __mod: None = ...) -> float: ...
@overload
def pow(__base: _SupportsPow2[_E, _T_co], __exp: _E) -> _T_co: ...
@overload
def pow(__base: _SupportsPow3[_E, _M, _T_co], __exp: _E, __mod: _M) -> _T_co: ...
def quit(code: object = ...) -> NoReturn: ...
@overload
def reversed(__sequence: Sequence[_T]) -> Iterator[_T]: ...
@overload
def reversed(__sequence: Reversible[_T]) -> Iterator[_T]: ...
def repr(__obj: object) -> str: ...
@overload
def round(number: SupportsRound[Any]) -> int: ...
@overload
def round(number: SupportsRound[Any], ndigits: None) -> int: ...
@overload
def round(number: SupportsRound[_T], ndigits: int) -> _T: ...
def setattr(__obj: Any, __name: str, __value: Any) -> None: ...
@overload
def sorted(__iterable: Iterable[SupportsLessThanT], *, key: None = ..., reverse: bool = ...) -> List[SupportsLessThanT]: ...
@overload
def sorted(__iterable: Iterable[_T], *, key: Callable[[_T], SupportsLessThan], reverse: bool = ...) -> List[_T]: ...
if sys.version_info >= (3, 8):
@overload
def sum(__iterable: Iterable[_T]) -> Union[_T, int]: ...
@overload
def sum(__iterable: Iterable[_T], start: _S) -> Union[_T, _S]: ...
else:
@overload
def sum(__iterable: Iterable[_T]) -> Union[_T, int]: ...
@overload
def sum(__iterable: Iterable[_T], __start: _S) -> Union[_T, _S]: ...
def vars(__object: Any = ...) -> Dict[str, Any]: ...
@overload
def zip(__iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]]: ...
@overload
def zip(__iter1: Iterable[_T1], __iter2: Iterable[_T2]) -> Iterator[Tuple[_T1, _T2]]: ...
@overload
def zip(__iter1: Iterable[_T1], __iter2: Iterable[_T2], __iter3: Iterable[_T3]) -> Iterator[Tuple[_T1, _T2, _T3]]: ...
@overload
def zip(
__iter1: Iterable[_T1], __iter2: Iterable[_T2], __iter3: Iterable[_T3], __iter4: Iterable[_T4]
) -> Iterator[Tuple[_T1, _T2, _T3, _T4]]: ...
@overload
def zip(
__iter1: Iterable[_T1], __iter2: Iterable[_T2], __iter3: Iterable[_T3], __iter4: Iterable[_T4], __iter5: Iterable[_T5]
) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5]]: ...
@overload
def zip(
__iter1: Iterable[Any],
__iter2: Iterable[Any],
__iter3: Iterable[Any],
__iter4: Iterable[Any],
__iter5: Iterable[Any],
__iter6: Iterable[Any],
*iterables: Iterable[Any],
) -> Iterator[Tuple[Any, ...]]: ...
def __import__(
name: str,
globals: Optional[Mapping[str, Any]] = ...,
locals: Optional[Mapping[str, Any]] = ...,
fromlist: Sequence[str] = ...,
level: int = ...,
) -> Any: ...
# Actually the type of Ellipsis is <type 'ellipsis'>, but since it's
# not exposed anywhere under that name, we make it private here.
class ellipsis: ...
Ellipsis: ellipsis
class BaseException(object):
args: Tuple[Any, ...]
__cause__: Optional[BaseException]
__context__: Optional[BaseException]
__suppress_context__: bool
__traceback__: Optional[TracebackType]
def __init__(self, *args: object) -> None: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def with_traceback(self: _TBE, tb: Optional[TracebackType]) -> _TBE: ...
class GeneratorExit(BaseException): ...
class KeyboardInterrupt(BaseException): ...
class SystemExit(BaseException):
code: int
class Exception(BaseException): ...
class StopIteration(Exception):
value: Any
_StandardError = Exception
class OSError(Exception):
errno: int
strerror: str
# filename, filename2 are actually Union[str, bytes, None]
filename: Any
filename2: Any
if sys.platform == "win32":
winerror: int
EnvironmentError = OSError
IOError = OSError
if sys.platform == "win32":
WindowsError = OSError
class ArithmeticError(_StandardError): ...
class AssertionError(_StandardError): ...
class AttributeError(_StandardError): ...
class BufferError(_StandardError): ...
class EOFError(_StandardError): ...
class ImportError(_StandardError):
def __init__(self, *args: object, name: Optional[str] = ..., path: Optional[str] = ...) -> None: ...
name: Optional[str]
path: Optional[str]
msg: str # undocumented
class LookupError(_StandardError): ...
class MemoryError(_StandardError): ...
class NameError(_StandardError): ...
class ReferenceError(_StandardError): ...
class RuntimeError(_StandardError): ...
class StopAsyncIteration(Exception):
value: Any
class SyntaxError(_StandardError):
msg: str
lineno: Optional[int]
offset: Optional[int]
text: Optional[str]
filename: Optional[str]
class SystemError(_StandardError): ...
class TypeError(_StandardError): ...
class ValueError(_StandardError): ...
class FloatingPointError(ArithmeticError): ...
class OverflowError(ArithmeticError): ...
class ZeroDivisionError(ArithmeticError): ...
class ModuleNotFoundError(ImportError): ...
class IndexError(LookupError): ...
class KeyError(LookupError): ...
class UnboundLocalError(NameError): ...
class BlockingIOError(OSError):
characters_written: int
class ChildProcessError(OSError): ...
class ConnectionError(OSError): ...
class BrokenPipeError(ConnectionError): ...
class ConnectionAbortedError(ConnectionError): ...
class ConnectionRefusedError(ConnectionError): ...
class ConnectionResetError(ConnectionError): ...
class FileExistsError(OSError): ...
class FileNotFoundError(OSError): ...
class InterruptedError(OSError): ...
class IsADirectoryError(OSError): ...
class NotADirectoryError(OSError): ...
class PermissionError(OSError): ...
class ProcessLookupError(OSError): ...
class TimeoutError(OSError): ...
class NotImplementedError(RuntimeError): ...
class RecursionError(RuntimeError): ...
class IndentationError(SyntaxError): ...
class TabError(IndentationError): ...
class UnicodeError(ValueError): ...
class UnicodeDecodeError(UnicodeError):
encoding: str
object: bytes
start: int
end: int
reason: str
def __init__(self, __encoding: str, __object: bytes, __start: int, __end: int, __reason: str) -> None: ...
class UnicodeEncodeError(UnicodeError):
encoding: str
object: str
start: int
end: int
reason: str
def __init__(self, __encoding: str, __object: str, __start: int, __end: int, __reason: str) -> None: ...
class UnicodeTranslateError(UnicodeError): ...
class Warning(Exception): ...
class UserWarning(Warning): ...
class DeprecationWarning(Warning): ...
class SyntaxWarning(Warning): ...
class RuntimeWarning(Warning): ...
class FutureWarning(Warning): ...
class PendingDeprecationWarning(Warning): ...
class ImportWarning(Warning): ...
class UnicodeWarning(Warning): ...
class BytesWarning(Warning): ...
class ResourceWarning(Warning): ...
|
[
"erictr@microsoft.com"
] |
erictr@microsoft.com
|
f532fa34ab974fc95dff6d166910eebb381c45a5
|
68a155e62686542dc8e7e8069382085c88612280
|
/application/routes.py
|
eab784019f5a8d46370a9c487ff0b720756d01de
|
[] |
no_license
|
MohamedEmad1998/Movie-Recommendation-AI
|
2f4de729452734e221625effa8809a07256cb8d8
|
f6384117c6be758e4043ec6cb39d679bef627b7e
|
refs/heads/main
| 2023-02-19T11:40:48.749679
| 2021-01-20T21:04:09
| 2021-01-20T21:04:09
| 331,429,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
from application import app
import bot
from flask import render_template, request
@app.route('/')
@app.route('/home')
@app.route('/index')
def hello_world():
return render_template("index2.html")
@app.route('/get')
def form_post():
message = request.args.get('msg')
response = bot.chat(message)
return str(response)
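
# A minimal sketch of how the /get endpoint above is exercised from a client
# (assumption: the dev server runs on localhost:5000):
#   import requests
#   reply = requests.get('http://localhost:5000/get', params={'msg': 'hi'}).text
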
# if request.method == 'POST' :
# message = request.form['usermsg']
# update_html.update(message,'user')
# render_template('index2.html')
# reply=bot.chat(message)
# update_html.update(reply,'bot')
# return render_template('index2.html')
|
[
"noreply@github.com"
] |
MohamedEmad1998.noreply@github.com
|
8aa9836ef60eecec8665ea91de8d724b9abc0328
|
f1c3614d6ef3874e816a2616ea0ae83704b052da
|
/tests/selenium_page.py
|
58331945adeee298ace5dec647cb39d512dfa78d
|
[] |
no_license
|
margaritaumaniuc/presto
|
a46ed88c433a68c762a28e80e413bb1b61d46705
|
f95e85c7abb6da3919ed4fbfc96827f4bba473e2
|
refs/heads/master
| 2020-08-04T20:55:34.755285
| 2019-10-11T06:06:22
| 2019-10-11T06:06:22
| 212,275,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,591
|
py
|
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import StaleElementReferenceException, ElementNotVisibleException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from tests.settings import WDW_TIME
class BasePage(object):
def __init__(self, driver):
self.driver = driver
    def find_by_locator(self, locator, wait=WDW_TIME):
        # once the document has finished loading, wait for the element to appear
        if self.driver.execute_script('return document.readyState') == 'complete':
            return WebDriverWait(self.driver, wait, ignored_exceptions=[StaleElementReferenceException]). \
                until(EC.presence_of_element_located(locator))

    def find_by_locator_and_clickability(self, locator, wait=WDW_TIME):
        # same as find_by_locator, but additionally require the element to be clickable
        if self.driver.execute_script('return document.readyState') == 'complete':
            return WebDriverWait(self.driver, wait, ignored_exceptions=[StaleElementReferenceException]). \
                until(EC.element_to_be_clickable(locator))
def find_all_elements(self, locator, wait=WDW_TIME):
items = WebDriverWait(self.driver, wait,
ignored_exceptions=[StaleElementReferenceException, ElementNotVisibleException]). \
until(EC.presence_of_all_elements_located(locator))
return items
def fill_element(self, locator, value):
element = self.find_by_locator(locator)
element.send_keys(value)
def press_enter(self, locator):
element = self.find_by_locator(locator)
element.send_keys(Keys.ENTER)
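
# Usage sketch (hypothetical locator; a page object would subclass BasePage):
#   from selenium.webdriver.common.by import By
#   page = BasePage(driver)
#   page.fill_element((By.ID, 'search'), 'presto')
#   page.press_enter((By.ID, 'search'))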
|
[
"margarita.u@sparkcentral.com"
] |
margarita.u@sparkcentral.com
|
97fb86a6fc77d10f0b1704b970c7eb502694ccf4
|
693d005c1d1b1dc39d970bb9683282a5eede389f
|
/fourth_project/manage.py
|
9e85fa14b8338c2a7e0f3e9e88dc04cd538a69e3
|
[] |
no_license
|
FranklinMonro/UDEMY---Full-Stack-Django
|
db3d3646e973dec7d74dbb1b50c54d68c7f1a366
|
21ac64652342b9d7e97c34b7d4b8d10247431317
|
refs/heads/master
| 2020-04-19T03:59:26.269493
| 2019-05-14T15:06:41
| 2019-05-14T15:06:41
| 167,949,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fourth_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"noreply@github.com"
] |
FranklinMonro.noreply@github.com
|
3b9fd7f7cb708040b5bbad3d59b9b3d09d01e4bd
|
e960a8d11fd8bf86742a82d879ec765e0c801bf6
|
/GeneratePredictionT.py
|
84b77119a8dc9062b6a76af69cea3b825ad2a7a3
|
[] |
no_license
|
limcrong/trafficmanagement
|
91801a2bce86135325f031fff51ae575255e1118
|
c76303020300df265a0c0a05dd1d0c7e679d3a31
|
refs/heads/master
| 2020-06-05T01:30:53.748716
| 2019-06-19T02:40:57
| 2019-06-19T02:40:57
| 192,266,519
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,638
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import xgboost as xgb
from xgboost import XGBClassifier
from xgboost import XGBRegressor
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.externals import joblib
from sklearn.metrics import mean_squared_error
from math import sqrt
import pickle
import ConvertScript
# In[4]:
# load xgb models
model1 = pickle.load(open("model/final1.model.dat", "rb"))
model2 = pickle.load(open("model/final2.model.dat", "rb"))
model3 = pickle.load(open("model/final3.model.dat", "rb"))
model4 = pickle.load(open("model/final4.model.dat", "rb"))
model5 = pickle.load(open("model/final5.model.dat", "rb"))
model6 = pickle.load(open("model/final6.model.dat", "rb"))
#load scaler
scaler = joblib.load('scaler.pkl')
# In[5]:
def increaseT(d, hr, m):
    # advance a (day, hour, minute) triple by one 15-minute step,
    # rolling minutes into the next hour and hours into the next day
    if (hr > 22.5) and (m > 35):
        return d + 1, 0, 0
    if m > 35:
        return d, hr + 1, 0
    return d, hr, m + 15
def getLastT(df):
lastday = df.iloc[-1,:]['day']
lasthr = df.iloc[-1,:]['hour']
lastmin = df.iloc[-1,:]['minute']
print("Last time stamp is: {} day {} hour {} min".format(lastday,lasthr,lastmin))
return (lastday,lasthr,lastmin)
def findAndReturnNextT(df):
d,hr,m = getLastT(df)
return increaseT(d,hr,m)
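
# Worked examples of the 15-minute stepping above (day, hour, minute):
#   increaseT(5, 10, 30) -> (5, 10, 45)   # plain quarter-hour step
#   increaseT(5, 10, 45) -> (5, 11, 0)    # minutes roll into the next hour
#   increaseT(5, 23, 45) -> (6, 0, 0)     # 23:45 rolls into the next day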
def applyScaling(dfx):
dff = dfx.copy(deep=True)
dff.drop('geohash6',axis=1,inplace=True)
dff = dff.astype(np.float32)
dff = dff.fillna(0)
scaledx = scaler.transform(dff)
print(scaledx.shape)
return scaledx
col2 = ['day', 'long', 'lat', 'min', 'max', 'zone',
'dist_to_high_demand5', 'dist_to_7', 'hour', 'minute', 'demand_s',
'mean', 'ma7', 'ma14', 'ma21', 'ma50', 'ma100', 'std', 'zoneAverage',
'geoEma7', 'geoEma14', 'zoneEma14', 'dayOfWeek', 'peak', 'totalDist',
'sin_hour', 'cos_hour', 'demand_s_2', 'demand_s_3', 'demand_s_4',
'demand_s_5', 'demand_s_6', 'demand_s_7', 'geoEma7_2', 'x', 'y', 'z',
'geo4ZoneEma7', 'geo5ZoneEma7', 'high_demand_perc', 'geoEma7_var',
'ma100_med', 'demand_last_week', 'demand']
# In[6]:
def generatePred(df):
#Create next timestep T
dfnextT = pd.DataFrame()
static = pd.read_hdf('staticValues.h5')
d,hr,m = findAndReturnNextT(df)
print("Next time stamp is: {} day {} hour {} min".format(d,hr,m))
dfnextT['geohash6'] = static['geohash6']
dfnextT['day'] = d
dfnextT['hour'] = hr
dfnextT['minute'] = m
dfn = pd.concat([df,dfnextT])
dfn= dfn[df.columns]
print("Created next timestep..")
#Generate Features
print("Running feature generation script..")
dfcon = ConvertScript.convertDf(dfn)
lastday,lasthr,lastmin = getLastT(dfcon)
dfcon = dfcon.loc[(dfcon.day == lastday)&(dfcon.hour == lasthr)&(dfcon.minute == lastmin)]
print("Generated features..")
#Scale features
scaled = applyScaling(dfcon)
x_test = scaled[:, :-1]
print("Scaled features..")
# Predict demand
y_pred = (model1.predict(x_test) + model2.predict(x_test)+model3.predict(x_test)+
model4.predict(x_test) + model5.predict(x_test) + model6.predict(x_test))/6
print("Predicted demand..")
print("Reconstructed original..")
#Construct original
withPred = np.concatenate([x_test,y_pred.reshape(y_pred.shape[0], 1)],axis=1)
newDf = pd.DataFrame(scaler.inverse_transform(withPred))
newDf.columns = col2
df_static = static[['geohash6','lat','long']]
df_merge = pd.merge(newDf,df_static, how='left', left_on=['lat','long'],right_on = ['lat','long'])
df_merge = df_merge[df.columns]
df_merge.head()
return df_merge
|
[
"rongronggg@gmail.com"
] |
rongronggg@gmail.com
|
4e7b95fde55ee2c89e339d086f06d4dbd57c294b
|
db68071bbdb5cb2cd7713b89c96d554e00470ab2
|
/MotionSensor.py
|
132223c16c76f3075f3216ec076cda53a07bad04
|
[] |
no_license
|
majorpeter/puha-manager
|
a265c9d14d845d0efab27c664420fffc3147054b
|
fd51b560a7e1dad99204ae3c9c7369d17cbcfd32
|
refs/heads/master
| 2020-03-19T04:35:44.466718
| 2018-12-21T20:24:26
| 2018-12-21T20:24:26
| 135,844,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
import logging
from datetime import datetime
class MotionSensor:
def __init__(self, node):
self.node = node
self.node.PulseCount.subscribe_to_changes(self.on_pulse_count_changed)
self.last_movement_time = datetime.now() # let's assume there is movement on startup to avoid checks later
def on_pulse_count_changed(self, name, value):
logging.debug('movement detected! (%s)' % value)
self.last_movement_time = datetime.now()
def get_time_since_last_movement(self):
return datetime.now() - self.last_movement_time
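
# Usage sketch (assumption: `node` is a project-specific object whose
# PulseCount attribute supports subscribe_to_changes):
#   sensor = MotionSensor(node)
#   if sensor.get_time_since_last_movement().total_seconds() > 300:
#       pass  # e.g. switch the lights off after 5 idle minutes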
|
[
"majorpeter29@gmail.com"
] |
majorpeter29@gmail.com
|
2b3dde4546e2cdd37640fda3bce835cc0e437bce
|
aa39ef39ffe34dcf90c5b7246b118aa17fb382ff
|
/gcaptcha/rest/views.py
|
a6fca4bf34e9df24e25a709f5f34b54ab85d7e58
|
[] |
no_license
|
toantt28/django-bypass-gcaptcha
|
635dcd3ddfbd5fc7b0d0b4d696e4541550970bf2
|
78ae7ad7af1fd866995f0c3988b70e359f0953b2
|
refs/heads/master
| 2022-12-28T11:14:45.468781
| 2020-10-14T07:12:38
| 2020-10-14T07:12:38
| 303,744,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,373
|
py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse
import json
#system libraries
import os
import random
import time
#selenium libraries
from seleniumwire import webdriver
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
#recaptcha libraries
import speech_recognition as sr
import urllib.request
import pydub
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.views import APIView
def delay():
print("[INFO] delay")
time.sleep(random.randint(3, 5))
# Create your views here.
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
class Test(APIView):
def get(self, request, *args, **kwargs):
# create chrome driver
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size=1920x1080')
options.add_argument("disable-gpu")
driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)
delay()
driver.get(
"https://www.google.com/search?q=hello&oq=hello&aqs=chrome..69i57j69i59j69i60.821j0j1&sourceid=chrome&ie=UTF-8"
)
# go to website
driver.get("https://www.gstatic.com/cloud-site-ux/vision/vision.min.html")
delay()
shadow_section = driver.execute_script('''return document.querySelector("vs-app").shadowRoot''')
element = shadow_section.find_element_by_tag_name('input')
driver.execute_script("document.getElementById('input').removeAttribute('hidden')")
randNum = random.randint(1, 100)
randImg = '{}.png'.format(randNum)
element.send_keys(
os.path.join(
os.getcwd(),
'gcaptcha',
'rest',
'images',
randImg
)
)
delay()
# switch to recaptcha frame
frames = driver.find_elements_by_tag_name("iframe")
driver.switch_to.frame(frames[0])
delay()
# click on checkbox to activate recaptcha
driver.find_element_by_class_name("recaptcha-checkbox-border").click()
# switch to recaptcha audio control frame
driver.switch_to.default_content()
frames = driver.find_elements_by_tag_name("iframe")
driver.switch_to.frame(frames[len(frames) - 1])
delay()
# click on audio challenge
driver.find_element_by_id("recaptcha-audio-button").click()
# switch to recaptcha audio challenge frame
driver.switch_to.default_content()
frames = driver.find_elements_by_tag_name("iframe")
driver.switch_to.frame(frames[-1])
delay()
flag = True
while flag:
try:
# click on the play button
button_div = driver.find_element_by_class_name('rc-audiochallenge-play-button')
button = button_div.find_element_by_class_name('rc-button-default')
button.click()
delay()
# get the mp3 audio file
src = driver.find_element_by_id("audio-source").get_attribute("src")
print("[INFO] Audio src: %s" % src)
# download the mp3 audio file from the source
urllib.request.urlretrieve(src, os.getcwd() + "\\sample.mp3")
sound = pydub.AudioSegment.from_mp3(os.getcwd() + "\\sample.mp3")
sound.export(os.getcwd() + "\\sample.wav", format="wav")
sample_audio = sr.AudioFile(os.getcwd() + "\\sample.wav")
r = sr.Recognizer()
with sample_audio as source:
audio = r.record(source)
# translate audio to text with google voice recognition
key = r.recognize_google(audio)
print("[INFO] Recaptcha Passcode: %s" % key)
time.sleep(1)
# key in results and submit
driver.find_element_by_id("audio-response").send_keys(key.lower())
time.sleep(2)
driver.find_element_by_id("audio-response").send_keys(Keys.ENTER)
delay()
except Exception as e:
# pass
print('[Exception]', e)
driver.save_screenshot(os.path.join(
os.getcwd(),
'gcaptcha',
'rest',
'screenshots',
'error.png'
))
flag = False
driver.switch_to.default_content()
delay()
# HERE IS success image
token = "Google mark as spam. Please try again later."
for request in driver.requests:
if 'https://cxl-services.appspot.com/proxy' in request.url:
key = 'token='
queryString = request.querystring
index = queryString.index(key)
token = queryString[index + len(key): len(queryString)]
print(token)
driver.close()
return Response({
"token": token
})
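
# Hypothetical wiring (not part of this file): exposing the view in urls.py
# with something like
#   path('solve/', Test.as_view())
# lets a single GET request drive the whole headless-browser flow above.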
|
[
"toan.tt@neo-lab.vn"
] |
toan.tt@neo-lab.vn
|
4619d834c5879ba0fbbdd46908424b4e26a6a8fa
|
4af4f4f40e95193cf2fed870724e39263d17d22c
|
/compined_testing.py
|
6f80f28f5133e88c74309642716c3e67f854cb31
|
[] |
no_license
|
basharE/pythonFirstProject
|
1a5a138fda0230f4ef40a21c905bc23fbb9c5e7a
|
d9ec307725449096bf5feb1e507506003fbae5c2
|
refs/heads/master
| 2023-04-06T12:14:36.980051
| 2021-03-28T17:57:49
| 2021-03-28T17:57:49
| 338,761,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
import requests
from db_connector import get_user
from selenium import webdriver
user_id = 1001
requests.post('http://localhost:5000/users/' + str(user_id), json={"user_name": "user name " + str(user_id)})
res = requests.get('http://localhost:5000/users/' + str(user_id))
assert res
assert get_user(str(user_id))[0][0] == "user name " + str(user_id)
driver = webdriver.Chrome(executable_path="/Users/basharegbariya/Downloads/chromedriver")
driver.get("http://127.0.0.1:5001/users/get_user_data/"+str(user_id))
assert driver.find_element_by_id("user").text == 'user name ' + str(user_id)
driver.quit()
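
# End-to-end check summary: the script posts a user over REST, asserts it is
# readable both via the API and directly from the database, then verifies the
# rendered page through a real browser before quitting the driver.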
|
[
"bashar.egbariya@peer39.com"
] |
bashar.egbariya@peer39.com
|
b865443f9f33aa61a37c0e20be96d1d6fc77a124
|
3b062b3dc545e11513da709ef3f525a6d64e37e0
|
/p2.py
|
d7b462a5f6f64fcf947598373efbda37d0f221c3
|
[] |
no_license
|
saraattia412/python_projects
|
75932a8badb77aa06b7fb008594fcacdd6a6b276
|
d2b3f1ec4f7e47a6bc8dd00b5bf8c7d01f5f8ec2
|
refs/heads/master
| 2023-08-28T04:01:02.293203
| 2021-09-25T18:30:10
| 2021-09-25T18:30:10
| 410,351,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
# sum and average of user-entered numbers
print('welcome in sum_average_game ^-^')
count=int(input('how many number :'))
current_count=0
total=0
while current_count<count :
    number=float(input('enter number:'))
    total += number
    current_count += 1
print('sum=',total)
print('average=',total/count)
|
[
"saraattia412@gmail.com"
] |
saraattia412@gmail.com
|
9c162425d5991391ecff90fd0305e2d82e9d7f23
|
1e8177c6fcb5f5035de5c463f4e07ba645f5ae49
|
/csv2xl.py
|
6a3be9daaabc2bfe0cf3c0a8670ca2f29678a951
|
[] |
no_license
|
sravanre/python
|
1a97669da262d3067fce6f7bc67141ba976f176c
|
248e3612be2890313be68886d02ff8a39e6c423d
|
refs/heads/master
| 2021-07-03T18:11:01.094397
| 2020-09-18T10:04:28
| 2020-09-18T10:04:28
| 172,246,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
import csv
from openpyxl import Workbook
wb = Workbook()
ws = wb.active
#ws = wb['Sheet1']
f = open("marks.csv","r")
for lines in csv.reader(f):
ws.append(lines)
f.close()
wb.save('marks1.xlsx')
wb.close()
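## Alternative kept below: write cell-by-cell instead of appending whole rows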
##r=1
##for lines in csv.reader(f):
## for c,val in enumerate(lines):
## ws.cell(row=r,column=c+1, value=val)
## r+=1
|
[
"noreply@github.com"
] |
sravanre.noreply@github.com
|
b4b2aa8f7d0110d5a1ee9b8e0de04c1e02146c12
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_zoologists.py
|
d5eaad0aa4529df66ccc13452502429859ae6960
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# class header
class _ZOOLOGISTS():
    def __init__(self,):
        self.name = "ZOOLOGISTS"
        self.definitions = ['zoologist']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['zoologist']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
e56b25cff63a25e70c4cdee0e6b01b2051686c7b
|
d6977cd01645d5259c4e33d8885ae279695d59fa
|
/django_tutorials/Django1.8Pro/mypro/mypro/settings.py
|
105f0902e48e1cf21ce668b3693c4d6029131ac0
|
[] |
no_license
|
Jadatravu/Tutorials
|
092cbea55a4906e5968037915caa29698270397f
|
b07262c46dd997ee43260ea006a7010644f95650
|
refs/heads/master
| 2021-01-17T13:11:21.119328
| 2019-11-01T08:49:53
| 2019-11-01T08:49:53
| 14,251,990
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,477
|
py
|
"""
Django settings for mypro project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*+kw(pk_khm##%c#bxck0x6vzf7#2b%rdcs2mmkgndp6_+9#6i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
'mapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mypro.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['/home/ubuntu/developer/18Jul16/mypro/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mypro.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR,'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT ='/home/ubuntu/developer/18Jul16/mypro/staticfiles/dfiles'
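# Note: STATIC_ROOT is the collectstatic target and must not be listed in
# STATICFILES_DIRS.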
STATICFILES_DIRS = (
    '/home/ubuntu/developer/18Jul16/mypro/staticfiles/sfiles',
    os.path.join(BASE_DIR, "static"),
    '/var/www/static/',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters':{
'simple':{
'format':'%(levelname)s %(message)s'
}
},
'handlers': {
'file':{
'level':'DEBUG',
'class':'logging.FileHandler',
'filename':'/home/ubuntu/developer/18Jul16/mypro/logapp.log',
'formatter':'simple'
},
},
'loggers': {
'myapp.views':{
'handlers': ['file'],
'level': 'DEBUG',
},
}
}
|
[
"ubuntu@ip-172-31-35-183.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-35-183.us-west-2.compute.internal
|
6559913c8662fc497e27f7cebfb1cbc71854dcb1
|
4d83dbfc94ba1b0f098f990a1790df36fda66628
|
/Practice/pytorch_tut/pytorch_BatchTraining.py
|
bef2d35ff85610df1e529dc104d6cbd9b33a09c6
|
[] |
no_license
|
CTM-development/ML4B
|
31dfff27796ebfbeefe40f5eb687eb94d5c88acf
|
38a9de7a63bfd4fe77d4bb6af3e228f6dcfbe7f7
|
refs/heads/main
| 2023-05-08T09:42:17.604282
| 2021-06-01T18:12:01
| 2021-06-01T18:12:01
| 371,355,130
| 0
| 0
| null | 2021-05-31T17:45:58
| 2021-05-27T11:49:51
| null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
'''
epoch = 1 forward and backward pass of ALL training samples
batch_size = number of training samples in one forward& backward pass
number of iterations = number of passes, each pass using [batch_size] number of samples
e.g. 100 samples, batch_size=20 --> 100/20 = 5 iterations for 1 epoch
'''
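# Quick sanity check of the arithmetic above with hypothetical numbers
# (not tied to the wine data loaded below): ceil(100 samples / batch_size 20) = 5 iterations.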
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
import numpy as np
import math
class WineDataset(Dataset):
def __init__(self):
# data loading
xy = np.loadtxt('../../data/wine.csv', delimiter=",", dtype=np.float32, skiprows=1)
self.x = torch.from_numpy(xy[:, 1:])
self.y = torch.from_numpy(xy[:, [0]])
self.n_samples = xy.shape[0]
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
# len(dataset)
return self.n_samples
dataset = WineDataset()
dataloader = DataLoader(dataset=dataset, batch_size=4, shuffle=True, num_workers=2)
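# shuffle=True reshuffles the samples every epoch; num_workers=2 loads batches
# in two background worker processes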
# dataiter = iter(dataloader)
# data = dataiter.next()
# features, labels = data
# print(features, labels)
# training loop
num_epochs = 2
total_samples = len(dataset)
n_iterations = math.ceil(total_samples/4)
print(total_samples, n_iterations)
for epoch in range(num_epochs):
for i, (inputs, labels) in enumerate(dataloader):
# forward pass, backward pass and parameter update would go here; this demo only logs progress
if (i+1) % 5 == 0:
print(f'epoch {epoch+1}/{num_epochs}, step {i+1}/{n_iterations}, inputs {inputs.shape}')
|
[
"christopher@mminderlein.de"
] |
christopher@mminderlein.de
|
41b90ed3b74875084c2c3186f9d9abf696281e63
|
85eb042f8370100b3bccbfc247746df3ddbbefa3
|
/data_control.py
|
6fe1a1e255788a0b67a010832e599db14f9b8962
|
[
"MIT"
] |
permissive
|
mic100/seloger.com
|
eda05f563bd6a8576f540bf4e393cae3ccba6ad1
|
c713c09783e74846b1ac73ea9cc6cfcddf0cc331
|
refs/heads/master
| 2021-01-10T20:47:22.297843
| 2015-10-30T21:01:05
| 2015-10-30T21:01:05
| 27,082,632
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,165
|
py
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------#
#!!!! #
# #
#please note the code is not up to date. It may not work actually due to #
#change on the web site host from where data is extracted. #
# #
#!!!! #
#-----------------------------------------------------------------------------#
from tool_kit import path_log, create_path, dispatch, curl, oldnew_ipadress
from bs4 import BeautifulSoup
import time
import math
import pandas as pd
#-----------------------------------------------------------------------------#
# get urls from real estate announces #
#-----------------------------------------------------------------------------#
def function_2(path_log, file_name="backup_file1.txt") :
create_path()
url = ["http://www.seloger.com/immobilier/tout/immo-paris-%seme-75/" % (str(i)) for i in range(1,21)]
url_liste = dispatch(path_log + "dispatch1.txt", url)
backup_file2 = open(path_log + "backup_file2.txt", "w")
for url in url_liste :
pool = curl(url)
# oldnew_ipadress(path_log)
for c in pool :
data = c.body.getvalue()
soup1 = BeautifulSoup(data)
s1 = soup1.findAll('div', {'class' : 'content_infos othergroupsite'})
s1 = s1[0].findAll('li')
print "len(s1) : ", len(s1)
print "\n"
som_nbr_annonce = 0
som_list = []
for i in range(len(s1)) :
url = s1[i].findAll('a')[0]['href']
len_url = len(url.split("/"))
len_departement = len(url.split("/")[len_url-4].split("-"))
departement = url.split("/")[len_url-4].split("-")[len_departement-1]
type_bien1 = url.split("/")[len_url-3].replace("bien-", "")
nbr_annonce = s1[i].findAll('b')[0].string
if nbr_annonce != None :
pass
else :
nbr_annonce = 0
som_nbr_annonce = float(som_nbr_annonce) + float(nbr_annonce)
som_list.append(float(som_nbr_annonce))
nbr_piece = s1[i].findAll('a')[0]['title'].replace("Immobilier ", "").replace(type_bien1, "").strip().split(" ")[2]
if nbr_piece == "studio" :
nbr_piece = '1'
else :
pass
type_transaction = s1[i].findAll('a')[0]['title'].replace("Immobilier ", "").replace(type_bien1, "").strip().split(" ")[0]
print i, str(som_nbr_annonce), departement, str(nbr_annonce), type_transaction, type_bien1, nbr_piece, url
backup_file2.write(departement + ";" + str(nbr_annonce)+ ";" + type_transaction + ";" + type_bien1 + ";" + nbr_piece + ";" + url + ";")
backup_file2.write("\n")
backup_file2.close()
print "\n"
#-----------------------------------------------------------------------------#
# Get number of page and urls to get through #
#-----------------------------------------------------------------------------#
def function_3(path_log) :
backup_file = open(path_log + "backup_file2.txt", "r").readlines()
print "len(backup_file) : ", len(backup_file)
print "\n"
urls_parcours = open(path_log + "urls_parcours.txt", "w")
urls_list = []
for i in range(len(backup_file)) :
url = backup_file[i].split(";")[5]
nbr = float(backup_file[i].split(";")[1])
# ten announces per page, so round up to get the page count
nbr_page = int(math.ceil(nbr / 10.0))
print nbr, "gives us :", nbr_page, "page(s)", "\n"
if nbr_page == 1 or nbr_page == 0 :
if nbr_page == 0 :
print "Attention prise en charge du cas '0' page releve : ", "\n"
else :
b = url
urls_list.append(b)
urls_parcours.write(b + ";" + "\n")
print b
elif nbr_page == 2 :
b = url
c = url + "?ANNONCEpg=2"
urls_list.append(b)
urls_list.append(c)
urls_parcours.write(b + ";" + "\n")
urls_parcours.write(c + ";" + "\n")
print c
print b
elif nbr_page > 2 :
for j in range(2, nbr_page) :
b = url + "?ANNONCEpg=%s" %(str(j))
urls_list.append(b)
urls_parcours.write(b + ";" + "\n")
print b
else :
print "Problem nbr_page re construction"
print "len(urls_list) : ", len(urls_list)
#-----------------------------------------------------------------------------#
# get urls from real estate announces for each link #
#-----------------------------------------------------------------------------#
def function_4(path_log, file_name="urls_parcours.txt") :
# d = str(time.strftime('%d-%m-%y_%Hh%Mmin%Ssec',time.localtime()))
d2 = str(time.strftime('%d/%m/%y %H:%M:%S',time.localtime()))
d3 = str(time.strftime('%d-%m-%y',time.localtime()))
backup_file1 = open(path_log + file_name, "r").readlines()
url = []
for i in range(len(backup_file1)) :
a = backup_file1[i].split(";")[0].strip()
url.append(a)
url_liste = dispatch(path_log + "dispatch1.txt", url)
url_done = open(path_log + "url_done.txt", "w")
path_logout = "log/"
compteur = 0
for url in url_liste :
compteur += 1
print compteur, "/", len(url_liste)
for i in range(len(url)) :
url_done.write(url[i] + "\n")
pool = curl(url)
# oldnew_ipadress(path_log)
compteur1 = 0
for c in pool :
compteur1 += 1
print compteur1, "/", len(pool)
data = c.body.getvalue()
soup1 = BeautifulSoup(data)
d = str(time.strftime('%d-%m-%y_%Hh%Mmin%Ssec',time.localtime()))
l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17 = [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []
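# dico maps each output CSV column name to its per-page list of values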
dico = {'TYPE_TRANSACTION' : l0, 'NOMBRE_PHOTOS' : l1 ,
'NOMBRE_PIECE' : l2, 'NOMBRE_M2' : l3, 'ETAGE' : l4,
'BALCON' : l5, 'CUISINE' : l6, 'AUTRE' : l7,
'CHAMBRE(S)' : l8, 'MEUBLE' : l9, 'TYPE_CHAUFFAGE' : l10,
'LOCALISATION' : l11, 'PROXIMITE' : l12, 'PRIX' : l13,
'CHARGE' : l14, 'NOM_AGENCE' : l15, 'URL' : l16,
'EXTRACTION_DATE' : l17}
#-----------------------------------------------------------------#
#HERE LOOKING FOR WORDS LOCATIONS / VENTES / INVESTISSEMENT / VIAGER :
s0 = soup1.findAll('div', {'class' : 'main'})
for i in range(len(s0)) :
if s0[i].findAll('span', {'class' : 'title_recherche'}) == [] :
transaction_type = "NA"
else :
transaction_type = s0[i].findAll('span', {'class' : 'title_recherche'})
transaction_type = transaction_type[0].text
if "locations" in transaction_type :
transaction_type = "LOCATION"
elif "ventes" in transaction_type :
transaction_type = "ACHAT"
elif "investissement" in transaction_type :
transaction_type = "INVESTISSEMENT"
elif "viager" in transaction_type :
transaction_type = "VIAGER"
else :
pass
#-----------------------------------------------------------------#
#We are looking for the photo number in html page then add var TRANSACTION_TYPE
s1 = soup1.findAll('div', {'class' : 'annonce__visuel__pictogrammes'})
for i in range(len(s1)) :
if s1[i].findAll('a', {'class' : 'annonce__visuel__picto picto__photo'}) == [] :
nbr_photo = 0
else :
nbr_photo = s1[i].findAll('a', {'class' : 'annonce__visuel__picto picto__photo'})
nbr_photo = nbr_photo[0]['title']
nbr_photo = nbr_photo.replace(" photos", "")
nbr_photo = int(nbr_photo)
l1.append(nbr_photo)
l0.append(transaction_type)
#-----------------------------------------------------------------#
s2 = soup1.findAll('div', {'class' : 'annonce__detail'})
for i in range(len(s2)) :
details1 = s2[i].findAll('span', {'class' : 'annone__detail__param'})[0].text
details1 = details1.replace("\xe8", "e")
details1 = details1.replace("m\xb2", "m2")
details1 = details1.replace("\xe9", "e")
details1 = details1.split(",")
nbr_piece = "NA"
nbr_m2 = "NA"
etage = "NA"
balcon = "NA"
cuisine = "NA"
autre = "NA"
chambre = "NA"
meuble = "NA"
chauffage = "NA"
for j in details1 :
if "Piece" in j :
if nbr_piece == "NA" :
nbr_piece = j.replace(" Piece", "").replace("s", "").strip()
else :
pass
if "m2" in j :
if nbr_m2 == "NA" :
nbr_m2 = j.replace(" m2", "").strip()
else :
pass
if "Etage" in j :
if etage == "NA" :
etage = j.replace(" Etage", "").strip()
else :
pass
if "Balcon" in j :
if balcon == "NA" :
balcon = j.replace(" Balcon", "").replace("s", "").strip()
else :
pass
if "cuisine" in j :
if cuisine == "NA" :
cuisine = j.replace(" cuisine", "").strip()
else :
pass
if "Chambre" in j :
if chambre == "NA" :
chambre = j.replace(" Chambre", "")
chambre = chambre.replace("s", "").strip()
else :
pass
if "Meuble" in j :
if meuble == "NA" :
meuble = "YES"
else :
pass
if "chauffage" in j :
if chauffage == "NA" :
chauffage = j.replace("chauffage ", "").replace(" radiateur", "")
else :
pass
if "Piece" not in j and "m2" not in j and "Etage" not in j \
and "Balcon" not in j and "cuisine" not in j and "Chambre" not in j \
and "Meuble" not in j and "chauffage" not in j :
autre = j.strip()
else :
pass
l2.append(nbr_piece)
l3.append(nbr_m2)
l4.append(etage)
l5.append(balcon)
l6.append(cuisine)
l7.append(autre)
l8.append(chambre)
l9.append(meuble)
l10.append(chauffage)
#-----------------------------------------------------------------#
#LOCATION :
s3 = soup1.findAll('span', {'class' : 'annone__detail__localisation'})
for i in range(len(s3)) :
details2 = s3[i].findAll('span', {'class' : 'annone__detail__param'})[0].text
details2 = details2.replace(" (Paris)", "")
details2 = details2.replace(" ()", "")
l11.append(details2)
#-----------------------------------------------------------------#
#NEAR LOCATION :
s4 = soup1.findAll('div', {'class' : 'annonce__detail'})
for i in range(len(s4)) :
details3 = s4[i].findAll('span', {'class' : 'annone__detail__proximite'})
if details3 != [] :
details3 = details3[0].text
details3 = details3.replace("É", "E")
details3 = details3.replace("é", "e")
details3 = details3.replace("ê", "e")
details3 = details3.replace("ë", "e")
details3 = details3.replace("â", "a")
details3 = details3.replace("ô", "o")
details3 = details3.replace(""", "")
details3 = details3.replace("Î", "")
details3 = details3.replace("ç", "c")
details3 = details3.replace("M°", "Metro ")
details3 = details3.replace("Metro ", "")
details3 = details3.replace("Metro", "")
details3 = details3.replace("'", "'")
details3 = details3.replace("&", "et")
details3 = details3.replace("è", "e")
details3 = details3.replace("/", ",")
details3 = details3.replace(": ", "")
details3 = details3.replace("metro", "")
details3 = details3.replace("à", "a")
details3 = details3.replace("î", "i")
details3 = details3.replace("ï", "i")
details3 = details3.replace("Centre ville,", "")
details3 = details3.replace("ecole,", "")
details3 = details3.replace("commerces,", "")
details3 = details3.replace("bus,", "")
details3 = details3.replace("*", "")
else :
details3 = "NA"
proximite = details3
l12.append(proximite)
#-----------------------------------------------------------------#
#PRICE AND DETAILS OF ADDITIVE PRICE CHARGES :
s5 = soup1.findAll('div', {'class' : 'annonce__agence'})
for i in range(len(s5)) :
details4 = s5[i].findAll('span', {'class' : 'annonce__agence__prix annonce__nologo'})
details5 = s5[i].findAll('span', {'class' : 'annonce__agence__prix '})
if details4 != [] :
details4 = details4[0].text
details4 = details4.replace("\xa0", "")
details4 = details4.replace("\x80", "")
details4 = details4.split(" ")
else :
details4 = 0
if details5 != [] :
details5 = details5[0].text
details5 = details5.replace("\xa0", "")
details5 = details5.replace("\x80", "")
details5 = details5.split(" ")
else :
details5 = 0
if details4 == 0 :
    detailsx = details5
elif details5 == 0 :
    detailsx = details4
else :
    # both price variants were found; prefer the first so detailsx is always defined
    detailsx = details4
try :
l13.append(float(detailsx[0].replace(",", ".").replace("Â", "")))
except :
l13.append(str(detailsx[0]))
if "FAI" in detailsx[1] :
new = detailsx[1].replace("FAI", "")
try :
l14.append(float(new))
except :
l14.append(new)
elif "+" in detailsx[1] :
new = detailsx[1].replace("+", "")
l14.append(new)
else :
l14.append(detailsx[1].strip())
#-----------------------------------------------------------------#
#REAL ESTATE AGENCY NAMES :
s6 = soup1.findAll('div', {'class' : 'annonce__agence'})
for i in range(len(s6)) :
details6 = s6[i].findAll('span', {'class' : 'annone__detail__nom'})
if details6 != [] :
details6 = details6[0].text
else :
details6 = "NA"
l15.append(details6)
#-----------------------------------------------------------------#
#GET THE URL VALUE :
s7 = soup1.findAll('div', {'class' : 'annonce__detail'})
for i in range(len(s7)) :
url_cible = s7[i].findAll('a', {'class' : 'annone__detail__title annonce__link'})
url_cible = url_cible[0]['href']
url_cible = url_cible.split("?")[0]
l16.append(url_cible)
#-----------------------------------#
#DATE :
l17.append(d2)
#-----------------------------------------------------------------#
#WRITE DATA IN FILE :
if dico['CUISINE'] == [] :
pass
else :
try :
df = pd.DataFrame(dico)
df.to_csv(path_logout + 'seloger_%s.txt' %(d3), mode="a", header=False)
print compteur, df
print "\n"
except :
print "ValueError : ", ValueError
print "dico : ", dico
log_dico = open(path_log + "log_dico.txt", "a")
for i in dico :
print "len(dico[i]) : ", str(len(dico[i])), str(i), str(dico[i])
log_dico.write(str(len(dico[i])) + ";" + str(i) + ";" + str(dico[i]))
log_dico.close()
print "\n"
|
[
"mic100@hotmail.fr"
] |
mic100@hotmail.fr
|
b2ecd23d0488f2b3bea55b15c9d3b6bc34e739ab
|
98e7edf7a3f834580ba6a3c1058506e5a773d1bb
|
/fileHandling.py
|
b4d3e6ae4d6684d77027c28ed20cabc3eedba404
|
[] |
no_license
|
gauravhansda/InterviewQuestions
|
ef95363440d9eb0057ab0551f155793ea3e5e3b0
|
5134c59d3d543982a30465d7d4eaa1c8902185e2
|
refs/heads/master
| 2021-01-01T17:07:11.537838
| 2017-07-22T02:31:23
| 2017-07-22T02:31:23
| 98,001,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
class FILEHANDLING():
def __init__(self):
self.file_name = "file.txt"
# Remove blank lines from the file
def remBlanks(self):
with open(self.file_name, 'r') as inp:
lines = inp.readlines()
print lines
with open("out.txt", 'w') as out:
for line in lines:
if not line.isspace():
out.write(line)
if __name__ == '__main__':
fh = FILEHANDLING()
fh.remBlanks()
|
[
"GauravHansda@Gauravs-MacBook-Pro.local"
] |
GauravHansda@Gauravs-MacBook-Pro.local
|
267e66a7d5ee24cddd6245f2bb5aaf4c8f0bee87
|
4d051a2875532ee9825559261927218bbac3dbf4
|
/src/arm/joint/__init__.py
|
c5d36d433fbcca594e664ed8910fe826eec87b91
|
[] |
no_license
|
swol-kat/Arm_Code
|
4554b9f79460929515dc4e5c0dc7f0b7b23985da
|
389a8fe4875c6ab90e2ec79dedf445088b21c855
|
refs/heads/master
| 2023-04-11T11:06:38.759482
| 2021-04-23T00:28:15
| 2021-04-23T00:28:15
| 335,827,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
from .joint import Joint
from .virtual_joint import VirtualJoint
|
[
"arjungandhi06@gmail.com"
] |
arjungandhi06@gmail.com
|
c148d463b278c19a66dbc6bfe9f7997cdb5d3cb7
|
e517fcf60b982bb843ae846fa881102d504c368c
|
/poncho/postprocess/prepare_data.py
|
e6bf2e27d2a127b733c008f9ca472b820e8fb482
|
[
"MIT"
] |
permissive
|
waduhek/poncho
|
b79aa9087faf9809872eeb6a36a21de3e34bb5d8
|
47a8f2600d1afcfb8f9fa76ff0c68eb9f132f56c
|
refs/heads/master
| 2020-04-17T15:14:31.865951
| 2019-04-19T09:24:54
| 2019-04-19T09:24:54
| 166,690,766
| 4
| 1
|
MIT
| 2019-04-19T06:35:26
| 2019-01-20T17:45:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
import os
import time
from datetime import datetime
import pandas as pd
from poncho.utils.get_base_dir import get_base_dir
def main(unique_years):
BASE_DIR = get_base_dir()
# Log file
log = open(os.path.join(BASE_DIR, 'data', 'logs', 'train_{}.txt'.format(str(time.time()).split('.')[0])), mode='a')
log.write('Beginning to create training dataset. Time: {}\n\n'.format(str(datetime.now())))
print('Beginning to create training dataset. Time: {}\n'.format(str(datetime.now())))
for year in unique_years:
log.write('Converting data of {} into train dataset. Time: {}\n'.format(year, str(datetime.now())))
print('Converting data of {} into train dataset. Time: {}'.format(year, str(datetime.now())))
# Open required CSV file
df = pd.read_csv(os.path.join(BASE_DIR, 'data', 'prepared', 'prepared_{}.csv'.format(year))).dropna()
# Convert the comments and replies to a Pandas DataFrame object
comment = pd.DataFrame(df['comment'])
reply = pd.DataFrame(df['reply'])
# Write the comments and replies to separate files in the directory of 'nmt-chatbot'
comment.to_csv(
os.path.join(os.path.dirname(BASE_DIR), 'nmt-chatbot', 'new_data', 'train.from'),
mode='w',
index=False,
header=None
)
reply.to_csv(
os.path.join(os.path.dirname(BASE_DIR), 'nmt-chatbot', 'new_data', 'train.to'),
mode='w',
index=False,
header=None
)
log.write('Finishing up... Time: {}\n'.format(str(datetime.now())))
log.write('==========================================================================================\n\n')
print('Finishing up... Time: {}'.format(str(datetime.now())))
print('==========================================================================================')
log.close()
|
[
"ryannor56a@gmail.com"
] |
ryannor56a@gmail.com
|
9ec2452996d72632a51586c21518412a127f1081
|
95a9ee042c6d0aa9cc5ee44c6b9317a10e7b02ee
|
/day07/python/day07.py
|
4b491ffdeb3195bf4f11b03bf979dea285e470e7
|
[] |
no_license
|
ochronus/advent-of-code-2019
|
4e273a236a801cffc347c419d408ebd717366f7f
|
ba47c63913c6b85953d04eaad15f8fac56ed1c5b
|
refs/heads/master
| 2020-09-21T13:32:51.679255
| 2019-12-13T06:05:08
| 2019-12-13T06:05:08
| 224,802,962
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
from itertools import permutations
from computer import Computer, HaltException
def part1(program):
phase_settings = permutations(range(5))
highest_output_signal = 0
for phase_setting in phase_settings:
amplifiers = []
for i in range(5):
amplifiers.append(Computer(program))
amplifiers[i].give_signal(phase_setting[i])
signal = 0
for i in range(5):
amplifiers[i].give_signal(signal)
signal = amplifiers[i].run()
highest_output_signal = max(highest_output_signal, signal)
return highest_output_signal
def part2(program):
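# Part 2 wires the amplifiers in a feedback loop (phase settings 5-9) and
# keeps cycling the signal until an amplifier's program halts.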
phase_settings = permutations(range(5, 10))
highest_output_signal = 0
for phase_setting in phase_settings:
amplifiers = []
for i in range(5):
amplifiers.append(Computer(program))
amplifiers[i].give_signal(phase_setting[i])
signal = 0
should_halt = False
while not should_halt:
for i in range(5):
amplifiers[i].give_signal(signal)
try:
signal = amplifiers[i].run()
except HaltException:
should_halt = True
highest_output_signal = max(highest_output_signal, signal)
return highest_output_signal
with open("../input.txt") as f:
acs = list([int(i) for i in f.read().split(",")])
print(part1(acs))
print(part2(acs))
|
[
"ochronus@ochronus.com"
] |
ochronus@ochronus.com
|
382ed93decb7215d582077e08f363fa17473c08f
|
dbb0a4d452ac0faf00411a09b7e32f13ffdb31e8
|
/tests/ssh_test.py
|
e2dda308c8bbe2ee15c26bb156ddb008f3404399
|
[] |
no_license
|
koverman47/EGEN_310
|
3ef66b7fb773b4e5fb833c250c87c7cf4fc84d49
|
f69e292baa48bca441dd0f7d9ba7789db417d42a
|
refs/heads/master
| 2020-04-18T00:39:47.999960
| 2019-04-24T20:14:44
| 2019-04-24T20:14:44
| 167,086,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
#!/usr/bin/env python3
import paramiko
import sys
import tty
import termios
# RPi Zero w SSH Credentials
host = "zeropythirty"
ip = "10.152.247.52"
user = "pi"
passw = "pi"
# Establish SSH tunnel
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=ip, username=user, password=passw)
except paramiko.BadHostKeyException:
print("Host could not be found.")
sys.exit(0)
except paramiko.AuthenticationException:
print("Could not authenticate host.")
sys.exit(0)
except paramiko.SSHException:
print("Unknown SSH error.")
sys.exit(0)
# Give stdin to this script
tty.setcbreak(sys.stdin)
try:
while True:
result = None
# Read and pass key over SSH tunnel
key = ord(sys.stdin.read(1))
result = ssh.exec_command(str(key))
except KeyboardInterrupt:
pass
except paramiko.SSHException:
if result:
print(result)
finally:
# Return stdin to ECHO
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
old[3] = old[3] | termios.ECHO
termios.tcsetattr(fd, termios.TCSADRAIN, old)
# Close SSH tunnel
ssh.close()
|
[
"knoverman@comcast.net"
] |
knoverman@comcast.net
|
f32e1d9a57e44c6badf99051fc94750d8a7f23e3
|
0c4d481936c5cde601e3b9d6c7e83b8e70ba9fae
|
/.history/train_search_param_20201009092152.py
|
12cf377b195daa81cd3800b44ababebd327c74c8
|
[] |
no_license
|
CGCL-codes/PDAS
|
7d98afaf60abe7ce3075e80125d98e51d92f42f0
|
578011f738f2418fe5be2dd38eb819566e7fc3bb
|
refs/heads/master
| 2023-05-08T03:26:56.450175
| 2021-05-27T02:50:38
| 2021-05-27T02:50:38
| 371,215,287
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,688
|
py
|
import os
import sys
import time
import glob
import math
import random
import logging
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as datasets
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
#from resnet_change2 import *
from resnet_new_change import *
from prune_params import ResNet20_Channel_Prune
from net_measure import measure_model, measure_param
from utils import AverageMeter, accuracy, count_parameters_in_MB, save_checkpoint
from architect import Architect
parser = argparse.ArgumentParser(description='Cifar10 Train Search')
parser.add_argument('--data', type=str, default='/home/cyh/workspace/cifar10',
help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.1,
help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001,
help='min learning rate(0.0)')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=int, default=50, help='report frequency')
#parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=120, help='num of training epochs')
parser.add_argument('--save', type=str, default='./checkpoint/',
help='folder to save checkpoints and logs')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5,
help='portion of training data')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_learning_rate', type=float, default=6e-4,
help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3,
help='weight decay for arch encoding')
parser.add_argument('--change', action='store_true', default=False,
help='change prune ratio during searching')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
args = parser.parse_args()
log = open(os.path.join(args.save, 'channel-search-resnet20-0915.txt'),'w')
prune_index = ResNet20_Channel_Prune.index
prune_ratio = ResNet20_Channel_Prune.prune_ratio
#max2_ratio = torch.zeros(len(prune_index), 3)
min_ratio = torch.zeros(len(prune_index), 3)
min_ratio[:, 2] = -1
channel16 = list(range(2, 17, 2))
channel32 = list(range(2, 33, 2))
channel64 = list(range(2, 65, 2))
def main():
if not torch.cuda.is_available():
print('no gpu device available!!!')
sys.exit(1)
np.random.seed(args.seed)
torch.cuda.set_device(args.gpu)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cudnn.benchmark = True
cudnn.enabled = True
print_log('=> parameters: {}'.format(args), log)
best_acc = 0
best_epoch = 0
criterion = nn.CrossEntropyLoss().cuda()
count_ops, count_params, conv_list = measure_model(depth=20)
print('=> count_ops: {}, count_params: {}'.format(count_ops, count_params))
model = resnet(depth=20).cuda()
#model = torch.nn.DataParallel(model).cuda()
optimizer = torch.optim.SGD(model.parameters(), args.learning_rate,
momentum=args.momentum, weight_decay=args.weight_decay)
    arch_optimizer = torch.optim.Adam(model.arch_parameters(), lr=args.arch_learning_rate,
                                      betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),(0.2023, 0.1994, 0.2010))
])
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
train_data = datasets.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor(args.train_portion * num_train))
train_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
pin_memory=True, num_workers=2)
valid_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
pin_memory=True, num_workers=2)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs), eta_min=args.learning_rate_min)
architect = Architect(model, criterion, count_params, args)
print_log('==> arch parameters: {}'.format(model.arch_parameters()), log)
print_log('==> arch parameters ratio: {}'.format(F.softmax(model.arch_params, dim=-1)), log)
for epoch in range(args.epochs):
#scheduler.step()
lr = scheduler.get_last_lr()[0]
print_log('=> epoch {}, lr {}'.format(epoch, lr), log)
if args.change and epoch >= 15 and epoch <= args.epochs-5:
arch_weights = F.softmax(model.arch_params, dim=-1)
_, index = arch_weights.topk(4, 1, True, True)
for j in range(len(prune_index)):
new_index = prune_ratio[j][index[j][3].item()]
old_index = min_ratio[j][1].item()
'''new_ratio = prune_ratio[j][index[j][4].item()]
old_ratio = min_ratio[j][1].item()'''
count = min_ratio[j][0].item()
if abs(new_index-old_index) < 1e-6:
if count >= 19:
max_ratio = prune_ratio[j][index[j][0].item()]
if j < 7:
a = random.randint(max(max_ratio-3, 0), min(max_ratio+3, len(channel16)-1))
elif j < 13:
a = random.randint(max(max_ratio-5, 0), min(max_ratio+5, len(channel32)-1))
else:
a = random.randint(max(max_ratio-11, 0), min(max_ratio+11, len(channel64)-1))
if abs(new_index - min_ratio[j][2].item()) < 1e-6:
prune_ratio[j][index[j][2].item()] = a
else:
prune_ratio[j][index[j][3].item()] = a
min_ratio[j][0] = 0
ratios = 1e-3 * torch.randn(1, 4)
with torch.no_grad():
for k in range(4):
model.arch_params[j][k] = ratios[0][k].item()
else:
min_ratio[j][0] += 1
else:
min_ratio[j][0] = 0
min_ratio[j][1] = new_index
train_acc, train_loss = train(train_queue, valid_queue, model, architect, criterion, optimizer, arch_optimizer, lr, epoch, count_params, count_ops, conv_list)
scheduler.step()
print_log('=> train acc: {}'.format(train_acc), log)
print_log('=> min ratio: {}'.format(min_ratio), log)
print_log('=> arch parameters ratio: {}'.format(F.softmax(model.arch_params, dim=-1)),log)
print_log('=> prune ratio: {}'.format(prune_ratio), log)
if args.epochs - epoch <= 1:
valid_acc, valid_loss = infer(valid_queue, model, criterion)
print_log('valid_acc: {}'.format(valid_acc), log)
arch_weights = F.softmax(model.arch_params, dim=-1)
_, index = arch_weights.topk(1, 1, True, True)
max_cfg = []
#mix_cfg = []
for j in range(len(prune_index)):
if j < 7:
channel = channel16[prune_ratio[j][index[j][0].item()]]
max_cfg.append(channel)
elif j < 13:
channel = channel32[prune_ratio[j][index[j][0].item()]]
max_cfg.append(channel)
else:
channel = channel64[prune_ratio[j][index[j][0].item()]]
max_cfg.append(channel)
'''channel = max(int(round(cfg[j] * (1 - prune_ratio[j][index[j][0].item()]) / 2) * 2), 2)
max_cfg.append(channel)
mix_prune_ratio = 0
for k in range(5):
mix_prune_ratio += prune_ratio[j][k] * arch_weights[j][k].item()
#mix_channel += max(int(round(cfg[j] * (1 - prune_ratio[j][k]) * arch_weights[j][k].item() / 2) * 2), 2)
mix_channel = max(int(round(cfg[j] * (1 - mix_prune_ratio) / 2) * 2), 2)
mix_cfg.append(mix_channel)'''
print_log('==> max cfg: {}'.format(max_cfg), log)
#print_log('==> mix cfg: {}'.format(mix_cfg), log)
print_log("==> arch parameters: {}".format(model.arch_parameters()), log)
#print_log('==> best acc: {}, best epoch: {}'.format(best_acc, best_epoch), log)
def train(train_queue, valid_queue, model, architect, criterion, optimizer, arch_optimizer, lr, epoch, count_params, count_ops, conv_list):
losses = AverageMeter()
basic_losses = AverageMeter()
param_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.train()
for index, (inputs, targets) in enumerate(train_queue):
inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
try:
input_search, target_search = next(valid_queue_iter)
except:
valid_queue_iter = iter(valid_queue)
input_search, target_search = next(valid_queue_iter)
#input_search, target_search = next(iter(valid_queue))
input_search, target_search = input_search.cuda(), target_search.cuda(non_blocking=True)
if epoch >= 15:
architect.step(inputs, targets, input_search, target_search, lr, optimizer, unrolled=args.unrolled)
optimizer.zero_grad()
logits = model(inputs)
loss = criterion(logits, targets)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
prec1, prec5 = accuracy(logits.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
#basic_losses.update(basic_loss.item(), inputs.size(0))
#param_losses.update(param_loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
else:
arch_optimizer.zero_grad()
output_search = model(input_search)
arch_loss = criterion(output_search, target_search)
arch_loss.backward()
arch_optimizer.step()
optimizer.zero_grad()
logits = model(inputs)
basic_loss = criterion(logits, targets)
total_params = count_model_params(model)
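# penalize deviation of the candidate model's parameter count from the 50%
# compression target, with a 5% tolerance band where no penalty applies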
if total_params > (1 + 0.05) * (0.5 * count_params):
param_loss = 2 * math.log(total_params / (0.5 * count_params))
elif total_params < (1 - 0.05) * (0.5 * count_params):
param_loss = -2 * math.log(total_params / (0.5 * count_params))
else:
param_loss = 0
#param_loss = 0.11 * (math.log(total_params) ** 0.9)
#param_loss = 0.086 * (math.log(total_params))
#flops = count_model_flops(model, count_ops, conv_list)
#print('=> flops: {}'.format(flops))
#flop_loss = 0.083*(math.log(flops)**0.9)
#flop_loss = 0.084 * (math.log(flops) ** 0.9)
#flop_loss = 0.06 * math.log(flops)
#print('=> flop loss: {}'.format(flop_loss))
#loss = basic_loss * param_loss
loss = basic_loss + param_loss
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
prec1, prec5 = accuracy(logits.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
#basic_losses.update(basic_loss.item(), inputs.size(0))
#param_losses.update(param_loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
if index % args.report_freq == 0:
print_log('=> time: {}, train index: {}, loss: {}, top1: {}, top5: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), index, losses.avg, top1.avg, top5.avg), log)
return top1.avg, losses.avg
def infer(valid_queue, model, criterion):
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
with torch.no_grad():
for index, (inputs, targets) in enumerate(valid_queue):
inputs, targets = inputs.cuda(), targets.cuda()
logits = model(inputs)
loss = criterion(logits, targets)
prec1, prec5 = accuracy(logits.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
if index % args.report_freq == 0:
print_log('=> time: {}, valid index: {}, loss: {}, top1: {}, top5: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), index, losses.avg, top1.avg, top5.avg), log)
return top1.avg, losses.avg
def count_model_params(model):
arch_weights = F.softmax(model.arch_params, dim=-1)
_, index = arch_weights.topk(1, 1, True, True)
cfg = []
for k, m in enumerate(model.modules()):
if k in prune_index:
index_p = prune_index.index(k)
if index_p < 7:
channel = channel16[prune_ratio[index_p][index[index_p][0].item()]]
cfg.append(channel)
elif index_p < 13:
channel = channel32[prune_ratio[index_p][index[index_p][0].item()]]
cfg.append(channel)
else:
channel = channel64[prune_ratio[index_p][index[index_p][0].item()]]
cfg.append(channel)
'''pr = prune_ratio[index_p][index[index_p][0].item()]
oC = max(int(round((m.weight.data.shape[0] * (1 - pr)) / 2) * 2), 2)
cfg.append(oC)'''
total = measure_param(depth=20, cfg=cfg)
return total
'''total = sum(p.numel() for p in model.parameters())
arch_weights = F.softmax(model.module.arch_params, dim=-1)
_, index = arch_weights.topk(1, 1, True, True)
for k, m in enumerate(model.module.modules()):
if k in prune_index:
index_p = prune_index.index(k)
if index_p == 0 :
pr = prune_ratio[index_p][index[index_p][0].item()]
oC = m.weight.data.shape[0] - int(round((m.weight.data.shape[0] * (1 - pr)) / 2) * 2)
total -= oC * m.weight.data.shape[1] * m.weight.data.shape[2] * m.weight.data.shape[3]
#total -= int(m.weight.data.numel() * (1 - prune_ratio[index_p][index[index_p][0].item()]))
else:
pr0 = prune_ratio[index_p-1][index[index_p-1][0].item()]
pr1 = prune_ratio[index_p][index[index_p][0].item()]
iC = m.weight.data.shape[1] - int(round((m.weight.data.shape[1] * (1 - pr0)) / 2) * 2)
oC = m.weight.data.shape[0] - int(round((m.weight.data.shape[0] * (1 - pr1)) / 2) * 2)
total -= oC * iC * m.weight.data.shape[2] * m.weight.data.shape[3]
#total -= int(m.weight.data.numel() * (1 - prune_ratio[index_p][index[index_p][0].item()]))
return total'''
def count_model_flops(model, total_flops, conv_list):
arch_weights = F.softmax(model.arch_params, dim=-1)
_, index = arch_weights.topk(1, 1, True, True)
total = total_flops
#print(total)
#print('=> prune index: {}'.format(prune_index))
for k, m in enumerate(model.modules()):
if k in prune_index:
if k == 1:
pr = prune_ratio[0][index[0][0].item()]
total -= int(conv_list[0] // 2 * pr)
#print('=> total: {}'.format(total))
elif k == 6:
pr0 = 1 - prune_ratio[0][index[0][0].item()]
pr1 = 1 - prune_ratio[1][index[1][0].item()]
total -= int(conv_list[1] // 2 * (1 - pr0 * pr1))
#print('=> total: {}'.format(total))
else:
index_p = prune_index.index(k)
pr = prune_ratio[index_p][index[index_p][0].item()]
total -= int(conv_list[2*index_p-1] // 2 * pr)
#print('=> total: {}'.format(total))
elif k-3 in prune_index and k-3 != 1:
index_p = prune_index.index(k-3)
pr = prune_ratio[index_p][index[index_p][0].item()]
total -= int(conv_list[2*index_p] // 2 * pr)
#print('=> total: {}'.format(total))
return total
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
if __name__ == '__main__':
main()
|
[
"1300302496@qq.com"
] |
1300302496@qq.com
|
0605bbf65a547de08887585ef7e0fee02b015ac9
|
0ceabf1234cc921e1cd81c4290168b2f770d1aa1
|
/home/migrations/0004_auto_20200521_1808.py
|
688eb628541e321563fd3ab8a77d25d3bc2a9478
|
[] |
no_license
|
deekhari00716/Doctor-s-webstite
|
f4cdf915fbda676a17efd8db94712dbe0456d0b4
|
78f5e671277f041d21f362f267d5b9c893caf50c
|
refs/heads/master
| 2022-08-18T16:31:28.691028
| 2020-05-23T13:02:47
| 2020-05-23T13:02:47
| 266,334,136
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# Generated by Django 3.0.3 on 2020-05-21 18:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0003_project_date'),
]
operations = [
migrations.AlterField(
model_name='project',
name='image',
field=models.ImageField(height_field=330, upload_to='home/images', width_field=185),
),
]
|
[
"deekhsri00716@gmail.com"
] |
deekhsri00716@gmail.com
|
5a8680c24a91be453228f2818013da9f4dc66067
|
05dc7b4b44200cc022b09f773dd868049ecfb3e1
|
/rss/admin.py
|
c0a8c6aaa5087612094e649d116e9e1783bec26f
|
[] |
no_license
|
micah66/rssFeed
|
e7cea9a3f68713585990aaa039ff80c289d01e75
|
0b17f3d13333bf7e6eae182fdeb6bc565ef8977a
|
refs/heads/master
| 2020-03-21T12:44:29.659306
| 2018-06-28T08:52:45
| 2018-06-28T08:52:45
| 138,569,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
from django.contrib import admin
from .models import Headlines
# Register your models here.
admin.site.register(Headlines)
|
[
"micahgordon66@gmail.com"
] |
micahgordon66@gmail.com
|
6edc845cb4ebc87b7e5081731b569ca3d83813bd
|
f876ed037442b60e964bb53e4a0cc7e14818a746
|
/rnn.py
|
54a32e964cce09e970934f341f7459084c3d9bde
|
[] |
no_license
|
shksa/cs231n-Assignment3
|
b08913353372575ff8b04552387008b91e6a0a06
|
528581fdd47471390fcd95a79b626a0a738c9b58
|
refs/heads/master
| 2021-01-20T12:42:09.528428
| 2017-05-05T16:44:33
| 2017-05-05T16:44:33
| 90,397,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,020
|
py
|
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
"""
A CaptioningRNN produces captions from image features using a recurrent
neural network.
The RNN receives input vectors of size D, has a vocab size of V, works on
sequences of length T, has an RNN hidden dimension of H, uses word vectors
of dimension W, and operates on minibatches of size N.
Note that we don't use any regularization for the CaptioningRNN.
"""
def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
hidden_dim=128, cell_type='rnn', dtype=np.float32):
"""
Construct a new CaptioningRNN instance.
Inputs:
- word_to_idx: A dictionary giving the vocabulary. It contains V entries,
and maps each string to a unique integer in the range [0, V).
- input_dim: Dimension D of input image feature vectors.
- wordvec_dim: Dimension W of word vectors.
- hidden_dim: Dimension H for the hidden state of the RNN.
- cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
- dtype: numpy datatype to use; use float32 for training and float64 for
numeric gradient checking.
"""
if cell_type not in {'rnn', 'lstm'}:
raise ValueError('Invalid cell_type "%s"' % cell_type)
self.cell_type = cell_type
self.dtype = dtype
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.items()}
self.params = {}
vocab_size = len(word_to_idx)
self._null = word_to_idx['<NULL>']
self._start = word_to_idx.get('<START>', None)
self._end = word_to_idx.get('<END>', None)
# Initialize word vectors
self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
self.params['W_embed'] /= 100
# Initialize CNN -> hidden state projection parameters
self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
self.params['W_proj'] /= np.sqrt(input_dim)
self.params['b_proj'] = np.zeros(hidden_dim)
# Initialize parameters for the RNN
dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx'] /= np.sqrt(wordvec_dim)
self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh'] /= np.sqrt(hidden_dim)
self.params['b'] = np.zeros(dim_mul * hidden_dim)
# Initialize output to vocab weights
self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
self.params['W_vocab'] /= np.sqrt(hidden_dim)
self.params['b_vocab'] = np.zeros(vocab_size)
# Cast parameters to correct dtype
for k, v in self.params.items():
self.params[k] = v.astype(self.dtype)
def loss(self, features, captions):
"""
Compute training-time loss for the RNN. We input image features and
ground-truth captions for those images, and use an RNN (or LSTM) to compute
loss and gradients on all parameters.
Inputs:
- features: Input image features, of shape (N, D)
- captions: Ground-truth captions; an integer array of shape (N, T) where
each element is in the range 0 <= y[i, t] < V
Returns a tuple of:
- loss: Scalar loss
- grads: Dictionary of gradients parallel to self.params
"""
# Cut captions into two pieces: captions_in has everything but the last word
# and will be input to the RNN; captions_out has everything but the first
# word and this is what we will expect the RNN to generate. These are offset
# by one relative to each other because the RNN should produce word (t+1)
# after receiving word t. The first element of captions_in will be the START
# token, and the first element of captions_out will be the first word.
captions_in = captions[:, :-1]
captions_out = captions[:, 1:]
# You'll need this
mask = (captions_out != self._null)
# Weight and bias for the affine transform from image features to initial
# hidden state
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
# Word embedding matrix
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the forward and backward passes for the CaptioningRNN. #
# In the forward pass you will need to do the following: #
# (1) Use an affine transformation to compute the initial hidden state #
# from the image features. This should produce an array of shape (N, H)#
# (2) Use a word embedding layer to transform the words in captions_in #
# from indices to vectors, giving an array of shape (N, T, W). #
# (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #
# process the sequence of input word vectors and produce hidden state #
# vectors for all timesteps, producing an array of shape (N, T, H). #
# (4) Use a (temporal) affine transformation to compute scores over the #
# vocabulary at every timestep using the hidden states, giving an #
# array of shape (N, T, V). #
# (5) Use (temporal) softmax to compute loss using captions_out, ignoring #
# the points where the output word is <NULL> using the mask above. #
# #
# In the backward pass you will need to compute the gradient of the loss #
# with respect to all model parameters. Use the loss and grads variables #
# defined above to store loss and gradients; grads[k] should give the #
# gradients for self.params[k]. #
############################################################################
if self.cell_type == 'rnn':
############################# Forward Pass #################################
# (1) Image features projection onto RNN by affine transformation with W_proj weights array.
IFP_to_RNN, cache_for_feature_projection = affine_forward(features, W_proj, b_proj)
# (2) Convert word indices to word vectors of the captions_in matrix
Embedded_captions_in, cache_for_word_embedding = word_embedding_forward(captions_in, W_embed)
# (3) Run RNN for the "T" length of sequence over the minibatch N.
h0 = IFP_to_RNN
HdSV_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_1 = rnn_forward(Embedded_captions_in, h0, Wx, Wh, b)
# (4) Compute scores over the words in vocabulary for all time steps over the mini-batch N.
Scores_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_2 = temporal_affine_forward(HdSV_ForAllTimeSteps_OverMiniBatch, W_vocab, b_vocab)
# (5) Compute loss using ground-truth captions_out matrix.
loss, d_Loss__d_Scores_ForAllTimeSteps_OverMiniBatch = temporal_softmax_loss(Scores_ForAllTimeSteps_OverMiniBatch, captions_out, mask)
############################# Backward Pass ################################
# (4) Backprop into temporal_affine_forward function.
d_HdSV_ForAllTimeSteps_OverMiniBatch, d_W_vocab, d_b_vocab = temporal_affine_backward(d_Loss__d_Scores_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_2)
# (3) Backprop into rnn_forward function.
d_Embedded_captions_in, d_h0, d_Wx, d_Wh, d_b = rnn_backward(d_HdSV_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_1)
# (2) Backprop into word_embedding_forward function.
d_W_embed = word_embedding_backward(d_Embedded_captions_in, cache_for_word_embedding)
# (1) Backprop into the image-features affine transformation function
d_IFP_to_RNN = d_h0
d_features, d_W_proj, d_b_proj = affine_backward(d_IFP_to_RNN, cache_for_feature_projection)
elif self.cell_type == 'lstm':
# (1) Image features projection onto RNN by affine transformation with W_proj weights array.
IFP_to_RNN, cache_for_feature_projection = affine_forward(features, W_proj, b_proj)
# (2) Convert word indices to word vectors of the captions_in matrix
Embedded_captions_in, cache_for_word_embedding = word_embedding_forward(captions_in, W_embed)
# (3) Run RNN for the "T" length of sequence over the minibatch N.
h0 = IFP_to_RNN
HdSV_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_1 = lstm_forward(Embedded_captions_in, h0, Wx, Wh, b)
# (4) Compute scores over the words in vocabulary for all time steps over the mini-batch N.
Scores_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_2 = temporal_affine_forward(HdSV_ForAllTimeSteps_OverMiniBatch, W_vocab, b_vocab)
# (5) Compute loss using ground-truth captions_out matrix.
loss, d_Loss__d_Scores_ForAllTimeSteps_OverMiniBatch = temporal_softmax_loss(Scores_ForAllTimeSteps_OverMiniBatch, captions_out, mask)
############################# Backward Pass ################################
# (4) Backprop into temporal_affine_forward function.
d_HdSV_ForAllTimeSteps_OverMiniBatch, d_W_vocab, d_b_vocab = temporal_affine_backward(d_Loss__d_Scores_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_2)
# (3) Backprop into rnn_forward function.
d_Embedded_captions_in, d_h0, d_Wx, d_Wh, d_b = lstm_backward(d_HdSV_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_1)
# (2) Backprop into word_embedding_forward function.
d_W_embed = word_embedding_backward(d_Embedded_captions_in, cache_for_word_embedding)
# (1) Backprop into the image-features affine transformation function
d_IFP_to_RNN = d_h0
d_features, d_W_proj, d_b_proj = affine_backward(d_IFP_to_RNN, cache_for_feature_projection)
grads.update({
'W_proj': d_W_proj,
'b_proj': d_b_proj,
'W_embed': d_W_embed,
'Wx': d_Wx,
'Wh': d_Wh,
'b': d_b,
'W_vocab': d_W_vocab,
'b_vocab': d_b_vocab
})
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
def sample(self, features, max_length=30):
"""
Run a test-time forward pass for the model, sampling captions for input
feature vectors.
At each timestep, we embed the current word, pass it and the previous hidden
state to the RNN to get the next hidden state, use the hidden state to get
scores for all vocab words, and choose the word with the highest score as
the next word. The initial hidden state is computed by applying an affine
transform to the input image features, and the initial word is the <START>
token.
For LSTMs you will also have to keep track of the cell state; in that case
the initial cell state should be zero.
Inputs:
- features: Array of input image features of shape (N, D).
- max_length: Maximum length T of generated captions.
Returns:
- captions: Array of shape (N, max_length) giving sampled captions,
where each element is an integer in the range [0, V). The first element
of captions should be the first sampled word, not the <START> token.
"""
N = features.shape[0]
captions = self._null * np.ones((N, max_length), dtype=np.int32)
# Unpack parameters
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
W_embed = self.params['W_embed']
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
###########################################################################
# TODO: Implement test-time sampling for the model. You will need to #
# initialize the hidden state of the RNN by applying the learned affine #
# transform to the input image features. The first word that you feed to #
# the RNN should be the <START> token; its value is stored in the #
# variable self._start. At each timestep you will need to: #
# (1) Embed the previous word using the learned word embeddings #
# (2) Make an RNN step using the previous hidden state and the embedded #
# current word to get the next hidden state. #
# (3) Apply the learned affine transformation to the next hidden state to #
# get scores for all words in the vocabulary #
# (4) Select the word with the highest score as the next word, writing it #
# to the appropriate slot in the captions variable #
# #
# For simplicity, you do not need to stop generating after an <END> token #
# is sampled, but you can if you want to. #
# #
# HINT: You will not be able to use the rnn_forward or lstm_forward #
# functions; you'll need to call rnn_step_forward or lstm_step_forward in #
# a loop. #
###########################################################################
# Initial hidden state for the RNN
h0, cache = affine_forward(features, W_proj, b_proj)
# First word to the RNN should be the special <START> token
V, wordVec_dim = W_embed.shape
Start_Vector_Batch = np.zeros((N, wordVec_dim))
Start_Vector = W_embed[self._start]
Start_Vector_Batch[np.arange(N)] = Start_Vector
Hidden_States = {}
Hidden_States[0] = h0
H = h0.shape[1]
c0 = np.zeros((N, H))
Cell_States = {}
Cell_States[0] = c0
Scores = {}
        if self.cell_type == 'rnn':
            for t in range(1, max_length + 1):
                if t == 1:
                    x = Start_Vector_Batch
                else:
                    # Feed back the word sampled at the previous step.
                    x = W_embed[captions[:, t - 2]]
                prev_h = Hidden_States[t - 1]
                Hidden_States[t], cache = rnn_step_forward(x, prev_h, Wx, Wh, b)
                Scores[t], cache = affine_forward(Hidden_States[t], W_vocab, b_vocab)
                # The word sampled at step t fills caption slot t-1, so the first
                # sampled word (not <START>) lands in captions[:, 0].
                captions[range(N), t - 1] = np.argmax(Scores[t], axis=1)
        elif self.cell_type == 'lstm':
            for t in range(1, max_length + 1):
                if t == 1:
                    x = Start_Vector_Batch
                else:
                    # Feed back the word sampled at the previous step.
                    x = W_embed[captions[:, t - 2]]
                prev_h = Hidden_States[t - 1]
                prev_c = Cell_States[t - 1]
                Hidden_States[t], Cell_States[t], cache = lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)
                Scores[t], cache = affine_forward(Hidden_States[t], W_vocab, b_vocab)
                captions[range(N), t - 1] = np.argmax(Scores[t], axis=1)
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
        return captions
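    # Hedged usage sketch (the variable names below are assumptions, not from
    # this file): given CNN features, sample() returns one caption per image.
    #   features = np.random.randn(2, input_dim)   # features for 2 images
    #   captions = model.sample(features)          # -> (2, 30) array of word ids
    #   words = [[idx_to_word[i] for i in row] for row in captions]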
|
[
"noreply@github.com"
] |
shksa.noreply@github.com
|
1fe280eafbf7f4ca37046d98d4cf1d1ae08472ed
|
18059513f87f1adc5cae34ac74bd89835c4f6816
|
/Deck_of_cards.py
|
8bdec654c96079de29cafccbb7c1c2ada8122edc
|
[] |
no_license
|
PrzemyslawMisiura/Deck_of_cards
|
fcdc6d47ba10d352d7e29005d4b6ef23734870bf
|
ed0f941ec4456dc2435c87f1f8a111db43549861
|
refs/heads/master
| 2020-07-17T23:32:25.635935
| 2019-09-04T10:35:00
| 2019-09-04T10:35:00
| 206,124,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,953
|
py
|
# Specifications
# Card:
# Each instance of Card should have a suit ("Hearts", "Diamonds", "Clubs", or "Spades").
# Each instance of Card should have a value ("A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K").
# Card's __repr__ method should return the card's value and suit (e.g. "A
# of Clubs", "J of Diamonds", etc.)
class Card:
    def __init__(self, value, suit):
        self.value = value
        self.suit = suit

    def __repr__(self):
        return "{} of {}".format(self.value, self.suit)
# Deck:
# Each instance of Deck should have a cards attribute with all 52 possible
# instances of Card.
# Deck should have an instance method called count which returns a count
# of how many cards remain in the deck.
# Deck's __repr__ method should return information on how many cards are
# in the deck (e.g. "Deck of 52 cards", "Deck of 12 cards", etc.)
# Deck should have an instance method called _deal which accepts a number
# and removes at most that many cards from the deck (it may need to remove
# fewer if you request more cards than are currently in the deck!). If
# there are no cards left, this method should raise a ValueError with the
# message "All cards have been dealt".
# Deck should have an instance method called shuffle which will shuffle a
# full deck of cards. If there are cards missing from the deck, this
# method should raise a ValueError with the message "Only full decks can
# be shuffled". Shuffle should return the shuffled deck.
# Deck should have an instance method called deal_card which uses the
# _deal method to deal a single card from the deck and return that single
# card.
# Deck should have an instance method called deal_hand which accepts a
# number and uses the _deal method to deal a list of cards from the deck
# and return that list of cards.
from random import shuffle
class Deck:
    def __init__(self):
        suits = ["Hearts", "Diamonds", "Clubs", "Spades"]
        values = ["A", "2", "3", "4", "5", "6", "7",
                  "8", "9", "10", "J", "Q", "K"]
        self.cards = [Card(v, s) for v in values for s in suits]

    def count(self):
        return len(self.cards)

    def __repr__(self):
        return "Deck of {} cards".format(self.count())

    def _deal(self, num):
        count = self.count()
        actual = min([count, num])
        if count == 0:
            raise ValueError("All cards have been dealt")
        cards = self.cards[-actual:]
        self.cards = self.cards[:-actual]
        return cards

    def deal_card(self):
        return self._deal(1)[0]

    def deal_hand(self, n):
        return self._deal(n)

    def shuffle(self):
        if self.count() < 52:
            raise ValueError("Only full decks can be shuffled")
        # random.shuffle works in place and returns None, so shuffle first and
        # then return the deck, as the spec above requires.
        shuffle(self.cards)
        return self
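# Hedged usage sketch exercising the spec above (not part of the original file):
#   deck = Deck()
#   deck.shuffle()              # full deck -> OK, returns the shuffled Deck
#   hand = deck.deal_hand(5)    # list of 5 Card instances
#   print(deck)                 # "Deck of 47 cards"
#   deck.shuffle()              # ValueError: Only full decks can be shuffled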
|
[
"przem.misiura@gmail.com"
] |
przem.misiura@gmail.com
|
f6b693f1370e3d80c736a6b08d507d671d4a8bc5
|
008c065391d766fec2f2af252dd8a5e9bf5cb815
|
/Even Matrix.py
|
7e545a6a78adeb1c5ec75a406ef4644cbe57e481
|
[] |
no_license
|
22Rahul22/Codechef
|
b261ab43ff5ff64648a75ad1195e33cac2cfec52
|
1f645c779a250a71d75598e1eabad7e52dd6b031
|
refs/heads/master
| 2022-11-29T21:51:09.578798
| 2020-08-19T06:20:23
| 2020-08-19T06:20:23
| 288,650,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
t = int(input())
for _ in range(t):
    n = int(input())
    arr = [[0 for i in range(n)] for j in range(n)]
    sr = 0
    er = n
    sc = 0
    ec = n
    z = 0
    num = 1
    if n % 2 == 0:
        x = n // 2
    else:
        x = 1 + (n // 2)
    while z != x:
        # top row, left to right
        j = sc
        while j < ec:
            arr[sr][j] = num
            num += 1
            j += 1
        sr += 1
        # right column, top to bottom
        i = sr
        while i < er:
            arr[i][ec - 1] = num
            num += 1
            i += 1
        ec -= 1
        # bottom row, right to left
        j = ec - 1
        while j >= sc:
            arr[er - 1][j] = num
            num += 1
            j -= 1
        er -= 1
        # left column, bottom to top
        i = er - 1
        while i >= sr:
            arr[i][sc] = num
            num += 1
            i -= 1
        sc += 1
        z += 1
    for i in range(n):
        for j in range(n):
            print(arr[i][j], end=" ")
        print()
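# Worked example (hedged): for n = 3 the spiral fill above prints
#   1 2 3
#   8 9 4
#   7 6 5
# i.e. the values 1..n*n laid out clockwise from the top-left corner inward.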
|
[
"rahulbhl22@gmail.com"
] |
rahulbhl22@gmail.com
|
a26d87e83ed554ff4bfb8c5fe46b37fc647dc7a7
|
24cf672d6f5d8f43b42a847d0537e2feb38729c9
|
/SSModel/InternalSSClassifiers/BERTModel.py
|
cf423b5df71bfe2f145b1cfe985d4fe5c78849ff
|
[] |
no_license
|
chris414862/LiSSA
|
0ffe7670a432d6ee657c73b13dc9c63f8a32aa02
|
63bb3bfeed462453cda97d88f3f8b30d113d252d
|
refs/heads/main
| 2023-09-05T03:16:22.935872
| 2021-11-10T17:45:27
| 2021-11-10T17:45:27
| 327,984,243
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,413
|
py
|
import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizer
from SSModel.InternalSSVectorizers.BERTVectorizer import BERTVectorizer
from SSModel.ModelInterface import Model
from SSModel.VectroizerInterface import Vectorizer
from transformers import AdamW
import pandas as pd
from utils.AnnUtils import get_df_from_csv
from tqdm import tqdm
import re
from SSModel.InternalSSVectorizers.BoWVectorizer import BoWVectorizer
class BERTModel(Model):
    def __init__(self, model_class=None, tokenizer_class=None, pretrained_weights=None, num_man_feats=None,
                 trainable_bert_layers: tuple = None):
        self.internal_model = self.BERTInternal(model_class, pretrained_weights, 768, 3, num_man_feats)
        self.vectorizer = BERTVectorizer(tokenizer_class, pretrained_weights)
        self.class_labels = None
        self.model_type = 'BERT'

        def my_filter(x):
            mo = re.search(r"encoder\.layer\.(\d+)\.", x[0])
            if mo is None:
                return True
            try:
                layer_number = int(mo.group(1))
            except ValueError as e:
                print("Namespace conflict:", x[0], "\n'encoder.layer' should be reserved for bert layer.")
                raise e
            if trainable_bert_layers[0] <= layer_number + 1 <= trainable_bert_layers[1]:
                return True
            else:
                return False

        if trainable_bert_layers is not None:
            # named_parameters() yields (name, tensor) pairs; AdamW expects the
            # tensors only, so unpack after filtering by name.
            training_params = [p for name, p in filter(my_filter, self.internal_model.named_parameters())]
        else:
            training_params = self.internal_model.parameters()
        self.optimizer = AdamW(training_params)
        self.loss_function = nn.CrossEntropyLoss()
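        # Hedged example of what my_filter above receives (parameter names follow
        # the usual HuggingFace BERT layout; they are assumptions, not from this repo):
        # with trainable_bert_layers=(7, 12):
        #   ("encoder.layer.11.attention.self.query.weight", ...) -> kept    (layer 12)
        #   ("encoder.layer.3.output.dense.bias", ...)            -> dropped (layer 4)
        #   ("embeddings.word_embeddings.weight", ...)            -> kept    (no layer match)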
    def get_class_labels(self) -> list:
        return self.class_labels

    def get_model_type(self) -> str:
        return self.model_type

    def get_internal_model(self):
        return self.internal_model

    def get_weights(self):
        return self.internal_model.get_weights()

    def train(self, X: pd.Series, y: pd.Series, batch_size=2, epochs=1, man_feats=None):
        model = self.get_internal_model()
        model.train()
        self.class_labels: list = y.unique().tolist()
        num_entries = y.shape[0]
        for epoch in range(epochs):
            X = X.sample(frac=1.0)
            y = y[X.index]
            y = self.vectorizer.transform_labels(y, labels=self.class_labels)
            with tqdm(total=num_entries) as epoch_pbar:
                epoch_pbar.set_description(f'Epoch {epoch}')
                accum_loss = 0
                for idx, i in enumerate(range(0, len(X), batch_size)):
                    batch_X, batch_y = X[i:i + batch_size], y[i:i + batch_size]
                    batch_man_feats = man_feats[i:i + batch_size]
                    batch_X = self.vectorizer.transform_methods(batch_X)
                    self.optimizer.zero_grad()
                    predictions: torch.Tensor = model.forward(batch_X, batch_man_feats)
                    loss = self.loss_function(predictions, batch_y)
                    loss.backward()
                    self.optimizer.step()
                    # Add loss to accumulated loss
                    accum_loss += loss
                    # Update progress bar description
                    avg_loss = accum_loss / (idx + 1)
                    desc = f'Epoch {epoch} - avg_loss {avg_loss:.4f} - curr_loss {loss:.4f}'
                    epoch_pbar.set_description(desc)
                    epoch_pbar.update(batch_size)

    def get_vectorizer(self) -> Vectorizer:
        raise NotImplementedError()

    def predict(self, X):
        self.vectorizer.transform_methods(X)
    class BERTInternal(nn.Module):
        def __init__(self, model_class, pretrained_weights, embed_dimensions, num_classes, num_man_feats):
            super(BERTModel.BERTInternal, self).__init__()
            self.L1 = model_class.from_pretrained(pretrained_weights)
            self.L2 = self.CustomAttentionLayer(embed_dimensions, num_classes)
            self.final = nn.Linear(embed_dimensions + num_man_feats, num_classes, bias=False)
            self.final_bias = nn.Linear(num_classes, 1, bias=False)
            self.softmax = nn.Softmax(dim=1)

        def forward(self, encoded_input, man_feats: pd.DataFrame):
            input_ids = torch.tensor(encoded_input['input_ids'], dtype=torch.long)
            token_type_ids = torch.tensor(encoded_input['token_type_ids'], dtype=torch.long)
            attention_mask = torch.tensor(encoded_input['attention_mask'], dtype=torch.long)
            # Size of model output ==> (batch_size, seq_len, embed_dimensions)
            model_output, _ = self.L1(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
            # S1 size ==> (batch_size, num_classes, embed_dimensions)
            # Each vector in the class dimension represents the document's feature with respect to that class
            S1, word_attention_weights = self.L2.forward(model_output)

            # FINAL LAYER DIMENSION TRACKING
            # output = softmax(sum(hadamard(S1, W_c), dim=-1) + b_c) ==>
            # X = hadamard(S1, W_c):
            #     (batch_size, num_classes, embed_dims + num_manual_features) \hadamard (batch_size, num_classes, embed_dims + num_manual_features)
            #     ==> (batch_size, num_classes, embed_dims + num_manual_features)
            # X = sum(X, dim=-1):
            #     \sigma (batch_size, num_classes, embed_dims + num_manual_features) ==> (batch_size, num_classes)
            # X = X + b_c:
            #     (batch_size, num_classes) + (1, num_classes) ==> (batch_size, num_classes)
            # softmax(X):
            #     \softmax (batch_size, num_classes) ==> (batch_size, num_classes)
            man_feats_tens = torch.tensor(man_feats.to_numpy(dtype=int), dtype=torch.float32).unsqueeze(dim=1)
            # Manual features are repeated for every class
            man_feats_tens = man_feats_tens.repeat(1, S1.size()[1], 1)
            inter = torch.cat((S1, man_feats_tens), dim=-1)
            # Using the Hadamard product and summation ensures there's no interaction between the document's
            # different class representations. This makes analysis more straightforward.
            output = self.softmax(torch.sum(torch.mul(inter, self.final.weight), 2, keepdim=False) + self.final_bias.weight)
            return output

        def get_weights(self):
            return None
        class CustomAttentionLayer(nn.Module):
            def __init__(self, dimensions, num_classes):
                super(BERTModel.BERTInternal.CustomAttentionLayer, self).__init__()
                self.linear_in = nn.Linear(dimensions, dimensions)
                self.tanh = nn.Tanh()
                self.queries = nn.Linear(dimensions, num_classes)
                self.softmax = nn.Softmax(dim=2)

            def forward(self, X: torch.Tensor):
                # X.size() == (batch_size, seq_length, embed_dimensions)
                # U = tanh(X*W_w) ==> (batch_size, seq_length, embed_dimensions)*(embed_dimensions, embed_dimensions) -->
                #     (batch_size, seq_length, embed_dimensions)
                U = self.tanh(self.linear_in(X))
                # A = softmax(X*Q + b_q) ==> (batch_size, seq_length, embed_dimensions)*(embed_dimensions, num_classes/queries) -->
                #     (batch_size, seq_length, num_classes/queries)
                attention_weights = self.softmax(self.queries(U))
                # S = A^T*X + b_a ==> (batch_size, num_classes/queries, seq_length)*(batch_size, seq_length, embed_dimensions) -->
                #     (batch_size, num_classes/queries, embed_dimension)
                S = torch.bmm(attention_weights.transpose(1, 2), X)
                return S, attention_weights
if __name__ == "__main__":
model_class, tokenizer_class, pretrained_weights = BertModel, BertTokenizer, 'bert-base-cased'
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
# model = model_class.from_pretrained(pretrained_weights)
class_descrip_file = '../../Inputs/class_descriptions_android.csv'
package_descrip_file = '../../Inputs/package_descriptions_android.csv'
ignore_if_next_contains = [r'^javax?\..*', r'^com\..*', r'^dalvic\..*', r'^junit\..*', r'^j\..*', r'^junit\..*']
package_descrip_cols = ['QualPackageName', 'NumMethods', 'Description']
class_descrip_cols = ['QualClassName', 'NumMethods', 'Description']
cols_4_class_sig = (0, 2)
cols_4_package_sig = (0, 1)
create_cache = False
cache_name = 'bert_debug_cache.pickle'
if create_cache:
df = pd.read_pickle('../../Inputs/Caches/cache2.pickle')
class_descrips = get_df_from_csv(class_descrip_file, aggregate_cols=cols_4_class_sig, col_names=class_descrip_cols
, ignore_if_next_contains=ignore_if_next_contains, index_col=class_descrip_cols[0])
package_descrips = get_df_from_csv(package_descrip_file, aggregate_cols=cols_4_package_sig,
col_names=package_descrip_cols
, ignore_if_next_contains=ignore_if_next_contains,
index_col=package_descrip_cols[0], add_period=True)
cols_to_embed = ['Description', "ClassDescription", "PackageDescription"]
df["PackageDescription"] = ''
df['ClassDescription'] = ''
df_qualified_classname = df['QualifiedPackage'].str.cat( df['Classname'].copy(), sep='.')
# print(df_qualified_classname)
for package in package_descrips.index.tolist():
df.loc[df['QualifiedPackage']== package, 'PackageDescription'] = package_descrips.loc[package, 'Description']
for classname in class_descrips.index.tolist():
df.loc[df_qualified_classname== classname, 'ClassDescription'] = class_descrips.loc[classname, 'Description']
def concat_str_cols(X:pd.DataFrame, columns:list=None):
combined_data = pd.Series(index=X.index, dtype='object')
combined_data = combined_data.fillna('')
for col in columns:
combined_data= combined_data.str.cat(X[col].copy().fillna(''), sep=' ')
return combined_data
s = concat_str_cols(df, cols_to_embed)
df2 = pd.DataFrame(index=s.index)
df2['X'] = s.copy()
df2['y'] = df['Source/Sink'].copy()
bow = BoWVectorizer()
mf_cols = bow.find_man_feat_cols(df)
df2[mf_cols] = df[mf_cols].copy()
df2.to_pickle(cache_name)
df = df2
else:
print('reading cache')
df = pd.read_pickle(cache_name)
bow = BoWVectorizer()
mf_cols = bow.find_man_feat_cols(df)
bm = BERTModel(model_class, tokenizer_class, pretrained_weights, len(mf_cols), trainable_bert_layers=(7,12))
bow = BoWVectorizer()
mf_cols = bow.find_man_feat_cols(df)
bm.train(df['X'],df['y'], man_feats = df[mf_cols])
# for little_s, enc in zip(s[:10],t['input_ids']):
# print(re.sub(r"\n", '',little_s))
# print(enc)
# print(len([e for e in enc if e != 0]))
# text = df['Description'].to_list()
# print(text[0])
# encs = tokenizer.batch_encode_plus(text[:2],add_special_tokens=True, max_length=512, pad_to_max_length=True, return_token_type_ids=True)
# doc_lens = []
# input_ids = torch.tensor(encs['input_ids'] , dtype=torch.long)
# print(input_ids.size())
# token_type_ids = torch.tensor(encs['token_type_ids'], dtype=torch.long)
# attention_mask = torch.tensor(encs['attention_mask'], dtype=torch.long)
# # model = model_class.from_pretrained(pretrained_weights)
# # last_hidden_state, pooler_output = model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
# # print(last_hidden_state.size())
# custom_bert = BERTModel(pretrained_weights, 768, 512, 3)
# custom_bert.forward(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
|
[
"chris414862@gmail.com"
] |
chris414862@gmail.com
|
dec87b22a15123ba554802e7dab90b8df69397a1
|
1a375cf927f2ffd33ef9087637d04ce6c83de566
|
/demo/backtest_f/main.py
|
bbb8fbc0b513361127ffd2d557cb3e313cb85193
|
[
"MIT"
] |
permissive
|
lgh0504/snake
|
af2bd25642f57a35442cfd41161d489db12019b9
|
0fd9929995327a1c23486c0dbc5421e18791eb88
|
refs/heads/master
| 2023-03-18T18:13:53.237850
| 2017-12-18T01:48:43
| 2017-12-18T01:48:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
# coding: utf-8
from datetime import datetime
import pandas_datareader as pdr
from f.portfolios import MarketOnClosePortfolio
from f.strategy.ma_cross import MovingAverageCrossStrategy
# from f.strategy.random_forecast import RandomForecastingStrategy
def run_backtest(symbol, date_range=(datetime(2016, 8, 29), datetime.now())):
    # get data from yahoo
    bars = pdr.get_data_yahoo(symbol, start=date_range[0], end=date_range[1])
    print('stock bars: ', bars.head(10))
    # create strategy class and get signals
    strategy_inst = MovingAverageCrossStrategy(symbol, bars)
    signals = strategy_inst.generate_signals()
    print('signals', signals.head())
    # create a portfolio
    portfolio_inst = MarketOnClosePortfolio(
        symbol, bars, signals, initial_capital=100000.0, shares_per_position=1000
    )
    returns = portfolio_inst.backtest_portfolio()
    print('head returns:', returns.head(10))
    print('tail returns:', returns.tail(10))
    return returns


if __name__ == '__main__':
    run_backtest(
        # symbol='000333.SZ',
        # symbol='000034.SZ',
        symbol='600016.SH',
    )
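    # Hedged variation (same signature as run_backtest above; the dates are
    # illustrative assumptions):
    # run_backtest(symbol='600016.SH',
    #              date_range=(datetime(2017, 1, 1), datetime(2017, 6, 30)))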
|
[
"lbj.world@gmail.com"
] |
lbj.world@gmail.com
|
4e6b2f5305a95ee8bd724518db163a25821cd145
|
6e637218f6bb9c9e9ede685665c47c655ee73c35
|
/addon/addon_dependencies/mod/46461-46487M/space_userpref.py
|
269ef41ed1fbed76e8c66c1294cc764d6828a137
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
littleneo/Blender-addon-dependencies
|
e5ffeff27e8f26b7f184c59fa70690f60f952167
|
cafd484ec42c3c5b603de7e04442a201f48375ea
|
refs/heads/master
| 2020-05-27T17:41:38.494409
| 2012-08-20T16:05:05
| 2012-08-20T16:05:05
| 2,091,693
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40,958
|
py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
lnmod = ('46461-46487M',(0, 72))
import bpy
from bpy.types import Header, Menu, Panel
import os
import addon_utils
def ui_items_general(col, context):
""" General UI Theme Settings (User Interface)
"""
row = col.row()
subsplit = row.split(percentage=0.95)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(context, "outline")
colsub.row().prop(context, "item", slider=True)
colsub.row().prop(context, "inner", slider=True)
colsub.row().prop(context, "inner_sel", slider=True)
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(context, "text")
colsub.row().prop(context, "text_sel")
colsub.prop(context, "show_shaded")
subsub = colsub.column(align=True)
subsub.active = context.show_shaded
subsub.prop(context, "shadetop")
subsub.prop(context, "shadedown")
col.separator()
def opengl_lamp_buttons(column, lamp):
split = column.split(percentage=0.1)
split.prop(lamp, "use", text="", icon='OUTLINER_OB_LAMP' if lamp.use else 'LAMP_DATA')
col = split.column()
col.active = lamp.use
row = col.row()
row.label(text="Diffuse:")
row.prop(lamp, "diffuse_color", text="")
row = col.row()
row.label(text="Specular:")
row.prop(lamp, "specular_color", text="")
col = split.column()
col.active = lamp.use
col.prop(lamp, "direction", text="")
class USERPREF_HT_header(Header):
bl_space_type = 'USER_PREFERENCES'
def draw(self, context):
layout = self.layout
layout.template_header(menus=False)
userpref = context.user_preferences
layout.operator_context = 'EXEC_AREA'
layout.operator("wm.save_homefile", text="Save As Default")
layout.operator_context = 'INVOKE_DEFAULT'
if userpref.active_section == 'INPUT':
layout.operator("wm.keyconfig_import")
layout.operator("wm.keyconfig_export")
elif userpref.active_section == 'ADDONS':
layout.operator("wm.addon_install")
layout.menu("USERPREF_MT_addons_dev_guides")
elif userpref.active_section == 'THEMES':
layout.operator("ui.reset_default_theme")
class USERPREF_PT_tabs(Panel):
bl_label = ""
bl_space_type = 'USER_PREFERENCES'
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
layout.prop(userpref, "active_section", expand=True)
class USERPREF_MT_interaction_presets(Menu):
bl_label = "Presets"
preset_subdir = "interaction"
preset_operator = "script.execute_preset"
draw = Menu.draw_preset
class USERPREF_MT_appconfigs(Menu):
bl_label = "AppPresets"
preset_subdir = "keyconfig"
preset_operator = "wm.appconfig_activate"
def draw(self, context):
self.layout.operator("wm.appconfig_default", text="Blender (default)")
# now draw the presets
Menu.draw_preset(self, context)
class USERPREF_MT_splash(Menu):
bl_label = "Splash"
def draw(self, context):
layout = self.layout
split = layout.split()
row = split.row()
row.label("")
row = split.row()
row.label("Interaction:")
# XXX, no redraws
# text = bpy.path.display_name(context.window_manager.keyconfigs.active.name)
# if not text:
# text = "Blender (default)"
row.menu("USERPREF_MT_appconfigs", text="Preset")
class USERPREF_PT_interface(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Interface"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'INTERFACE')
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
view = userpref.view
row = layout.row()
col = row.column()
col.label(text="Display:")
col.prop(view, "show_tooltips")
col.prop(view, "show_tooltips_python")
col.prop(view, "show_object_info", text="Object Info")
col.prop(view, "show_large_cursors")
col.prop(view, "show_view_name", text="View Name")
col.prop(view, "show_playback_fps", text="Playback FPS")
col.prop(view, "use_global_scene")
col.prop(view, "object_origin_size")
col.separator()
col.separator()
col.separator()
col.prop(view, "show_mini_axis", text="Display Mini Axis")
sub = col.column()
sub.active = view.show_mini_axis
sub.prop(view, "mini_axis_size", text="Size")
sub.prop(view, "mini_axis_brightness", text="Brightness")
col.separator()
row.separator()
row.separator()
col = row.column()
col.label(text="View Manipulation:")
col.prop(view, "use_mouse_auto_depth")
col.prop(view, "use_zoom_to_mouse")
col.prop(view, "use_rotate_around_active")
col.prop(view, "use_global_pivot")
col.prop(view, "use_camera_lock_parent")
col.separator()
col.prop(view, "use_auto_perspective")
col.prop(view, "smooth_view")
col.prop(view, "rotation_angle")
col.separator()
col.separator()
col.label(text="2D Viewports:")
col.prop(view, "view2d_grid_spacing_min", text="Minimum Grid Spacing")
col.prop(view, "timecode_style")
row.separator()
row.separator()
col = row.column()
#Toolbox doesn't exist yet
#col.label(text="Toolbox:")
#col.prop(view, "show_column_layout")
#col.label(text="Open Toolbox Delay:")
#col.prop(view, "open_left_mouse_delay", text="Hold LMB")
#col.prop(view, "open_right_mouse_delay", text="Hold RMB")
col.prop(view, "show_manipulator")
sub = col.column()
sub.active = view.show_manipulator
sub.prop(view, "manipulator_size", text="Size")
sub.prop(view, "manipulator_handle_size", text="Handle Size")
sub.prop(view, "manipulator_hotspot", text="Hotspot")
col.separator()
col.separator()
col.separator()
col.label(text="Menus:")
col.prop(view, "use_mouse_over_open")
col.label(text="Menu Open Delay:")
col.prop(view, "open_toplevel_delay", text="Top Level")
col.prop(view, "open_sublevel_delay", text="Sub Level")
col.separator()
col.prop(view, "show_splash")
if os.name == 'nt':
col.prop(view, "quit_dialog")
class USERPREF_PT_edit(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Edit"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'EDITING')
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
edit = userpref.edit
row = layout.row()
col = row.column()
col.label(text="Link Materials To:")
col.prop(edit, "material_link", text="")
col.separator()
col.separator()
col.separator()
col.label(text="New Objects:")
col.prop(edit, "use_enter_edit_mode")
col.label(text="Align To:")
col.prop(edit, "object_align", text="")
col.separator()
col.separator()
col.separator()
col.label(text="Undo:")
col.prop(edit, "use_global_undo")
col.prop(edit, "undo_steps", text="Steps")
col.prop(edit, "undo_memory_limit", text="Memory Limit")
row.separator()
row.separator()
col = row.column()
col.label(text="Grease Pencil:")
col.prop(edit, "grease_pencil_manhattan_distance", text="Manhattan Distance")
col.prop(edit, "grease_pencil_euclidean_distance", text="Euclidean Distance")
#~ col.prop(edit, "use_grease_pencil_simplify_stroke", text="Simplify Stroke")
col.prop(edit, "grease_pencil_eraser_radius", text="Eraser Radius")
col.prop(edit, "use_grease_pencil_smooth_stroke", text="Smooth Stroke")
col.separator()
col.separator()
col.separator()
col.label(text="Playback:")
col.prop(edit, "use_negative_frames")
col.separator()
col.separator()
col.separator()
col.label(text="Animation Editors:")
col.prop(edit, "fcurve_unselected_alpha", text="F-Curve Visibility")
row.separator()
row.separator()
col = row.column()
col.label(text="Keyframing:")
col.prop(edit, "use_visual_keying")
col.prop(edit, "use_keyframe_insert_needed", text="Only Insert Needed")
col.separator()
col.prop(edit, "use_auto_keying", text="Auto Keyframing:")
sub = col.column()
#~ sub.active = edit.use_keyframe_insert_auto # incorrect, time-line can enable
sub.prop(edit, "use_keyframe_insert_available", text="Only Insert Available")
col.separator()
col.label(text="New F-Curve Defaults:")
col.prop(edit, "keyframe_new_interpolation_type", text="Interpolation")
col.prop(edit, "keyframe_new_handle_type", text="Handles")
col.prop(edit, "use_insertkey_xyz_to_rgb", text="XYZ to RGB")
col.separator()
col.separator()
col.separator()
col.label(text="Transform:")
col.prop(edit, "use_drag_immediately")
row.separator()
row.separator()
col = row.column()
col.prop(edit, "sculpt_paint_overlay_color", text="Sculpt Overlay Color")
col.separator()
col.separator()
col.separator()
col.label(text="Duplicate Data:")
col.prop(edit, "use_duplicate_mesh", text="Mesh")
col.prop(edit, "use_duplicate_surface", text="Surface")
col.prop(edit, "use_duplicate_curve", text="Curve")
col.prop(edit, "use_duplicate_text", text="Text")
col.prop(edit, "use_duplicate_metaball", text="Metaball")
col.prop(edit, "use_duplicate_armature", text="Armature")
col.prop(edit, "use_duplicate_lamp", text="Lamp")
col.prop(edit, "use_duplicate_material", text="Material")
col.prop(edit, "use_duplicate_texture", text="Texture")
#col.prop(edit, "use_duplicate_fcurve", text="F-Curve")
col.prop(edit, "use_duplicate_action", text="Action")
col.prop(edit, "use_duplicate_particle", text="Particle")
class USERPREF_PT_system(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "System"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'SYSTEM')
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
system = userpref.system
split = layout.split()
# 1. Column
column = split.column()
colsplit = column.split(percentage=0.85)
col = colsplit.column()
col.label(text="General:")
col.prop(system, "dpi")
col.prop(system, "frame_server_port")
col.prop(system, "scrollback", text="Console Scrollback")
col.separator()
col.separator()
col.label(text="Sound:")
col.row().prop(system, "audio_device", expand=True)
sub = col.column()
sub.active = system.audio_device != 'NONE'
#sub.prop(system, "use_preview_images")
sub.prop(system, "audio_channels", text="Channels")
sub.prop(system, "audio_mixing_buffer", text="Mixing Buffer")
sub.prop(system, "audio_sample_rate", text="Sample Rate")
sub.prop(system, "audio_sample_format", text="Sample Format")
col.separator()
col.separator()
col.label(text="Screencast:")
col.prop(system, "screencast_fps")
col.prop(system, "screencast_wait_time")
col.separator()
col.separator()
if hasattr(system, 'compute_device'):
col.label(text="Compute Device:")
col.row().prop(system, "compute_device_type", expand=True)
sub = col.row()
sub.active = system.compute_device_type != 'CPU'
sub.prop(system, "compute_device", text="")
# 2. Column
column = split.column()
colsplit = column.split(percentage=0.85)
col = colsplit.column()
col.label(text="OpenGL:")
col.prop(system, "gl_clip_alpha", slider=True)
col.prop(system, "use_mipmaps")
col.prop(system, "use_16bit_textures")
col.label(text="Anisotropic Filtering")
col.prop(system, "anisotropic_filter", text="")
col.prop(system, "use_vertex_buffer_objects")
# Anti-aliasing is disabled as it breaks border/lasso select
#~ col.prop(system, "use_antialiasing")
col.label(text="Window Draw Method:")
col.prop(system, "window_draw_method", text="")
col.label(text="Text Draw Options:")
col.prop(system, "use_text_antialiasing")
col.label(text="Textures:")
col.prop(system, "gl_texture_limit", text="Limit Size")
col.prop(system, "texture_time_out", text="Time Out")
col.prop(system, "texture_collection_rate", text="Collection Rate")
col.separator()
col.separator()
col.separator()
col.label(text="Sequencer:")
col.prop(system, "prefetch_frames")
col.prop(system, "memory_cache_limit")
# 3. Column
column = split.column()
column.label(text="Solid OpenGL lights:")
split = column.split(percentage=0.1)
split.label()
split.label(text="Colors:")
split.label(text="Direction:")
lamp = system.solid_lights[0]
opengl_lamp_buttons(column, lamp)
lamp = system.solid_lights[1]
opengl_lamp_buttons(column, lamp)
lamp = system.solid_lights[2]
opengl_lamp_buttons(column, lamp)
column.separator()
column.label(text="Color Picker Type:")
column.row().prop(system, "color_picker_type", text="")
column.separator()
column.prop(system, "use_weight_color_range", text="Custom Weight Paint Range")
sub = column.column()
sub.active = system.use_weight_color_range
sub.template_color_ramp(system, "weight_color_range", expand=True)
column.separator()
column.prop(system, "use_international_fonts")
if system.use_international_fonts:
column.prop(system, "language")
row = column.row()
row.label(text="Translate:")
row.prop(system, "use_translate_interface", text="Interface")
row.prop(system, "use_translate_tooltips", text="Tooltips")
class USERPREF_MT_interface_theme_presets(Menu):
bl_label = "Presets"
preset_subdir = "interface_theme"
preset_operator = "script.execute_preset"
preset_type = 'XML'
preset_xml_map = (("user_preferences.themes[0]", "Theme"), )
draw = Menu.draw_preset
class USERPREF_PT_theme(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Themes"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
@staticmethod
def _theme_generic(split, themedata):
col = split.column()
def theme_generic_recurse(data):
col.label(data.rna_type.name)
row = col.row()
subsplit = row.split(percentage=0.95)
padding1 = subsplit.split(percentage=0.15)
padding1.column()
subsplit = row.split(percentage=0.85)
padding2 = subsplit.split(percentage=0.15)
padding2.column()
colsub_pair = padding1.column(), padding2.column()
props_type = {}
for i, prop in enumerate(data.rna_type.properties):
if prop.identifier == "rna_type":
continue
props_type.setdefault((prop.type, prop.subtype), []).append(prop)
for props_type, props_ls in sorted(props_type.items()):
if props_type[0] == 'POINTER':
for i, prop in enumerate(props_ls):
theme_generic_recurse(getattr(data, prop.identifier))
else:
for i, prop in enumerate(props_ls):
colsub_pair[i % 2].row().prop(data, prop.identifier)
theme_generic_recurse(themedata)
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'THEMES')
def draw(self, context):
layout = self.layout
theme = context.user_preferences.themes[0]
split_themes = layout.split(percentage=0.2)
sub = split_themes.column()
sub.label(text="Presets:")
subrow = sub.row(align=True)
subrow.menu("USERPREF_MT_interface_theme_presets", text=USERPREF_MT_interface_theme_presets.bl_label)
subrow.operator("wm.interface_theme_preset_add", text="", icon='ZOOMIN')
subrow.operator("wm.interface_theme_preset_add", text="", icon='ZOOMOUT').remove_active = True
sub.separator()
sub.prop(theme, "theme_area", expand=True)
split = layout.split(percentage=0.4)
layout.separator()
layout.separator()
split = split_themes.split()
if theme.theme_area == 'USER_INTERFACE':
col = split.column()
ui = theme.user_interface.wcol_regular
col.label(text="Regular:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_tool
col.label(text="Tool:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_radio
col.label(text="Radio Buttons:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_text
col.label(text="Text:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_option
col.label(text="Option:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_toggle
col.label(text="Toggle:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_num
col.label(text="Number Field:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_numslider
col.label(text="Value Slider:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_box
col.label(text="Box:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_menu
col.label(text="Menu:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_pulldown
col.label(text="Pulldown:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_menu_back
col.label(text="Menu Back:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_tooltip
col.label(text="Tooltip:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_menu_item
col.label(text="Menu Item:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_scroll
col.label(text="Scroll Bar:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_progress
col.label(text="Progress Bar:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_list_item
col.label(text="List Item:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_state
col.label(text="State:")
row = col.row()
subsplit = row.split(percentage=0.95)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "inner_anim")
colsub.row().prop(ui, "inner_anim_sel")
colsub.row().prop(ui, "inner_driven")
colsub.row().prop(ui, "inner_driven_sel")
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "inner_key")
colsub.row().prop(ui, "inner_key_sel")
colsub.row().prop(ui, "blend")
col.separator()
col.separator()
ui = theme.user_interface
col.label("Icons:")
row = col.row()
subsplit = row.split(percentage=0.95)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "icon_file")
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "icon_alpha")
col.separator()
col.separator()
ui = theme.user_interface.panel
col.label("Panels:")
row = col.row()
subsplit = row.split(percentage=0.95)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
rowsub = colsub.row()
rowsub.prop(ui, "show_header")
rowsub.label()
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "header")
layout.separator()
layout.separator()
elif theme.theme_area == 'BONE_COLOR_SETS':
col = split.column()
for i, ui in enumerate(theme.bone_color_sets):
col.label(text="Color Set" + " %d:" % (i + 1)) # i starts from 0
row = col.row()
subsplit = row.split(percentage=0.95)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "normal")
colsub.row().prop(ui, "select")
colsub.row().prop(ui, "active")
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "show_colored_constraints")
else:
self._theme_generic(split, getattr(theme, theme.theme_area.lower()))
class USERPREF_PT_file(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Files"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'FILES')
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
paths = userpref.filepaths
system = userpref.system
split = layout.split(percentage=0.7)
col = split.column()
col.label(text="File Paths:")
colsplit = col.split(percentage=0.95)
col1 = colsplit.split(percentage=0.3)
sub = col1.column()
sub.label(text="Fonts:")
sub.label(text="Textures:")
sub.label(text="Texture Plugins:")
sub.label(text="Sequence Plugins:")
sub.label(text="Render Output:")
sub.label(text="Scripts:")
sub.label(text="Sounds:")
sub.label(text="Temp:")
sub.label(text="Image Editor:")
sub.label(text="Animation Player:")
sub = col1.column()
sub.prop(paths, "font_directory", text="")
sub.prop(paths, "texture_directory", text="")
sub.prop(paths, "texture_plugin_directory", text="")
sub.prop(paths, "sequence_plugin_directory", text="")
sub.prop(paths, "render_output_directory", text="")
sub.prop(paths, "script_directory", text="")
sub.prop(paths, "sound_directory", text="")
sub.prop(paths, "temporary_directory", text="")
sub.prop(paths, "image_editor", text="")
subsplit = sub.split(percentage=0.3)
subsplit.prop(paths, "animation_player_preset", text="")
subsplit.prop(paths, "animation_player", text="")
col.separator()
col.separator()
colsplit = col.split(percentage=0.95)
sub = colsplit.column()
sub.label(text="Author:")
sub.prop(system, "author", text="")
col = split.column()
col.label(text="Save & Load:")
col.prop(paths, "use_relative_paths")
col.prop(paths, "use_file_compression")
col.prop(paths, "use_load_ui")
col.prop(paths, "use_filter_files")
col.prop(paths, "show_hidden_files_datablocks")
col.prop(paths, "hide_recent_locations")
col.prop(paths, "show_thumbnails")
col.separator()
col.separator()
col.prop(paths, "save_version")
col.prop(paths, "recent_files")
col.prop(paths, "use_save_preview_images")
col.label(text="Auto Save:")
col.prop(paths, "use_auto_save_temporary_files")
sub = col.column()
sub.active = paths.use_auto_save_temporary_files
sub.prop(paths, "auto_save_time", text="Timer (mins)")
col.separator()
col.label(text="Scripts:")
col.prop(system, "use_scripts_auto_execute")
col.prop(system, "use_tabs_as_spaces")
from bl_ui.space_userpref_keymap import InputKeyMapPanel
class USERPREF_MT_ndof_settings(Menu):
# accessed from the window key-bindings in C (only)
bl_label = "3D Mouse Settings"
def draw(self, context):
layout = self.layout
input_prefs = context.user_preferences.inputs
layout.separator()
layout.prop(input_prefs, "ndof_sensitivity")
if context.space_data.type == 'VIEW_3D':
layout.separator()
layout.prop(input_prefs, "ndof_show_guide")
layout.separator()
layout.label(text="Orbit options")
if input_prefs.view_rotate_method == 'TRACKBALL':
layout.prop(input_prefs, "ndof_roll_invert_axis")
layout.prop(input_prefs, "ndof_tilt_invert_axis")
layout.prop(input_prefs, "ndof_rotate_invert_axis")
layout.prop(input_prefs, "ndof_zoom_invert")
layout.separator()
layout.label(text="Pan options")
layout.prop(input_prefs, "ndof_panx_invert_axis")
layout.prop(input_prefs, "ndof_pany_invert_axis")
layout.prop(input_prefs, "ndof_panz_invert_axis")
layout.label(text="Zoom options")
layout.prop(input_prefs, "ndof_zoom_updown")
layout.separator()
layout.label(text="Fly options")
layout.prop(input_prefs, "ndof_fly_helicopter", icon='NDOF_FLY')
layout.prop(input_prefs, "ndof_lock_horizon", icon='NDOF_DOM')
class USERPREF_PT_input(Panel, InputKeyMapPanel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Input"
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'INPUT')
def draw_input_prefs(self, inputs, layout):
# General settings
row = layout.row()
col = row.column()
sub = col.column()
sub.label(text="Presets:")
subrow = sub.row(align=True)
subrow.menu("USERPREF_MT_interaction_presets", text=bpy.types.USERPREF_MT_interaction_presets.bl_label)
subrow.operator("wm.interaction_preset_add", text="", icon='ZOOMIN')
subrow.operator("wm.interaction_preset_add", text="", icon='ZOOMOUT').remove_active = True
sub.separator()
sub.label(text="Mouse:")
sub1 = sub.column()
sub1.active = (inputs.select_mouse == 'RIGHT')
sub1.prop(inputs, "use_mouse_emulate_3_button")
sub.prop(inputs, "use_mouse_continuous")
sub.prop(inputs, "drag_threshold")
sub.prop(inputs, "tweak_threshold")
sub.label(text="Select With:")
sub.row().prop(inputs, "select_mouse", expand=True)
sub = col.column()
sub.label(text="Double Click:")
sub.prop(inputs, "mouse_double_click_time", text="Speed")
sub.separator()
sub.prop(inputs, "use_emulate_numpad")
sub.separator()
sub.label(text="Orbit Style:")
sub.row().prop(inputs, "view_rotate_method", expand=True)
sub.label(text="Zoom Style:")
sub.row().prop(inputs, "view_zoom_method", text="")
if inputs.view_zoom_method in {'DOLLY', 'CONTINUE'}:
sub.row().prop(inputs, "view_zoom_axis", expand=True)
sub.prop(inputs, "invert_mouse_zoom")
#sub.prop(inputs, "use_mouse_mmb_paste")
#col.separator()
sub = col.column()
sub.label(text="Mouse Wheel:")
sub.prop(inputs, "invert_zoom_wheel", text="Invert Wheel Zoom Direction")
#sub.prop(view, "wheel_scroll_lines", text="Scroll Lines")
col.separator()
sub = col.column()
sub.label(text="NDOF Device:")
sub.prop(inputs, "ndof_sensitivity", text="NDOF Sensitivity")
row.separator()
def draw(self, context):
layout = self.layout
#import time
#start = time.time()
userpref = context.user_preferences
inputs = userpref.inputs
split = layout.split(percentage=0.25)
# Input settings
self.draw_input_prefs(inputs, split)
# Keymap Settings
self.draw_keymaps(context, split)
#print("runtime", time.time() - start)
class USERPREF_MT_addons_dev_guides(Menu):
bl_label = "Development Guides"
# menu to open web-pages with addons development guides
def draw(self, context):
layout = self.layout
layout.operator("wm.url_open", text="API Concepts", icon='URL').url = "http://wiki.blender.org/index.php/Dev:2.5/Py/API/Intro"
layout.operator("wm.url_open", text="Addon Guidelines", icon='URL').url = "http://wiki.blender.org/index.php/Dev:2.5/Py/Scripts/Guidelines/Addons"
layout.operator("wm.url_open", text="How to share your addon", icon='URL').url = "http://wiki.blender.org/index.php/Dev:Py/Sharing"
class USERPREF_PT_addons(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Addons"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
_support_icon_mapping = {
'OFFICIAL': 'FILE_BLEND',
'COMMUNITY': 'POSE_DATA',
'TESTING': 'MOD_EXPLODE',
}
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'ADDONS')
@staticmethod
def is_user_addon(mod, user_addon_paths):
if not user_addon_paths:
user_script_path = bpy.utils.user_script_path()
if user_script_path is not None:
user_addon_paths.append(os.path.join(user_script_path, "addons"))
user_addon_paths.append(os.path.join(bpy.utils.resource_path('USER'), "scripts", "addons"))
for path in user_addon_paths:
if bpy.path.is_subdir(mod.__file__, path):
return True
return False
@staticmethod
def draw_error(layout, message):
lines = message.split("\n")
box = layout.box()
rowsub = box.row()
rowsub.label(lines[0])
rowsub.label(icon='ERROR')
for l in lines[1:]:
box.label(l)
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
used_ext = {ext.module for ext in userpref.addons}
# collect the categories that can be filtered on
addons = [(mod, addon_utils.module_bl_info(mod)) for mod in addon_utils.modules(addon_utils.addons_fake_modules)]
split = layout.split(percentage=0.2)
col = split.column()
col.prop(context.window_manager, "addon_search", text="", icon='VIEWZOOM')
col.label(text="Supported Level")
col.prop(context.window_manager, "addon_support", expand=True)
col.label(text="Categories")
col.prop(context.window_manager, "addon_filter", expand=True)
col = split.column()
# set in addon_utils.modules(...)
if addon_utils.error_duplicates:
self.draw_error(col,
"Multiple addons using the same name found!\n"
"likely a problem with the script search path.\n"
"(see console for details)",
)
if addon_utils.error_encoding:
self.draw_error(col,
"One or more addons do not have UTF-8 encoding\n"
"(see console for details)",
)
filter = context.window_manager.addon_filter
search = context.window_manager.addon_search.lower()
support = context.window_manager.addon_support
# initialized on demand
user_addon_paths = []
for mod, info in addons:
module_name = mod.__name__
is_enabled = module_name in used_ext
if info["support"] not in support:
continue
# check if addon should be visible with current filters
if ((filter == "All") or
(filter == info["category"]) or
(filter == "Enabled" and is_enabled) or
(filter == "Disabled" and not is_enabled)):
if search and search not in info["name"].lower():
if info["author"]:
if search not in info["author"].lower():
continue
else:
continue
# Addon UI Code
box = col.column().box()
colsub = box.column()
row = colsub.row()
row.operator("wm.addon_expand", icon='TRIA_DOWN' if info["show_expanded"] else 'TRIA_RIGHT', emboss=False).module = module_name
rowsub = row.row()
rowsub.active = is_enabled
rowsub.label(text='%s: %s' % (info['category'], info["name"]))
if info["warning"]:
rowsub.label(icon='ERROR')
# icon showing dependencies (child or parent).
disable_check = module_name
if info["dependencies"] :
rowsub.label(icon='LINKED')
# icon showing support level.
rowsub.label(icon=self._support_icon_mapping.get(info["support"], 'QUESTION'))
if info["childs"] and is_enabled :
row.label(icon='LINKED')
elif is_enabled:
row.operator("wm.addon_disable", icon='CHECKBOX_HLT', text="", emboss=False).module = module_name
else:
row.operator("wm.addon_enable", icon='CHECKBOX_DEHLT', text="", emboss=False).module = module_name
# Expanded UI (only if additional info is available)
if info["show_expanded"]:
if info["description"]:
split = colsub.row().split(percentage=0.15)
split.label(text="Description:")
split.label(text=info["description"])
if info["location"]:
split = colsub.row().split(percentage=0.15)
split.label(text="Location:")
split.label(text=info["location"])
if mod:
split = colsub.row().split(percentage=0.15)
split.label(text="File:")
split.label(text=mod.__file__)
if info["author"]:
split = colsub.row().split(percentage=0.15)
split.label(text="Author:")
split.label(text=info["author"])
if info["version"]:
split = colsub.row().split(percentage=0.15)
split.label(text="Version:")
split.label(text='.'.join(str(x) for x in info["version"]))
if info["dependencies"]:
split = colsub.row().split(percentage=0.15)
split.label(text='Dependencies:')
parent_list, msg = addon_utils.parent_list(info["dependencies"])
if parent_list :
txt = ''
for n,v in parent_list : txt += '%s v%s, '%(n,'.'.join(str(x) for x in v) )
else :
txt = msg
split.label(text=txt[:-2])
if info["childs"] :
split = colsub.row().split(percentage=0.15)
split.label(text='In use by:')
txt = ''
for n in info["childs"] : txt += '%s, '%(n)
split.label(text=txt[:-2])
if info["warning"]:
split = colsub.row().split(percentage=0.15)
split.label(text="Warning:")
split.label(text=' ' + info["warning"], icon='ERROR')
user_addon = USERPREF_PT_addons.is_user_addon(mod, user_addon_paths)
tot_row = bool(info["wiki_url"]) + bool(info["tracker_url"]) + bool(user_addon)
if tot_row:
split = colsub.row().split(percentage=0.15)
split.label(text="Internet:")
if info["wiki_url"]:
split.operator("wm.url_open", text="Link to the Wiki", icon='HELP').url = info["wiki_url"]
if info["tracker_url"]:
split.operator("wm.url_open", text="Report a Bug", icon='URL').url = info["tracker_url"]
if user_addon:
split.operator("wm.addon_remove", text="Remove", icon='CANCEL').module = mod.__name__
for i in range(4 - tot_row):
split.separator()
# Append missing scripts
# First collect scripts that are used but have no script file.
module_names = {mod.__name__ for mod, info in addons}
missing_modules = {ext for ext in used_ext if ext not in module_names}
if missing_modules and filter in {"All", "Enabled"}:
col.column().separator()
col.column().label(text="Missing script files")
module_names = {mod.__name__ for mod, info in addons}
for module_name in sorted(missing_modules):
is_enabled = module_name in used_ext
# Addon UI Code
box = col.column().box()
colsub = box.column()
row = colsub.row()
row.label(text=module_name, icon='ERROR')
if is_enabled:
row.operator("wm.addon_disable", icon='CHECKBOX_HLT', text="", emboss=False).module = module_name
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)
|
[
"jerome.le.chat@free.fr"
] |
jerome.le.chat@free.fr
|
29459d2f2495bd6eabb00953ccd6e2064a3749f5
|
d82a8844c7d46c752e567cca41a8ae1c15c975f7
|
/API/urls.py
|
aaae4d1d1c0b11959a544fed6876085e896c1700
|
[] |
no_license
|
golammahmud/job_evaluations_project
|
f1be9f8f8b27c0f9db6539294ccff25254ff08f3
|
fe362f2d6bc57e1d550c39263312ef046eb7754c
|
refs/heads/master
| 2023-08-04T10:20:59.442703
| 2021-09-27T02:31:03
| 2021-09-27T02:31:03
| 410,347,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from django.contrib import admin
from django.urls import path,include
from rest_framework import routers
from .views import UserInputView,UserBasedInputView
from rest_framework_simplejwt.views import TokenObtainPairView,TokenRefreshView
router=routers.DefaultRouter()
router.register('all-userinputs',UserInputView)
router.register('user-based-inputs',UserBasedInputView)
urlpatterns = [
    path('', include(router.urls)),
    path('api-auth/', include('rest_framework.urls')),
    path('get_token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),  # get token
    path('token_refresh/', TokenRefreshView.as_view(), name='token_refresh'),  # get refresh token
]
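# Hedged usage sketch of the JWT endpoints registered above (paths are from this
# file; request/response payloads follow the usual simplejwt shape, an assumption):
#   POST /get_token/      {"username": "...", "password": "..."} -> {"refresh": "...", "access": "..."}
#   POST /token_refresh/  {"refresh": "..."}                     -> {"access": "..."}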
|
[
"golam.mahmud99@gmail.com"
] |
golam.mahmud99@gmail.com
|
bec4636056c0dc596b344cbdca2e4857ec559ff4
|
41299f375dbd79fc6e1163333de11a27623ab7fd
|
/server/dbpedia/__init__.py
|
cc74d451e46371ba2a1e49114ba091f920692a32
|
[] |
no_license
|
MatrixReloaded/ArtViz
|
b18315562f30e2f0388d824ee9fcdf02fcca3591
|
b479079287a4e3f82fb1e6f9b1b223ef977af73e
|
refs/heads/master
| 2021-01-14T09:45:47.122465
| 2015-10-03T17:02:31
| 2015-10-03T17:11:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
"""
DBpedia quepy.
"""
from basic import *
from people import *
from BasicQuestions import *
|
[
"oana.balaceanu@ymail.com"
] |
oana.balaceanu@ymail.com
|
0873be18a9c6f1322e01559626604b34c5ef88c1
|
fea6ceb798d612368a27888e6490b4f91c04384f
|
/continue.py
|
bdfc1b2cc6ab4f708e4b8266f81d187ed8caf26f
|
[] |
no_license
|
neogeolee/PythonWorkspace
|
b4a8d8cf0ef451bf3bc0e00ccaecaf48253bc0b8
|
f8b9aff2ce821142990acac1cd2406bbe140ab4b
|
refs/heads/master
| 2022-12-17T11:30:29.974230
| 2020-09-22T16:28:58
| 2020-09-22T16:28:58
| 297,708,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
absent = [2, 5]  # absent students
no_book = [7]  # didn't bring the book

for student in range(1, 11):
    if student in absent:
        continue
    elif student in no_book:
        print("Class ends here for today. Student {0}, follow me.".format(student))
        break
    print("{0}, read the book.".format(student))
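# Expected output (hedged): students 2 and 5 are skipped; 7 ends the loop early:
#   1, read the book.
#   3, read the book.
#   4, read the book.
#   6, read the book.
#   Class ends here for today. Student 7, follow me.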
|
[
"neogeolee@nate.com"
] |
neogeolee@nate.com
|
f7863d8927d006aaf6bb1f2450af7fe6550ab070
|
e34d4bf879910b8f41068c1efb90915897e53d53
|
/system_design_ladder/GeoHashII.py
|
b15bec1dd5ca21a631b684b5a96092a0772cec5f
|
[] |
no_license
|
ZhouningMan/LeetCodePython
|
6cfc30f0b76f6162502410fef5639fde4801bd74
|
cad9585c440efb329c9321648f94c58ded198438
|
refs/heads/master
| 2020-12-10T03:53:48.824344
| 2020-01-13T02:29:02
| 2020-01-13T02:29:02
| 233,494,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
class GeoHash:
    BASE32 = "0123456789bcdefghjkmnpqrstuvwxyz"

    """
    @param: geohash: geohash a base32 string
    @return: latitude and longitude a location coordinate pair
    """
    def decode(self, geohash):
        binary = self._to_bin(geohash)
        # Even bit positions encode longitude, odd positions encode latitude.
        lon_bin = [binary[i] for i in range(0, len(binary), 2)]
        lat_bin = [binary[i] for i in range(1, len(binary), 2)]
        longitude = self._bin_to_val(-180, 180, lon_bin)
        latitude = self._bin_to_val(-90, 90, lat_bin)
        return latitude, longitude

    def _to_bin(self, geohash):
        binary = ''
        for c in geohash:
            idx = GeoHash.BASE32.index(c)
            b = ''
            for i in range(5):
                b = str(idx % 2) + b
                idx = idx // 2
            binary += b
        return binary

    def _bin_to_val(self, low, high, binary):
        for b in binary:
            mid = (high + low) / 2
            if b == '1':  # our value is higher
                low = mid
            else:  # our value is lower
                high = mid
        return (low + high) / 2


if __name__ == '__main__':
    geoHash = GeoHash()
    print(geoHash.decode("wx4g0s"))
|
[
"linfenglee321@gmail.com"
] |
linfenglee321@gmail.com
|
5e6f288758fa99fdf7f7a34041decfca9b7f7f42
|
dac906538145808a71e94e030d63c6f20753977a
|
/webapp/models.py
|
1816f0d14f43481ce923fe42af4e351a6fad5b98
|
[] |
no_license
|
nikofil/NMProject
|
3a0846b72cf66afb140feff9674d053e144f8087
|
150971c67e509b2bdc874b85c1f6abda4e27c793
|
refs/heads/master
| 2020-12-31T05:24:39.311930
| 2015-07-01T10:16:26
| 2015-07-01T10:16:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,832
|
py
|
from django.db import models
def toint(x):
try:
return int(x)
except ValueError:
return -1
def tofloat(x):
try:
return float(x)
except ValueError:
return -1.0
class BaseStation(models.Model):
# describes a base station record
rid = models.IntegerField(default=-1)
email = models.CharField(max_length=100)
operator = models.CharField(max_length=100)
mcc = models.IntegerField(default=-1)
mnc = models.IntegerField(default=-1)
cid = models.IntegerField(default=-1)
lac = models.IntegerField(default=-1)
latitude = models.FloatField(default=-1.0)
longitude = models.FloatField(default=-1.0)
timestamp = models.DateTimeField()
def __str__(self):
return str(self.rid) + " " + self.email
def setdata(self, data):
self.rid = toint(data[0])
self.email = data[1]
self.operator = data[2]
self.mcc = toint(data[3])
self.mnc = toint(data[4])
self.cid = toint(data[5])
self.lac = toint(data[6])
self.latitude = tofloat(data[7])
self.longitude = tofloat(data[8])
self.timestamp = data[9] + "+03:00"
class BatteryStatus(models.Model):
# describes a battery status record
rid = models.IntegerField(default=-1)
email = models.CharField(max_length=100)
level = models.IntegerField(default=-1)
plugged = models.IntegerField(default=-1)
temperature = models.IntegerField(default=-1)
voltage = models.IntegerField(default=-1)
timestamp = models.DateTimeField()
def __str__(self):
return str(self.rid) + " " + self.email
def setdata(self, data):
self.rid = toint(data[0])
self.email = data[1]
self.level = toint(data[2])
self.plugged = toint(data[3])
self.temperature = toint(data[4])
self.voltage = toint(data[5])
self.timestamp = data[6] + "+03:00"
class GPSStatus(models.Model):
#describes a GPS position record
rid = models.IntegerField(default=-1)
email = models.CharField(max_length=100)
latitude = models.FloatField(default=-1.0)
longitude = models.FloatField(default=-1.0)
timestamp = models.DateTimeField()
def __str__(self):
return str(self.rid) + " " + self.email
def setdata(self, data):
self.rid = toint(data[0])
self.email = data[1]
self.latitude = tofloat(data[2])
self.longitude = tofloat(data[3])
self.timestamp = data[4] + "+03:00"
class WifiPos(models.Model):
#descibes a wifi position (average of positions from wifi statuses)
ssid = models.CharField(max_length=100)
bssid = models.CharField(max_length=100)
latitude = models.FloatField(default=-1.0)
longitude = models.FloatField(default=-1.0)
def __str__(self):
return self.ssid + " - " + self.bssid
class WifiStatus(models.Model):
#descibes a wifi status record
rid = models.IntegerField(default=-1)
email = models.CharField(max_length=100)
ssid = models.CharField(max_length=100)
bssid = models.CharField(max_length=100)
level = models.IntegerField(default=-1)
frequency = models.IntegerField(default=-1)
latitude = models.FloatField(default=-1.0)
longitude = models.FloatField(default=-1.0)
timestamp = models.DateTimeField()
realpos = models.ForeignKey(WifiPos, null=True, blank=True)
def __str__(self):
return str(self.rid) + " " + self.email
def setdata(self, data):
self.rid = toint(data[0])
self.email = data[1]
self.ssid = data[2]
self.bssid = data[3]
self.level = toint(data[4])
self.frequency = toint(data[5])
self.latitude = tofloat(data[6])
self.longitude = tofloat(data[7])
self.timestamp = data[8] + "+03:00"
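# A minimal usage sketch (not part of the original module), assuming a parsed
# CSV row in the exact field order that setdata() expects; the values are
# illustrative:
#
#   row = ["7", "a@b.com", "vodafone", "202", "5", "1234", "99",
#          "38.0", "23.7", "2015-06-01 12:00:00"]
#   bs = BaseStation()
#   bs.setdata(row)
#   bs.save()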
|
[
"aesmade@gmail.com"
] |
aesmade@gmail.com
|
8dbfd84f504b14bdf50f58fb927e9dff65dae76d
|
4714f19916c27a49d3a29f9bd96bdf92ca5affea
|
/extensions/sympyex/__init__.py
|
223f96e875e112503e4003a6481b283447616b09
|
[] |
no_license
|
Sroka/jupyter_maths
|
803c6834eff5186f8262cbc5246c9aca80dbec41
|
81c385cceae192c23f3c33ccb203708b7fe349d6
|
refs/heads/master
| 2020-03-22T08:11:20.482729
| 2019-03-03T10:19:31
| 2019-03-03T10:19:31
| 139,750,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
from sympy import Expr, Equality, latex
from IPython.display import display, Latex
def expressionEqualityExtension(self: Expr, function):
return Equality(self, function, evaluate=False)
def latexify(self: Expr, mode='plain'):
return latex(self, mode=mode)
def latexify_inline(self: Expr):
return latex(self, mode='inline')
def latexify_equation(self: Expr):
return latex(self, mode='equation')
def display_latex(latex: str):
return display(Latex(latex))
Expr.eq = expressionEqualityExtension
Expr.latex = latexify
Expr.latex_inline = latexify_inline
Expr.latex_equation = latexify_equation
del expressionEqualityExtension
del latexify
del latexify_inline
del latexify_equation
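# A short usage sketch (not part of the original module), assuming this
# extension has been imported so the Expr monkey-patches are in place:
#
#   from sympy import symbols
#   x = symbols('x')
#   x.eq(x ** 2)                            # unevaluated Equality(x, x**2)
#   display_latex((x ** 2).latex_inline())  # renders $x^{2}$ in a notebook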
|
[
"srokowski.maciej@gmail.com"
] |
srokowski.maciej@gmail.com
|
1c2f7f5a73f04021dcfaf564920e8a0ffd7f0374
|
93c43eb538a14261f1af29e91d1770a99184eb82
|
/adding milk routes v2.py
|
8b5e441034fca3677075ea48e0b054e984fc133c
|
[] |
no_license
|
VS-DavidSouth/FMD_Truck_python
|
e61bc0c2b278ee52a8848b5eed457ce7c1c775b0
|
6d241f454379936b7192d2422db19bc507d09e1c
|
refs/heads/master
| 2020-03-27T05:41:15.030656
| 2018-08-24T21:03:56
| 2018-08-24T21:03:56
| 146,039,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,383
|
py
|
#adding milk routes v2.py
#Created by David South -- Last updated: 6/1/17
#Adapted from "adding routes v3" for the Feed Truck Model
#Purpose: to create a Feature Class for each region with a specific number
#of blank rows with specific fields that will be used to make routes in the VRP
#(Vehicle Routing Problem) in the Network Analyst addon in ArcMap.
#Note: this can be run in the Python IDE or in the arcpy window.
######################SETUP###################
import arcpy, os
from arcpy import env
#set workplace environment
ws = r'F:\FMD_Project\Davids_work\Milk_Truck_Model\Python'
env.workspace = ws
env.overwriteOutput = True
######################PARAMETERS##############
#max capacity, in gallons per delivery truck
capacities = 6500
#location of routes folder
routes_folder = r'F:\FMD_Project\Davids_work\Milk_Truck_Model\Routes'
#location of the creameries folder
creameries_folder = r'F:\FMD_Project\Davids_work\Milk_Truck_Model\Creameries'
#location of the creameries file
creameries = r'F:\FMD_Project\Davids_work\Milk_Truck_Model\Creameries\creameries_v4_3.shp'
#define fields to search in the creameries file
fields1 = ['FID', 'Trucks_per']
#define fields for the routes file (EStartTime and LStartTime fields are for
#determining which day the route will run, Earliest and Latest start time)
fields2 = ['Name', 'StartDepot', 'EndDepot','EStartTime', 'LStartTime', \
'Capacities']
#these fields were not used but can be added in:
#'MaxOrder'
#name of the new file
newFile = 'MTM_routes.shp' #MTM means Milk Truck Model
newFileFull = os.path.join(routes_folder, newFile)
#define days of the week that the routes can run on. Note there is one extra
#day, this is to incorporate a 24 hour period for each day.
date = ['1994-08-22', '1994-08-23', '1994-08-24']
#define days of the week
DotW = ['Mon', 'Tues']
####################ACTUAL CODE#################
#count how many Depots there are in the region
creameries_C = arcpy.GetCount_management(in_rows= creameries)
creameries_Count = int(creameries_C.getOutput(0))
#define blank lists
trucks_per_creamery = []
#make a search cursor to save the info from the creameries and save it to a list
with arcpy.da.SearchCursor(creameries, fields1) as cursor1:
for row_1 in cursor1:
trucks_per_creamery += [row_1[1]]
#create a new completely blank shapefile for the routes
arcpy.CreateFeatureclass_management(out_path= \
routes_folder, out_name= newFile, geometry_type="POLYLINE", template= "", \
has_m="DISABLED", has_z="DISABLED")
#add new fields
for num1 in range(0, len(fields2)):
arcpy.AddField_management(newFileFull, fields2[num1], "TEXT", "", "", 15)
#define a cursor
cursor2 = arcpy.da.InsertCursor(newFileFull, fields2)
##add in a buncha blank rows for the routes##
#make two copies of the routes, one for each 24 hour period
for s in range (0, len(date)-1):
    #do this for each creamery
for q in range (0, creameries_Count):
        #do this for each route
for p in range (0, trucks_per_creamery[q]):
#fill the fields from fields2 with the following info
cursor2.insertRow(['Route_' + str(q)+ '_' + str(p) + DotW[s], \
str(q), str(q), date[s], date[s+1], str(capacities)])
#get outta here cursor. You ain't gotta go home, but you can't stay here.
del cursor2
print "Completed."
|
[
"12001003523326@FEDIDCARD.GOV"
] |
12001003523326@FEDIDCARD.GOV
|
30afeecf7a442f626392bcc9b54728254bb8a8be
|
60d5ea4f007d49768d250ef394003f554003e4d0
|
/python/Linked List/142.Linked List Cycle II.py
|
dec51f534aabccb931d8e8932d39d11aac643c6f
|
[] |
no_license
|
EvanJamesMG/Leetcode
|
dd7771beb119ea1250dbb3b147a09053298cd63b
|
fa638c7fda3802e9f4e0751a2c4c084edf09a441
|
refs/heads/master
| 2021-01-10T17:11:10.896393
| 2017-12-01T16:04:44
| 2017-12-01T16:04:44
| 46,968,756
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
# coding=utf-8
'''
Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
Note: Do not modify the linked list.
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
'''
Use fast and slow pointers. If the list has a cycle, the two pointers must
meet inside it; then move the slow pointer back to the head and advance both
at the same speed, and they meet again at the node where the cycle begins.
In the diagram (http://www.cnblogs.com/zuoyuan/p/3701877.html), let K be the
distance from head to the cycle entry, M the distance from the entry to the
meeting point of fast and slow, and L the cycle length. When they meet, fast
has traveled Lfast and slow has traveled Lslow, where:
Lslow = K + M;  Lfast = K + M + n*L (n a positive integer);  Lfast = 2*Lslow
It follows that Lslow = n*L and K = n*L - M.
Now move slow back to head while fast stays at the meeting point, and advance
both one node per step. Slow travels K to reach the entry; fast also travels
K, and since K = (n-1)*L + (L - M), fast completes n-1 full laps plus L - M
more steps, which also lands on the entry. That is where the cycle starts.
'''
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head == None or head.next == None:
return None
slow = fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if fast == slow:
break
if slow == fast:
slow = head
while slow != fast:
slow = slow.next
fast = fast.next
return slow
return None
if __name__ == "__main__":
    # build 1 -> 2 -> 3 -> (back to 2); the cycle starts at the node with value 2
    a, b, c = ListNode(1), ListNode(2), ListNode(3)
    a.next, b.next, c.next = b, c, b
    print(Solution().detectCycle(a).val)  # 2
|
[
"Evan123mg@gmail.com"
] |
Evan123mg@gmail.com
|
1b464ae08fac263159c6d7d58623f8f8b5db8153
|
2e3f7b74e5e14eb9b12d316f609c3f057de1e845
|
/ka_kun_blog/forms.py
|
92a17c224684df6f4661e7cfab7160cff51c5071
|
[] |
no_license
|
katsuhikonakano/blog
|
edf3de81b1cfe45d2739fa55375bafbf326a0263
|
2c46d7e8b7044b1ab05ba9c9e595f9d4861b2674
|
refs/heads/master
| 2021-03-08T08:46:11.347696
| 2020-07-27T03:36:39
| 2020-07-27T03:36:39
| 246,260,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
from django import forms
from .models import Post
from django.forms import TextInput, Textarea, FileInput, Select, SelectMultiple
from django.contrib.auth.forms import AuthenticationForm
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ["title", "content", "thumnail", "image", "category", "tag"]
widgets = {
'title': TextInput(attrs={
'class': 'form-control',
'placeholder': 'タイトル'}),
'content': Textarea(attrs={
'class': 'form-control',
'placeholder': '本文',
'cols': 80, 'rows': 15}),
'thumnail': FileInput(attrs={
'class': 'form-control-file'}),
'image': FileInput(attrs={
'class': 'form-control-file'}),
'category': Select(attrs={
'class': 'form-control'}),
'tag': SelectMultiple(attrs={
'class': 'form-control'
})
}
class LoginForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        for field in self.fields.values():
            field.widget.attrs['class'] = 'form-control'
            field.widget.attrs['placeholder'] = field.label
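# A minimal view-side sketch (not part of the original module); request.FILES
# is needed because the form includes FileInput widgets:
#
#   form = PostForm(request.POST, request.FILES)
#   if form.is_valid():
#       form.save()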
|
[
"katsuhiko1211@gmail.com"
] |
katsuhiko1211@gmail.com
|
ade97c5dd853443919aed5d2d9e2775c257ab39f
|
b7c788979e8f0f7cb9283874e46f9ec12d13819c
|
/app/Member.py
|
f035869d4271d59c55940c97a23e3e716c457110
|
[
"MIT"
] |
permissive
|
luwoldy/masonite-wcv
|
457ba95c7722f1434e35b31973f5b89102ce5434
|
c520f8b30c1775b3e337c3fa63ae8f08c1b0adf4
|
refs/heads/master
| 2023-01-09T05:34:49.048144
| 2019-12-20T02:55:54
| 2019-12-20T02:55:54
| 210,762,211
| 0
| 0
|
MIT
| 2023-01-04T11:19:06
| 2019-09-25T05:26:03
|
CSS
|
UTF-8
|
Python
| false
| false
| 110
|
py
|
"""Member Model."""
from config.database import Model
class Member(Model):
"""Member Model."""
pass
|
[
"leul.woldeab@gmail.com"
] |
leul.woldeab@gmail.com
|
c4cf2cde8e5feb6718ee02fd9cd86914090cb1ec
|
55d9fd08d587c7a724a41c447bf7f57252788f5b
|
/lfp_causal/meso/stat_easy_vs_hard.py
|
3f46205f09f1dcec82319ae802294193264b9147
|
[] |
no_license
|
StanSStanman/lfp_causal
|
1f400b5d5da285eacc273e04ecd2fcc4ee274bc6
|
832c22dd16aab5650355b58b96dbd3743805f640
|
refs/heads/master
| 2022-05-10T03:03:28.256695
| 2022-04-21T14:38:01
| 2022-04-21T14:38:01
| 176,560,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,952
|
py
|
import os
import os.path as op
import xarray as xr
import numpy as np
import pandas as pd
from frites.dataset import DatasetEphy
from frites.workflow import WfMi
from itertools import product
from research.get_dirs import get_dirs
from lfp_causal.IO import read_session
from lfp_causal.compute_bad_epochs import get_ch_bad_epo, get_log_bad_epo
from lfp_causal.compute_power import normalize_power
# os.system("taskset -p 0xff %d" % os.getpid())
from lfp_causal.profiling import (RepeatedTimer, memory_usage, cpu_usage)
import time
import json
def prepare_data(powers, regressors, l_bad, e_bad, conditions, reg_name,
reg_val, times=None, freqs=None, avg_freq=False,
norm=None, bline=None, fbl=None):
if times is not None:
if isinstance(times, tuple):
tmin, tmax = times
elif isinstance(times, (list, np.ndarray)):
tmin, tmax = times[0], times[-1]
else:
raise ValueError('times must be NoneType '
'tuple of values (tmin, tmax),'
'list or numpy array')
if freqs is not None:
if isinstance(freqs, tuple):
fmin, fmax = freqs
else:
raise ValueError('freqs must be NoneType '
'or tuple of values (fmin, fmax)')
all_pow = []
for p in powers:
# for p, r, lb, eb in zip(powers, regresors, l_bad, e_bad):
print('Opening', p)
pow = xr.open_dataset(p)
if fbl is not None:
_fbl = p.split('/')[:-1] + [fbl]
_fbl = '/' + op.join(*_fbl)
if norm is not None:
pow = normalize_power(pow, norm, bline, _fbl)
if isinstance(times, (tuple, list, np.ndarray)):
pow = pow.loc[dict(times=slice(tmin, tmax))]
all_times = pow.times.values
if isinstance(freqs, tuple):
pow = pow.loc[dict(freqs=slice(fmin, fmax))]
all_freqs = pow.freqs.values
pow = pow.to_array().values.transpose(1, 0, 2, 3)
# pow = pow[nans, :, :, :]
if avg_freq:
pow = pow.mean(2)
all_pow.append(pow)
all_reg = {}
for rn in reg_name:
# for rv in reg_val:
_reg = []
for r, lb, eb, cn, _idx in zip(regressors, l_bad, e_bad,
conditions, range(len(regressors))):
xls = pd.read_excel(r, index_col=0)
reg = xls[rn].values
if len(lb) != 0:
reg = np.delete(reg, lb)
if len(eb) != 0:
reg = np.delete(reg, eb)
            # I actually don't remember why these were useful; consider removing them
# all_pow[_idx] = np.delete(all_pow[_idx], np.where(reg != reg_val),
# axis=0)
# reg = np.delete(reg, np.where(reg != reg_val))
if cn == 'easy':
reg = np.full_like(reg, 0)
elif cn == 'hard':
reg = np.full_like(reg, 1)
_reg.append(reg)
all_reg['{0}_{1}'.format(rn, reg_val)] = _reg
return all_pow, all_reg, all_times, all_freqs
def compute_stats_meso(fname_pow, fname_reg, rois, log_bads, bad_epo,
conditions, regressor, reg_vals, mi_type, inference,
times=None, freqs=None, avg_freq=True, norm=None):
power, regr, \
times, freqs = prepare_data(fname_pow, fname_reg, log_bads,
bad_epo, conditions=conditions,
reg_name=regressor, reg_val=reg_vals,
times=times, freqs=freqs,
avg_freq=avg_freq,
norm=norm, bline=(-.55, -0.05),
fbl='cue_on_pow_beta_mt.nc')
###########################################################################
for i, p in enumerate(power):
power[i] = p[:25, :, :]
for k in regr.keys():
for i, r in enumerate(regr[k]):
regr[k][i] = r[:25]
# for k in cond.keys():
# if cond[k] is not None:
# for i, c in enumerate(cond[k]):
# cond[k][i] = c[:25]
###########################################################################
mi_results = {}
pv_results = {}
conj_ss_results = {}
conj_results = {}
for _r, _mt, _inf in zip(regr, mi_type, inference):
if _mt == 'cc':
regr[_r] = [r.astype('float32') for r in regr[_r]]
elif _mt == 'cd':
regr[_r] = [r.astype('int32') for r in regr[_r]]
        elif _mt == 'ccd':
            regr[_r] = [r.astype('float32') for r in regr[_r]]
            # NOTE: 'cond' is undefined in this function (its handling is
            # commented out above), so this branch would raise a NameError
            cond[_r] = [c.astype('int32') for c in cond[_r]]
ds_ephy = DatasetEphy(x=power.copy(), y=regr[_r], roi=rois,
z=None, times=times)
wf = WfMi(mi_type=_mt, inference=_inf, kernel=np.hanning(20))
mi, pval = wf.fit(ds_ephy, n_perm=1000, n_jobs=-1)
mi['times'] = times
pval['times'] = times
if _inf == 'rfx':
conj_ss, conj = wf.conjunction_analysis(ds_ephy)
if not avg_freq:
mi.assign_coords({'freqs': freqs})
pval.assign_coords({'freqs': freqs})
mi_results[_r] = mi
pv_results[_r] = pval
if _inf == 'rfx':
conj_ss_results[_r] = conj_ss
conj_results[_r] = conj
ds_mi = xr.Dataset(mi_results)
ds_pv = xr.Dataset(pv_results)
if len(conj_ss_results) == len(conj_results) == 0:
return ds_mi, ds_pv
else:
ds_conj_ss = xr.Dataset(conj_ss_results)
ds_conj = xr.Dataset(conj_results)
return ds_mi, ds_pv, ds_conj_ss, ds_conj
if __name__ == '__main__':
from lfp_causal import MCH, PRJ
dirs = get_dirs(MCH, PRJ)
monkeys = ['teddy']
conditions = ['easy', 'hard']
event = 'trig_off'
norm = 'fbline_relchange'
n_power = '{0}_pow_beta_mt.nc'.format(event)
times = [(-1.5, 1.3)]
# freqs = [(5, 120)]
# freqs = [(8, 15), (15, 30), (25, 45), (40, 70), (60, 120)]
freqs = [(8, 12), (15, 35), (40, 65), (70, 120)]
freqs = [(15, 35)]
avg_frq = True
overwrite = True
# regressors = ['Correct', 'Reward',
# 'is_R|C', 'is_nR|C', 'is_R|nC', 'is_nR|nC',
# 'RnR|C', 'RnR|nC',
# '#R', '#nR', '#R|C', '#nR|C', '#R|nC', '#nR|nC',
# 'learn_5t', 'learn_2t', 'early_late_cons',
# 'P(R|C)', 'P(R|nC)', 'P(R|Cho)', 'P(R|A)',
# 'dP', 'log_dP', 'delta_dP',
# 'surprise', 'surprise_bayes', 'rpe',
# 'q_pcorr', 'q_pincorr', 'q_dP',
# 'q_entropy', 'q_rpe', 'q_absrpe',
# 'q_shann_surp', 'q_bayes_surp']
# conditionals = [None, None,
# None, None, None, None,
# None, None,
# None, None, None, None, None, None,
# None, None, None,
# None, None, None, None,
# None, None, None,
# None, None, None,
# None, None, None,
# None, None, None,
# None, None]
# conditionals = ['Condition' for r in regressors]
# mi_type = ['cd', 'cd',
# 'cd', 'cd', 'cd', 'cd',
# 'cd', 'cd',
# 'cc', 'cc', 'cc', 'cc', 'cc', 'cc',
# 'cd', 'cd', 'cd',
# 'cc', 'cc', 'cc', 'cc',
# 'cc', 'cc', 'cc',
# 'cc', 'cc', 'cc',
# 'cc', 'cc', 'cc',
# 'cc', 'cc', 'cc',
# 'cc', 'cc']
# mi_type = ['ccd' for r in regressors]
regressors = ['Condition']
regressors = ['q_rpe']
reg_vals = 0
conditionals = [None]
mi_type = ['cc']
inference = ['ffx' for r in regressors]
fn_pow_list = []
fn_reg_list = []
rois = []
log_bads = []
bad_epo = []
conds = []
rej_files = []
rej_files += ['1204', '1217', '1231', '0944', # Bad sessions
'0845', '0847', '0939', '0946', '0963', '1036', '1231',
'1233', '1234', '1514', '1699',
'0940', '0944', '0964', '0967', '0969', '0970', '0971',
'0977', '0985', '1037', '1280']
rej_files += ['0210', '0219', '0221', '0225', '0226', '0227', '0230',
'0252', '0268', '0276', '0277', '0279', '0281', '0282',
'0283', '0285', '0288', '0290', '0323', '0362', '0365',
'0393', '0415', '0447', '0449', '0450', '0456', '0541',
'0573', '0622', '0628', '0631', '0643', '0648', '0653',
'0660', '0688', '0689', '0690', '0692', '0697', '0706',
'0710', '0717', '0718', '0719', '0713', '0726', '0732',
'0220', '0223', '0271', '0273', '0278', '0280', '0284',
'0289', '0296', '0303', '0363', '0416', '0438', '0448',
'0521', '0618', '0656', '0691', '0693', '0698', '0705',
'0707', '0711', '0712', '0716', '0720', '0731']
for monkey in monkeys:
for condition in conditions:
epo_dir = dirs['epo'].format(monkey, condition)
power_dir = dirs['pow'].format(monkey, condition)
regr_dir = dirs['reg'].format(monkey, condition)
fname_info = op.join(dirs['ep_cnds'].format(monkey, condition),
'files_info.xlsx')
for d in os.listdir(power_dir):
if d in rej_files:
continue
if op.isdir(op.join(power_dir, d)):
fname_power = op.join(power_dir, d, n_power)
fname_regr = op.join(regr_dir, '{0}.xlsx'.format(d))
fname_epo = op.join(epo_dir,
'{0}_{1}_epo.fif'.format(d, event))
fn_pow_list.append(fname_power)
fn_reg_list.append(fname_regr)
# rois.append(read_session(fname_info, d)['sector'].values)
rois.append(np.array(['striatum']))
lb = get_log_bad_epo(fname_epo)
log_bads.append(lb)
be = get_ch_bad_epo(monkey, condition, d,
fname_info=fname_info)
bad_epo.append(be)
conds.append(condition)
mi_results = {}
pv_results = {}
for t, f in product(times, freqs):
ds_mi, ds_pv = compute_stats_meso(fn_pow_list, fn_reg_list, rois,
log_bads, bad_epo, conds,
regressors, reg_vals,
mi_type, inference,
t, f, avg_frq, norm)
mk = 'teddy'
cd = 'easy_hard_rpe_one_roi'
if avg_frq:
save_dir = op.join(dirs['st_prj'], mk, cd, event, norm,
'{0}_{1}_mt'.format(f[0], f[1]))
elif not avg_frq:
save_dir = op.join(dirs['st_prj'], mk, cd, event, norm,
'{0}_{1}_tf_mt'.format(f[0], f[1]))
os.makedirs(save_dir, exist_ok=True)
        fname_mi = op.join(save_dir, 'mi_results.nc')
        fname_pv = op.join(save_dir, 'pv_results.nc')
if not overwrite and op.exists(fname_mi):
mi = xr.load_dataset(fname_mi)
pv = xr.load_dataset(fname_pv)
ds_mi['times'] = mi['times']
ds_pv['times'] = pv['times']
ds_mi = mi.update(ds_mi)
ds_pv = pv.update(ds_pv)
ds_mi.to_netcdf(fname_mi)
ds_pv.to_netcdf(fname_pv)
|
[
"ruggero.basanisi@gmail.com"
] |
ruggero.basanisi@gmail.com
|
a8f1f9f4f7be1763c648b87bb38cc25de8b350de
|
d9b698e156c15bdc3da2190d20529e0acdf24190
|
/entregable/script2.py
|
096bd5a6367f2b9e6bbe68b8be07dccec8ddcfce
|
[] |
no_license
|
turtlean/sistemas-tp1
|
dc2fda54adb331a46962a59adc5867cc071f6e70
|
e0769d4234fb412cd9ca8c860f5bdf969155cf28
|
refs/heads/master
| 2021-05-27T06:29:31.538873
| 2013-11-19T01:30:58
| 2013-11-19T01:30:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,046
|
py
|
#!/usr/bin/python
from matplotlib.pyplot import *
import sys
import time
import os
import math
import matplotlib.pyplot as plt
#./simusched <lote.tsk> <num_cores> <costo_cs> <costo_mi> <sched> [<params_sched>]
os.system("rm -rf experimento")
os.system("mkdir experimento")
#for plotting
ywt=[] #waiting times
yta=[] #ta times
x=[] #quantums
desviosWt=[]
desviosTa=[]
#random tasks
wtProm=[]
taProm=[]
wtMin = 1000000
taMin = 1000000
fo = open("wtta.out", "a") #"wtta.out"
sched = str(sys.argv[1])  # scheduler
lote = str(sys.argv[2])   # task batch file
qm = int(sys.argv[3])     # quantum limit
cores = str(sys.argv[4])  # core limit
for k in range(1,int(cores)+1):
for i in range (1,qm):
j = str(i)
print j
for s in range (0,40):
if (sched=="SchedLottery"):
os.system("./simusched "+lote+" "+str(k)+" 0 0 "+sched+" "+j+" "+j+" > test")
if (sched=="SchedRR" or sched=="SchedRR2"):
string = "./simusched "+lote+" "+str(k)+" 0 0 "+sched+" "
for h in range(0,k):
string = string+j+" "
string = string+"> test"
os.system(string)
#os.system("python graphsched.py test")
os.system("./wtta < test > tmp")
tmp1 = open("tmp",'r').read()
tmp = open("tmp",'r').readlines()
wt = tmp[0].split()
wt = float(wt[2])
ta = tmp[1].split()
ta = float(ta[2])
wtProm.append(wt)
taProm.append(ta)
wt = sum(wtProm) / float(len(wtProm))
ta = sum(taProm) / float(len(taProm))
desvioWt = [(z-wt)*(z-wt) for z in wtProm]
desvioWt = math.sqrt(sum(desvioWt) / (float(len(desvioWt))-1))
desviosWt.append(desvioWt)
desvioTa = [(b-ta)*(b-ta) for b in taProm]
desvioTa = math.sqrt(sum(desvioTa) / (float(len(desvioTa))-1))
desviosTa.append(desvioTa)
wtProm=[]
taProm=[]
ywt.append(wt)
yta.append(ta)
x.append(i)
if (taMin > ta):
taMin = ta
quantum1 = i
if (wtMin > wt):
wtMin = wt
quantum2 = i
fo.write("Quantum: "+j+"\n")
fo.write("Waiting time: "+str(wt)+"\n")
fo.write("Turnaround time: "+str(ta)+"\n")
#nombre_test = "test_"+j
#os.system("mv test.png experimento/"+nombre_test+".png")
#os.system("mv test experimento/"+nombre_test+".out")
#os.system("rm test")
fo.write("\n")
fo.write("Quantum con menor waiting time: "+str(quantum2)+"("+str(wtMin)+")\n")
fo.write("Quantum con menor turnaround time: "+str(quantum1)+"("+str(taMin)+")")
#os.system("mv wtta.out experimento/")
#Graficador
plt.figure(k)
plt.xlabel("Quantum")
plt.ylabel("Waiting time")
plt.errorbar(x, ywt, yerr=desviosWt, fmt='.', color='black')
plt.axis([0,qm,0,max(ywt)+2])
savefig("experimento/cores_"+str(k)+"_wt.jpg")
plt.figure(k+1)
plt.xlabel("Quantum")
plt.ylabel("Turnaround time")
plt.errorbar(x, yta, yerr=desviosTa, fmt='.', color='black')
plt.axis([0,qm,taMin-60,max(yta)+60])
savefig("experimento/cores_"+str(k)+"_ta.jpg")
wtMin = 1000000
taMin = 1000000
ywt=[] #waiting times
yta=[] #ta times
x=[] #quantums
desviosWt=[]
desviosTa=[]
os.system("rm tmp")
os.system("rm test")
os.system("mv wtta.out experimento/")
|
[
"martin@martin-pc.(none)"
] |
martin@martin-pc.(none)
|
5e9e166be4bb079b5d0ce28085ede2532003ca6d
|
0428aa38f62004e25134596c30280fa038f65f1d
|
/trigger_lambda_on_s3_put.py
|
2ad68100488a42aaddafb2351b57700f48d7b608
|
[] |
no_license
|
smusongwe/s3
|
fa762c1f4b62895a0c93055dbce7a2dd6ffef38c
|
540aa034fd0afbe30b998b60db7ab6efaf8063d4
|
refs/heads/main
| 2023-01-19T03:25:22.171853
| 2020-12-01T00:34:29
| 2020-12-01T00:34:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
import json
import urllib.parse
import boto3
print('Loading function')
s3 = boto3.client('s3')
def lambda_handler(event, context):
#1 - Get the bucket name
bucket = event['Records'][0]['s3']['bucket']['name']
#2 - Get the file/key name
key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
try:
#3 - Fetch the file from S3
response = s3.get_object(Bucket=bucket, Key=key)
#4 - Deserialize the file's content
text = response["Body"].read().decode()
data = json.loads(text)
#5 - Print the content
print(data)
#6 - Parse and print the transactions
transactions = data['transactions']
for record in transactions:
print(record['transactionType'])
return 'Success!'
except Exception as e:
print(e)
raise e
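# A trimmed sample of the S3 put-event shape this handler parses (illustrative
# values only, for local testing):
#
#   {
#     "Records": [
#       {"s3": {"bucket": {"name": "my-bucket"},
#               "object": {"key": "data/transactions.json"}}}
#     ]
#   }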
|
[
"noreply@github.com"
] |
smusongwe.noreply@github.com
|
4cb017fbd99c77bd3a84cd8f89a44b62c7d24911
|
953eb11091193b2668cb71dd225ac80d86da0bc2
|
/src/meron_background.py
|
2d37a0076c413ed48a4322d583b0e2c2c6ad3a3f
|
[] |
no_license
|
haugstve/use-watson-ml-python
|
e9f750a1daa85ba9f83b83d9690d95d1beeb3c9c
|
6fcb2a2059c050de41286bdec90619f61e0fffa6
|
refs/heads/master
| 2020-08-10T14:21:14.586297
| 2019-10-11T11:34:25
| 2019-10-11T11:34:25
| 214,359,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
import schedule
import time
import random
import json
from think_meron import aim_token, evaluate_meron_model
from path import processed_data_path, root
def job():
data_path = processed_data_path / 'payloads_meron.json'
with open(data_path, 'r') as f:
payloads = json.load(f)
token = aim_token()
    selection = random.randint(0, len(payloads) - 1)  # randint is inclusive on both ends
response = evaluate_meron_model(token, payloads[selection])
log_path = root / 'response.log'
with open(log_path, 'a+') as f:
f.write(f'status: {response.status_code}'
f', content: {response.content}\n')
schedule.every().minute.at(":00").do(job)
while True:
schedule.run_pending()
time.sleep(1)
|
[
"daniel.haugstvedt@gmail.com"
] |
daniel.haugstvedt@gmail.com
|
280a05ec34f5669a8ef15a8fd75fa5be7a8a3981
|
723c3e005d76db1cdc706b2169cc6cc441974201
|
/PDF_Sales/Old_files/PDF_IND_Dates_Old/PDF_IND_Dates_2.py
|
67caa425edbe7d0d1ed6ae4c752ca3cda8b8e0e1
|
[] |
no_license
|
GolovanPriherel/Python_Projects
|
0b801d753efabbe0ca4a0d47bd532cc316024799
|
6108e10cefb05d521f8b4969fed57e92f61ab753
|
refs/heads/main
| 2023-08-28T10:59:35.320556
| 2021-10-13T20:44:33
| 2021-10-13T20:44:33
| 416,886,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,909
|
py
|
import re, fitz, pytesseract, urllib.request
# ---- PDF handling
from PIL import Image
# Search patterns
patternsIND = [' IND ([0-9,]+)', ' IND Number.([0-9,]+)', ' IND No. ([0-9,]+)']
patternsINDDate = ['IND\D{4}(\d{1,2}.\d{1,2}.\d{1,4})', 'IND \d{5,6}..(\d{1,2}.\D+.\d{1,4})',
'IND\D{4}(\d{1,2}.\D+.\d{1,4})', 'IND \d{5,6}..(\d{1,2}.\d{1,2}.\d{1,4})',
'IND.[0-9,]+.Review #01 dated\s\s(\w+.\d+..\d+)', 'was originally submitted']
# Path to the tesseract executable
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
# Opens the PDF page by page (for the text layer)
def Extract_PDF_Text(pdf_file):
pdf_document = pdf_file
doc = fitz.open(pdf_document)
for pages in range(doc.pageCount):
page = doc.loadPage(pages)
page_text = page.getText("text")
yield page_text
# print(page_text)
# Finds the IND number and date in the PDF text
def extract_text(pdf_file, pattern1, pattern2):
Date, IND = [], []
for page in Extract_PDF_Text(pdf_file):
for patt in pattern1:
result_IND = re.findall(patt, page)
for sub in range(len(result_IND)):
IND.append(result_IND[sub].replace('\n', ''))
for patt in pattern2:
result_IND_Date = re.findall(patt, page)
for sub in range(len(result_IND_Date)):
Date.append(result_IND_Date[sub].replace('\n', ''))
    # keep the most frequent match for each
    if IND:
        IND = max(set(IND), key=IND.count)
    if Date:
        Date = max(set(Date), key=Date.count)
    # print('Text pass won')
    return IND, Date
# Extracts the embedded images from the PDF
def extract_png(PDF):
ind_num, ind_date = '',''
doc = fitz.open(PDF)
for i in range(0, len(doc)):
for img in doc.getPageImageList(i):
xref = img[0]
pix = fitz.Pixmap(doc, xref)
if pix.n < 5: # this is GRAY or RGB
pix.writePNG("Text1.png")
else: # CMYK: convert to RGB first
pix1 = fitz.Pixmap(fitz.csRGB, pix)
pix1.writePNG("Text1.png")
pix1 = None
pix = None
text = Find_IND_Date_Tess("Text1.png")
IND_Num, IND_Date = extract_text_png(text, patternsIND, patternsINDDate)
# print('---', i)
if IND_Num and IND_Date:
return IND_Num, IND_Date
elif IND_Num:
ind_num = IND_Num
elif IND_Date:
ind_date = IND_Date
return ind_num, ind_date
# OCR: recognize the text in an image
def Find_IND_Date_Tess(picture):
img = Image.open(f'{picture}')
file_name = img.filename
file_name = file_name.split(".")[0]
custom_config = r'--oem 3 --psm 6'
text = pytesseract.image_to_string(img, config=custom_config)
return text
# Finds the IND number and date in the OCR'd image text
def extract_text_png(text, pattern1, pattern2):
IND, Date = [], []
for patt in pattern1:
result_IND = re.findall(patt, text)
for sub in range(len(result_IND)):
IND.append(result_IND[sub].replace('\n', ''))
for patt in pattern2:
result_IND_Date = re.findall(patt, text)
for sub in range(len(result_IND_Date)):
Date.append(result_IND_Date[sub].replace('\n', ''))
    # keep the most frequent match for each (independent ifs, so Date is
    # still reduced when IND is also found)
    if IND:
        IND = max(set(IND), key=IND.count)
    if Date:
        Date = max(set(Date), key=Date.count)
    # print('Image pass won')
    return IND, Date
# Ties all the parser functions together
def Find_IND_Date(url):  # just pass in a link to the PDF
    urllib.request.urlretrieve(url, "../IND1.pdf")  # download the PDF
    IND_Num, IND_Date = extract_text("../IND1.pdf", patternsIND, patternsINDDate)  # scan the text layer
TrueNum, TrueDate = None, None
    if IND_Num and IND_Date:  # is the text layer alone enough?
return IND_Num, IND_Date
elif IND_Num:
TrueNum = IND_Num
elif IND_Date:
TrueDate = IND_Date
    else:
        IND_Num1, IND_Date1 = extract_png("../IND1.pdf")  # fall back to OCR on the embedded images
if IND_Num1:
TrueNum = IND_Num1
elif IND_Date1:
TrueDate = IND_Date1
    # TODO: finish handling the trickier layouts
return TrueNum, TrueDate
# Verified PDF links
# url = 'https://www.accessdata.fda.gov/drugsatfda_docs/nda/2013/202971Orig1s000PharmR.pdf' # IND (5 digits) and date (space-separated)
# url = 'https://www.accessdata.fda.gov/drugsatfda_docs/nda/2021/212887Orig1s000,212888Orig1s000Approv.pdf' # IND (6 digits)
# url = 'https://www.accessdata.fda.gov/drugsatfda_docs/nda/2012/202428Orig1s000PharmR.pdf' # IND (6 digits)
# url = 'https://www.accessdata.fda.gov/drugsatfda_docs/nda/2002/21-456_Aciphex_Medr_P1.pdf' # image with an IND date but no number
url = 'https://www.accessdata.fda.gov/drugsatfda_docs/nda/2010/022518Orig1s000PharmR.pdf' # many different IND numbers with a date
IND_Num, IND_Date = Find_IND_Date(url)
print(IND_Num, '|', IND_Date)
|
[
"noreply@github.com"
] |
GolovanPriherel.noreply@github.com
|
ef2e1856553562a52e2e69951f8546c91328ccd8
|
5c41073897f791bc6915c874a9304d7d7f6c13db
|
/gravitate/domain/bookings/view_mediator.py
|
853566c6ab0c9429b78407c69be23fce3ed1e7bc
|
[] |
no_license
|
billyrrr/gravitate-backend
|
5c8ce8b95607b06bd2d850b085e9129d9dc8632b
|
f1e98f0002046cb4c932f9f1badbdf2eb8af92d1
|
refs/heads/master
| 2023-05-11T10:58:07.871694
| 2020-05-07T23:04:50
| 2020-05-07T23:04:50
| 156,035,235
| 0
| 0
| null | 2020-01-19T02:32:39
| 2018-11-04T00:19:16
|
Python
|
UTF-8
|
Python
| false
| false
| 4,130
|
py
|
import time
from flask_boiler.utils import snapshot_to_obj
from flask_boiler.view.query_delta import ViewMediatorDeltaDAV, ProtocolBase
from google.cloud.firestore import DocumentSnapshot
from gravitate import CTX
from gravitate.domain.user import User
from . import RiderBooking, BookingStoreBpss, RiderTarget, RiderBookingView, \
RiderBookingForm, RiderBookingReadModel
from google.cloud.firestore import Query
from ..location.models import Sublocation
class UserBookingMediator(ViewMediatorDeltaDAV):
"""
Forwards a rider booking to a user subcollection
"""
class Protocol(ProtocolBase):
@staticmethod
def on_create(snapshot, mediator):
obj = RiderBookingReadModel.new(snapshot=snapshot)
mediator.notify(obj=obj)
@staticmethod
def on_update(snapshot, mediator):
obj: RiderBooking = snapshot_to_obj(snapshot)
if obj.status == "removed":
RiderBookingReadModel.remove_one(obj=obj)
model_cls = RiderBooking
class UserBookingEditMediator(ViewMediatorDeltaDAV):
"""
    Applies rider booking edits submitted under a user subcollection
"""
def notify(self, obj):
obj.propagate_change()
class Protocol(ProtocolBase):
@staticmethod
def on_create(snapshot, mediator):
assert isinstance(snapshot, DocumentSnapshot)
path = snapshot.reference
booking_id = path.id
user_id = path.parent.parent.id
d = snapshot.to_dict()
obj = RiderBookingForm.from_dict(doc_id=booking_id,
d=dict(**d, user_id=user_id))
mediator.notify(obj=obj)
snapshot.reference.delete()
class BookingTargetMediator(ViewMediatorDeltaDAV):
"""
Generate booking target from a rider booking newly added or edited.
"""
model_cls = RiderBooking
def notify(self, obj):
obj.save()
class Protocol(ProtocolBase):
@staticmethod
def on_create(snapshot, mediator):
obj: RiderBooking = snapshot_to_obj(snapshot=snapshot)
for from_sublocation_ref in obj.from_location.sublocations:
from_sublocation = Sublocation.get(doc_ref=from_sublocation_ref)
for to_sublocation_ref in obj.to_location.sublocations:
to_sublocation = Sublocation.get(
doc_ref=to_sublocation_ref)
d = dict(
r_ref=obj.doc_ref,
from_lat=from_sublocation.coordinates[
"latitude"],
from_lng=from_sublocation.coordinates[
"longitude"],
from_id=from_sublocation.doc_id,
to_lat=to_sublocation.coordinates["latitude"],
to_lng=to_sublocation.coordinates["longitude"],
to_id=to_sublocation.doc_id,
user_id=obj.user_id
)
ts = dict(
earliest_arrival=obj.earliest_arrival,
latest_arrival=obj.latest_arrival,
earliest_departure=obj.earliest_departure,
latest_departure=obj.latest_departure,
)
ts = {k: v for k, v in ts.items() if v is not None}
target = RiderTarget.new(
**d, **ts
)
mediator.notify(obj=target)
@staticmethod
def on_delete(snapshot, mediator):
obj: RiderBooking = snapshot_to_obj(snapshot=snapshot)
if obj.status in {"matched", }:
"""
Delete targets for matched rider bookings
"""
booking_ref = obj.doc_ref
for target in RiderTarget.where(r_ref=booking_ref):
target.delete()
def _get_query(self):
query = Query(parent=self.model_cls._get_collection())
return query
|
[
"billrao@me.com"
] |
billrao@me.com
|
6ffd2e78972b8fd0b9f8b3259fae0c6b13ecaf63
|
0f3c7d268349336160d592eaa3acf56a1bb12cc2
|
/reports/ISOPE_outline/create_latex_report.py
|
42c30523f9cc8383a9d80aa0358fe68d4ba36499
|
[
"MIT"
] |
permissive
|
ramisetti/Prediction-of-roll-motion-using-fully-nonlinear-potential-flow-and-Ikedas-method
|
ac0f7eac820e5f381d1073bd52556ac313026727
|
cce8abde16a15a2ae45008e48b1bba9f4aeaaad4
|
refs/heads/main
| 2023-05-19T05:54:41.448268
| 2021-06-11T09:48:15
| 2021-06-11T09:48:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
import os.path
from src.notebook_to_latex import convert_notebook_to_latex
import reports
notebook_path = os.path.join(reports.path,'ISOPE_outline','01.1.outline.ipynb')
build_directory = os.path.join(reports.path,'ISOPE')
if not os.path.exists(build_directory):
os.mkdir(build_directory)
skip_figures=True
convert_notebook_to_latex(notebook_path=notebook_path, build_directory=build_directory, save_main=False, skip_figures=skip_figures)
if not skip_figures:
## Special treatment
import joblib
import graphviz
from sklearn import tree
clf = joblib.load('models/C_r_tree.pkl')
dot_data = tree.export_graphviz(clf, out_file=None,
feature_names=[r'sigma', r'a_1', r'a_3'], rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
graph.render("reports/ISOPE/figures/decision_tree")
|
[
"marale@kth.se"
] |
marale@kth.se
|
ea147069bc358894d57aadf2f0981d4f2f8e0902
|
8d6418d8f813961318a962638467cdea5adec882
|
/test.py
|
4e4d2e798b46c69735d1b4597e4283d944ae03bd
|
[] |
no_license
|
demakaituan/code
|
6d7045d7a085ff843b636ecb3e88d9959f68d32e
|
6578bc0dbd7018449e9f8c83f4fc5304126036a6
|
refs/heads/master
| 2021-01-21T14:04:26.549322
| 2016-05-30T08:54:51
| 2016-05-30T08:54:51
| 54,120,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
import unittest
import seuif97
class Region1_Test(unittest.TestCase):
def setUp(self):
self.tab5 = [[3, 300, 0.100215168e-2, 0.115331273e3, 0.112324818e3, 0.392294792, 4.17301218, 0.150773921e4],
[80, 300, 0.971180894e-3, 0.184142828e3, 0.106448356e3,
0.368563852, 4.01008987, 0.163469054e4],
[3, 500, 0.120241800e-2, 0.975542239e3, 0.971934985e3, 2.58041912, 4.65580682, 0.124071337e4]]
def test_specific_volume(self):
places = 11
for item in self.tab5:
self.assertAlmostEqual(seuif97.specific_volume(item[0], item[1]), item[2], places)
def test_specific_enthalpy(self):
places = 6
for item in self.tab5:
self.assertAlmostEqual(seuif97.specific_enthalpy(item[0], item[1]), item[3], places)
def test_specific_internal_energy(self):
places = 6
for item in self.tab5:
self.assertAlmostEqual(seuif97.specific_internal_energy(
item[0], item[1]), item[4], places)
def test_specific_entropy(self):
places = 8
for item in self.tab5:
self.assertAlmostEqual(seuif97.specific_entropy(item[0], item[1]), item[5], places)
def test_specific_isobaric_heat_capacity(self):
places = 8
for item in self.tab5:
self.assertAlmostEqual(seuif97.specific_isobaric_heat_capacity(
item[0], item[1]), item[6], places)
def test_speed_of_sound(self):
places = 5
for item in self.tab5:
self.assertAlmostEqual(seuif97.speed_of_sound(item[0], item[1]), item[7], places)
class additional_Test(unittest.TestCase):
def setUp(self):
self.tab6 = [[0.001, 0, 9.800980612e-4],
[90, 0, 91.92954727],
[1500, 3.4, 58.68294423]]
def test_backward_equations(self):
places = 8
for item in self.tab6:
self.assertAlmostEqual(seuif97.backward_equations(item[0], item[1]), item[2], places)
def suite_test():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Region1_Test))
suite.addTest(unittest.makeSuite(additional_Test))
return suite
if __name__ == '__main__':
unittest.main(defaultTest = 'suite_test')
|
[
"599146992@qq.com"
] |
599146992@qq.com
|
fe329d17aaaa67a21a1b92f02c926bb41085e379
|
28df0dfb458a3c0f37d5a597307017717f9b1cc3
|
/auth_utils.py
|
4880fdd53294b5dadd961c64186bd5575b88e666
|
[] |
no_license
|
dagou1992/flask
|
5533aa7c48a98c561b4235cbfcc6675ebfd81471
|
67afd661e0d14a62c55789b844ae11e790ca531d
|
refs/heads/master
| 2020-03-24T00:02:55.456559
| 2018-07-25T08:12:49
| 2018-07-25T08:12:49
| 142,269,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
# coding: utf-8
from flask_login import LoginManager, UserMixin
from flask_login import login_user
from app import json_response_error
login_manager = LoginManager()
def init_auth(app):
login_manager.init_app(app)
class User(UserMixin):
def __init__(self, user):
self.info = user
def get_id(self):
"""登录成功后,就会调用get_id()获取到id存入session中"""
return self.info
@login_manager.user_loader
def load_user(user_id):
return User(user_id)
# Record in the session that the user has logged in
def register_login(user):
login_user(User(user))
@login_manager.unauthorized_handler
def unauthorized_handler():
    return json_response_error(401, "用户未登录。"), 401  # "User is not logged in."
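# A minimal wiring sketch (not part of the original module; the Flask app
# object and the credential check are assumed):
#
#   app = Flask(__name__)
#   init_auth(app)
#   # ...inside a /login view, after verifying credentials:
#   register_login(user_id)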
|
[
"jbq19920903@163.com"
] |
jbq19920903@163.com
|
3393ba44a415f8540c1fe8973955eef2de703ff1
|
c211c25b0dde4435f09357880ac0db97c05fd9a7
|
/Spider.py
|
9575f749de3597014c98b646d7e53ddc7af3777c
|
[] |
no_license
|
Monday666/maoyanSpider
|
2abfd912a56afeef9563b9f6f4e18b3377fc4c7f
|
68036975face7256d4b6286a325a28174943465c
|
refs/heads/master
| 2020-03-31T07:10:07.798737
| 2018-10-08T02:58:28
| 2018-10-08T02:58:28
| 152,010,437
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,377
|
py
|
import requests
from bs4 import BeautifulSoup
import re
import csv
def Gethtml(url):
"""
    Fetches the page source while mimicking a real browser
    :param url: the page URL
    :return: html
"""
    # Assemble the cookies string into a dict
cookies_str = '__mta=208903469.1537876239606.1537966087518.1537966271718.11; uuid_n_v=v1; uuid=3851AE40C0B911E895764F985E386DE202DFFDFED118403EB9BA5E7A9C9D6698; _lxsdk_cuid=16610912361c8-07a50566ed1d0e-8383268-1fa400-16610912362c8; _lxsdk=3851AE40C0B911E895764F985E386DE202DFFDFED118403EB9BA5E7A9C9D6698; _csrf=33645a5e9922420ef22609cd9965dd58afac2d82a9caca9afd817c97d4a41563; _lx_utm=utm_source%3Dmeituanweb; __mta=208903469.1537876239606.1537964122287.1537964124676.6; _lxsdk_s=16615ccbec7-4dc-ef6-e2a%7C%7C20'
cookies_dict = {}
for cookie in cookies_str.split(";"):
k, v = cookie.split("=", 1)
cookies_dict[k.strip()] = v.strip()
    # Other request headers
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
}
    # Request the page
page = requests.get(url=url, cookies=cookies_dict, headers=headers)
    # Return the page HTML
return page.text
def FindMovie(sp):
"""
    Extracts each movie's enclosing tag and returns a list; each element is the full tag content for one movie
"""
movie = sp.find_all('div', attrs={'class': re.compile(r"show-list(\s\w+)?")})
return movie
def FindPage(sp):
"""
    Finds and extracts the page section for each date
:param sp:
:return:
"""
page = sp.find_all('div', attrs={'class': re.compile(r"plist-container(\s\w+)?")})
return page
def FindName(sp):
"""
    Finds the movie name
:param sp:soup
:return:str name
"""
name = sp.find('h3', class_='movie-name')
return name.text
def FindDate(sp):
"""
    Finds the dates
:param sp:
:return:
"""
lsdate = sp.find_all('span', attrs={'class': re.compile(r"date-item(\s\w+)?")})
data = []
for l in lsdate:
data.append(l.text)
return data
def FindTime(sp):
"""
    Finds the showtimes
:param sp:
:return:
"""
time = []
page = FindPage(sp)
for i in range(len(page)):
lstime = page[i].find_all('span', attrs={'class': 'begin-time'})
timei = []
if lstime == []:
timei.append("无场次")
else:
for l in lstime:
timei.append(l.text)
time.append(timei)
return time
def FindPrice(sp):
"""
    Finds the ticket price
:param sp:
:return:
"""
lsprice = sp.find('span', class_='value text-ellipsis', text=re.compile(r"(.\d+.\d.)张"))
return lsprice.text
def FindPeople(sp):
"""
    Finds seat counts; returns the unsold (selectable) and sold seat tags
:param sp:
:return:
"""
Npeople = sp.find_all('span', class_='seat selectable')
Hpeople = sp.find_all('span', class_='seat sold')
return Npeople, Hpeople
def ReturnPrice(sp):
"""
    Opens the buy page, finds the price and returns it
:param sp:
:return:
"""
page = FindPage(sp)
server = "http://maoyan.com"
price = []
for i in range(len(page)):
pricei = []
Url = []
a = page[i].find_all('a', attrs={'class': 'buy-btn normal'})
if a == []:
pricei.append('无')
else:
for each in a:
Url.append(server + each.get('href'))
for j in Url:
pricei.append(FindPrice(BeautifulSoup(Gethtml(j), 'html.parser')))
price.append(pricei)
return price
def ReturnPN(sp):
"""
    Opens the seat page and returns the sold and remaining ticket counts
:param sp:
:return:
"""
peopleN = []
page = FindPage(sp)
server = "http://maoyan.com"
for i in range(len(page)):
Url = []
peopleNi = []
a = page[i].find_all('a', attrs={'class': 'buy-btn normal'})
if a == []:
peopleNi.append('无')
else:
for each in a:
Url.append(server + each.get('href'))
for j in Url:
people = FindPeople(BeautifulSoup(Gethtml(j), 'html.parser'))
Npeople, Hpeople = people
peopleNi.append("已售出:" + str(len(Hpeople)) + "剩余票数:" + str(len(Npeople)))
peopleN.append(peopleNi)
return peopleN
# Fetch the page source
URL = "http://maoyan.com/cinema/2714?poi=2367020"
sp1 = BeautifulSoup(Gethtml(URL), 'html.parser')
movie = FindMovie(sp1)
name = []
data = []
time = []
price = []
peopleN = []
# Collect the data
for i in range(len(movie)):
name.append(FindName(movie[i]))
data.append(FindDate(movie[i]))
time.append(FindTime(movie[i]))
price.append(ReturnPrice(movie[i]))
peopleN.append(ReturnPN(movie[i]))
# Merge the data
info = []
for i in range(len(movie)):
for j in range(len(data[i])):
for k in range(len(time[i][j])):
infok = [name[i], data[i][j], time[i][j][k], price[i][j][k], peopleN[i][j][k]]
info.append(infok)
# Store the data
with open('myinfo.csv', 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(['电影名', '日期', '时间', '票价', '余票'])
for i in range(len(info)):
csvwriter.writerow(info[i])
|
[
"[18361233771@163.com]"
] |
[18361233771@163.com]
|
baba79af33bbf688b0cc90d14d78060c6b946973
|
3a771b72dae1aae406b94726bcbcf73915577b18
|
/q56.py
|
e7701a5af9e0d8749ed043cc4977a73042423870
|
[] |
no_license
|
SHANK885/Python-Basic-Programs
|
4fcb29280412baa63ffd33efba56d9f59770c9dc
|
157f0f871b31c4523b6873ce5dfe0d6e26a6dc61
|
refs/heads/master
| 2021-07-18T18:24:10.455282
| 2018-11-19T07:02:27
| 2018-11-19T07:02:27
| 138,009,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
unicodeString =u"Heello world"
print(unicodeString)
|
[
"shashankshekhar885@gmail.com"
] |
shashankshekhar885@gmail.com
|
41255f5976ab13155649263d540c618488794b94
|
08a329d07172a384be41eb58a0586032b18787d2
|
/if1.py
|
c5867a5c4dee5850f5bb2049812193dbe20c31e6
|
[] |
no_license
|
atsuhisa-i/Python_study1
|
9bc39d058fe8bdd00adb35324758ad8fa08f4ca1
|
439a654f09e81208658355d99c8ce1c3cd4bcc4e
|
refs/heads/main
| 2023-04-06T12:44:12.099067
| 2021-04-14T13:24:56
| 2021-04-14T13:24:56
| 348,309,405
| 0
| 0
| null | 2021-04-14T13:24:57
| 2021-03-16T10:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 69
|
py
|
number = '123456'
if number == '123456':
print('1st Prize:Money')
|
[
"atsuhisa.1124@gmail.com"
] |
atsuhisa.1124@gmail.com
|
6932b26cbfad7fb87ae3a6ac07694d091c54d719
|
e35594083b7dfc15a8e790efa26fc36ac264ccce
|
/pages/base_page.py
|
0583a1adc7f65adf2a1972a4236615998f514a99
|
[] |
no_license
|
quiprodest/testauto-study-project
|
4f10bc8552fe8b8ca354bc856bab2ddcf76982e5
|
b338ea8d83dfa046c38507e2524e47431d172b26
|
refs/heads/master
| 2021-06-19T16:46:40.660719
| 2019-07-30T12:43:27
| 2019-07-30T12:43:27
| 199,621,325
| 0
| 0
| null | 2021-04-20T18:26:23
| 2019-07-30T09:35:41
|
Python
|
UTF-8
|
Python
| false
| false
| 496
|
py
|
from selenium.common.exceptions import NoSuchElementException
class BasePage(object):
def __init__(self, browser, url): # , timeout=10
self.browser = browser
self.url = url
# self.browser.implicitly_wait(timeout)
def open(self):
self.browser.get(self.url)
def is_element_present(self, how, what):
try:
self.browser.find_element(how, what)
        except NoSuchElementException:
return False
return True
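# A minimal usage sketch (not part of the original page object; the browser
# setup and the URL are assumptions):
#
#   from selenium import webdriver
#   from selenium.webdriver.common.by import By
#   browser = webdriver.Chrome()
#   page = BasePage(browser, "http://example.com")
#   page.open()
#   assert page.is_element_present(By.TAG_NAME, "body")
#   browser.quit()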
|
[
"qui_p@tutanota.com"
] |
qui_p@tutanota.com
|
6f54f5939a8fda03c24dfa9d9dbe33c08f498424
|
096ccaca86872b03a137edf58221413073d770cb
|
/helpers/24_web_apis_sources.py
|
0a219f85661a944bd17fb1db67075e5cf05ea372
|
[] |
no_license
|
DH-heima/webscrapping
|
f142962b50deed2628052dd7a48098a4afbcbada
|
1dc8f81f45db0d4366391c3052c5ab36f4d4bc5d
|
refs/heads/master
| 2022-02-02T23:26:22.520064
| 2019-06-13T13:38:10
| 2019-06-13T13:38:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,587
|
py
|
import os
# from ers import shops, COLLECTION_DATE, web_apis_traffic_sources_csv, web_apis_traffic_sources_aggregates_csv
import os.path as op
import numpy as np
import pandas as pd
BASE_DIR = "/code/mhers"
WAVE_NUMBER = 8
shops = pd.read_excel(op.join(BASE_DIR, "ressources/ERS-referential-shops.xlsx"), index_col=None)
COLLECTION_DATE = "2018-06-10"
web_apis_traffic_sources_csv = os.path.join(BASE_DIR,'data/w_{}/final_csvs'.format(WAVE_NUMBER), 'shopgrid_details - web_apis_traffic_sources_w{}.csv'.format(WAVE_NUMBER))
web_apis_traffic_sources_aggregates_csv = os.path.join(BASE_DIR,'data/w_{}/final_csvs'.format(WAVE_NUMBER), 'shopgrid_summary - web_apis_traffic_sources_w{}.csv'.format(WAVE_NUMBER))
# #####################################################################################################################
# web_apis_demographics_csv
# #####################################################################################################################
# This generates the dummy data and shouldn't be in production
mask = pd.DataFrame({'to_delete': [1]})
df = pd.DataFrame()
for c, row in shops.iterrows():
tmp = pd.DataFrame(mask.copy())
for k in ['shop_id', 'continent', 'country', 'region', 'segment']:
tmp[k] = row[k]
df = df.append(tmp)
df.drop(columns=['to_delete'], inplace=True)
# TODO : delete the random data creation and fetch the data in the proper dataset
df['direct'] = np.random.random(size=(df.shape[0], 1)) * 0.3
df['email'] = np.random.random(size=(df.shape[0], 1)) * 0.2
df['referrals'] = np.random.random(size=(df.shape[0], 1)) * 0.2
df['social'] = np.random.random(size=(df.shape[0], 1)) * 0.1
df['paid_search'] = np.random.random(size=(df.shape[0], 1)) * 0.1
df['display_ads'] = np.random.random(size=(df.shape[0], 1)) * 0.1
df['organic_search'] = 1 - df['direct'] - df['email'] - df['referrals'] - df['social'] - df['paid_search'] - df['display_ads']
# Todo : Time Span is the time over which the aggregates are calculated
df['time_span'] = "Apr. 2016 - Aug. 2018"
# Collection date
print('WARNING : PLEASE ENSURE THE COLLECTION_DATE is accurate :', COLLECTION_DATE)
df['collection_date'] = COLLECTION_DATE
final_cols = ['collection_date', 'time_span', 'continent', 'country', 'region', 'segment', 'shop_id', 'direct', 'email',
'referrals', 'social', 'paid_search', 'display_ads', 'organic_search']
df = df[final_cols]
df.to_csv(web_apis_traffic_sources_csv, sep=';', index=False, encoding='utf-8')
print("File web_apis_traffic_sources_csv stored at : ", web_apis_traffic_sources_csv)
# #####################################################################################################################
# web_apis_demographics_aggregates_csv
# #####################################################################################################################
df['region'].fillna("", inplace=True)
# Aggregating
res = []
agregation_levels_list = [
['continent', 'country', 'region', 'segment'],
['continent', 'country', 'segment'],
['continent', 'segment'],
['segment'],
['continent', 'country', 'region'],
['continent', 'country'],
['continent'],
['collection_date']
]
# All aggregations
for agg_level in agregation_levels_list:
dfG2 = df.groupby(agg_level, as_index=False)
dfG2 = dfG2.agg({
'direct': {'direct': 'mean'},
'email': {'email': 'mean'},
'referrals': {'referrals': 'mean'},
'social': {'social': 'mean'},
'paid_search': {'paid_search': 'mean'},
'display_ads': {'display_ads': 'mean'},
'organic_search': {'organic_search': 'mean'},
}).reset_index()
dfG2.columns = dfG2.columns.droplevel(1)
dfG2 = pd.DataFrame(dfG2)
print(agg_level, 'adding', dfG2.shape)
res.append(dfG2)
# Aggregate on all-levels
all_dfs = pd.concat(res, axis=0, ignore_index=True)
# Collection date
print('WARNING : PLEASE ENSURE THE COLLECTION_DATE is accurate :', COLLECTION_DATE)
all_dfs['collection_date'] = COLLECTION_DATE
# Todo : Time Span is the time over which the aggregates are calculated
all_dfs['time_span'] = "Apr. 2016 - Aug. 2018"
final_cols = ['collection_date', 'time_span', 'continent', 'country', 'region', 'segment', 'direct', 'display_ads',
'email', 'organic_search', 'paid_search', 'referrals', 'social']
all_dfs = all_dfs[final_cols]
all_dfs.to_csv(web_apis_traffic_sources_aggregates_csv, sep=';', index=None, encoding='utf-8')
print("File web_apis_traffic_sources_aggregates_csv stored at : ", web_apis_traffic_sources_aggregates_csv, " -")
|
[
"pierre.chevalier@epitech.eu"
] |
pierre.chevalier@epitech.eu
|
06b52d5b79166ba67bb054b31a199bbe635cff9f
|
62fb574c97c16645dc029b60014d48c88c1714df
|
/1 - 9/Problem 8.py
|
49ca444c85e1107eb2aadd0a38c01cab8f952d6d
|
[] |
no_license
|
kadirsefaunal/projectEuler
|
ee16faf3161961d4f2adec2ad5466ed7b3127713
|
710ad4112a4d3b7350f33c206db8baa60b5cf9a8
|
refs/heads/master
| 2021-01-01T06:34:58.613504
| 2017-07-21T13:56:42
| 2017-07-21T13:56:42
| 97,455,744
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
def main():
sayi = "731671765313306249192251196744265747423553491949349698352031277450632623957831801698480186947885184385861560789112949495459501737958331952853208805511125406987471585238630507156932909632952274430435576689664895044524452316173185640309871112172238311362229893423380308135336276614282806444486645238749303589072962904915604407723907138105158593079608667017242712188399879790879227492190169972088809377665727333001053367881220235421809751254540594752243525849077116705560136048395864467063244157221553975369781797784617406495514929086256932197846862248283972241375657056057490261407972968652414535100474821663704844031998900088952434506585412275886668811642717147992444292823086346567481391912316282458617866458359124566529476545682848912883142607690042242190226710556263211111093705442175069416589604080719840385096245544436298123098787992724428490918884580156166097919133875499200524063689912560717606058861164671094050775410022569831552000559357297257163626956188267042825248360082325753"
    # The last 13 digits did not fit on the line.
enBuyuk = 0
    for i in range(0, len(sayi) - 12):  # len - 12 so the final 13-digit window is included
sayilar = []
carpim = 1
for j in range(0, 13):
carpim *= int(sayi[j + i])
sayilar.append(int(sayi[j + i]))
if carpim > enBuyuk:
enBuyuk = carpim
print(sayilar)
print(enBuyuk)
if __name__ == "__main__":
main()
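# A smaller worked example of the sliding-window product (illustrative only):
# for the digits "123456" with a window of 3, the windows are 123, 234, 345
# and 456, with products 6, 24, 60 and 120, so the maximum is 120.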
|
[
"kadirsefau@gmail.com"
] |
kadirsefau@gmail.com
|
08ca6256aa20cb142ea6d49f2471e8c6aa0fec33
|
9021d47bb5a47cfd6704161c7db43585808f1d2b
|
/application.py
|
fe6c6a5c21031d4afd693e55c450d5d1180a3606
|
[] |
no_license
|
Mohamed24444/HW-todo-api
|
456d6ad4b4e75b0dcbc9e5c990703b0c9c961d83
|
9db9efb587c1bb7864893e5811eeda3416d1322e
|
refs/heads/main
| 2023-07-18T06:28:06.760694
| 2021-08-31T20:46:49
| 2021-08-31T20:46:49
| 400,887,854
| 0
| 0
| null | 2021-08-28T20:50:12
| 2021-08-28T20:50:12
| null |
UTF-8
|
Python
| false
| false
| 2,998
|
py
|
import helper
from flask import Flask, request, Response
import json
application = app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/item/new', methods=['POST'])
def add_item():
# Get item from the POST body
req_data = request.get_json()
item = req_data['item']
# Add item to the list
res_data = helper.add_to_list(item)
# Return error if item not added
if res_data is None:
response = Response("{'error': 'Item not added - " + item + "'}", status=400 , mimetype='application/json')
return response
# Return response
response = Response(json.dumps(res_data), mimetype='application/json')
return response
@app.route('/items/all')
def get_all_items():
# Get items from the helper
res_data = helper.get_all_items()
# Return response
response = Response(json.dumps(res_data), mimetype='application/json')
return response
@app.route('/item/status', methods=['GET'])
def get_item():
# Get parameter from the URL
item_name = request.args.get('name')
# Get items from the helper
status = helper.get_item(item_name)
# Return 404 if item not found
if status is None:
response = Response("{'error': 'Item Not Found - %s'}" % item_name, status=404 , mimetype='application/json')
return response
# Return status
res_data = {
'status': status
}
response = Response(json.dumps(res_data), status=200, mimetype='application/json')
return response
@app.route('/item/update', methods=['PUT'])
def update_status():
# Get item from the POST body
req_data = request.get_json()
item = req_data['item']
status = req_data['status']
# Update item in the list
res_data = helper.update_status(item, status)
# Return error if the status could not be updated
if res_data is None:
response = Response("{'error': 'Error updating item - '" + item + ", " + status + "}", status=400 , mimetype='application/json')
return response
# Return response
response = Response(json.dumps(res_data), mimetype='application/json')
return response
@app.route('/item/remove', methods=['DELETE'])
def delete_item():
# Get item from the POST body
req_data = request.get_json()
item = req_data['item']
# Delete item from the list
res_data = helper.delete_item(item)
# Return error if the item could not be deleted
if res_data is None:
response = Response("{'error': 'Error deleting item - '" + item + "}", status=400 , mimetype='application/json')
return response
# Return response
response = Response(json.dumps(res_data), mimetype='application/json')
return response
@app.route('/items/delall', methods=['DELETE'])
def del_all():
# Get items from the helper
res_data = helper.del_all_items()
# Return response
response = Response(json.dumps(res_data), mimetype='application/json')
return response
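# Quick manual checks (assuming the app runs on localhost:5000; the item
# names and statuses below are illustrative):
#
#   import requests
#   requests.post('http://localhost:5000/item/new', json={'item': 'buy milk'})
#   requests.get('http://localhost:5000/item/status', params={'name': 'buy milk'})
#   requests.put('http://localhost:5000/item/update', json={'item': 'buy milk', 'status': 'done'})
#   requests.delete('http://localhost:5000/item/remove', json={'item': 'buy milk'})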
|
[
"noreply@github.com"
] |
Mohamed24444.noreply@github.com
|
9a21127696172a92f9de67e17b6cab869625b037
|
f25a62033ce864f9fd22cf85c9abf92a280fca01
|
/CodersLab/MSP/buffor_23.10.2017/workspace/SCIAGA/D8_MySql_laczenie_relacje/__init__.py
|
43f784fec55793c35338081529aea1f341bac558
|
[] |
no_license
|
AleksandraSandor/Learning-Python
|
e397a68e2a3e1103e537016c65a280c6b9a1a90d
|
387befaadbb84a8d76961893208b504ddeccf0ce
|
refs/heads/master
| 2020-04-11T09:46:23.425149
| 2018-02-04T17:03:43
| 2018-02-04T17:03:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,407
|
py
|
# ===============================================================================
# MySQL: joining tables / relations
# ===============================================================================
from mysql.connector import connect
cnx = connect(user="root", password="1", host="localhost")
cur = cnx.cursor()
sql = "use test_domowy;"
cur.execute(sql)
# cur = cnx.cursor(dictionary=False)
# sql = """create table customers(customer_id int not null auto_increment,
# name varchar(255) not null,
# primary key(customer_id));"""
# cur.execute(sql)
#
# sql = """create table addresses(customer_id int not null,
# street varchar(255),
# primary key(customer_id),
#                               foreign key(customer_id) references customers(customer_id) on delete cascade);""" #<--------- one-to-one
# cur.execute(sql)
#
# sql = """create table orders(order_id int not null auto_increment,
# customer_id int not null,
# order_details varchar(255),
# primary key(order_id),
#                               foreign key(customer_id) REFERENCES customers(customer_id));""" #<--------- one-to-many
# cur.execute(sql)
# #
# sql = """create table items(item_id int not null auto_increment,
# description varchar(255),
# primary key(item_id));"""
# cur.execute(sql)
#
# sql = """create table items_orders(id int auto_increment,
# item_id int not null,
# order_id int not null,
# primary key(id),
#                               foreign key(order_id) REFERENCES orders(order_id), #<--------- junction table: many-to-many
# FOREIGN KEY(item_id) REFERENCES items(item_id))"""
# cur.execute(sql)
# sql = """insert into items_orders(order_id, item_id) VALUES (1,1), (2,1), (2,2);"""
# cur.execute(sql)
# sql = """insert into customers(name) VALUES ("Januszaaa"), ("Kubaaaa"), ("Wojtekkkk");"""
# cur.execute(sql)
# sql = """insert into addresses(customer_id, street) VALUES (1, "ul. jeden"), (2, "ulica dwa");"""
# cur.execute(sql)
# sql = """insert into orders(customer_id, order_details) VALUES (3, "zam1"), (3, "zam2"), (1, "zam3");"""
# cur.execute(sql)
# sql = """insert into items(description) VALUES ("itm1"), ("itm2"), ("itm3");"""
# cur.execute(sql);
# sql = """select * from customers join addresses on customers.customer_id=addresses.customer_id
# where customers.customer_id=2;"""
# cur.execute(sql)
# for row in cur:
# print(row)
# sql = """select * from customers join orders on customers.customer_id=orders.customer_id
# where customers.customer_id=3;"""
# cur.execute(sql)
# for row in cur:
# print(row)
# sql = """select * from orders join items_orders on orders.order_id=items_orders.order_id;"""
# cur.execute(sql)
# for row in cur:
# print(row)
# insert into addresses(customer_id, street) values (6,"ul. zaciszna"); # <--- 6: only inserts the address if that ID exists in customers
# delete from customers where customer_id = 6; # <--- this also deleted the address (on delete cascade)
# ===============================================================================
# Formatting a query
# ===============================================================================
a = "user_name"
b = "user_id"
i = 1
ii = 6
sql = """"select {}, {} from user where user_id = %s or user_id = %s""".format(b, a) #/todo dlaczegoto nie działą 1
cur.execute(sql,(i, ii))
for row in cur:
print(row)
cnx.commit()
cur.close()
cnx.close()
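# Note: %s placeholders can only bind values, never identifiers (column or
# table names), which is why format() is needed above. A minimal sketch of
# guarding that interpolation against injection:
ALLOWED_COLUMNS = {"user_id", "user_name"}
def build_select(*columns):
    if not set(columns) <= ALLOWED_COLUMNS:
        raise ValueError("unexpected column name")
    return "select {} from user where user_id = %s".format(", ".join(columns))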
|
[
"wojciech.gaudnik@gmail.com"
] |
wojciech.gaudnik@gmail.com
|
39784b00ffbcca86d07d92e619feaf2a638306a7
|
ff81b6f0e467db2dde8afdf8370fae4bae179460
|
/flaskapp-docker/flaskapp/setup.py
|
d9ae9d271b4cf8c9b708fd4af8d5733387fb17a4
|
[
"MIT"
] |
permissive
|
hammadyasir/my_flask_docker
|
c1ca66bf0be482de82e8fc4c9018fbb926524bc0
|
ca142c0bac4ff99e6765630fb4b38e09142fd30a
|
refs/heads/main
| 2023-06-27T06:28:02.711996
| 2021-07-22T14:03:46
| 2021-07-22T14:03:46
| 388,434,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
from setuptools import setup
setup(
name='project',
packages=['project'],
include_package_data=True,
install_requires=[
'flask',
],
)
|
[
"49805944+hammadyasir@users.noreply.github.com"
] |
49805944+hammadyasir@users.noreply.github.com
|
8a53b8e8eadc3fdb0cd8371ce351adebce79def2
|
0fc6370708a3407255a667f29095e287db2fb454
|
/MockGvh/agentThread.py
|
fc31b2f25b3fd0f9404ea200f569b5ab4b715599
|
[] |
no_license
|
jzw2/KoordLanguage
|
257af6a8175319cec5369126e168708bc5934baa
|
d0c56c0124c70e9dc61886080f09ffae7da9583a
|
refs/heads/master
| 2020-05-30T08:28:30.936176
| 2019-07-26T19:13:33
| 2019-07-26T19:20:40
| 187,888,056
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,651
|
py
|
import random
from abc import ABC, abstractmethod
class Pos():
pass
class AgentThread(ABC):
def __init__(self, config):
self._pid = config.pid
self._num_agents = config.num_agents
self._pos = (random.randrange(1, 10), random.randrange(1, 10))
self.shared_vars = config.shared_vars
@abstractmethod
def loop_body(self):
pass
@abstractmethod
def initialize_vars(self):
pass
def pos3d(self, x, y, z):
pass
def write_to_shared(self, var_name, index, value):
self.shared_vars[var_name][index] = value
def read_from_shared(self, var_name, index):
return self.shared_vars[var_name][index]
def read_from_sensor(self, var_name):
if var_name == "Motion.position":
return self._pos
def write_to_actuator(self, var_name, value):
if var_name == "Motion.target":
self._pos = value
def create_ar_var(self, name, type, initial_value=None):
if name not in self.shared_vars:
self.shared_vars[name] = [initial_value] * self._num_agents
def create_aw_var(self, name, type, initial_value=None):
if name not in self.shared_vars:
self.shared_vars[name] = [initial_value] * self._num_agents
def log(self, message):
pass
def pid(self):
return self._pid
def num_agents(self):
return self._num_agents
def start(self):
pass
def run(self):
for i in range(10):
self.loop_body()
def midpoint(self, x, y):
a, b = x
c, d = y
return (a + c) / 2, (b + d) / 2
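# A minimal concrete agent, sketched for illustration. The _DemoConfig class
# is hypothetical -- it just mirrors the attributes __init__ reads
# (pid, num_agents, shared_vars); the real Koord runtime supplies its own.
class _DemoConfig:
    def __init__(self, pid, num_agents):
        self.pid = pid
        self.num_agents = num_agents
        self.shared_vars = {}

class _DemoAgent(AgentThread):
    def initialize_vars(self):
        self.create_aw_var('target', tuple)

    def loop_body(self):
        # Drift halfway toward a fixed point each round.
        self.write_to_actuator('Motion.target',
                               self.midpoint(self.read_from_sensor('Motion.position'), (5, 5)))

# _DemoAgent(_DemoConfig(pid=0, num_agents=1)).run()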
|
[
"hc825b@gmail.com"
] |
hc825b@gmail.com
|
f7e2098e769e91b1838c62aee43d87264b9aa9cb
|
052d6ac57f2026aba22249368149b18027c78342
|
/frontstage_api/resources/register/confirm_organisation_survey.py
|
6331b7150306a3ab3887cebaf9c1d5eb733780ca
|
[
"MIT"
] |
permissive
|
ONSdigital/ras-frontstage-api
|
c34b41185cc825b49262c1879ad559778a54dbfc
|
7bb32a85868e2a241b8a0331b884155a36450669
|
refs/heads/master
| 2018-07-15T00:35:22.130352
| 2018-06-01T14:09:13
| 2018-06-01T14:09:13
| 105,001,932
| 2
| 1
|
MIT
| 2018-06-01T14:09:14
| 2017-09-27T09:54:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,917
|
py
|
import logging
from flask import jsonify, make_response, request
from flask_restplus import Resource, fields
from structlog import wrap_logger
from frontstage_api import auth, register_api
from frontstage_api.controllers import case_controller, collection_exercise_controller, iac_controller, party_controller, survey_controller
logger = wrap_logger(logging.getLogger(__name__))
enrolment_details = register_api.model('EnrolmentDetails', {
'enrolment_code': fields.String(required=True),
})
@register_api.route('/confirm-organisation-survey')
class ConfirmOrganisationSurvey(Resource):
@staticmethod
@auth.login_required
@register_api.expect(enrolment_details, validate=True)
def post():
logger.info('Attempting to retrieve organisation and survey data')
enrolment_code = request.get_json().get('enrolment_code')
# Verify enrolment code is active
iac = iac_controller.get_iac_from_enrolment(enrolment_code)
if not iac['active']:
return make_response(jsonify(iac), 401)
# Get organisation name
case = case_controller.get_case_by_enrolment_code(enrolment_code)
business_party_id = case['caseGroup']['partyId']
organisation_name = party_controller.get_party_by_business_id(business_party_id).get('name')
# Get survey name
collection_exercise_id = case['caseGroup']['collectionExerciseId']
collection_exercise = collection_exercise_controller.get_collection_exercise(collection_exercise_id)
survey_id = collection_exercise['surveyId']
survey_name = survey_controller.get_survey(survey_id).get('longName')
response_json = {
"organisation_name": organisation_name,
"survey_name": survey_name
}
logger.info('Successfully retrieved organisation and survey data')
return make_response(jsonify(response_json), 200)
|
[
"noreply@github.com"
] |
ONSdigital.noreply@github.com
|
6c47ab3f00e510c29034f7c38073fbb1375a1270
|
2672228cd79938f112802a7d3c6209e907c46935
|
/testsuite/tests/ui/devel/test_devel_smoke.py
|
3a477c44d2a2e96d67c821889012d437d2b338e3
|
[
"Apache-2.0"
] |
permissive
|
Hchrast/3scale-tests
|
82233d4015fc5ec9f1cad82ce411e6d48f3c056f
|
ab64592f1438a6cb878b81897164a0e495fed961
|
refs/heads/main
| 2023-08-27T21:04:35.108744
| 2021-11-08T10:08:59
| 2021-11-08T10:08:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
"""Developer portal smoke tests"""
import pytest
from testsuite import settings
from testsuite.ui.views.admin.audience import BaseAudienceView
from testsuite.ui.views.devel import BaseDevelView, AccessView, LandingView
@pytest.fixture(scope="module")
def provider_account(provider_account):
"""Fixture returns Provider account.
If `site_access_code` was changed in tests, it is restored to its original value"""
access_code = provider_account['site_access_code']
yield provider_account
provider_account.update(dict(site_access_code=access_code))
# pylint: disable=unused-argument
@pytest.mark.smoke
def test_devel_from_admin(login, navigator, browser):
"""Tests if developer portal is accessible via navigation menu (Developer portal > Visit Portal)"""
audience = navigator.navigate(BaseAudienceView)
with browser.new_tab(audience.visit_portal):
assert settings["threescale"]["devel"]["url"] in browser.url
view = LandingView(browser)
view.post_navigate()
assert view.is_displayed
# pylint: disable=unused-argument
@pytest.mark.smoke
def test_devel_login(devel_login, browser):
"""Tests simple developer portal login"""
assert BaseDevelView(browser).is_displayed
@pytest.mark.smoke
def test_empty_access_code(browser, provider_account):
"""Test developer portal accessibility when `site_access_code` is empty"""
browser.selenium.delete_all_cookies()
browser.url = settings["threescale"]["devel"]["url"]
assert AccessView(browser).is_displayed
provider_account.update(dict(site_access_code=""))
browser.selenium.refresh()
assert LandingView(browser).is_displayed
|
[
"jsmolar@redhat.com"
] |
jsmolar@redhat.com
|
2c635e67dd79f81bcb26276f01d417fc9ebf8127
|
8baf2a9f9b11117e979b6629b8bbb8d7f3395f9a
|
/iohelp.py
|
eb5e6a94c52c9731850f20db1987bd588fbedb9d
|
[
"MIT"
] |
permissive
|
holzfigure/hiveopolis_broodframe_background
|
b28de8b231e0299c886d8776e4e3ba18040dcc37
|
dfe89300c00b0d459f71132b464092ec5d1ce656
|
refs/heads/master
| 2022-03-14T06:57:51.939327
| 2019-11-27T06:19:26
| 2019-11-27T06:19:26
| 219,782,785
| 0
| 0
|
MIT
| 2019-11-05T16:31:46
| 2019-11-05T15:47:36
| null |
UTF-8
|
Python
| false
| false
| 22,641
|
py
|
#!/usr/bin/env python3
"""A library of helpful functions.
Notably to set up output-folders safely, with time-stamped copies
of the source code included.
holzfigure 2019
"""
# import os
# import csv
import time
import math
import shutil
# import argparse
import logging
import logging.handlers
from pathlib import Path
from datetime import datetime # , timedelta
import tkinter
from tkinter import Tk, filedialog
# import numpy as np
import matplotlib
from matplotlib import pyplot as plt
# NOTE: module "imp" is deprecated..
import warnings
warnings.filterwarnings('error', category=DeprecationWarning)
# # GLOBALS
POSTFIX_DIR = "out"
TIME_FMT = "%y%m%d-%H%M%S-utc"
DAY_FMT = "%y%m%d-utc"
# Logging
LOG_MAX_BYTES = 20000000 # ~ 20 MB
LOG_BACKUP_COUNT = 50
# Plotting
DEF_EXT = "png"
DEF_WF = 3.0
COLORMAP = plt.cm.viridis
# colormap = plt.cm.viridis
# colormap = plt.cm.jet
# colormap = plt.cm.gist_ncar
# colormap = plt.cm.Set1
def now_str(pattern=TIME_FMT):
"""Return a formatted timestring for the current time."""
# return time.strftime(pattern, time.gmtime())
return datetime.utcnow().strftime(pattern)
def parse_subtree(filedir, pattern):
"""Parse a subtree (including subfolders) for the pattern.
from:
https://stackoverflow.com/questions/2186525/
use-a-glob-to-find-files-recursively-in-python
+ sorting
[requires 'import fnmatch']
Deprecated since using pathlib! (v180817)
"""
# matches = []
# for root, dirnames, filenames in os.walk(filedir):
# for filename in fnmatch.filter(filenames, pattern):
# matches.append(os.path.join(root, filename))
# return sorted(matches)
filedir = Path(filedir).resolve()
return sorted(filedir.rglob(pattern))
def safename(s, s_type="file"):
"""Append stuff to a file or folder if it already exists.
Check whether a given file or folder 's' exists, return a non-existing
filename.
s ........ (full) filename or directory
s_type ... 'file' or 'f' for files,
'directory' or 'dir' or 'd' for folders
Returns a file- or pathname that is supposedly safe to save
without overwriting data.
"""
# Ensure s is a Path object
p = Path(s)
low_type = str.lower(s_type)
if low_type == "file" or low_type == "f":
# if os.path.isfile(s):
if p.is_file():
stem = p.stem
suffix = p.suffix
counter = 0
while p.is_file():
# p = p.with_name(f"{stem}-{counter:02d}{suffix}")
p = p.with_name("{}-{:02d}{}".format(stem, counter, suffix))
counter += 1
elif low_type == "directory" or low_type == "dir" or low_type == "d":
if p.is_dir():
stem = p.stem
counter = 0
while p.is_dir():
# s = s_base + "-{:02d}".format(counter)
# p = p.with_name(f"{stem}-{counter:02d}")
p = p.with_name("{}-{:02d}".format(stem, counter))
counter += 1
return p
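# A minimal usage sketch for safename() (paths are hypothetical):
#   safename(Path("out/report.txt"), "file") -> out/report.txt if free,
#       otherwise out/report-00.txt, out/report-01.txt, ...
#   safename(Path("out/run"), "dir")         -> out/run, else out/run-00, ...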
def safesavefig(path, ext=".png", close=True, verbose=False):
"""Safely save a figure from pyplot.
adapted from:
http://www.jesshamrick.com/2012/09/03/saving-figures-from-pyplot/
# plt.gcf().canvas.get_supported_filetypes()
# plt.gcf().canvas.get_supported_filetypes_grouped()
filetypes = {
'ps': 'Postscript',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics',
'jpg': 'Joint Photographic Experts Group',
'jpeg': 'Joint Photographic Experts Group',
'tif': 'Tagged Image File Format',
'tiff': 'Tagged Image File Format'
}
180817 Added a '.' to the default extension to be compatible
with path.suffix
"""
valid_extensions = plt.gcf().canvas.get_supported_filetypes()
fallback_ext = ".png"
# Ensure path is a pathlib.Path object
path = Path(path)
# Parse path components
directory = path.parent
stem = path.stem
suffix = path.suffix
# Check whether path already has an extension
if suffix:
if suffix in valid_extensions:
if suffix != ext:
logging.debug(f"Overwriting kwarg ext '{ext}' " +
f"with suffix '{suffix}' from {path}!")
ext = suffix
else:
logging.debug(f"Overwriting file suffix '{suffix}' "
f"with kwarg ext '{ext}'!")
# Ensure extension is correct
ext = ext.lower()
if not ext.startswith("."):
logging.debug(f"Adding '.' to {ext}")
ext = f".{ext}"
if ext.split(".")[-1] not in valid_extensions:
logging.warning(f"Invalid extension '{ext}', " +
f"replacing with '{fallback_ext}'")
ext = fallback_ext
# Generate filename
# filename = "%s.%s" % (os.path.split(path)[1], ext)
filename = stem + ext
# Ensure valid directory
if not directory:
directory = Path.cwd()
directory = directory.resolve()
if not directory.is_dir():
directory.mkdir(parents=True)
# Finalize full filename
# savepath = os.path.join(directory, filename)
savepath = directory / filename
savepath = safename(savepath, 'file')
# Save figure to file
# TODO: Remove str() once matplotlib is updated??
plt.savefig(str(savepath))
if verbose:
logging.info(f"Saved figure to {savepath}")
if close:
plt.close()
# if verbose:
# logging.debug("Done")
return savepath
def latexify(fig_width=None, fig_height=None, columns=1):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
Code adapted from
http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
Width and max height in inches for IEEE journals taken from
https://www.computer.org/cms/Computer.org/Journal%20templates/
transactions_art_guide.pdf
from https://nipunbatra.github.io/blog/2014/latexify.html
(v180817: updated this link)
"""
assert(columns in [1, 2])
if fig_width is None:
fig_width = 3.39 if columns == 1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (math.sqrt(5) - 1.0) / 2.0 # aesthetic ratio
fig_height = fig_width * golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
params = {'backend': 'ps',
'text.latex.preamble': ['\\usepackage{gensymb}'],
'axes.labelsize': 6, # fontsize for x and y labels (was 10)
'axes.titlesize': 6,
'font.size': 6, # 'text.fontsize': 8, # was 10
'legend.fontsize': 6, # was 10
'xtick.labelsize': 6,
'ytick.labelsize': 6,
'text.usetex': True,
'figure.figsize': [fig_width, fig_height],
'font.family': 'serif'
}
matplotlib.rcParams.update(params)
def format_axes(ax):
"""Format axes."""
spine_color = 'gray'
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
for spine in ['left', 'bottom']:
ax.spines[spine].set_color(spine_color)
ax.spines[spine].set_linewidth(0.5)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_tick_params(direction='out', color=spine_color)
return ax
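# A minimal usage sketch for latexify()/format_axes() (requires a working
# LaTeX installation, since latexify() sets text.usetex=True):
#   latexify(columns=1)
#   fig, ax = plt.subplots()
#   ax.plot([0, 1], [0, 1])
#   format_axes(ax)
#   safesavefig(Path("out/latex_demo"), ext=".pdf")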
# # The following functions require numpy:
# def euclid(p1, p2):
# return np.sqrt((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)
#
# def euclid_array(p1s, p2s):
# """The inputs "p1s" and "p2s" are 2-column arrays
# of XY-coordinates.
# """
# return np.sqrt((p2s[:, 0] - p1s[:, 0])**2 +
# (p2s[:, 1] - p1s[:, 1])**2)
def setup_environment(
thisfile,
dir_targ=None,
level=1,
new_dir=True,
prefix_file=None,
postfix_dir=None,
daystamp=False, day_fmt=DAY_FMT,
dependencies=None,
):
"""Create an output directory safely.
No overwriting of existing files and folders.
in (optional):
dir_targ .... str full path to a directory. def=""
level .... int (-1, [0], 1) are the choices.
[-1: dir-out is sibling to the given directory
0: dir_out is the given directory
CAUTION: will only be this directory if
new_dir=False and postfix=None!
DEFAULT: 1: dir_out is child of the given directory]
new_dir .... bool if True, create a new directory, even if
one already exists
if False, write into an existing
directory with the given name
thisfile .... bool if True, get full path to current file
[if i call "os.path.basename(__file__)" here,
will i get the path to the calling code,
or to this file 'holzhelp.py'?]
prefix_file .... str prefix file with this (and '_')
postfix_dir .... str append to name of output-folder.
dependencies ... list paths to other files to copy to dir_out
out:
dir_out .... str full path to created output directory
"""
# Set up directories
# ==================
# # if interactive:
# dir_targ = filedialog.askdirectory(initialdir=DIR_INI)
# thisfile = os.path.basename(__file__)
# thisname = os.path.splitext(os.path.split(thisfile)[1])[0]
thisfile = Path(thisfile).resolve()
thisname = thisfile.stem
if prefix_file:
# thisname = f"{prefix_file}_{thisname}"
thisname = "{}_{}".format(prefix_file, thisname)
if not dir_targ:
# dir_targ = os.path.join(os.getcwd(), postfix)
# dir_targ = os.getcwd()
# dir_targ = Path.cwd() / f"{thisname}_{postfix_dir}"
dir_targ = Path.cwd()
else:
dir_targ = Path(dir_targ)
# determine level to place directory
if level < 0:
# basedir, lastdir = os.path.split(dir_targ)
# os.path.join(basedir, thisname)
# dir_out = dir_targ.with_name(f"{dir_targ.stem}_{thisname}")
dir_out = dir_targ.with_name("{}_{}".format(
dir_targ.stem, thisname))
elif level == 0:
# NOTE: only stays if new_dir=False and postfix=None!
dir_out = dir_targ
elif level > 0:
# dir_out = os.path.join(dir_targ, thisname)
dir_out = dir_targ / thisname
if postfix_dir:
# dir_out += "_" + postfix_dir
# dir_out = dir_out.with_name(f"{dir_out.stem}_{postfix_dir}")
dir_out = dir_out.with_name("{}_{}".format(
dir_out.stem, postfix_dir))
if daystamp:
# dir_out += now_str("_%y%m%d-utc")
# dir_out = dir_out.with_name(f"{dir_out.stem}_{now_str(day_fmt)}")
dir_out = dir_out.with_name("{}_{}".format(
dir_out.stem, now_str(day_fmt)))
if new_dir:
dir_out = safename(dir_out, 'directory')
if not dir_out.is_dir():
# os.mkdir(dir_out)
dir_out.mkdir(parents=True)
# logging.info("created output directory at '{}'".format(dir_out))
# logwarn = []
# else:
# logwarn = ("output directoy already exists, " +
# "error in function safename()")
# copy files to output-directory
src_out = dir_out / "src"
if not src_out.is_dir():
src_out.mkdir()
print(f"Created folder '{src_out}'")
if not dependencies:
dependencies = []
dependencies.append(thisfile)
for filename in dependencies:
# path, fname = os.path.split(filename)
# name, ext = os.path.splitext(fname)
# path = filename.parent
filename = Path(filename).resolve()
name = filename.stem
suffix = filename.suffix
if prefix_file:
# name = f"{prefix_file}_{name}"
name = "{}_{}".format(prefix_file, name)
# thatfile = os.path.join(
# dir_out, name + now_str() + ext)
# thatfile = dir_out / f"{name}_{now_str()}{suffix}"
thatfile = src_out / "{}_{}{}".format(name, now_str(), suffix)
thatfile = safename(thatfile, 'file')
# TODO: Replace this with a proper pathlib method once?
# And remove the 'str()' once Raspbian is n Python 3.6..
shutil.copy2(str(filename), str(thatfile))
# this_split = os.path.splitext(thisfile)
# thatfile = os.path.join(
# dir_out, this_split[0] + now_str() + this_split[1])
# thatfile = safename(thatfile, 'file')
# shutil.copy2(thisfile, thatfile)
return dir_out, thisname # , logwarn
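# A minimal usage sketch (directory names are illustrative):
#   dir_out, thisname = setup_environment(__file__, postfix_dir="run", daystamp=True)
# creates <cwd>/<script-stem>_run_<yymmdd-utc>/ (suffixed -00, -01, ... if it
# already exists), copies a time-stamped snapshot of the source into its src/
# folder, and returns the new directory together with the script's stem.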
def setup_logging(
thisname,
args,
dir_log=None,
max_bytes=LOG_MAX_BYTES,
backup_count=LOG_BACKUP_COUNT,
):
"""Set up the logging module to log to a file.
Rotate logfiles if they are bigger than LOG_MAX_BYTES.
https://docs.python.org/3/howto/logging-cookbook.html
"""
err_msg = []
if dir_log is None:
# dir_log = os.path.join(os.getcwd(), "DIR_LOG")
dir_log = Path.cwd() / "log"
dir_log = safename(dir_log, 'dir')
if not dir_log.is_dir():
try:
dir_log.mkdir(parents=False)
except Exception as err:
# err_msg.append(
# f"Failed to create directory {dir_log}\n" +
# f"Error: {err}\n" +
# "Now creating full path...")
err_msg.append((
"Failed to create directory {}\n" +
"Error: {}\n" +
"Now creating full path...").format(dir_log, err))
dir_log.mkdir(parents=True)
# log_path = os.path.join(LOC_PATH, "logs")
# thisfile = os.path.basename(__file__)
# logfile = safename(os.path.join(
# dir_log, "{}_{}.log".format(thisname, now_str())), 'file')
logfile = safename(
# (dir_log / f"{thisname}_{now_str()}.log"), 'file')
(dir_log / "{}_{}.log".format(thisname, now_str())), 'file')
# logfile = safename(logfile, 'file')
# logname = thisfile[0:-3] + '.log' # + now_str() + '.log'
if args.debug:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
# logging.basicConfig(
# level=loglevel,
# format=("%(asctime)s %(levelname)-8s " +
# "%(funcName)-12s: %(message)s"),
# datefmt='%y-%m-%d %H:%M:%S UTC',
# filename=logfile,
# filemode='a')
# # logging.basicConfig(filename=logfile, level=logging.INFO)
# logging.debug("logging to file {}".format(logfile))
# Set level
logging.getLogger('').setLevel(loglevel)
# All times in UTC
logging.Formatter.converter = time.gmtime
# format=('%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
# Rotating logs
# https://docs.python.org/2/howto/
# logging-cookbook.html#using-file-rotation
# Add the log message handler to the logger
# TODO: Remove the "str()" once RPIs have Python3.6
rotater = logging.handlers.RotatingFileHandler(
str(logfile),
mode='a',
maxBytes=max_bytes,
backupCount=backup_count)
# encoding=None,
# delay=0)
# rotater.setLevel(loglevel)
rotate_formatter = logging.Formatter(
fmt="%(asctime)s %(levelname)-8s %(funcName)-12s: %(message)s",
datefmt='%y-%m-%d %H:%M:%S UTC')
rotater.setFormatter(rotate_formatter)
logging.getLogger('').addHandler(rotater)
# if not cron:
# Define a Handler which writes INFO messages or
# higher to the sys.stderr
console = logging.StreamHandler()
# console.setLevel(loglevel) # (logging.INFO)
# Set a format which is simpler for console use
# formatter = logging.Formatter(
# '%(name)-12s: %(levelname)-8s %(message)s')
console_formatter = logging.Formatter(
"%(levelname)-8s: %(message)s")
# Tell the handler to use this format
console.setFormatter(console_formatter)
# Add the handler to the root logger
logging.getLogger('').addHandler(console)
if len(err_msg) > 0:
for msg in err_msg:
logging.warning(msg)
logging.debug("Logging to screen and to {}".format(logfile))
# return dir_log
def select_files(title="select file(s)", dir_ini=None,
filetypes=[("all files", "*")],
more=False):
"""Interactively pick a file (actually its path-string).
If 'more=True', a tuple of files will be returned.
see:
http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/tkFileDialog.html
http://www.programcreek.com/python/example/4281/
tkFileDialog.askopenfilename
http://effbot.org/tkinterbook/tkinter-file-dialogs.htm
Not mentioned in the above refs is ".askopenfilenames()",
which takes the same options but returns a tuple of
selected files.
>> dir(filedialog)
['ACTIVE', 'ALL', 'ANCHOR', 'ARC', 'BASELINE', 'BEVEL', 'BOTH',
'BOTTOM', 'BROWSE', 'BUTT', 'BaseWidget', 'BitmapImage', 'BooleanVar',
'Button', 'CASCADE', 'CENTER', 'CHAR', 'CHECKBUTTON', 'CHORD', 'COMMAND',
'CURRENT', 'CallWrapper', 'Canvas', 'Checkbutton', 'DISABLED', 'DOTBOX',
'Dialog', 'Directory', 'DoubleVar',
'E', 'END', 'EW', 'EXCEPTION', 'EXTENDED', 'Entry', 'Event', 'EventType',
'FALSE', 'FIRST', 'FLAT', 'FileDialog', 'Frame', 'GROOVE', 'Grid',
'HIDDEN', 'HORIZONTAL', 'INSERT', 'INSIDE', 'Image', 'IntVar',
'LAST', 'LEFT', 'Label', 'LabelFrame', 'Listbox', 'LoadFileDialog',
'MITER', 'MOVETO', 'MULTIPLE', 'Menu', 'Menubutton', 'Message',
'Misc', 'N', 'NE', 'NO', 'NONE', 'NORMAL', 'NS', 'NSEW, 'NUMERIC',
'NW', 'NoDefaultRoot', 'OFF', 'ON', 'OUTSIDE', 'Open', 'OptionMenu',
'PAGES', 'PIESLICE', 'PROJECTING', 'Pack', 'PanedWindow', 'PhotoImage',
'Place', 'RADIOBUTTON', 'RAISED', 'READABLE', 'RIDGE', 'RIGHT', 'ROUND',
'Radiobutton', 'S', 'SCROLL', 'SE', 'SEL', 'SEL_FIRST', 'SEL_LAST',
'SEPARATOR', 'SINGLE', 'SOLID', 'SUNKEN', 'SW',
'SaveAs', 'SaveFileDialog', 'Scale', 'Scrollbar', 'Spinbox', 'StringVar',
'TOP', 'TRUE', 'Tcl', 'TclError', 'TclVersion', 'Text', 'Tk', 'TkVersion',
'Toplevel', 'UNDERLINE', 'UNITS', 'VERTICAL', 'Variable', 'W', 'WORD',
'WRITABLE', 'Widget', 'Wm', 'X', 'XView', 'Y', 'YES', 'YView', '_Dialog',
'__builtins__', '__cached__', '__doc__', '__file__', '__loader__',
'__name__', '__package__', '__spec__',
'askdirectory', 'askopenfile', 'askopenfilename', 'askopenfilenames',
'askopenfiles', 'asksaveasfile', 'asksaveasfilename', 'commondialog',
'constants', 'dialogstates', 'enum', 'fnmatch',
'getboolean', 'getdouble', 'getint', 'image_names', 'image_types',
'mainloop', 'os', 're', 'sys', 'test', 'wantobjects']
"""
# Resolve initial directory
if not dir_ini: # or not dir_ini.is_dir():
dir_ini = Path.cwd()
else:
dir_ini = Path(dir_ini).resolve()
# Include this to make the crappy empty window go away
root = Tk()
root.withdraw()
print(f"tkinter version: {tkinter.TkVersion}")
# Set options
opts = {}
opts["parent"] = root
opts["title"] = title
opts["initialdir"] = dir_ini
opts['filetypes'] = filetypes
# Check whether single file or tuple of files is requested
if more:
# tuple of full filenames (paths)
# ffn_return = tkFileDialog.askopenfilenames(**opts)
ffn_return = filedialog.askopenfilenames(**opts)
if len(ffn_return) > 0:
ffn_return = [Path(ffn) for ffn in ffn_return]
else:
# String of full filename (path)
# ffn_return = tkFileDialog.askopenfilename(**opts)
ffn_return = filedialog.askopenfilename(**opts)
if ffn_return:
ffn_return = Path(ffn_return)
# If cancelled, return None
if not ffn_return:
return None
# Return full filename(s)
return ffn_return
def select_directory(title="select directory", dir_ini=None):
"""Interactively retrieve the path to a directory."""
# include this to make the crappy empty window go away
root = Tk()
root.withdraw()
print(f"tkinter version: {tkinter.TkVersion}")
# open directory dialog
# dir_select = tkFileDialog.askdirectory(
dir_select = filedialog.askdirectory(
parent=root,
title=title,
initialdir=dir_ini)
# check cancel or false directoy
if not dir_select:
print("Cancelled by user, returning 'None'")
return None
else:
dir_select = Path(dir_select)
if not dir_select.is_dir():
print(f"Directory '{dir_select}' doesn't exist, returning 'None'")
return None
# return full path of selected diretory
return dir_select
def main():
"""Mock main-function.
Write test cases here.
"""
# setup environment
# thisfile = os.path.basename(__file__)
# dir_out = setup_environment(thisfile, postfix=postfix)
pass
if __name__ == "__main__":
main()
|
[
"daniel.hofstadler@uni-graz.at"
] |
daniel.hofstadler@uni-graz.at
|
135b320f762c1a45836db74f49c2c6bc2fe6c8fe
|
d5ed53c58da60caba413869572f7c4abb0e5f666
|
/class_factorial.py
|
89845687e40e9b96da1996073fc246e5a315afcc
|
[] |
no_license
|
Me-Pri/Python-programs
|
e8269f06ef30e7360fda1fa4398577d4845b8fa6
|
96fa0d17e4de4a21640105b6387a483de652987f
|
refs/heads/main
| 2023-01-31T14:04:33.327260
| 2020-12-17T06:38:54
| 2020-12-17T06:38:54
| 322,203,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
class factorial:
    no = 0
    fact = 1
    def input(self):
        self.no = int(input("Enter a number: "))
    def factor(self):
        for i in range(1, self.no + 1):
            self.fact = self.fact * i
        print("The factorial of {0} is {1}".format(self.no, self.fact))
fac = factorial()
fac.input()
fac.factor()
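# For reference, the standard library provides the same computation directly
# (sketch only):
# import math
# print("The factorial of {0} is {1}".format(5, math.factorial(5)))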
|
[
"noreply@github.com"
] |
Me-Pri.noreply@github.com
|
e1bb0830c777662e0bc15e9c46b3a760597a28be
|
1e710f5ddc4d4b89ff601412e4a7e97d92423fe3
|
/lesson_1/example_4.py
|
609ef451896008c74c8f80517aaf02d220e649b0
|
[] |
no_license
|
Evgeniy-Nagornyy/Python_1
|
b8b176eafa83d7c7962ee244a9cecef4bbb0b601
|
8ce10ea075f5336460e85a6d75e89e28f377d19f
|
refs/heads/master
| 2022-10-22T09:23:52.062377
| 2020-06-10T13:27:23
| 2020-06-10T13:27:23
| 264,262,317
| 0
| 0
| null | 2020-06-09T23:32:21
| 2020-05-15T17:54:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
line_numbers = int(input('Enter a positive number - '))
check_number = line_numbers % 10  # digit currently under test for the maximum (a do-while loop would really help here)
check_line_number = str(check_number)  # accumulated number used to decide when to leave the loop
max_number = 0  # largest digit found so far
i = 1  # position counter
if line_numbers == 0:  # in case 0 is entered
    print(max_number)
else:
    while int(check_line_number) <= line_numbers:
        check_number = ((line_numbers % (10 ** i)) // (10 ** (i - 1)))  # move on to the next digit
        if check_number == 9:  # no digit can beat 9, so stop early
            max_number = check_number
            break
        elif check_number > max_number:  # compare against the current maximum
            max_number = check_number
        check_line_number = str(check_number) + check_line_number  # build up the number for the loop-exit check
        i += 1
    print(f'The largest digit in the number is {max_number}')
# second ("forbidden") variant
# line_numbers = input('Enter a positive number - ')
# print(f'The largest digit in the number is {max([int(line_numbers[i]) for i in range(len(line_numbers))])}')
|
[
"Evgeniy_kott@mail.ru"
] |
Evgeniy_kott@mail.ru
|
94ebbf777a68a1f870eb8d7132960630fcd5f534
|
7573bbf969a0b90ba9015a35b0ab59c29db4688f
|
/architectures/cnn_utils.py
|
49b39fa00c4c11420eafaf715574aa4f712894ee
|
[] |
no_license
|
Huetous/pytoss
|
525adf877e5b6d1e089e718a814d0645a2890b12
|
329ac86bc7f6289cba6e25c0b9df6cfc0eb00eb8
|
refs/heads/master
| 2023-05-05T21:26:13.495008
| 2021-05-27T02:30:42
| 2021-05-27T02:30:42
| 335,869,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
import torch.nn as nn
def init_cnn(module):
if getattr(module, "bias", None) is not None:
nn.init.constant_(module.bias, 0)
if isinstance(module, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(module.weight)
for m in module.children():
init_cnn(m)
def conv(n_in, nf, ks=3, stride=1, bias=False):
return nn.Conv2d(n_in, nf, kernel_size=ks,
stride=stride, padding=ks // 2, bias=bias)
def conv_layer(n_in, n_out, kernel_size=3, stride=1, bn=True, zero_bn=False, act=True):
layers = [conv(n_in, n_out, kernel_size, stride)]
if bn:
bn = nn.BatchNorm2d(n_out)
nn.init.constant_(bn.weight, 0. if zero_bn else 1.)
layers.append(bn)
if act:
layers.append(nn.ReLU())
return nn.Sequential(*layers)
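# A minimal usage sketch (channel sizes are arbitrary):
# stem = nn.Sequential(
#     conv_layer(3, 32, stride=2),        # downsampling conv + BN + ReLU
#     conv_layer(32, 64, zero_bn=True),   # zero-init BN scale, bag-of-tricks style
# )
# init_cnn(stem)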
|
[
"daddudota22@mail.ru"
] |
daddudota22@mail.ru
|
732875b53794690f71a61540b24b81ee5edee29e
|
75e2e9a6fcb2962a9502bf5c3237db35fd62d9da
|
/web_indexer_starter.py
|
a50c9220eb694b1f06745e40104181050539bf12
|
[] |
no_license
|
undefinedobjects/web_indexer.py
|
6a41508fe050b58858b5e9645e41c7adde1f7f96
|
c031a29fd8ba7930b7be36bdcf1b9a3d1dc5c1a7
|
refs/heads/master
| 2023-03-02T23:28:24.635419
| 2021-02-15T11:21:51
| 2021-02-15T11:21:51
| 272,031,613
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
import os
for number in range(90,100):
start = "172." + str(number) + ".0.0"
end = "172." + str((number + 1)) + ".0.0"
os.system("start web_indexer.py -s " + start + " -e " + end)
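# A portable sketch of the same fan-out using subprocess instead of the
# Windows-only "start" shell builtin (interpreter/script paths assumed):
# import subprocess, sys
# for number in range(90, 100):
#     subprocess.Popen([sys.executable, "web_indexer.py",
#                       "-s", f"172.{number}.0.0",
#                       "-e", f"172.{number + 1}.0.0"])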
|
[
"noreply@github.com"
] |
undefinedobjects.noreply@github.com
|
998358bbb4e00e3a37420a1318e0751c5ae23214
|
edeb309cefeddfaac8dbad653a71f32d97a29d35
|
/FireModules/Websurfing/google_search_deleting_files.py
|
f6540aeef174471b43e0493f53af5b2dd6897f27
|
[
"MIT"
] |
permissive
|
alex14324/DumpsterFire
|
746c0b42734abb3a8539bdb96477d6a6488d3079
|
58a6b94d4beadb43776610bbb3bcb2a2416efe8a
|
refs/heads/master
| 2020-08-07T05:33:15.058377
| 2019-10-07T07:23:40
| 2019-10-07T07:23:40
| 213,318,310
| 0
| 0
|
MIT
| 2019-10-11T04:39:20
| 2019-10-07T07:15:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
#!/usr/bin/python
#
# Filename:
#
# Version: 1.0.0
#
# Author: Joe Gervais (TryCatchHCF)
#
# Summary:
#
# Part of the DumpsterFire Toolset. See documentation at https://github.com/TryCatchHCF/DumpsterFire
#
#
# Description:
#
#
# Example:
#
#
import urllib, time, random
from FireModules.fire_module_base_class import *
class google_search_deleting_files( FireModule ):
def __init__(self, moofStr=""):
    # The original defined __init__ twice; the second definition silently
    # replaced the first, so the two are merged here with a default argument.
    self.moofStr = moofStr
    self.commentsStr = "Websurfing/google_search_deleting_files"
    return
def Description( self ):
    # Assigning the string to self.Description would shadow this method after
    # the first call, so return the string directly instead.
    return "Performs Google search on securely deleting files"
def Configure( self ):
return
def GetParameters( self ):
return ""
def SetParameters( self, parametersStr ):
print parametersStr
return
def ActivateLogging( self, logFlag ):
print self.commentsStr + ": Setting Logging flag!"
print logFlag
return
def Ignite( self ):
print self.commentsStr + ": Opening URL session for Google search on securely deleting files"
self.webSession = urllib.urlopen( 'https://www.google.com/search?q=securely+deleting+files&oq=securely+deleting+files' )
trash = self.webSession.read()
return
|
[
"noreply@github.com"
] |
alex14324.noreply@github.com
|
b0e53f0386e068a848db093380db2c99a669e4ea
|
5f4cf695ffa0460aa42d245b77cbe273d249bd9c
|
/lists/tests.py
|
c23480c4e2089804ef59e85ce28fc7ffb23b2c88
|
[] |
no_license
|
codebleeder/superlist
|
d616ad77f601ac28db9f075fb8f4547534927b27
|
fc850ab7d4f8bea90805a79117509726ae323192
|
refs/heads/master
| 2021-01-10T13:10:02.072566
| 2015-12-30T09:00:13
| 2015-12-30T09:00:13
| 48,778,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
from django.test import TestCase
from django.core.urlresolvers import resolve
from lists.views import home_page
# Create your tests here.
class HomePageTest(TestCase):
def test_root_url_resolves_to_home_page_view(self):
found = resolve('/')
self.assertEqual(found.func, home_page)
|
[
"sharad.shivmath@gmail.com"
] |
sharad.shivmath@gmail.com
|
97d8d09c16f1edd913cb3db8bb84efd98193c1b5
|
fb41b080244208be9aedfeed517c93289ca0ecca
|
/files/shop/migrations/0030_delete_kurs_d.py
|
f70850f386c5e18501cb5637dd5953982618d2a6
|
[] |
no_license
|
EddieMorra/mebli
|
8824b7943f81a7738dea3f65397e97e9e98f0f62
|
a3b3ba7aa0abc82ab688f53263dd7a3a6164c1f6
|
refs/heads/master
| 2022-12-12T01:37:27.306728
| 2020-04-06T17:36:48
| 2020-04-06T17:36:48
| 253,088,733
| 0
| 0
| null | 2022-12-08T04:00:04
| 2020-04-04T20:03:54
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 295
|
py
|
# Generated by Django 3.0.2 on 2020-03-28 02:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0029_remove_product_kurs_d'),
]
operations = [
migrations.DeleteModel(
name='Kurs_d',
),
]
|
[
"rom4egful@gmail.com"
] |
rom4egful@gmail.com
|
d8cb4d738e3fca2d4ddb17040fa4fe5a789e0334
|
63e2bed7329c79bf67279f9071194c9cba88a82c
|
/SevOneApi/python-client/test/test_flow_falcon_visualization.py
|
51a57732e471078c158cccc29b73d4aae5586ecf
|
[] |
no_license
|
jsthomason/LearningPython
|
12422b969dbef89578ed326852dd65f65ab77496
|
2f71223250b6a198f2736bcb1b8681c51aa12c03
|
refs/heads/master
| 2021-01-21T01:05:46.208994
| 2019-06-27T13:40:37
| 2019-06-27T13:40:37
| 63,447,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
# coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.flow_falcon_visualization import FlowFalconVisualization # noqa: E501
from swagger_client.rest import ApiException
class TestFlowFalconVisualization(unittest.TestCase):
"""FlowFalconVisualization unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFlowFalconVisualization(self):
"""Test FlowFalconVisualization"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.flow_falcon_visualization.FlowFalconVisualization() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"johnsthomason@gmail.com"
] |
johnsthomason@gmail.com
|
6464b7dec09ac442b5f4aa129661e465bc584d1b
|
bbd44d1f91bf1ed83778bf5086b9fa625794849c
|
/test2.py
|
69b16dc8ba9c36cb9c54fead66cd5bbe9fc83cc5
|
[] |
no_license
|
hyperloop11/flask-blog
|
a1c1175653a2183285d737a119021a1ab6a72519
|
a701f3995a96da72da6dbff2c024265cc9438f35
|
refs/heads/main
| 2023-03-03T19:02:38.717568
| 2021-02-15T17:59:26
| 2021-02-15T17:59:26
| 335,084,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
from test1 import age
class Person:
p_age = age
def __init__(self, name):
self.name = name
|
[
"shirin.kaul11@gmail.com"
] |
shirin.kaul11@gmail.com
|
8b69611ea55eff694e4f24935e45e153a3593a8b
|
fbd9cf0b31e5d433b1d2d7dfe562a660d60d27de
|
/taskmanager_app/migrations/0001_initial.py
|
23410d47e27a30020cf91198ba0ddd1c98b785c6
|
[] |
no_license
|
pragatisinghdev/taskmanager
|
b18c8f8323031e583a990aa62b0bc282a7f592c3
|
64daa154be39285996aeb4c94e58c01e49b5fbc6
|
refs/heads/master
| 2020-09-12T11:54:33.437867
| 2019-12-28T17:29:52
| 2019-12-28T17:29:52
| 222,417,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
# Generated by Django 2.2.5 on 2019-12-27 05:25
from django.db import migrations, models
import taskmanager_app.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(blank=True, max_length=50)),
('phone', models.CharField(blank=True, max_length=100)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField(blank=True)),
('file', models.FilePathField(path=taskmanager_app.models.images_path)),
('end_date', models.DateField()),
('author', models.ForeignKey(on_delete=models.CASCADE, to='taskmanager_app.User')),  # on_delete must be a callable, not the string 'models.CASCADE'
],
),
]
|
[
"psingh@endosoft.com"
] |
psingh@endosoft.com
|
60b4a65b29fce7b704390b16827b172a69b43b49
|
00105bf59b9f4b250fdcc33c01aef954173fd4a3
|
/7. Linear_Regression/Linear Regression.py
|
e7232e2263e99d9bae5b2ce670d43dccf0c8bd9c
|
[] |
no_license
|
up-data-science/ML_1_Exercise
|
7305410e25b159813c70dc05141c9dee2f75b189
|
417ede59dd32370b30b0fc5b8305f71da6a5774d
|
refs/heads/master
| 2020-06-28T16:15:53.243595
| 2019-08-02T18:18:31
| 2019-08-02T18:18:31
| 200,279,229
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,130
|
py
|
# coding: utf-8
# # Linear Regression
#
# In this tutorial we will implement a linear regression model. We will also implement a function that splits the available data into a training and a testting part.
#
# ## Problem Setting
#
# We will use the Boston Housing Dataset. This dataset contains information collected by the U.S Census Service concerning housing in the city of Boston in the state of Massachusetts in 1978. Our goal is to predict the median value of the houses in a particular town in the city of Boston given its attributes. Check the file ’housing.names’ for more information on the attributes.
# In[ ]:
import urllib
import pandas as pd
import numpy as np
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
get_ipython().magic(u'load_ext autoreload')
get_ipython().magic(u'autoreload 2')
from sklearn.datasets import load_boston
boston=load_boston()
testfile = urllib.URLopener()
testfile.retrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.names", "housing.names")
df=pd.DataFrame(boston.data)
df.columns=['crime_rate','res_land_zoned','industry','charles_river','nox','avg_num_rooms','prop_bf_1940','dst_emply_center','rd_highway_idx','tax_rate','stdnt_tchr_ratio','prop_blacks','low_status_pct']
X=boston.data
y=boston.target
# In[ ]:
df.head(10)
# ### Exercise 1
#
# Write the *split_train_test(X,y,split,seed)*, given an instance matrix $X \in \mathbb{R}^{N \times D}$, labels $y \in Y^N$, a split ratio in $[0, 1]$ and a random seed $\in \mathbb{Z}$. Split the dataset in $(split×100)\%$ of the instances for training our model and the rest for testing, i.e.
#
# $$ \left|X_{\text{train}}\right| = \lceil \text{split} \cdot N \rceil, \qquad |X_{\text{train}}| + |X_{\text{test}}| = N. $$
# Make sure you use the given random number generator seed so we all get the same results. The function is supposed to return:
#
# - X_train, y_train: the training instances and labels;
# - X_test, y_test: the test instances and labels,
#
# in the same order as was mentioned.
#
# Hint: It may be helpful to use shuffling functionality (e.g. np.random.shuffle).
# In[ ]:
def split_train_test(X,y,split,seed):
##################
#INSERT CODE HERE#
##################
return None # X_train, y_train, X_test, y_test
# ### Exercise 2
#
# Write the function *train_linear_reg(X_train,y_train,lmbd)*.
# Implement the ridge regression model (slide 24). The function should output the learned weight vector $\theta \in \mathbb{R}^D$ or $\mathbb{R}^{D+1}$ (depending on whether you are adding *bias*).
# In[ ]:
def train_linear_reg(X, y, lmbd):
##################
#INSERT CODE HERE#
##################
return None # theta
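# A reference sketch (not the graded answer) of the closed-form ridge
# solution with a bias column appended, theta = (X'X + lmbd*I)^-1 X'y:
#
# def _train_linear_reg_sketch(X, y, lmbd):
#     Xb = np.hstack([X, np.ones((X.shape[0], 1))])   # bias column
#     D = Xb.shape[1]
#     return np.linalg.solve(np.dot(Xb.T, Xb) + lmbd * np.eye(D), np.dot(Xb.T, y))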
# ### Exercise 3
#
# Write the function *predict(X,theta)* which predicts housing values vector pred for a dataset X and a previously trained parameter vector $\theta$.
# In[ ]:
def predict(X, theta):
##################
#INSERT CODE HERE#
##################
return None # y_pred
# ### Exercise 4
#
# Write the function *mean_abs_loss(y_true,y_pred)* which computes the mean of the absolute differences between our prediction vector $y\_pred$ and the real housing values $y\_true$.
# In[ ]:
def mean_abs_loss(y_true,y_pred):
##################
#INSERT CODE HERE#
##################
return 0
# ### Exercise 5
#
# Evaluate your solutions by running the following code.
#
# Moreover, answer the following questions: What is the most important feature in your model? Are there features that are not so important? What happens if you remove them? Are there outliers with a high absolute loss?
# In[ ]:
seed = 3
lmbd=1
split=0.7
X_train,y_train,X_test,y_test=split_train_test(X,y,split,seed)
theta=train_linear_reg(X_train,y_train,lmbd)
y_pred=predict(X_test,theta)
mae=mean_abs_loss(y_test,y_pred)
print 'The mean absolute loss is {loss:0.3f}'.format(loss=mae*1000)
|
[
"noreply@github.com"
] |
up-data-science.noreply@github.com
|
a507e0c2ed7cc0b1606723d494231252e4ea77cc
|
ef0f296e4615d3e2109e4b906a81cc4ba24b2b29
|
/fusuma/Fusuma/DataMan.py
|
b65f16ad2cf1119ad280760c9722ffb77933ba67
|
[] |
no_license
|
hylom/fusuma
|
48504235db0fb086ecda3d711c510c700207fe42
|
0d776ae29f56826c33942fae17c45468b8353a09
|
refs/heads/master
| 2021-01-23T19:45:49.387978
| 2009-06-23T16:52:21
| 2009-06-23T16:52:21
| 159,778
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,424
|
py
|
#!/usr/bin/env python
#######################################################################
# This file is part of Fusuma website management system.
#
# Copyright (c) hylom <hylomm at gmail.com>, 2008.
#
# This file is released under the GPL.
#
# $Id: DataMan.py,v 1.1.1.1 2008/11/27 17:15:36 hylom Exp $
#######################################################################
"""
This is Fusuma's article data management module.
"""
__revision__ = "$Revision: 1.1.1.1 $"
import datetime
import locale  # needed by _get_output_path below
import sys
import os
VERSION = "0.0.1"
VERSION_DATE = VERSION + " 09/26/2008"
VERSION_SPLIT = tuple(VERSION.split('.'))
class DataMan(object):
"""
This is Fusuma's story data management class.
"""
def __init__(self, document_root):
"""
Initialize Data Manager.
@param document_root: document root directory
@type document_root: string
"""
self._document_root = document_root
def new_story(self):
"""
create new story.
"""
story = Story()
story.set_output_dir(self._get_output_path())
# Story.property()[""] =
return story
def _get_output_path(self):
"""
return directory which story is saved.
default is: <document_root>/yyyy/mm/dd/
"""
dt = datetime.datetime.today()
current_locale = locale.getlocale( locale.LC_CTYPE )
locale.setlocale( locale.LC_CTYPE, "" )
path = dt.strftime( "%Y/%m/%d" )  # was dt_expire, an undefined name
locale.setlocale( locale.LC_CTYPE, current_locale )
return path
class Story:
"""
DataMan's Story object.
"""
def __init__(self):
"""
Initialize Story.
"""
# store session object
# TODO: This session-data-file-name-rule may be insecure!
self._output_file = ""
self._output_dir = ""
self._story = ""
self._property = {}
def save(self):
"""
Save story to file.
"""
## accessor
def property(self):
return self._property
def output_file(self):
return self._output_file
def set_output_file(self, str):
self._output_file = str
def output_dir(self):
return self._output_dir
def set_output_dir(self, str):
self._output_dir = str
def story(self):
return self._story
def set_story(self, str):
self._story = str
|
[
"hylom@users.sourceforge.jp"
] |
hylom@users.sourceforge.jp
|
53afd2bce9ba50e5033d6390c1079e0d1ae46806
|
87eae6cd14dd360d6d8b8e78e6e1e9952d6cd0c1
|
/p5.py
|
424933937bbb58f010d4b44920badcd8fe10f517
|
[] |
no_license
|
karthikgvsk/project-euler
|
afd8a62b6067d2236a9a6b58b0ed01c23136c5e3
|
01e562a97b5c9cd03e19fbea29a63fc0bfcaac46
|
refs/heads/master
| 2020-04-25T21:18:19.061986
| 2013-06-08T17:23:24
| 2013-06-08T17:23:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
#smallest multiple
#2520 is the smallest number that can be divided by each of the numbers from#1 to 10 without any remainder.
#
#What is the smallest positive number that is evenly divisible by all of the#numbers from 1 to 20?
from math import log
def primelist(n):
l = [0] * (n + 1)
p = 2
while p <= n:
q = 2
while q <= n // p:
l[p * q] = 1
q = q + 1
p = p + 1
l[0], l[1], l[2], l[3] = 1, 1, 0, 0
return l
n = 20
l = primelist(n)
prod = 1
for i in range(len(l)):
if l[i] == 0:
power = int(log(n, i))
prod = prod * (i ** power)
print(prod)
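# Cross-check sketch via an iterated lcm (Python 3 spelling shown):
# from functools import reduce
# from math import gcd
# print(reduce(lambda a, b: a * b // gcd(a, b), range(1, 21)))  # 232792560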
|
[
"karthikgvsk@gmail.com"
] |
karthikgvsk@gmail.com
|
a35d8d4bfeb2701f3c8a5b9ffc1d88786aa08395
|
41106cdb25e50720dd7e06c2a9cfea5928a42c6c
|
/scripts/convert/converter/convert_caffe2.py
|
17275491fa576107a2b251d691d776863ed6f85b
|
[
"Apache-2.0"
] |
permissive
|
judgeeeeee/klever-model-registry
|
58edb1da904667d429bb35b3ebeaa3361e860364
|
7ae97a4babf0861132976494fc8ac04ca40d4af3
|
refs/heads/master
| 2023-02-22T22:47:01.736458
| 2020-12-03T06:25:24
| 2020-12-03T06:25:24
| 317,134,591
| 0
| 1
|
Apache-2.0
| 2020-11-30T06:46:06
| 2020-11-30T06:46:05
| null |
UTF-8
|
Python
| false
| false
| 1,877
|
py
|
import os
import json
import argparse
import numpy as np
import onnx
import caffe2.python.onnx.frontend
from caffe2.proto import caffe2_pb2
from .base_convert import BaseConverter
INIT_NET = 'init_net.pb'
PREDICT_NET = 'predict_net.pb'
DEL_ATTR = 'ws_nbytes_limit'
MODEL_NAME = 'model.onnx'
def del_attr(netdef):
for op in netdef.op:
for i, attr in enumerate(op.arg):
if attr.name == DEL_ATTR:
op.arg.pop(i)
def np2onnx(s):
def _modified2np(_s):
if _s == 'float32':
return 'float'
if _s == 'float64':
return 'double'
return _s
s = _modified2np(s)
return onnx.TensorProto.DataType.Value(s.upper())
class Caffe2ToONNX(BaseConverter):
def _load_model(self):
self.init_net = self._find_with_name(INIT_NET)
self.predict_net = self._find_with_name(PREDICT_NET)
def _parse_input(self):
value_info = {}
for input in self.input_value:
value_info[input['name']] = (np2onnx(input['dType']),
tuple(input['size']))
return value_info
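# _parse_input reads self.input_value entries of the following shape
# (the name and sizes below are illustrative only):
# [
#     {"name": "data", "dType": "float32", "size": [1, 3, 224, 224]},
# ]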
def _convert(self):
self._load_model()
value_info = self._parse_input()
predict_net = caffe2_pb2.NetDef()
with open(self.predict_net, 'rb') as f:
predict_net.ParseFromString(f.read())
init_net = caffe2_pb2.NetDef()
with open(self.init_net, 'rb') as f:
init_net.ParseFromString(f.read())
del_attr(predict_net)
out_path = os.path.join(self.output_dir, 'model', MODEL_NAME)
onnx_model = caffe2.python.onnx.frontend.caffe2_net_to_onnx_model(
predict_net,
init_net,
value_info,
)
onnx.save(onnx_model, out_path)
if __name__ == '__main__':
convert = Caffe2ToONNX()
convert.convert()
|
[
"noreply@github.com"
] |
judgeeeeee.noreply@github.com
|
3bafddf779602141a9656eb0ef245fd5e9719bcd
|
f5752707e33e456adecb1f6f20f8bcb53f320adf
|
/Utility_Scripts/USGS2018_spectrum.py
|
f43787449ac48dd70da6ef41c4546f2c56eb2704
|
[] |
no_license
|
alborzgh/Work_Scripts
|
4eb22f39d3ff1377a2f2fab629f65b359fda250a
|
7aa3a2e5853a4b5c050be72df3056c3fdf60dd6e
|
refs/heads/master
| 2022-12-06T08:48:50.673196
| 2020-07-15T20:06:36
| 2020-07-15T20:06:36
| 276,767,977
| 0
| 1
| null | 2020-07-15T20:06:37
| 2020-07-03T00:02:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,870
|
py
|
import numpy as np
import matplotlib.pylab as plt
from io import BytesIO
from io import StringIO
from zipfile import ZipFile
from scipy.interpolate import interp2d
from scipy.interpolate import interp1d
site_classes = {
'A/B':{'Vs30':1500, 'name':'AB'},
'AB':{'Vs30':1500, 'name':'AB'},
'B' :{'Vs30':1080, 'name':'B'},
'B/C':{'Vs30':760 , 'name':'BC'},
'BC':{'Vs30':760 , 'name':'BC'},
'C' :{'Vs30':530 , 'name':'C'},
'C/D':{'Vs30':365 , 'name':'CD'},
'CD':{'Vs30':365 , 'name':'CD'},
'D' :{'Vs30':260 , 'name':'D'},
'D/E':{'Vs30':185 , 'name':'DE'},
'DE':{'Vs30':185 , 'name':'DE'},
'E' :{'Vs30':150 , 'name':'E'},
}
main_zip_file_address = r'C:\AlborzFiles\MyDesktop\Literature\USGS-Hazard-Map\0p01 Degree WUS Basin Map Data.zip'
def _get_hazard_curve(site_class='B', ordinate='PGA'):
with ZipFile(main_zip_file_address, 'r') as USGS_zip_file:
lower_zip_name = fr'0p01 Degree WUS Basin Map Data/2018_nshm_{site_classes[site_class]["name"]}_vs30_{str(site_classes[site_class]["Vs30"])}_0p01_degree_seattle_basin_maps.zip'
lower_zip = BytesIO(USGS_zip_file.read(lower_zip_name))
with ZipFile(lower_zip) as lower_zip_file:
csv_address = fr'2018_nshm_{site_classes[site_class]["name"]}_vs30_{str(site_classes[site_class]["Vs30"])}_0p01_degree_seattle_basin_maps/{ordinate}/curves.csv'
with lower_zip_file.open(csv_address, 'r') as curve_file:
top_row = curve_file.readline().decode('utf-8').rstrip().split(',')[3:]
hazard_x = np.array([float(x) for x in top_row])
phantom_file = StringIO(curve_file.read().decode('utf-8'))
data = np.loadtxt(phantom_file, delimiter=',', usecols=tuple(range(1,23)))
lon = data[:,0]
lat = data[:,1]
hazard_y = data[:,2:]
del data
return (lat, lon, hazard_x, hazard_y)
def get_USGS_hazard_2018(lat, lon, site_class='B', return_period=2475):
x_vals = np.array([0.0,0.01,0.02,0.03,0.05,0.075,0.1,0.15,0.2,0.25,0.3,0.4,0.5,0.75,1.0,1.5,2.0,3.0,4.0,5.0,7.5,10.0])
y_vals = np.zeros(x_vals.shape)
for ii, x in enumerate(x_vals):
ordinate_text = ''
if x == 0:
ordinate_text = 'PGA'
else:
ordinate_text = 'SA' + str(x).replace('.','P')
lat_list, lon_list, hazard_x, hazard_y = _get_hazard_curve(site_class, ordinate_text)
loc_to_del = np.where(np.abs(lat_list - lat) > 0.02)
lat_list = np.delete(lat_list,loc_to_del)
lon_list = np.delete(lon_list,loc_to_del)
hazard_y = np.delete(hazard_y,loc_to_del, 0)
loc_to_del = np.where(np.abs(lon_list - lon) > 0.02)
lat_list = np.delete(lat_list,loc_to_del)
lon_list = np.delete(lon_list,loc_to_del)
hazard_y = np.delete(hazard_y,loc_to_del, 0)
cur_loc_hazard = np.zeros(hazard_x.shape)
for jj, _ in enumerate(hazard_x):
z = hazard_y[:,jj]
f = interp2d(lat_list, lon_list, z, kind='linear')
cur_loc_hazard[jj] = f(lat, lon)
y_vals[ii] = np.exp(interp1d(np.log(cur_loc_hazard), np.log(hazard_x), kind='linear')(np.log(1.0/return_period)))
return x_vals, y_vals
def main():
print('150 year return period:')
x_vals, y_vals = get_USGS_hazard_2018(lat=47.572260,lon = -122.347509, site_class='E', return_period=150)
for x, y in zip(x_vals, y_vals):
print(f'{x} {y}')
print('2500 year return period:')
x_vals, y_vals = get_USGS_hazard_2018(lat=47.572260,lon = -122.347509, site_class='E', return_period=2500)
for x, y in zip(x_vals, y_vals):
print(f'{x} {y}')
if __name__ == "__main__":
main()
|
[
"alborzgh@uw.edu"
] |
alborzgh@uw.edu
|
5038904bb1fd03747bf1c26a2daa2a87b5a5bcd8
|
fee88a67d4706bddb8999ce2701315c5f62f6e78
|
/onmt/modules/extensions/mlp/mlp_gelu.py
|
a1a1d998fa59cc0685f65e54bba7b2fe97e1aee0
|
[
"MIT"
] |
permissive
|
Dan-hbd/NMTGMinor
|
5cade7d3b6de83cc45a618ab59420274bcd86f15
|
84e59ac8391ee78852d7c71afc60c3c8b8e3d44d
|
refs/heads/master
| 2023-05-30T16:22:58.148920
| 2021-06-15T14:28:48
| 2021-06-15T14:28:48
| 372,408,488
| 0
| 0
|
NOASSERTION
| 2021-05-31T06:44:22
| 2021-05-31T06:44:22
| null |
UTF-8
|
Python
| false
| false
| 15,164
|
py
|
from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
import silu_cuda
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
amp = None
from ..optimized.compat import half_function
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
try:
import fused_mlp_agelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_agelu = None
try:
    import fused_mlp_gelu
except (ModuleNotFoundError, ImportError) as e:
    fused_mlp_gelu = None
try:
    import fused_mlp_silu  # used by MlpSiluFunction below but missing from the original import guards
except (ModuleNotFoundError, ImportError) as e:
    fused_mlp_silu = None
#
# class MlpReluFunction(torch.autograd.Function):
# @staticmethod
# @custom_fwd(cast_inputs=torch.float16)
# def forward(ctx, activation, *args):
# output = fused_mlp.forward(args)
# ctx.save_for_backward(*args)
# ctx.outputs = output
# return output[0]
#
# @staticmethod
# @custom_bwd
# def backward(ctx, grad_o):
# grads = fused_mlp.backward(grad_o, ctx.outputs, ctx.saved_tensors)
# del ctx.outputs
# return (None, *grads)
#
#
class MlpReluFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
output = fused_mlp_relu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = output
dropout_mask = output[-1]
ctx.p = p
return output[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_relu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
class MlpSiluFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = fused_mlp_silu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_silu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
class MlpAGeLUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = fused_mlp_agelu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_agelu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
class MlpGeLUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = fused_mlp_gelu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_gelu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
if fused_mlp_agelu:
mlp_agelu_function = half_function(MlpAGeLUFunction.apply)
else:
mlp_agelu_function = None
if fused_mlp_gelu:
mlp_gelu_function = half_function(MlpGeLUFunction.apply)
else:
mlp_gelu_function = None
class SwishFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, inp):
ctx.save_for_backward(inp)
return silu_cuda.forward(inp)
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
inp, = ctx.saved_tensors
if not ctx.needs_input_grad[0]: return (None,)
return silu_cuda.backward(inp, grad_out)
def fast_silu(input):
return SwishFunction.apply(input)
class FastSiLU(torch.nn.Module):
def forward(self, input):
return fast_silu(input)
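# Editor's sketch (not part of the original file): fast_silu is the fused
# counterpart of the reference SiLU, x * sigmoid(x). A quick equivalence
# check, assuming the silu_cuda extension is built and a CUDA device exists;
# the tolerance is illustrative.
def _check_fast_silu():
    x = torch.randn(1024, device="cuda", requires_grad=True)
    assert torch.allclose(fast_silu(x), x * torch.sigmoid(x), atol=1e-6)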
class AGELUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, x):
ctx.save_for_backward(x)
SQRT_M2_PI = 0.7978845608
COEFF = 0.044715
return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3))))
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
x, = ctx.saved_tensors
SQRT_M2_PI = 0.7978845608
COEFF = 0.044715
BACKCOEFF = 0.1070322243
tanh_outf = torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3)))
retf = 0.5 * x * (1.0 - torch.pow(tanh_outf, 2)) * (SQRT_M2_PI + BACKCOEFF * torch.pow(x, 2)) + 0.5 * (
1.0 + tanh_outf)
return grad_out * retf
class AGELU(torch.nn.Module):
def forward(self, input):
return AGELUFunction.apply(input)
def agelu(x):
SQRT_M2_PI = 0.7978845608
COEFF = 0.044715
BACKCOEFF = SQRT_M2_PI * COEFF * 3
return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3))))
def agelu_backward(x, dy):
SQRT_M2_PI = 0.7978845608
COEFF = 0.044715
BACKCOEFF = 0.1070322243
tanh_outf = torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3)))
retf = 0.5 * x * (1.0 - torch.pow(tanh_outf, 2)) * (SQRT_M2_PI + BACKCOEFF * torch.pow(x, 2)) + 0.5 * (
1.0 + tanh_outf)
return dy * retf
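# Editor's sketch (not part of the original file): the hand-derived backward
# in AGELUFunction can be verified against autograd with gradcheck. Double
# precision is used because gradcheck is numerically strict; illustrative only.
def _check_agelu_grad():
    x = torch.randn(64, dtype=torch.double, requires_grad=True)
    torch.autograd.gradcheck(AGELUFunction.apply, (x,))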
if __name__ == '__main__':
class MLP(torch.nn.Module):
"""Launch MLP in C++
Args:
mlp_sizes (list of int): MLP sizes. Example: [1024,1024,1024] will create 2 MLP layers with shape 1024x1024
bias (bool): Default True:
relu (bool): Default True
"""
def __init__(self, mlp_sizes, activation='gelu', dropout=0.25):
super(MLP, self).__init__()
self.num_layers = len(mlp_sizes) - 1
self.mlp_sizes = copy(mlp_sizes)
self.dropout = dropout
            if activation == 'relu':
                self.activation = 1
            elif activation == 'sigmoid':
                self.activation = 2
            elif activation == 'gelu':
                self.activation = 3
            else:
                raise TypeError("activation must be 'relu', 'sigmoid' or 'gelu'.")
self.weights = []
self.biases = []
for i in range(self.num_layers):
w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
self.weights.append(w)
name = 'weight_{}'.format(i)
setattr(self, name, w)
b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
self.biases.append(b)
name = 'bias_{}'.format(i)
setattr(self, name, b)
self.reset_parameters()
def reset_parameters(self):
for weight in self.weights:
dimsum = weight.size(0) + weight.size(1)
std = math.sqrt(2. / float(dimsum))
nn.init.normal_(weight, 0., std)
for bias in self.biases:
std = math.sqrt(1. / float(bias.size(0)))
nn.init.normal_(bias, 0., std)
def forward(self, input, mask=None, ref=False):
if ref:
return self.forward_ref(input, mask)
# return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)
# return mlp_agelu_function(self.dropout, input, *self.weights, *self.biases)
return mlp_gelu_function(self.dropout, input, *self.weights, *self.biases)
def forward_ref(self, input, mask):
i = 0
output = input
for l in range(self.num_layers):
output = F.linear(output, self.weights[l], self.biases[l])
dropout_mask = mask[i:i + output.numel()]
pinv = 1 / (1 - self.dropout)
if l < self.num_layers - 1:
# print(mask.size())
# output = fast_silu(output) * dropout_mask.view(output.size(0), -1) * pinv
# output = GELUFunction.apply(output) * dropout_mask.view(output.size(0), -1) * pinv
output = F.gelu(output) * dropout_mask.view(output.size(0), -1) * pinv
i += output.numel()
return output
def extra_repr(self):
# TODO add dropout probability
s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}"
return s
batch_size = 24568
mlp_sizes = [1024, 4096, 1024]
# mlp_sizes = [4, 7, 4]
num_iters = 10
class TestMLP(unittest.TestCase):
def test_creation(self):
MLP(mlp_sizes)
def test_numeric(self):
mlp = MLP(mlp_sizes, activation='relu').cuda()
print(mlp)
ref_mlp = deepcopy(mlp)
for _ in range(1):
bsz = random.randint(2850, batch_size // 8) * 8
test_input = torch.empty(bsz, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out, dropout_mask = mlp(test_input)
ref_out = ref_mlp.forward(ref_input, dropout_mask, ref=True)
print(dropout_mask.sum() / dropout_mask.numel())
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
                # Use the mean as a scalar loss; multiply by 10 so gradients are not zeroed out
mlp_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
mlp.biases[0].grad.detach().cpu().numpy(),
ref_mlp.biases[0].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_with_bias(self):
for use_activation in ['relu']:
mlp = MLP(mlp_sizes, activation=use_activation).cuda()
ref_mlp = deepcopy(mlp)
test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out, dropout_mask = mlp(test_input)
ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
                # Use the mean as a scalar loss; multiply by 10 so gradients are not zeroed out
mlp_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
for l in range(mlp.num_layers):
np.testing.assert_allclose(
mlp.weights[l].grad.detach().cpu().numpy(),
ref_mlp.weights[l].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
mlp.biases[l].grad.detach().cpu().numpy(),
ref_mlp.biases[l].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_no_grad(self):
mlp = MLP(mlp_sizes).cuda()
ref_mlp = deepcopy(mlp)
test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
ref_input = test_input.clone().detach()
mlp_out, dropout_mask = mlp(test_input)
ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_performance_half(self):
mlp = MLP(mlp_sizes).cuda().half()
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
mlp.weights[i].data.copy_(linear.weight)
mlp.biases[i].data.copy_(linear.bias)
mlp_layers.append(linear)
if i < mlp.num_layers - 1:
# mlp_layers.append(nn.ReLU(inplace=True))
mlp_layers.append(torch.nn.GELU())
mlp_layers.append(nn.Dropout(0.25))
ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
test_input = torch.empty(
batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
ref_input = torch.empty(
batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
# Warm up GPU
for _ in range(100):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
mlp_out, _ = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out, _ = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
unittest.main()
# test = TestMLP()
# test.test_creation()
# test.test_performance_half()
# test.test_with_bias()
|
[
"quanpn90@gmail.com"
] |
quanpn90@gmail.com
|
6dd47cf9abf6588f76b33b1300c80b06fe34f86b
|
304e75224229786ba64c6ef2124007c305019b23
|
/src/easy/test_build_array_from_permutation.py
|
8fd8efbd03f279c3c5d2f1ed987d934e5687eadc
|
[] |
no_license
|
Takuma-Ikeda/other-LeetCode
|
9179a8100e07d56138fd3f3f626951195e285da2
|
499616d07011bee730b9967e9861e341e62d606d
|
refs/heads/master
| 2023-04-14T06:09:35.341039
| 2023-04-10T02:29:18
| 2023-04-10T02:29:18
| 226,260,312
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
import unittest
from answer.build_array_from_permutation import Solution
class TestSolution(unittest.TestCase):
def setUp(self):
self.nums = [
[0, 2, 1, 5, 3, 4],
[5, 0, 1, 2, 3, 4],
]
self.answers = [
[0, 1, 2, 4, 5, 3],
[4, 5, 0, 1, 2, 3],
]
def solution(self, i):
s = Solution()
result = s.buildArray(self.nums[i])
self.assertEqual(self.answers[i], result)
def test_solution0(self):
self.solution(0)
def test_solution1(self):
self.solution(1)
if __name__ == "__main__":
unittest.main()
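# Editor's note (not part of the original file): the Solution under test lives
# in answer/build_array_from_permutation.py. A minimal reference implementation
# of buildArray (ans[i] = nums[nums[i]]) would be:
#
#     class Solution:
#         def buildArray(self, nums):
#             return [nums[nums[i]] for i in range(len(nums))]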
|
[
"el.programdear@gmail.com"
] |
el.programdear@gmail.com
|
e65ee592dfffea41e8966e5f2acf9d9b8c7f9a31
|
ee3ba2af93581aaca5a1393f3eb22fa794be2a12
|
/app/main/form.py
|
fa863b1c847180707fef5db9f10b6ed3d97e222f
|
[] |
no_license
|
wbchief/myflask
|
303ed98c969c58a07953aa37c28f90ace3b9a284
|
a4d82bc80df84cb7e418058de3519c29e29db7f1
|
refs/heads/master
| 2020-03-30T23:48:10.771252
| 2018-10-14T09:56:17
| 2018-10-14T09:56:17
| 151,713,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,266
|
py
|
from flask_pagedown.fields import PageDownField
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, BooleanField, SelectField
from wtforms.validators import Length, DataRequired, Email, ValidationError
from app.models import User, Role
class EditProfileForm(FlaskForm):
    '''
    Profile edit form
    '''
name = StringField('真实姓名', validators=[Length(0, 64)])
location = StringField('地址', validators=[Length(0, 64)])
about_me = TextAreaField('自我介绍')
submit = SubmitField('确认')
class EditProfileAdminForm(FlaskForm):
    '''
    Admin profile edit form
    '''
email = StringField('邮箱',validators=[DataRequired(), Length(1, 64), Email()])
username = StringField('用户名', validators=[DataRequired(), Length(1, 64)])
confirmed = BooleanField('是否确认')
role = SelectField('Role', coerce=int)
name = StringField('真实姓名', validators=[Length(0, 64)])
location = StringField('地址', validators=[Length(0, 64)])
about_me = TextAreaField('自我介绍')
submit = SubmitField('确认')
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
        # SelectField options must be set via the choices attribute
self.role.choices = [(role.id, role.name) for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
        '''
        Validate that the email is not already registered
        :param field: email field
        :return:
        '''
if field.data != self.user.email and User.query.filter_by(email=field.data).first():
raise ValidationError('邮箱已经被注册')
def validate_username(self, field):
        '''
        Validate that the username is not already taken
        :param field: username field
        :return:
        '''
if field.data != self.user.username and User.query.filter_by(username=field.data).first():
raise ValidationError('用户名已经存在')
class PostForm(FlaskForm):
body = PageDownField('想写点什么', validators=[DataRequired()])
submit = SubmitField('提交')
class CommentForm(FlaskForm):
body = StringField('', validators=[DataRequired()])
submit = SubmitField('提交')
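# Editor's sketch (not part of the original module): typical use of
# EditProfileAdminForm in a view; the route, template name and the `main`
# blueprint are hypothetical:
#
#     @main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
#     def edit_profile_admin(id):
#         user = User.query.get_or_404(id)
#         form = EditProfileAdminForm(user=user)
#         if form.validate_on_submit():
#             user.email = form.email.data
#             db.session.add(user)
#             return redirect(url_for('.user', username=user.username))
#         return render_template('edit_profile.html', form=form)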
|
[
"712640388@qq.com"
] |
712640388@qq.com
|
6e55abddbe446bbbe2e2f07ae0edd692a27197ed
|
b3ac12dfbb8fa74500b406a0907337011d4aac72
|
/goldcoin/full_node/weight_proof.py
|
c12b097a836dbee13ac9816cccf3f9361015586b
|
[
"Apache-2.0"
] |
permissive
|
chia-os/goldcoin-blockchain
|
ab62add5396b7734c11d3c37c41776994489d5e7
|
5c294688dbbe995ae1d4422803f6fcf3e1cc6077
|
refs/heads/main
| 2023-08-11T23:58:53.617051
| 2021-09-12T15:33:26
| 2021-09-12T15:33:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 67,454
|
py
|
import asyncio
import dataclasses
import logging
import math
import random
from concurrent.futures.process import ProcessPoolExecutor
from typing import Dict, List, Optional, Tuple
from goldcoin.consensus.block_header_validation import validate_finished_header_block
from goldcoin.consensus.block_record import BlockRecord
from goldcoin.consensus.blockchain_interface import BlockchainInterface
from goldcoin.consensus.constants import ConsensusConstants
from goldcoin.consensus.deficit import calculate_deficit
from goldcoin.consensus.full_block_to_block_record import header_block_to_sub_block_record
from goldcoin.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_iters,
is_overflow_block,
)
from goldcoin.consensus.vdf_info_computation import get_signage_point_vdf_info
from goldcoin.types.blockchain_format.classgroup import ClassgroupElement
from goldcoin.types.blockchain_format.sized_bytes import bytes32
from goldcoin.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot
from goldcoin.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from goldcoin.types.blockchain_format.vdf import VDFInfo
from goldcoin.types.end_of_slot_bundle import EndOfSubSlotBundle
from goldcoin.types.header_block import HeaderBlock
from goldcoin.types.weight_proof import (
SubEpochChallengeSegment,
SubEpochData,
SubSlotData,
WeightProof,
SubEpochSegments,
RecentChainData,
)
from goldcoin.util.block_cache import BlockCache
from goldcoin.util.hash import std_hash
from goldcoin.util.ints import uint8, uint32, uint64, uint128
from goldcoin.util.streamable import dataclass_from_dict, recurse_jsonify
log = logging.getLogger(__name__)
class WeightProofHandler:
LAMBDA_L = 100
C = 0.5
MAX_SAMPLES = 20
def __init__(
self,
constants: ConsensusConstants,
blockchain: BlockchainInterface,
):
self.tip: Optional[bytes32] = None
self.proof: Optional[WeightProof] = None
self.constants = constants
self.blockchain = blockchain
self.lock = asyncio.Lock()
async def get_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]:
tip_rec = self.blockchain.try_block_record(tip)
if tip_rec is None:
log.error("unknown tip")
return None
if tip_rec.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
log.debug("chain to short for weight proof")
return None
async with self.lock:
if self.proof is not None:
if self.proof.recent_chain_data[-1].header_hash == tip:
return self.proof
wp = await self._create_proof_of_weight(tip)
if wp is None:
return None
self.proof = wp
self.tip = tip
return wp
def get_sub_epoch_data(self, tip_height: uint32, summary_heights: List[uint32]) -> List[SubEpochData]:
sub_epoch_data: List[SubEpochData] = []
for sub_epoch_n, ses_height in enumerate(summary_heights):
if ses_height > tip_height:
break
ses = self.blockchain.get_ses(ses_height)
log.debug(f"handle sub epoch summary {sub_epoch_n} at height: {ses_height} ses {ses}")
sub_epoch_data.append(_create_sub_epoch_data(ses))
return sub_epoch_data
async def _create_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]:
"""
Creates a weight proof object
"""
assert self.blockchain is not None
sub_epoch_segments: List[SubEpochChallengeSegment] = []
tip_rec = self.blockchain.try_block_record(tip)
if tip_rec is None:
log.error("failed not tip in cache")
return None
log.info(f"create weight proof peak {tip} {tip_rec.height}")
recent_chain = await self._get_recent_chain(tip_rec.height)
if recent_chain is None:
return None
summary_heights = self.blockchain.get_ses_heights()
prev_ses_block = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(uint32(0)))
if prev_ses_block is None:
return None
sub_epoch_data = self.get_sub_epoch_data(tip_rec.height, summary_heights)
# use second to last ses as seed
seed = self.get_seed_for_proof(summary_heights, tip_rec.height)
rng = random.Random(seed)
weight_to_check = _get_weights_for_sampling(rng, tip_rec.weight, recent_chain)
sample_n = 0
ses_blocks = await self.blockchain.get_block_records_at(summary_heights)
if ses_blocks is None:
return None
for sub_epoch_n, ses_height in enumerate(summary_heights):
if ses_height > tip_rec.height:
break
            # if we have enough sub_epoch samples, don't sample
if sample_n >= self.MAX_SAMPLES:
log.debug("reached sampled sub epoch cap")
break
# sample sub epoch
# next sub block
ses_block = ses_blocks[sub_epoch_n]
if ses_block is None or ses_block.sub_epoch_summary_included is None:
log.error("error while building proof")
return None
if _sample_sub_epoch(prev_ses_block.weight, ses_block.weight, weight_to_check): # type: ignore
sample_n += 1
segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.header_hash)
if segments is None:
segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n))
if segments is None:
log.error(
f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} "
)
return None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.header_hash, segments)
log.debug(f"sub epoch {sub_epoch_n} has {len(segments)} segments")
sub_epoch_segments.extend(segments)
prev_ses_block = ses_block
log.debug(f"sub_epochs: {len(sub_epoch_data)}")
return WeightProof(sub_epoch_data, sub_epoch_segments, recent_chain)
def get_seed_for_proof(self, summary_heights: List[uint32], tip_height) -> bytes32:
count = 0
ses = None
for sub_epoch_n, ses_height in enumerate(reversed(summary_heights)):
if ses_height <= tip_height:
count += 1
if count == 2:
ses = self.blockchain.get_ses(ses_height)
break
assert ses is not None
seed = ses.get_hash()
return seed
async def _get_recent_chain(self, tip_height: uint32) -> Optional[List[HeaderBlock]]:
recent_chain: List[HeaderBlock] = []
ses_heights = self.blockchain.get_ses_heights()
min_height = 0
count_ses = 0
for ses_height in reversed(ses_heights):
if ses_height <= tip_height:
count_ses += 1
if count_ses == 2:
min_height = ses_height - 1
break
log.debug(f"start {min_height} end {tip_height}")
headers = await self.blockchain.get_header_blocks_in_range(min_height, tip_height, tx_filter=False)
blocks = await self.blockchain.get_block_records_in_range(min_height, tip_height)
ses_count = 0
curr_height = tip_height
blocks_n = 0
while ses_count < 2:
if curr_height == 0:
break
# add to needed reward chain recent blocks
            header_block = headers[self.blockchain.height_to_hash(curr_height)]
            if header_block is None:
                log.error("creating recent chain failed")
                return None
            block_rec = blocks[header_block.header_hash]
recent_chain.insert(0, header_block)
if block_rec.sub_epoch_summary_included:
ses_count += 1
curr_height = uint32(curr_height - 1) # type: ignore
blocks_n += 1
header_block = headers[self.blockchain.height_to_hash(curr_height)]
recent_chain.insert(0, header_block)
log.info(
f"recent chain, "
f"start: {recent_chain[0].reward_chain_block.height} "
f"end: {recent_chain[-1].reward_chain_block.height} "
)
return recent_chain
async def create_prev_sub_epoch_segments(self):
log.debug("create prev sub_epoch_segments")
heights = self.blockchain.get_ses_heights()
if len(heights) < 3:
return None
count = len(heights) - 2
ses_sub_block = self.blockchain.height_to_block_record(heights[-2])
prev_ses_sub_block = self.blockchain.height_to_block_record(heights[-3])
assert prev_ses_sub_block.sub_epoch_summary_included is not None
segments = await self.__create_sub_epoch_segments(ses_sub_block, prev_ses_sub_block, uint32(count))
assert segments is not None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_sub_block.header_hash, segments)
log.debug("sub_epoch_segments done")
return None
    async def create_sub_epoch_segments(self):
        """
        Checks the db for sub epoch challenge segments and creates any that are missing
        """
        log.debug("check segments in db")
        assert self.blockchain is not None
peak_height = self.blockchain.get_peak_height()
if peak_height is None:
log.error("no peak yet")
return None
summary_heights = self.blockchain.get_ses_heights()
prev_ses_block = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(uint32(0)))
if prev_ses_block is None:
return None
ses_blocks = await self.blockchain.get_block_records_at(summary_heights)
if ses_blocks is None:
return None
for sub_epoch_n, ses_height in enumerate(summary_heights):
log.debug(f"check db for sub epoch {sub_epoch_n}")
if ses_height > peak_height:
break
ses_block = ses_blocks[sub_epoch_n]
if ses_block is None or ses_block.sub_epoch_summary_included is None:
log.error("error while building proof")
return None
await self.__create_persist_segment(prev_ses_block, ses_block, ses_height, sub_epoch_n)
prev_ses_block = ses_block
await asyncio.sleep(2)
log.debug("done checking segments")
return None
async def __create_persist_segment(self, prev_ses_block, ses_block, ses_height, sub_epoch_n):
segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.header_hash)
if segments is None:
segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n))
if segments is None:
log.error(f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} ")
return None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.header_hash, segments)
async def __create_sub_epoch_segments(
self, ses_block: BlockRecord, se_start: BlockRecord, sub_epoch_n: uint32
) -> Optional[List[SubEpochChallengeSegment]]:
segments: List[SubEpochChallengeSegment] = []
start_height = await self.get_prev_two_slots_height(se_start)
blocks = await self.blockchain.get_block_records_in_range(
start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS
)
header_blocks = await self.blockchain.get_header_blocks_in_range(
start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS, tx_filter=False
)
curr: Optional[HeaderBlock] = header_blocks[se_start.header_hash]
height = se_start.height
assert curr is not None
first = True
idx = 0
while curr.height < ses_block.height:
if blocks[curr.header_hash].is_challenge_block(self.constants):
log.debug(f"challenge segment {idx}, starts at {curr.height} ")
seg, height = await self._create_challenge_segment(curr, sub_epoch_n, header_blocks, blocks, first)
if seg is None:
log.error(f"failed creating segment {curr.header_hash} ")
return None
segments.append(seg)
idx += 1
first = False
else:
height = height + uint32(1) # type: ignore
curr = header_blocks[self.blockchain.height_to_hash(height)]
if curr is None:
return None
log.debug(f"next sub epoch starts at {height}")
return segments
async def get_prev_two_slots_height(self, se_start: BlockRecord) -> uint32:
# find prev 2 slots height
slot = 0
batch_size = 50
curr_rec = se_start
blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height)
end = curr_rec.height
while slot < 2 and curr_rec.height > 0:
if curr_rec.first_in_sub_slot:
slot += 1
if end - curr_rec.height == batch_size - 1:
blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height)
end = curr_rec.height
curr_rec = blocks[self.blockchain.height_to_hash(uint32(curr_rec.height - 1))]
return curr_rec.height
async def _create_challenge_segment(
self,
header_block: HeaderBlock,
sub_epoch_n: uint32,
header_blocks: Dict[bytes32, HeaderBlock],
blocks: Dict[bytes32, BlockRecord],
first_segment_in_sub_epoch: bool,
) -> Tuple[Optional[SubEpochChallengeSegment], uint32]:
assert self.blockchain is not None
sub_slots: List[SubSlotData] = []
log.debug(f"create challenge segment block {header_block.header_hash} block height {header_block.height} ")
# VDFs from sub slots before challenge block
first_sub_slots, first_rc_end_of_slot_vdf = await self.__first_sub_slot_vdfs(
header_block, header_blocks, blocks, first_segment_in_sub_epoch
)
if first_sub_slots is None:
log.error("failed building first sub slots")
return None, uint32(0)
sub_slots.extend(first_sub_slots)
ssd = await _challenge_block_vdfs(
self.constants,
header_block,
blocks[header_block.header_hash],
blocks,
)
sub_slots.append(ssd)
        # VDFs from slot after challenge block to end of slot
log.debug(f"create slot end vdf for block {header_block.header_hash} height {header_block.height} ")
challenge_slot_end_sub_slots, end_height = await self.__slot_end_vdf(
uint32(header_block.height + 1), header_blocks, blocks
)
if challenge_slot_end_sub_slots is None:
log.error("failed building slot end ")
return None, uint32(0)
sub_slots.extend(challenge_slot_end_sub_slots)
if first_segment_in_sub_epoch and sub_epoch_n != 0:
return (
SubEpochChallengeSegment(sub_epoch_n, sub_slots, first_rc_end_of_slot_vdf),
end_height,
)
return SubEpochChallengeSegment(sub_epoch_n, sub_slots, None), end_height
# returns a challenge chain vdf from slot start to signage point
async def __first_sub_slot_vdfs(
self,
header_block: HeaderBlock,
header_blocks: Dict[bytes32, HeaderBlock],
blocks: Dict[bytes32, BlockRecord],
first_in_sub_epoch: bool,
) -> Tuple[Optional[List[SubSlotData]], Optional[VDFInfo]]:
# combine cc vdfs of all reward blocks from the start of the sub slot to end
header_block_sub_rec = blocks[header_block.header_hash]
# find slot start
curr_sub_rec = header_block_sub_rec
first_rc_end_of_slot_vdf = None
if first_in_sub_epoch and curr_sub_rec.height > 0:
while not curr_sub_rec.sub_epoch_summary_included:
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
first_rc_end_of_slot_vdf = self.first_rc_end_of_slot_vdf(header_block, blocks, header_blocks)
else:
if header_block_sub_rec.overflow and header_block_sub_rec.first_in_sub_slot:
sub_slots_num = 2
while sub_slots_num > 0 and curr_sub_rec.height > 0:
if curr_sub_rec.first_in_sub_slot:
assert curr_sub_rec.finished_challenge_slot_hashes is not None
sub_slots_num -= len(curr_sub_rec.finished_challenge_slot_hashes)
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
else:
while not curr_sub_rec.first_in_sub_slot and curr_sub_rec.height > 0:
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
curr = header_blocks[curr_sub_rec.header_hash]
sub_slots_data: List[SubSlotData] = []
tmp_sub_slots_data: List[SubSlotData] = []
while curr.height < header_block.height:
if curr is None:
log.error("failed fetching block")
return None, None
if curr.first_in_sub_slot:
# if not blue boxed
if not blue_boxed_end_of_slot(curr.finished_sub_slots[0]):
sub_slots_data.extend(tmp_sub_slots_data)
for idx, sub_slot in enumerate(curr.finished_sub_slots):
curr_icc_info = None
if sub_slot.infused_challenge_chain is not None:
curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info))
tmp_sub_slots_data = []
ssd = SubSlotData(
None,
None,
None,
None,
None,
curr.reward_chain_block.signage_point_index,
None,
None,
None,
None,
curr.reward_chain_block.challenge_chain_ip_vdf,
curr.reward_chain_block.infused_challenge_chain_ip_vdf,
curr.total_iters,
)
tmp_sub_slots_data.append(ssd)
curr = header_blocks[self.blockchain.height_to_hash(uint32(curr.height + 1))]
if len(tmp_sub_slots_data) > 0:
sub_slots_data.extend(tmp_sub_slots_data)
for idx, sub_slot in enumerate(header_block.finished_sub_slots):
curr_icc_info = None
if sub_slot.infused_challenge_chain is not None:
curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info))
return sub_slots_data, first_rc_end_of_slot_vdf
def first_rc_end_of_slot_vdf(
self,
header_block,
blocks: Dict[bytes32, BlockRecord],
header_blocks: Dict[bytes32, HeaderBlock],
) -> Optional[VDFInfo]:
curr = blocks[header_block.header_hash]
while curr.height > 0 and not curr.sub_epoch_summary_included:
curr = blocks[curr.prev_hash]
return header_blocks[curr.header_hash].finished_sub_slots[-1].reward_chain.end_of_slot_vdf
async def __slot_end_vdf(
self, start_height: uint32, header_blocks: Dict[bytes32, HeaderBlock], blocks: Dict[bytes32, BlockRecord]
) -> Tuple[Optional[List[SubSlotData]], uint32]:
# gets all vdfs first sub slot after challenge block to last sub slot
log.debug(f"slot end vdf start height {start_height}")
curr = header_blocks[self.blockchain.height_to_hash(start_height)]
curr_header_hash = curr.header_hash
sub_slots_data: List[SubSlotData] = []
tmp_sub_slots_data: List[SubSlotData] = []
while not blocks[curr_header_hash].is_challenge_block(self.constants):
if curr.first_in_sub_slot:
sub_slots_data.extend(tmp_sub_slots_data)
curr_prev_header_hash = curr.prev_header_hash
# add collected vdfs
for idx, sub_slot in enumerate(curr.finished_sub_slots):
prev_rec = blocks[curr_prev_header_hash]
eos_vdf_iters = prev_rec.sub_slot_iters
if idx == 0:
eos_vdf_iters = uint64(prev_rec.sub_slot_iters - prev_rec.ip_iters(self.constants))
sub_slots_data.append(handle_end_of_slot(sub_slot, eos_vdf_iters))
tmp_sub_slots_data = []
tmp_sub_slots_data.append(self.handle_block_vdfs(curr, blocks))
curr = header_blocks[self.blockchain.height_to_hash(uint32(curr.height + 1))]
curr_header_hash = curr.header_hash
if len(tmp_sub_slots_data) > 0:
sub_slots_data.extend(tmp_sub_slots_data)
log.debug(f"slot end vdf end height {curr.height} slots {len(sub_slots_data)} ")
return sub_slots_data, curr.height
def handle_block_vdfs(self, curr: HeaderBlock, blocks: Dict[bytes32, BlockRecord]):
cc_sp_proof = None
icc_ip_proof = None
cc_sp_info = None
icc_ip_info = None
block_record = blocks[curr.header_hash]
if curr.infused_challenge_chain_ip_proof is not None:
assert curr.reward_chain_block.infused_challenge_chain_ip_vdf
icc_ip_proof = curr.infused_challenge_chain_ip_proof
icc_ip_info = curr.reward_chain_block.infused_challenge_chain_ip_vdf
if curr.challenge_chain_sp_proof is not None:
assert curr.reward_chain_block.challenge_chain_sp_vdf
cc_sp_vdf_info = curr.reward_chain_block.challenge_chain_sp_vdf
if not curr.challenge_chain_sp_proof.normalized_to_identity:
(_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info(
self.constants,
curr.finished_sub_slots,
block_record.overflow,
None if curr.height == 0 else blocks[curr.prev_header_hash],
BlockCache(blocks),
block_record.sp_total_iters(self.constants),
block_record.sp_iters(self.constants),
)
cc_sp_vdf_info = VDFInfo(
curr.reward_chain_block.challenge_chain_sp_vdf.challenge,
cc_vdf_iters,
curr.reward_chain_block.challenge_chain_sp_vdf.output,
)
cc_sp_proof = curr.challenge_chain_sp_proof
cc_sp_info = cc_sp_vdf_info
return SubSlotData(
None,
cc_sp_proof,
curr.challenge_chain_ip_proof,
icc_ip_proof,
cc_sp_info,
curr.reward_chain_block.signage_point_index,
None,
None,
None,
None,
curr.reward_chain_block.challenge_chain_ip_vdf,
icc_ip_info,
curr.total_iters,
)
def validate_weight_proof_single_proc(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0)
peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
log.info(f"validate weight proof peak height {peak_height}")
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.warning("weight proof failed sub epoch data validation")
return False, uint32(0)
constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(
self.constants, summaries, weight_proof
)
log.info("validate sub epoch challenge segments")
seed = summaries[-2].get_hash()
rng = random.Random(seed)
if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
log.error("failed weight proof sub epoch sample validation")
return False, uint32(0)
if not _validate_sub_epoch_segments(constants, rng, wp_segment_bytes, summary_bytes):
return False, uint32(0)
log.info("validate weight proof recent blocks")
if not _validate_recent_blocks(constants, wp_recent_chain_bytes, summary_bytes):
return False, uint32(0)
return True, self.get_fork_point(summaries)
def get_fork_point_no_validations(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
log.debug("get fork point skip validations")
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0)
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.warning("weight proof failed to validate sub epoch summaries")
return False, uint32(0)
return True, self.get_fork_point(summaries)
async def validate_weight_proof(self, weight_proof: WeightProof) -> Tuple[bool, uint32, List[SubEpochSummary]]:
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0), []
peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
log.info(f"validate weight proof peak height {peak_height}")
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.error("weight proof failed sub epoch data validation")
return False, uint32(0), []
seed = summaries[-2].get_hash()
rng = random.Random(seed)
if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
log.error("failed weight proof sub epoch sample validation")
return False, uint32(0), []
executor = ProcessPoolExecutor(1)
constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(
self.constants, summaries, weight_proof
)
segment_validation_task = asyncio.get_running_loop().run_in_executor(
executor, _validate_sub_epoch_segments, constants, rng, wp_segment_bytes, summary_bytes
)
recent_blocks_validation_task = asyncio.get_running_loop().run_in_executor(
executor, _validate_recent_blocks, constants, wp_recent_chain_bytes, summary_bytes
)
valid_segment_task = segment_validation_task
valid_recent_blocks_task = recent_blocks_validation_task
valid_recent_blocks = await valid_recent_blocks_task
if not valid_recent_blocks:
log.error("failed validating weight proof recent blocks")
return False, uint32(0), []
valid_segments = await valid_segment_task
if not valid_segments:
log.error("failed validating weight proof sub epoch segments")
return False, uint32(0), []
return True, self.get_fork_point(summaries), summaries
def get_fork_point(self, received_summaries: List[SubEpochSummary]) -> uint32:
# iterate through sub epoch summaries to find fork point
fork_point_index = 0
ses_heights = self.blockchain.get_ses_heights()
for idx, summary_height in enumerate(ses_heights):
log.debug(f"check summary {idx} height {summary_height}")
local_ses = self.blockchain.get_ses(summary_height)
if idx == len(received_summaries) - 1:
# end of wp summaries, local chain is longer or equal to wp chain
break
if local_ses is None or local_ses.get_hash() != received_summaries[idx].get_hash():
break
fork_point_index = idx
if fork_point_index > 2:
            # Two summaries can have different blocks and still be identical
# This gets resolved after one full sub epoch
height = ses_heights[fork_point_index - 2]
else:
height = uint32(0)
return height
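# Editor's sketch (not part of the original module): typical prover/verifier
# flow for the handler above, assuming an initialized BlockchainInterface
# `blockchain` and a peak header hash `tip` (both hypothetical):
#
#     handler = WeightProofHandler(constants, blockchain)
#     wp = await handler.get_proof_of_weight(tip)                  # prover
#     valid, fork_point, summaries = await handler.validate_weight_proof(wp)
#     # on success, sync can start from fork_point instead of genesis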
def _get_weights_for_sampling(
rng: random.Random, total_weight: uint128, recent_chain: List[HeaderBlock]
) -> Optional[List[uint128]]:
weight_to_check = []
last_l_weight = recent_chain[-1].reward_chain_block.weight - recent_chain[0].reward_chain_block.weight
delta = last_l_weight / total_weight
prob_of_adv_succeeding = 1 - math.log(WeightProofHandler.C, delta)
if prob_of_adv_succeeding <= 0:
return None
queries = -WeightProofHandler.LAMBDA_L * math.log(2, prob_of_adv_succeeding)
for i in range(int(queries) + 1):
u = rng.random()
q = 1 - delta ** u
# todo check division and type conversions
weight = q * float(total_weight)
weight_to_check.append(uint128(int(weight)))
weight_to_check.sort()
return weight_to_check
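# Editor's worked example for the sampling math above (illustrative numbers):
# with delta = last_l_weight / total_weight = 0.1 and C = 0.5,
#   prob_of_adv_succeeding = 1 - log_0.1(0.5) ~= 1 - 0.301 = 0.699
#   queries = -LAMBDA_L * log_0.699(2) = -100 * (-1.935) ~= 193
# so roughly 194 weights are drawn, each as q * total_weight with q = 1 - delta**u.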
def _sample_sub_epoch(
start_of_epoch_weight: uint128,
end_of_epoch_weight: uint128,
weight_to_check: List[uint128],
) -> bool:
"""
weight_to_check: List[uint128] is expected to be sorted
"""
if weight_to_check is None:
return True
if weight_to_check[-1] < start_of_epoch_weight:
return False
if weight_to_check[0] > end_of_epoch_weight:
return False
choose = False
for weight in weight_to_check:
if weight > end_of_epoch_weight:
return False
if start_of_epoch_weight < weight < end_of_epoch_weight:
log.debug(f"start weight: {start_of_epoch_weight}")
log.debug(f"weight to check {weight}")
log.debug(f"end weight: {end_of_epoch_weight}")
choose = True
break
return choose
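# Editor's example: with start/end epoch weights (1000, 2000) and the sorted
# list weight_to_check = [500, 1500, 2500], the sub epoch is sampled because
# 1500 falls strictly inside the (1000, 2000) interval; [2100, 2500] would
# return False immediately, since its smallest weight is already past the
# epoch's end.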
# wp creation methods
def _create_sub_epoch_data(
sub_epoch_summary: SubEpochSummary,
) -> SubEpochData:
reward_chain_hash: bytes32 = sub_epoch_summary.reward_chain_hash
# Number of subblocks overflow in previous slot
previous_sub_epoch_overflows: uint8 = sub_epoch_summary.num_blocks_overflow # total in sub epoch - expected
# New work difficulty and iterations per sub-slot
sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty
return SubEpochData(reward_chain_hash, previous_sub_epoch_overflows, sub_slot_iters, new_difficulty)
async def _challenge_block_vdfs(
constants: ConsensusConstants,
header_block: HeaderBlock,
block_rec: BlockRecord,
sub_blocks: Dict[bytes32, BlockRecord],
):
(_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info(
constants,
header_block.finished_sub_slots,
block_rec.overflow,
None if header_block.height == 0 else sub_blocks[header_block.prev_header_hash],
BlockCache(sub_blocks),
block_rec.sp_total_iters(constants),
block_rec.sp_iters(constants),
)
cc_sp_info = None
if header_block.reward_chain_block.challenge_chain_sp_vdf:
cc_sp_info = header_block.reward_chain_block.challenge_chain_sp_vdf
assert header_block.challenge_chain_sp_proof
if not header_block.challenge_chain_sp_proof.normalized_to_identity:
cc_sp_info = VDFInfo(
header_block.reward_chain_block.challenge_chain_sp_vdf.challenge,
cc_vdf_iters,
header_block.reward_chain_block.challenge_chain_sp_vdf.output,
)
ssd = SubSlotData(
header_block.reward_chain_block.proof_of_space,
header_block.challenge_chain_sp_proof,
header_block.challenge_chain_ip_proof,
None,
cc_sp_info,
header_block.reward_chain_block.signage_point_index,
None,
None,
None,
None,
header_block.reward_chain_block.challenge_chain_ip_vdf,
header_block.reward_chain_block.infused_challenge_chain_ip_vdf,
block_rec.total_iters,
)
return ssd
def handle_finished_slots(end_of_slot: EndOfSubSlotBundle, icc_end_of_slot_info):
return SubSlotData(
None,
None,
None,
None,
None,
None,
None
if end_of_slot.proofs.challenge_chain_slot_proof is None
else end_of_slot.proofs.challenge_chain_slot_proof,
None
if end_of_slot.proofs.infused_challenge_chain_slot_proof is None
else end_of_slot.proofs.infused_challenge_chain_slot_proof,
end_of_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
icc_end_of_slot_info,
None,
None,
None,
)
def handle_end_of_slot(
sub_slot: EndOfSubSlotBundle,
eos_vdf_iters: uint64,
):
assert sub_slot.infused_challenge_chain
assert sub_slot.proofs.infused_challenge_chain_slot_proof
if sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity:
icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
else:
icc_info = VDFInfo(
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.challenge,
eos_vdf_iters,
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
cc_info = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf
else:
cc_info = VDFInfo(
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
eos_vdf_iters,
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
return SubSlotData(
None,
None,
None,
None,
None,
None,
sub_slot.proofs.challenge_chain_slot_proof,
sub_slot.proofs.infused_challenge_chain_slot_proof,
cc_info,
icc_info,
None,
None,
None,
)
def compress_segments(full_segment_index, segments: List[SubEpochChallengeSegment]) -> List[SubEpochChallengeSegment]:
compressed_segments = []
compressed_segments.append(segments[0])
for idx, segment in enumerate(segments[1:]):
if idx != full_segment_index:
# remove all redundant values
segment = compress_segment(segment)
compressed_segments.append(segment)
return compressed_segments
def compress_segment(segment: SubEpochChallengeSegment) -> SubEpochChallengeSegment:
# find challenge slot
comp_seg = SubEpochChallengeSegment(segment.sub_epoch_n, [], segment.rc_slot_end_info)
for slot in segment.sub_slots:
comp_seg.sub_slots.append(slot)
if slot.is_challenge():
break
return segment
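# Editor's note: as written, compress_segment assembles the truncated copy
# comp_seg (cut after the first challenge sub slot) but returns the original
# `segment` unchanged, so compression is effectively a no-op; a compressing
# variant would `return comp_seg` instead. Kept as-is to match the source.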
# wp validation methods
def _validate_sub_epoch_summaries(
constants: ConsensusConstants,
weight_proof: WeightProof,
) -> Tuple[Optional[List[SubEpochSummary]], Optional[List[uint128]]]:
last_ses_hash, last_ses_sub_height = _get_last_ses_hash(constants, weight_proof.recent_chain_data)
if last_ses_hash is None:
log.warning("could not find last ses block")
return None, None
summaries, total, sub_epoch_weight_list = _map_sub_epoch_summaries(
constants.SUB_EPOCH_BLOCKS,
constants.GENESIS_CHALLENGE,
weight_proof.sub_epochs,
constants.DIFFICULTY_STARTING,
)
log.info(f"validating {len(summaries)} sub epochs")
# validate weight
if not _validate_summaries_weight(constants, total, summaries, weight_proof):
log.error("failed validating weight")
return None, None
last_ses = summaries[-1]
log.debug(f"last ses sub height {last_ses_sub_height}")
# validate last ses_hash
if last_ses.get_hash() != last_ses_hash:
log.error(f"failed to validate ses hashes block height {last_ses_sub_height}")
return None, None
return summaries, sub_epoch_weight_list
def _map_sub_epoch_summaries(
sub_blocks_for_se: uint32,
ses_hash: bytes32,
sub_epoch_data: List[SubEpochData],
curr_difficulty: uint64,
) -> Tuple[List[SubEpochSummary], uint128, List[uint128]]:
total_weight: uint128 = uint128(0)
summaries: List[SubEpochSummary] = []
sub_epoch_weight_list: List[uint128] = []
for idx, data in enumerate(sub_epoch_data):
ses = SubEpochSummary(
ses_hash,
data.reward_chain_hash,
data.num_blocks_overflow,
data.new_difficulty,
data.new_sub_slot_iters,
)
if idx < len(sub_epoch_data) - 1:
delta = 0
if idx > 0:
delta = sub_epoch_data[idx].num_blocks_overflow
log.debug(f"sub epoch {idx} start weight is {total_weight+curr_difficulty} ")
sub_epoch_weight_list.append(uint128(total_weight + curr_difficulty))
total_weight = total_weight + uint128( # type: ignore
curr_difficulty * (sub_blocks_for_se + sub_epoch_data[idx + 1].num_blocks_overflow - delta)
)
# if new epoch update diff and iters
if data.new_difficulty is not None:
curr_difficulty = data.new_difficulty
# add to dict
summaries.append(ses)
ses_hash = std_hash(ses)
# add last sub epoch weight
sub_epoch_weight_list.append(uint128(total_weight + curr_difficulty))
return summaries, total_weight, sub_epoch_weight_list
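# Editor's worked example for the weight accumulation above (illustrative
# numbers): with sub_blocks_for_se = 384, curr_difficulty = 10, a previous
# overflow delta of 3 and a next-sub-epoch overflow of 5, the sub epoch adds
# 10 * (384 + 5 - 3) = 3860 to total_weight, and the start weight recorded in
# sub_epoch_weight_list is total_weight + curr_difficulty.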
def _validate_summaries_weight(constants: ConsensusConstants, sub_epoch_data_weight, summaries, weight_proof) -> bool:
num_over = summaries[-1].num_blocks_overflow
ses_end_height = (len(summaries) - 1) * constants.SUB_EPOCH_BLOCKS + num_over - 1
curr = None
for block in weight_proof.recent_chain_data:
if block.reward_chain_block.height == ses_end_height:
curr = block
if curr is None:
return False
return curr.reward_chain_block.weight == sub_epoch_data_weight
def _validate_sub_epoch_segments(
constants_dict: Dict,
rng: random.Random,
weight_proof_bytes: bytes,
summaries_bytes: List[bytes],
):
constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
sub_epoch_segments: SubEpochSegments = SubEpochSegments.from_bytes(weight_proof_bytes)
rc_sub_slot_hash = constants.GENESIS_CHALLENGE
    total_blocks, total_ip_iters = 0, 0
    total_slot_iters, total_slots = 0, 0
prev_ses: Optional[SubEpochSummary] = None
segments_by_sub_epoch = map_segments_by_sub_epoch(sub_epoch_segments.challenge_segments)
curr_ssi = constants.SUB_SLOT_ITERS_STARTING
for sub_epoch_n, segments in segments_by_sub_epoch.items():
prev_ssi = curr_ssi
curr_difficulty, curr_ssi = _get_curr_diff_ssi(constants, sub_epoch_n, summaries)
log.debug(f"validate sub epoch {sub_epoch_n}")
# recreate RewardChainSubSlot for next ses rc_hash
sampled_seg_index = rng.choice(range(len(segments)))
if sub_epoch_n > 0:
rc_sub_slot = __get_rc_sub_slot(constants, segments[0], summaries, curr_ssi)
prev_ses = summaries[sub_epoch_n - 1]
rc_sub_slot_hash = rc_sub_slot.get_hash()
if not summaries[sub_epoch_n].reward_chain_hash == rc_sub_slot_hash:
log.error(f"failed reward_chain_hash validation sub_epoch {sub_epoch_n}")
return False
for idx, segment in enumerate(segments):
valid_segment, ip_iters, slot_iters, slots = _validate_segment(
constants, segment, curr_ssi, prev_ssi, curr_difficulty, prev_ses, idx == 0, sampled_seg_index == idx
)
if not valid_segment:
log.error(f"failed to validate sub_epoch {segment.sub_epoch_n} segment {idx} slots")
return False
prev_ses = None
total_blocks += 1
total_slot_iters += slot_iters
total_slots += slots
total_ip_iters += ip_iters
return True
def _validate_segment(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
curr_ssi: uint64,
prev_ssi: uint64,
curr_difficulty: uint64,
ses: Optional[SubEpochSummary],
first_segment_in_se: bool,
sampled: bool,
) -> Tuple[bool, int, int, int]:
ip_iters, slot_iters, slots = 0, 0, 0
after_challenge = False
for idx, sub_slot_data in enumerate(segment.sub_slots):
if sampled and sub_slot_data.is_challenge():
after_challenge = True
required_iters = __validate_pospace(constants, segment, idx, curr_difficulty, ses, first_segment_in_se)
if required_iters is None:
return False, uint64(0), uint64(0), uint64(0)
assert sub_slot_data.signage_point_index is not None
ip_iters = ip_iters + calculate_ip_iters( # type: ignore
constants, curr_ssi, sub_slot_data.signage_point_index, required_iters
)
if not _validate_challenge_block_vdfs(constants, idx, segment.sub_slots, curr_ssi):
log.error(f"failed to validate challenge slot {idx} vdfs")
return False, uint64(0), uint64(0), uint64(0)
elif sampled and after_challenge:
if not _validate_sub_slot_data(constants, idx, segment.sub_slots, curr_ssi):
log.error(f"failed to validate sub slot data {idx} vdfs")
return False, uint64(0), uint64(0), uint64(0)
slot_iters = slot_iters + curr_ssi # type: ignore
slots = slots + uint64(1) # type: ignore
return True, ip_iters, slot_iters, slots
def _validate_challenge_block_vdfs(
constants: ConsensusConstants,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
ssi: uint64,
) -> bool:
sub_slot_data = sub_slots[sub_slot_idx]
if sub_slot_data.cc_signage_point is not None and sub_slot_data.cc_sp_vdf_info:
assert sub_slot_data.signage_point_index
sp_input = ClassgroupElement.get_default_element()
if not sub_slot_data.cc_signage_point.normalized_to_identity and sub_slot_idx >= 1:
is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
prev_ssd = sub_slots[sub_slot_idx - 1]
sp_input = sub_slot_data_vdf_input(
constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
)
if not sub_slot_data.cc_signage_point.is_valid(constants, sp_input, sub_slot_data.cc_sp_vdf_info):
log.error(f"failed to validate challenge chain signage point 2 {sub_slot_data.cc_sp_vdf_info}")
return False
assert sub_slot_data.cc_infusion_point
assert sub_slot_data.cc_ip_vdf_info
ip_input = ClassgroupElement.get_default_element()
cc_ip_vdf_info = sub_slot_data.cc_ip_vdf_info
if not sub_slot_data.cc_infusion_point.normalized_to_identity and sub_slot_idx >= 1:
prev_ssd = sub_slots[sub_slot_idx - 1]
if prev_ssd.cc_slot_end is None:
assert prev_ssd.cc_ip_vdf_info
assert prev_ssd.total_iters
assert sub_slot_data.total_iters
ip_input = prev_ssd.cc_ip_vdf_info.output
ip_vdf_iters = uint64(sub_slot_data.total_iters - prev_ssd.total_iters)
cc_ip_vdf_info = VDFInfo(
sub_slot_data.cc_ip_vdf_info.challenge, ip_vdf_iters, sub_slot_data.cc_ip_vdf_info.output
)
if not sub_slot_data.cc_infusion_point.is_valid(constants, ip_input, cc_ip_vdf_info):
log.error(f"failed to validate challenge chain infusion point {sub_slot_data.cc_ip_vdf_info}")
return False
return True
def _validate_sub_slot_data(
constants: ConsensusConstants,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
ssi: uint64,
) -> bool:
sub_slot_data = sub_slots[sub_slot_idx]
assert sub_slot_idx > 0
prev_ssd = sub_slots[sub_slot_idx - 1]
if sub_slot_data.is_end_of_slot():
if sub_slot_data.icc_slot_end is not None:
input = ClassgroupElement.get_default_element()
if not sub_slot_data.icc_slot_end.normalized_to_identity and prev_ssd.icc_ip_vdf_info is not None:
assert prev_ssd.icc_ip_vdf_info
input = prev_ssd.icc_ip_vdf_info.output
assert sub_slot_data.icc_slot_end_info
if not sub_slot_data.icc_slot_end.is_valid(constants, input, sub_slot_data.icc_slot_end_info, None):
log.error(f"failed icc slot end validation {sub_slot_data.icc_slot_end_info} ")
return False
assert sub_slot_data.cc_slot_end_info
assert sub_slot_data.cc_slot_end
input = ClassgroupElement.get_default_element()
if (not prev_ssd.is_end_of_slot()) and (not sub_slot_data.cc_slot_end.normalized_to_identity):
assert prev_ssd.cc_ip_vdf_info
input = prev_ssd.cc_ip_vdf_info.output
if not sub_slot_data.cc_slot_end.is_valid(constants, input, sub_slot_data.cc_slot_end_info):
log.error(f"failed cc slot end validation {sub_slot_data.cc_slot_end_info}")
return False
else:
# find end of slot
idx = sub_slot_idx
while idx < len(sub_slots) - 1:
curr_slot = sub_slots[idx]
if curr_slot.is_end_of_slot():
                # don't validate intermediate vdfs if the slot is blue boxed
assert curr_slot.cc_slot_end
if curr_slot.cc_slot_end.normalized_to_identity is True:
log.debug(f"skip intermediate vdfs slot {sub_slot_idx}")
return True
else:
break
idx += 1
if sub_slot_data.icc_infusion_point is not None and sub_slot_data.icc_ip_vdf_info is not None:
input = ClassgroupElement.get_default_element()
if not prev_ssd.is_challenge() and prev_ssd.icc_ip_vdf_info is not None:
input = prev_ssd.icc_ip_vdf_info.output
if not sub_slot_data.icc_infusion_point.is_valid(constants, input, sub_slot_data.icc_ip_vdf_info, None):
log.error(f"failed icc infusion point vdf validation {sub_slot_data.icc_slot_end_info} ")
return False
assert sub_slot_data.signage_point_index is not None
if sub_slot_data.cc_signage_point:
assert sub_slot_data.cc_sp_vdf_info
input = ClassgroupElement.get_default_element()
if not sub_slot_data.cc_signage_point.normalized_to_identity:
is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
input = sub_slot_data_vdf_input(
constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
)
if not sub_slot_data.cc_signage_point.is_valid(constants, input, sub_slot_data.cc_sp_vdf_info):
log.error(f"failed cc signage point vdf validation {sub_slot_data.cc_sp_vdf_info}")
return False
input = ClassgroupElement.get_default_element()
assert sub_slot_data.cc_ip_vdf_info
assert sub_slot_data.cc_infusion_point
cc_ip_vdf_info = sub_slot_data.cc_ip_vdf_info
if not sub_slot_data.cc_infusion_point.normalized_to_identity and prev_ssd.cc_slot_end is None:
assert prev_ssd.cc_ip_vdf_info
input = prev_ssd.cc_ip_vdf_info.output
assert sub_slot_data.total_iters
assert prev_ssd.total_iters
ip_vdf_iters = uint64(sub_slot_data.total_iters - prev_ssd.total_iters)
cc_ip_vdf_info = VDFInfo(
sub_slot_data.cc_ip_vdf_info.challenge, ip_vdf_iters, sub_slot_data.cc_ip_vdf_info.output
)
if not sub_slot_data.cc_infusion_point.is_valid(constants, input, cc_ip_vdf_info):
log.error(f"failed cc infusion point vdf validation {sub_slot_data.cc_slot_end_info}")
return False
return True
def sub_slot_data_vdf_input(
constants: ConsensusConstants,
sub_slot_data: SubSlotData,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
is_overflow: bool,
new_sub_slot: bool,
ssi: uint64,
) -> ClassgroupElement:
cc_input = ClassgroupElement.get_default_element()
sp_total_iters = get_sp_total_iters(constants, is_overflow, ssi, sub_slot_data)
ssd: Optional[SubSlotData] = None
if is_overflow and new_sub_slot:
if sub_slot_idx >= 2:
if sub_slots[sub_slot_idx - 2].cc_slot_end_info is None:
for ssd_idx in reversed(range(0, sub_slot_idx - 1)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
ssd = sub_slots[ssd_idx + 1]
break
if not (ssd.total_iters > sp_total_iters):
break
if ssd and ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
elif not is_overflow and not new_sub_slot:
for ssd_idx in reversed(range(0, sub_slot_idx)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
ssd = sub_slots[ssd_idx + 1]
break
if not (ssd.total_iters > sp_total_iters):
break
assert ssd is not None
if ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
elif not new_sub_slot and is_overflow:
slots_seen = 0
for ssd_idx in reversed(range(0, sub_slot_idx)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
slots_seen += 1
if slots_seen == 2:
return ClassgroupElement.get_default_element()
if ssd.cc_slot_end_info is None and not (ssd.total_iters > sp_total_iters):
break
assert ssd is not None
if ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
        return cc_input
    return cc_input
def _validate_recent_blocks(constants_dict: Dict, recent_chain_bytes: bytes, summaries_bytes: List[bytes]) -> bool:
constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
recent_chain: RecentChainData = RecentChainData.from_bytes(recent_chain_bytes)
sub_blocks = BlockCache({})
first_ses_idx = _get_ses_idx(recent_chain.recent_chain_data)
ses_idx = len(summaries) - len(first_ses_idx)
ssi: uint64 = constants.SUB_SLOT_ITERS_STARTING
diff: Optional[uint64] = constants.DIFFICULTY_STARTING
last_blocks_to_validate = 100 # todo remove cap after benchmarks
for summary in summaries[:ses_idx]:
if summary.new_sub_slot_iters is not None:
ssi = summary.new_sub_slot_iters
if summary.new_difficulty is not None:
diff = summary.new_difficulty
ses_blocks, sub_slots, transaction_blocks = 0, 0, 0
challenge, prev_challenge = None, None
tip_height = recent_chain.recent_chain_data[-1].height
prev_block_record = None
deficit = uint8(0)
for idx, block in enumerate(recent_chain.recent_chain_data):
required_iters = uint64(0)
overflow = False
ses = False
height = block.height
for sub_slot in block.finished_sub_slots:
prev_challenge = challenge
challenge = sub_slot.challenge_chain.get_hash()
deficit = sub_slot.reward_chain.deficit
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
ses = True
assert summaries[ses_idx].get_hash() == sub_slot.challenge_chain.subepoch_summary_hash
ses_idx += 1
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
ssi = sub_slot.challenge_chain.new_sub_slot_iters
if sub_slot.challenge_chain.new_difficulty is not None:
diff = sub_slot.challenge_chain.new_difficulty
if (challenge is not None) and (prev_challenge is not None):
overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
deficit = get_deficit(constants, deficit, prev_block_record, overflow, len(block.finished_sub_slots))
log.debug(f"wp, validate block {block.height}")
if sub_slots > 2 and transaction_blocks > 11 and (tip_height - block.height < last_blocks_to_validate):
required_iters, error = validate_finished_header_block(
constants, sub_blocks, block, False, diff, ssi, ses_blocks > 2
)
if error is not None:
log.error(f"block {block.header_hash} failed validation {error}")
return False
else:
required_iters = _validate_pospace_recent_chain(
constants, block, challenge, diff, overflow, prev_challenge
)
if required_iters is None:
return False
curr_block_ses = None if not ses else summaries[ses_idx - 1]
block_record = header_block_to_sub_block_record(
constants, required_iters, block, ssi, overflow, deficit, height, curr_block_ses
)
log.debug(f"add block {block_record.height} to tmp sub blocks")
sub_blocks.add_block_record(block_record)
if block.first_in_sub_slot:
sub_slots += 1
if block.is_transaction_block:
transaction_blocks += 1
if ses:
ses_blocks += 1
prev_block_record = block_record
return True
def _validate_pospace_recent_chain(
constants: ConsensusConstants,
block: HeaderBlock,
challenge: bytes32,
diff: uint64,
overflow: bool,
prev_challenge: bytes32,
):
if block.reward_chain_block.challenge_chain_sp_vdf is None:
# Edge case of first sp (start of slot), where sp_iters == 0
cc_sp_hash: bytes32 = challenge
else:
cc_sp_hash = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
assert cc_sp_hash is not None
q_str = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants,
challenge if not overflow else prev_challenge,
cc_sp_hash,
)
if q_str is None:
log.error(f"could not verify proof of space block {block.height} {overflow}")
return None
required_iters = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
block.reward_chain_block.proof_of_space.size,
diff,
cc_sp_hash,
)
return required_iters
def __validate_pospace(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
idx: int,
curr_diff: uint64,
ses: Optional[SubEpochSummary],
first_in_sub_epoch: bool,
) -> Optional[uint64]:
if first_in_sub_epoch and segment.sub_epoch_n == 0 and idx == 0:
cc_sub_slot_hash = constants.GENESIS_CHALLENGE
else:
cc_sub_slot_hash = __get_cc_sub_slot(segment.sub_slots, idx, ses).get_hash()
sub_slot_data: SubSlotData = segment.sub_slots[idx]
if sub_slot_data.signage_point_index and is_overflow_block(constants, sub_slot_data.signage_point_index):
curr_slot = segment.sub_slots[idx - 1]
assert curr_slot.cc_slot_end_info
challenge = curr_slot.cc_slot_end_info.challenge
else:
challenge = cc_sub_slot_hash
if sub_slot_data.cc_sp_vdf_info is None:
cc_sp_hash = cc_sub_slot_hash
else:
cc_sp_hash = sub_slot_data.cc_sp_vdf_info.output.get_hash()
# validate proof of space
assert sub_slot_data.proof_of_space is not None
q_str = sub_slot_data.proof_of_space.verify_and_get_quality_string(
constants,
challenge,
cc_sp_hash,
)
if q_str is None:
log.error("could not verify proof of space")
return None
return calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
sub_slot_data.proof_of_space.size,
curr_diff,
cc_sp_hash,
)
def __get_rc_sub_slot(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
summaries: List[SubEpochSummary],
curr_ssi: uint64,
) -> RewardChainSubSlot:
ses = summaries[uint32(segment.sub_epoch_n - 1)]
# find first challenge in sub epoch
first_idx = None
first = None
for idx, curr in enumerate(segment.sub_slots):
if curr.cc_slot_end is None:
first_idx = idx
first = curr
break
    assert first_idx is not None
idx = first_idx
slots = segment.sub_slots
# number of slots to look for
slots_n = 1
assert first
assert first.signage_point_index is not None
if is_overflow_block(constants, first.signage_point_index):
if idx >= 2 and slots[idx - 2].cc_slot_end is None:
slots_n = 2
new_diff = None if ses is None else ses.new_difficulty
new_ssi = None if ses is None else ses.new_sub_slot_iters
ses_hash = None if ses is None else ses.get_hash()
overflow = is_overflow_block(constants, first.signage_point_index)
if overflow:
if idx >= 2 and slots[idx - 2].cc_slot_end is not None and slots[idx - 1].cc_slot_end is not None:
ses_hash = None
new_ssi = None
new_diff = None
sub_slot = slots[idx]
while True:
if sub_slot.cc_slot_end:
slots_n -= 1
if slots_n == 0:
break
idx -= 1
sub_slot = slots[idx]
icc_sub_slot_hash: Optional[bytes32] = None
assert sub_slot is not None
assert sub_slot.cc_slot_end_info is not None
assert segment.rc_slot_end_info is not None
if idx != 0:
cc_vdf_info = VDFInfo(sub_slot.cc_slot_end_info.challenge, curr_ssi, sub_slot.cc_slot_end_info.output)
if sub_slot.icc_slot_end_info is not None:
icc_slot_end_info = VDFInfo(
sub_slot.icc_slot_end_info.challenge, curr_ssi, sub_slot.icc_slot_end_info.output
)
icc_sub_slot_hash = icc_slot_end_info.get_hash()
else:
cc_vdf_info = sub_slot.cc_slot_end_info
if sub_slot.icc_slot_end_info is not None:
icc_sub_slot_hash = sub_slot.icc_slot_end_info.get_hash()
cc_sub_slot = ChallengeChainSubSlot(
cc_vdf_info,
icc_sub_slot_hash,
ses_hash,
new_ssi,
new_diff,
)
rc_sub_slot = RewardChainSubSlot(
segment.rc_slot_end_info,
cc_sub_slot.get_hash(),
icc_sub_slot_hash,
constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK,
)
return rc_sub_slot
def __get_cc_sub_slot(sub_slots: List[SubSlotData], idx, ses: Optional[SubEpochSummary]) -> ChallengeChainSubSlot:
sub_slot: Optional[SubSlotData] = None
for i in reversed(range(0, idx)):
sub_slot = sub_slots[i]
if sub_slot.cc_slot_end_info is not None:
break
assert sub_slot is not None
assert sub_slot.cc_slot_end_info is not None
icc_vdf = sub_slot.icc_slot_end_info
icc_vdf_hash: Optional[bytes32] = None
if icc_vdf is not None:
icc_vdf_hash = icc_vdf.get_hash()
cc_sub_slot = ChallengeChainSubSlot(
sub_slot.cc_slot_end_info,
icc_vdf_hash,
None if ses is None else ses.get_hash(),
None if ses is None else ses.new_sub_slot_iters,
None if ses is None else ses.new_difficulty,
)
return cc_sub_slot
def _get_curr_diff_ssi(constants: ConsensusConstants, idx, summaries):
curr_difficulty = constants.DIFFICULTY_STARTING
curr_ssi = constants.SUB_SLOT_ITERS_STARTING
for ses in reversed(summaries[0:idx]):
if ses.new_sub_slot_iters is not None:
curr_ssi = ses.new_sub_slot_iters
curr_difficulty = ses.new_difficulty
break
return curr_difficulty, curr_ssi
def vars_to_bytes(constants, summaries, weight_proof):
constants_dict = recurse_jsonify(dataclasses.asdict(constants))
wp_recent_chain_bytes = bytes(RecentChainData(weight_proof.recent_chain_data))
wp_segment_bytes = bytes(SubEpochSegments(weight_proof.sub_epoch_segments))
summary_bytes = []
for summary in summaries:
summary_bytes.append(bytes(summary))
return constants_dict, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes
def bytes_to_vars(constants_dict, summaries_bytes):
summaries = []
for summary in summaries_bytes:
summaries.append(SubEpochSummary.from_bytes(summary))
constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
return constants, summaries
def _get_last_ses_hash(
constants: ConsensusConstants, recent_reward_chain: List[HeaderBlock]
) -> Tuple[Optional[bytes32], uint32]:
for idx, block in enumerate(reversed(recent_reward_chain)):
if (block.reward_chain_block.height % constants.SUB_EPOCH_BLOCKS) == 0:
idx = len(recent_reward_chain) - 1 - idx # reverse
# find first block after sub slot end
while idx < len(recent_reward_chain):
curr = recent_reward_chain[idx]
if len(curr.finished_sub_slots) > 0:
for slot in curr.finished_sub_slots:
if slot.challenge_chain.subepoch_summary_hash is not None:
return (
slot.challenge_chain.subepoch_summary_hash,
curr.reward_chain_block.height,
)
idx += 1
return None, uint32(0)
def _get_ses_idx(recent_reward_chain: List[HeaderBlock]) -> List[int]:
idxs: List[int] = []
for idx, curr in enumerate(recent_reward_chain):
if len(curr.finished_sub_slots) > 0:
for slot in curr.finished_sub_slots:
if slot.challenge_chain.subepoch_summary_hash is not None:
idxs.append(idx)
return idxs
def get_deficit(
constants: ConsensusConstants,
curr_deficit: uint8,
    prev_block: Optional[BlockRecord],
overflow: bool,
num_finished_sub_slots: int,
) -> uint8:
if prev_block is None:
if curr_deficit >= 1 and not (overflow and curr_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK):
curr_deficit -= 1
return curr_deficit
return calculate_deficit(constants, uint32(prev_block.height + 1), prev_block, overflow, num_finished_sub_slots)
def get_sp_total_iters(constants: ConsensusConstants, is_overflow: bool, ssi: uint64, sub_slot_data: SubSlotData):
assert sub_slot_data.cc_ip_vdf_info is not None
assert sub_slot_data.total_iters is not None
assert sub_slot_data.signage_point_index is not None
sp_iters: uint64 = calculate_sp_iters(constants, ssi, sub_slot_data.signage_point_index)
ip_iters: uint64 = sub_slot_data.cc_ip_vdf_info.number_of_iterations
sp_sub_slot_total_iters = uint128(sub_slot_data.total_iters - ip_iters)
if is_overflow:
sp_sub_slot_total_iters = uint128(sp_sub_slot_total_iters - ssi)
return sp_sub_slot_total_iters + sp_iters
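# Illustrative only, not part of the original file: the arithmetic above with
# made-up numbers. total_iters marks the block's infusion point (ip), so
# subtracting ip_iters recovers the start of its sub slot; an overflow block's
# signage point lies one full sub slot (ssi) earlier.
def _sp_total_iters_example(is_overflow: bool) -> int:
    ssi, total_iters, ip_iters, sp_iters = 1000, 5300, 300, 250
    sp_sub_slot_total_iters = total_iters - ip_iters  # 5000, start of the sub slot
    if is_overflow:
        sp_sub_slot_total_iters -= ssi  # 4000, one sub slot earlier
    return sp_sub_slot_total_iters + sp_iters  # 5250, or 4250 with overflow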
def blue_boxed_end_of_slot(sub_slot: EndOfSubSlotBundle):
if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
if sub_slot.proofs.infused_challenge_chain_slot_proof is not None:
if sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity:
return True
else:
return True
return False
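# Illustrative only, not part of the original file: the predicate above is
# True exactly when every end-of-slot proof that is present was normalized to
# identity (i.e. compactified by a blue-box timelord). Reduced to plain values:
def _blue_boxed_example(cc_normalized: bool, icc_normalized=None) -> bool:
    # icc_normalized is None when there is no infused challenge chain proof
    return cc_normalized and (icc_normalized is None or bool(icc_normalized))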
def validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
tip = weight_proof.recent_chain_data[-1]
weight_to_check = _get_weights_for_sampling(rng, tip.weight, weight_proof.recent_chain_data)
    sampled_sub_epochs: Dict[int, bool] = {}
for idx in range(1, len(sub_epoch_weight_list)):
if _sample_sub_epoch(sub_epoch_weight_list[idx - 1], sub_epoch_weight_list[idx], weight_to_check):
sampled_sub_epochs[idx - 1] = True
if len(sampled_sub_epochs) == WeightProofHandler.MAX_SAMPLES:
break
curr_sub_epoch_n = -1
for sub_epoch_segment in weight_proof.sub_epoch_segments:
if curr_sub_epoch_n < sub_epoch_segment.sub_epoch_n:
if sub_epoch_segment.sub_epoch_n in sampled_sub_epochs:
del sampled_sub_epochs[sub_epoch_segment.sub_epoch_n]
curr_sub_epoch_n = sub_epoch_segment.sub_epoch_n
if len(sampled_sub_epochs) > 0:
return False
return True
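# Illustrative only, not part of the original file: the loop above succeeds
# exactly when every sampled sub epoch index is served by at least one
# segment. The same check over plain ints:
def _sampling_check_example(sampled: set, segment_sub_epochs: list) -> bool:
    remaining = set(sampled)
    for n in segment_sub_epochs:
        remaining.discard(n)
    return not remaining  # True iff every sampled sub epoch was covered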
def map_segments_by_sub_epoch(sub_epoch_segments) -> Dict[int, List[SubEpochChallengeSegment]]:
segments: Dict[int, List[SubEpochChallengeSegment]] = {}
curr_sub_epoch_n = -1
for idx, segment in enumerate(sub_epoch_segments):
if curr_sub_epoch_n < segment.sub_epoch_n:
curr_sub_epoch_n = segment.sub_epoch_n
segments[curr_sub_epoch_n] = []
segments[curr_sub_epoch_n].append(segment)
return segments
def validate_total_iters(
segment: SubEpochChallengeSegment,
sub_slot_data_idx,
expected_sub_slot_iters: uint64,
finished_sub_slots_since_prev: int,
prev_b: SubSlotData,
prev_sub_slot_data_iters,
genesis,
) -> bool:
sub_slot_data = segment.sub_slots[sub_slot_data_idx]
if genesis:
total_iters: uint128 = uint128(expected_sub_slot_iters * finished_sub_slots_since_prev)
elif segment.sub_slots[sub_slot_data_idx - 1].is_end_of_slot():
assert prev_b.total_iters
assert prev_b.cc_ip_vdf_info
total_iters = prev_b.total_iters
# Add the rest of the slot of prev_b
total_iters = uint128(total_iters + prev_sub_slot_data_iters - prev_b.cc_ip_vdf_info.number_of_iterations)
# Add other empty slots
total_iters = uint128(total_iters + (expected_sub_slot_iters * (finished_sub_slots_since_prev - 1)))
else:
# Slot iters is guaranteed to be the same for header_block and prev_b
# This takes the beginning of the slot, and adds ip_iters
assert prev_b.cc_ip_vdf_info
assert prev_b.total_iters
total_iters = uint128(prev_b.total_iters - prev_b.cc_ip_vdf_info.number_of_iterations)
total_iters = uint128(total_iters + sub_slot_data.cc_ip_vdf_info.number_of_iterations)
return total_iters == sub_slot_data.total_iters
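# Illustrative only, not part of the original file: a worked instance of the
# end-of-slot branch above, with made-up numbers. prev_b's slot is completed
# first, then the remaining empty slots are added, then the new block's own
# ip iterations (the final addition, which upstream applies to every branch).
def _total_iters_example() -> int:
    ssi = 1000                       # expected_sub_slot_iters
    prev_total, prev_ip = 4300, 300  # prev_b.total_iters and its ip iterations
    prev_slot_iters = 1000           # prev_sub_slot_data_iters
    finished_slots = 2               # finished_sub_slots_since_prev
    own_ip = 150                     # this block's cc_ip_vdf_info iterations
    total = prev_total + (prev_slot_iters - prev_ip)  # 5000, end of prev slot
    total += ssi * (finished_slots - 1)               # 6000, skip empty slots
    return total + own_ip                             # 6150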
|
[
"faurepierre78@yahoo.com"
] |
faurepierre78@yahoo.com
|
393fe84cca759b9236a600d0c71f10ffda0a904a
|
fc78f7cfa4e63e5d6df787d4bd5ba58d50ac2895
|
/manage.py
|
81154eb46b1ff68007a6fa716702021e1dc4c026
|
[] |
no_license
|
Erecik1/boostingweb
|
d608c109d61d4e2f0badd9af5477e10f4b780291
|
e0032c039b1a527dccc76a4b1cf5e04355001aa3
|
refs/heads/master
| 2023-08-01T03:43:19.992022
| 2021-09-22T22:03:19
| 2021-09-22T22:03:19
| 405,763,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tipsy.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"eret112@gmail.com"
] |
eret112@gmail.com
|
81aee84f897fcf6c6ae6f9a8c473d7c9123dda6d
|
e6db96ae32326be9b448f4c3bff158a295036571
|
/tensorFlowStudy/classification_test.py
|
95c54c9b0eba777168868053a5ce2ffcab877578
|
[] |
no_license
|
houweitao/TensorFlow
|
f2e65285f01b52df282bdecc2e01e2e30e3b9b43
|
d8a70592dde70e37f47aae2649b3003b746188f7
|
refs/heads/master
| 2021-01-12T14:59:27.935750
| 2017-01-13T07:48:34
| 2017-01-13T07:48:34
| 71,659,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,046
|
py
|
# -*- coding: utf-8 -*-
# _author_ = 'hou'
# _project_: classification_test
# _date_ = 16/10/23 4:15 PM
# https://www.youtube.com/watch?v=aNjdw9w_Qyc&index=17&list=PLXO45tsB95cKI5AIlf5TxxFPzb-0zeVZ8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import add_layer as layer
# data http://yann.lecun.com/exdb/mnist/
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# define placeholder
xs = tf.placeholder(tf.float32, [None, 784])
# xs = tf.placeholder(tf.float(32), [None, 784])  # 28*28 pixels per image
ys = tf.placeholder(tf.float32, [None, 10])  # 10 output classes
# add output layer
predication = layer.add_layer(xs, 784, 10, activation_function=tf.nn.softmax)
# another
# b = tf.Variable(tf.zeros([10]))
# W = tf.Variable(tf.zeros([784,10]))
# predication= tf.nn.softmax(tf.matmul(xs,W) + b);
# loss
# the negative sum makes this the usual cross-entropy H(y, p) = -sum(y * log(p))
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(predication), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)
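# Illustrative only, not part of the original file: what the loss above
# computes for a single one-hot labelled example, in plain Python.
import math
def _cross_entropy_example() -> float:
    ys_row, pred_row = [0.0, 1.0, 0.0], [0.2, 0.7, 0.1]
    return -sum(y * math.log(p) for y, p in zip(ys_row, pred_row))  # -log(0.7) ~ 0.357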
sess = tf.Session()
sess.run(tf.initialize_all_variables())
def compute_accuracy(v_xs, v_ys):
global predication
y_pre = sess.run(predication, feed_dict={xs: v_xs})
correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
return result
# def compute_accuracy1(v_xs, v_ys):
# global predication
# y_pre = sess.run(predication, feed_dict={xs: v_xs})
# correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
# return result
for step in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)  # take a batch of 100 each step
sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
if step % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
|
[
"hou103880@163.com"
] |
hou103880@163.com
|