hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f48a37c83c1796f2a62bd36beced843be001facf | 704 | py | Python | abides-markets/tests/test_generators.py | jpmorganchase/ABIDES-jpmc-gym | 198736a1b1316190072356c980412569579f15a6 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-09-23T13:17:26.000Z | 2021-09-23T13:17:26.000Z | abides-markets/tests/test_generators.py | jpmorganchase/ABIDES-gym | 198736a1b1316190072356c980412569579f15a6 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | abides-markets/tests/test_generators.py | jpmorganchase/ABIDES-gym | 198736a1b1316190072356c980412569579f15a6 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | import numpy as np
from abides_markets.generators import (
ConstantDepthGenerator,
ConstantOrderSizeGenerator,
UniformDepthGenerator,
UniformOrderSizeGenerator,
)
| 20.114286 | 65 | 0.711648 | import numpy as np
from abides_markets.generators import (
ConstantDepthGenerator,
ConstantOrderSizeGenerator,
UniformDepthGenerator,
UniformOrderSizeGenerator,
)
def test_constant_depth_generator():
    """A constant depth generator always yields its configured depth."""
    gen = ConstantDepthGenerator(10)
    assert gen.next() == 10
    assert gen.mean() == 10
def test_constant_order_size_generator():
    """A constant order-size generator always yields its configured size."""
    gen = ConstantOrderSizeGenerator(10)
    assert gen.next() == 10
    assert gen.mean() == 10
def test_uniform_depth_generator():
    """A uniform depth generator over [0, 10] reports a mean of 5."""
    rng = np.random.RandomState()
    gen = UniformDepthGenerator(0, 10, rng)
    assert gen.mean() == 5
def test_uniform_order_size_generator():
    """A uniform order-size generator over [0, 10] reports a mean of 5."""
    rng = np.random.RandomState()
    gen = UniformOrderSizeGenerator(0, 10, rng)
    assert gen.mean() == 5
| 428 | 0 | 92 |
b37f4171da265d31b0fad57514eb67101347cbc0 | 754 | py | Python | kaffeefinder/apps/comments/views.py | rezyte/kaffeefinder | e29a10ce3bc9e6e80f36f2db0e46262ea0f27363 | [
"MIT"
] | null | null | null | kaffeefinder/apps/comments/views.py | rezyte/kaffeefinder | e29a10ce3bc9e6e80f36f2db0e46262ea0f27363 | [
"MIT"
] | null | null | null | kaffeefinder/apps/comments/views.py | rezyte/kaffeefinder | e29a10ce3bc9e6e80f36f2db0e46262ea0f27363 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, reverse
from django.views import View, generic
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import CommentForm
| 34.272727 | 71 | 0.738727 | from django.shortcuts import render, redirect, reverse
from django.views import View, generic
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import CommentForm
class CreateCommentView(LoginRequiredMixin, View):
    """Handle comment submission for a cafe detail page (login required)."""

    def post(self, request, slug, *args, **kwargs):
        """Validate and save the posted comment form, then redirect back to
        the cafe page with a flash message describing the outcome."""
        form = CommentForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            messages.success(request, "شما با موفقیت نظر خود را ثبت کردید. ")
        else:
            # Bug fix: the failure branch previously called messages.success,
            # flashing an error text with success styling. Debug prints removed.
            messages.error(request, "قرم نظر به درستی کامل نشده است")
        return redirect(reverse("cafes:single-cafe", kwargs={"slug": slug}))
| 508 | 29 | 47 |
81514642a6cee548a76da2c49b83ed84280a5b7b | 777 | py | Python | backend/migrations/0006_auto_20210811_2310.py | saad4software/MMSS-Backend | fddb2ff94eab905df321dd0ce574b7ae787f067d | [
"Apache-2.0"
] | null | null | null | backend/migrations/0006_auto_20210811_2310.py | saad4software/MMSS-Backend | fddb2ff94eab905df321dd0ce574b7ae787f067d | [
"Apache-2.0"
] | null | null | null | backend/migrations/0006_auto_20210811_2310.py | saad4software/MMSS-Backend | fddb2ff94eab905df321dd0ce574b7ae787f067d | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2 on 2021-08-11 20:10
import datetime
from django.db import migrations, models
| 26.793103 | 100 | 0.592021 | # Generated by Django 3.2 on 2021-08-11 20:10
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (2021-08-11 20:10).

    NOTE(review): ``verbose_name`` below is given a ``datetime`` value, which
    looks like a mistake for ``default=`` — confirm against the model before
    relying on it. Applied migrations are part of the schema history, so the
    operations themselves should not be edited by hand.
    """

    dependencies = [
        ('backend', '0005_auto_20210811_2231'),
    ]

    operations = [
        migrations.AlterField(
            model_name='subscription',
            name='from_date',
            field=models.DateField(verbose_name=datetime.datetime(2021, 8, 11, 23, 10, 47, 533432)),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='date',
            field=models.DateField(verbose_name=datetime.datetime(2021, 8, 11, 23, 10, 47, 533432)),
        ),
        migrations.AlterUniqueTogether(
            name='ratio',
            unique_together=set(),
        ),
    ]
| 0 | 649 | 23 |
d46b3e6bd18c2456b69e5e39f37c88329572a0b2 | 16,233 | py | Python | arfpe/localize_exceptions.py | yixin-09/ARFPE | 8760b519f2b240d7880f16b2b903c5b060dd34d4 | [
"MIT"
] | 1 | 2021-06-02T02:39:34.000Z | 2021-06-02T02:39:34.000Z | arfpe/localize_exceptions.py | yixin-09/ARFPE | 8760b519f2b240d7880f16b2b903c5b060dd34d4 | [
"MIT"
] | null | null | null | arfpe/localize_exceptions.py | yixin-09/ARFPE | 8760b519f2b240d7880f16b2b903c5b060dd34d4 | [
"MIT"
] | null | null | null | import basic_func as bf
from gen_drivfunc import *
from fpexception_detection import FPexcption_detector_whole
from fpexception_detection import get_testing_point
from math_lib import rfl
from plot_domain import plot_2vfunc_domain
from plot_domain import plot_1func_domain
from mpmath import *
import itertools
import numpy as np
import random
import signal
signal.signal(signal.SIGALRM, handler)
NoConvergence = mp.NoConvergence
# flag 0 not need to repair 0 f
# flag 1 may need to repair 1 inf/nan f
# flag 2,3 overflow handle 2 inf inf 3 inf nan
# flag 4 domain error handle 4 nan nan
if __name__ == "__main__":
cal_exceptions()
| 30.116883 | 97 | 0.519744 | import basic_func as bf
from gen_drivfunc import *
from fpexception_detection import FPexcption_detector_whole
from fpexception_detection import get_testing_point
from math_lib import rfl
from plot_domain import plot_2vfunc_domain
from plot_domain import plot_1func_domain
from mpmath import *
import itertools
import numpy as np
import random
import signal
class TimeoutError (RuntimeError):
    """Raised by the SIGALRM handler when an oracle evaluation exceeds its
    time budget (note: shadows Python 3's builtin TimeoutError)."""
    pass
def handler(signum, frame):
    """SIGALRM handler: abort the in-flight computation via TimeoutError."""
    raise TimeoutError()
signal.signal(signal.SIGALRM, handler)
def get_bound_type(ret_vals, num_excp):
    """Classify a sub-domain from its return-value codes and exception count.

    Returns 2 when no normal (0) result code is present, 3 when exceptions
    occurred alongside a normal result, 1 for a single purely-normal code,
    and 4 for mixed codes without exceptions.
    """
    if 0 not in ret_vals:
        return 2
    if num_excp != 0:
        return 3
    return 1 if len(ret_vals) == 1 else 4
NoConvergence = mp.NoConvergence
def rf_f(fid,inp):
    """Evaluate the high-precision mpmath oracle ``rfl[fid]`` on ``inp``.

    A one-second SIGALRM guards against non-terminating evaluations; any
    failure (timeout, math-domain, convergence, type errors) is mapped to
    NaN. The alarm is always cancelled before returning.
    """
    try:
        signal.alarm(1)
        resf = re(rfl[fid](*inp))
    except (TimeoutError, ValueError, ZeroDivisionError, TypeError, OverflowError,NoConvergence):
        resf = np.nan
    signal.alarm(0)
    return resf
def isfloat(x):
    """Return 1 when ``x`` is an ordinary finite value, 0 for NaN/infinity."""
    if isnan(x) or isinf(x):
        return 0
    return 1
# flag 0 not need to repair 0 f
# flag 1 may need to repair 1 inf/nan f
# flag 2,3 overflow handle 2 inf inf 3 inf nan
# flag 4 domain error handle 4 nan nan
def classifer(res, fres, stat):
    """Classify an implementation result ``res`` against the oracle ``fres``.

    Returns 0 (nothing to repair: bad status or ``res`` already finite),
    1 (exceptional value while the oracle stays finite), 2 (both infinite,
    i.e. genuine overflow), 3 (inf vs nan mismatch) or 4 (both nan, i.e.
    domain error). ``stat`` non-zero short-circuits to 0.
    """
    if stat != 0 or isfloat(res):
        return 0
    if isfloat(fres):
        return 1
    if not isinf(res):
        return 4
    return 2 if isinf(fres) else 3
def point_in_bound(point, bound):
    """Return 1 when every coordinate of ``point`` lies inside the matching
    ``[low, high]`` interval of ``bound`` (inclusive), else 0."""
    for coord, interval in zip(point, bound):
        if coord < interval[0] or interval[1] < coord:
            return 0
    return 1
def get_random_points(bound):
    """Draw 4 uniform samples per dimension of ``bound`` and return the
    Cartesian product of those samples as a list of test points (4**d
    points for a d-dimensional bound)."""
    n = 4
    ps_lst = []
    for i in bound:
        points = []
        print i
        for j in range(0,n):
            points.append(random.uniform(i[0],i[1]))
        ps_lst.append(points)
    ret_lst = []
    # for i in points_lst:
    #     print len(i)
    # Combine the per-dimension samples into full coordinate tuples.
    for element in itertools.product(*ps_lst):
        ret_lst.append(list(element))
    return ret_lst
def random_bound_test(bound, fun_pu):
    """Probe ``bound`` at random points; return 0 as soon as ``fun_pu``
    yields an ordinary float there, 1 when every sample is exceptional."""
    for sample in get_random_points(bound):
        if isfloat(fun_pu(*sample).val):
            return 0
    return 1
def accuracy_bound_search_lb(ubp, lbp, test_fun, mid_p, idx):
    """Bisect along axis ``idx`` between ``lbp`` (non-exceptional side) and
    ``ubp`` until the endpoints are within 2 ulps; ``mid_p`` is reused as
    the probe vector. Returns the refined lower bound."""
    for _ in range(2000):
        probe = float(fadd(lbp, fsub(ubp, lbp) / 2.0))
        mid_p[idx] = probe
        if isfloat(test_fun(*mid_p).val):
            lbp = probe
        else:
            ubp = probe
        if bf.getUlpError(lbp, ubp) <= 2:
            break
    return lbp
def accuracy_bound_search_ub(ubp, lbp, test_fun, mid_p, idx):
    """Mirror of accuracy_bound_search_lb: bisect along axis ``idx`` until
    ``lbp``/``ubp`` are within 2 ulps and return the refined upper bound."""
    for _ in range(2000):
        probe = float(fadd(lbp, fsub(ubp, lbp) / 2.0))
        mid_p[idx] = probe
        if isfloat(test_fun(*mid_p).val):
            ubp = probe
        else:
            lbp = probe
        if bf.getUlpError(lbp, ubp) <= 2:
            break
    return ubp
def binary_find_bound(sinp,bound,inp,test_fun,idx):
    """Bracket the exceptional region around ``sinp`` along axis ``idx``.

    ``sinp`` is assumed to be a point whose evaluation is exceptional. The
    step size (in ulps) doubles outward from ``sinp`` in each direction
    until either a non-exceptional point or the domain edge ``bound`` is
    hit; the accuracy_bound_search_* helpers then bisect to ~2 ulps.
    Returns ``[lower, upper]``.

    NOTE(review): the upper-direction search calls accuracy_bound_search_lb
    when the domain edge is reached — looks asymmetric; confirm intended.
    """
    mid_p = list(inp)
    # print "binary_find_bound"
    # print sinp
    # print bound
    lbp = bound[0]
    ubp = sinp
    max_step = bf.getUlpError(sinp,bound[0])
    ini_step = np.min([40,max_step])
    st_p = sinp + 0
    temp_p = sinp
    # Expand downward: double the ulp step until leaving the exceptional zone.
    for i in range(0,2000):
        # print "hello"
        # print sinp
        # print st_p
        # print ini_step
        # print float(max_step)
        # print bound[0]
        if ini_step >= max_step:
            lbp = accuracy_bound_search_lb(temp_p, bound[0], test_fun, list(inp), idx)
            break
        st_p = bf.get_next_point(sinp,ini_step,-1)
        mid_p[idx] = st_p
        if st_p < bound[0]:
            lbp = bound[0]
            break
        res = test_fun(*mid_p)
        # print res.val
        if not isfloat(res.val):
            ini_step = ini_step * 2.0
        else:
            lbp = accuracy_bound_search_lb(temp_p,st_p,test_fun,list(inp),idx)
            break
        temp_p = st_p
    # dis_lu = bf.getUlpError(lbp,ubp)
    # if dis_lu <= 2:
    #     break
    fl_lbp = lbp
    lbp = sinp
    ubp = bound[1]
    max_step = bf.getUlpError(sinp, bound[1])
    ini_step = np.min([40, max_step])
    st_p = sinp + 0
    mid_p = list(inp)
    temp_p = sinp
    # Expand upward with the same doubling scheme.
    for i in range(0, 2000):
        if ini_step >= max_step:
            ubp = accuracy_bound_search_lb(temp_p, bound[1], test_fun, list(inp), idx)
            break
        st_p = bf.get_next_point(sinp, ini_step, 1)
        mid_p[idx] = st_p
        if st_p > bound[1]:
            ubp = bound[1]
            break
        res = test_fun(*mid_p)
        if not isfloat(res.val):
            ini_step = ini_step * 2.0
        else:
            ubp = accuracy_bound_search_ub(temp_p,st_p,test_fun,list(inp),idx)
            break
        temp_p = st_p
        # mid_i = float(fadd(lbp, fsub(ubp, lbp) / 2.0))
        # mid_p[idx] = mid_i
        # res = test_fun(*mid_p)
        # if isfloat(res.val):
        #     ubp = mid_i
        # else:
        #     lbp = mid_i
        # dis_lu = bf.getUlpError(lbp, ubp)
        # if dis_lu <= 2:
        #     break
    fl_ubp = ubp
    return [fl_lbp,fl_ubp]
def generate_mix_bound(old_bound, new_bound):
    """Merge two per-dimension ``[low, high]`` bound lists into their
    dimension-wise hull (smallest low, largest high)."""
    merged = []
    for old_iv, new_iv in zip(old_bound, new_bound):
        low = old_iv[0] if old_iv[0] < new_iv[0] else new_iv[0]
        high = new_iv[1] if old_iv[1] < new_iv[1] else old_iv[1]
        merged.append([low, high])
    return merged
def localize_inbound(bound,fun_pu,stat_fun,inps_lst):
    """Localize exception-triggering boxes around 1-D input points.

    For every point in ``inps_lst`` not already covered by a previously
    found box, binary_find_bound widens the point per dimension into the
    largest interval still triggering the exception. Returns the
    de-duplicated list of boxes (via bf.rm_dump_lst).

    NOTE(review): ``lc_bound`` is reused (not re-created) per point, so
    appended boxes can alias the same list object — confirm intended.
    """
    lc_bound = []
    for i in bound:
        lc_bound.append([])
    temp_bound = list(lc_bound)
    # print "inps_lst"
    # print inps_lst
    flag = 0
    lc_bds_lst = []
    for i in inps_lst:
        # Skip points already contained in one of the boxes found so far.
        if lc_bound[0] != []:
            for lbi in lc_bds_lst:
                flag = point_in_bound(i,lbi)
                if flag == 1:
                    break
        # print flag
        if flag==0:
            idx = 0
            fpb_i = []
            temp_inp = i
            # Widen the point one dimension at a time via binary search.
            for j in i:
                idx_bd = bound[idx]
                lc_bound[idx] = binary_find_bound(temp_inp[idx], idx_bd, temp_inp, fun_pu, idx)
                # print "idx"
                # print temp_bound
                # print lc_bound
                idx = idx + 1
            lc_bds_lst.append(lc_bound)
    # if temp_bound[0] != []:
    #     lc_bound = generate_mix_bound(lc_bound,temp_bound)
    # temp_bound = list(lc_bound)
    return bf.rm_dump_lst(lc_bds_lst)
def bound_equal(bd1, bd2):
    """Return 1 when the paired entries of ``bd1`` and ``bd2`` are all
    equal (comparison stops at the shorter list, as with zip), else 0."""
    for a, b in zip(bd1, bd2):
        if a != b:
            return 0
    return 1
def bound_find2v(lc_bound,inp,test_fun,bound):
    """Slice a 2-variable exceptional region along the first axis.

    The first-axis interval of ``lc_bound`` is split (bf.bound_fpDiv), one
    random x is drawn per slice, and for each x the second-axis interval is
    re-localized with binary_find_bound. Adjacent slices whose y-intervals
    match are merged into a single box. Returns the list of 2-D boxes.
    """
    bd0 = lc_bound[0]
    bds_lst = bf.bound_fpDiv(bd0)
    inps_lst = []
    lc_bds_lst = []
    temp_inp = list(inp)
    for i in bds_lst:
        inps_lst.append(random.uniform(i[0],i[1]))
    # print "len inps"
    for j in range(0,len(inps_lst)):
        temp_bd = []
        # print "temp_bd"
        # print j
        temp_bd.append(bf.fp_to_bound(inps_lst[j]))
        temp_inp[0]=inps_lst[j]
        # print temp_inp
        temp_bd.append(binary_find_bound(temp_inp[1], bound[1], temp_inp, test_fun, 1))
        lc_bds_lst.append(temp_bd)
    new_lc_bds = []
    # Merge consecutive slices that share the same y-interval.
    merge_bd = lc_bds_lst[0]
    for i in lc_bds_lst[1:]:
        if bound_equal(merge_bd[1],i[1])==1:
            merge_bd = generate_mix_bound(merge_bd,i)
        else:
            new_lc_bds.append(merge_bd)
            merge_bd = i
    new_lc_bds.append(merge_bd)
    return new_lc_bds
def localize_inbound2v(bound,fun_pu,stat_fun,inps_lst):
    """Two-variable variant of localize_inbound.

    Each uncovered point is first widened per dimension with
    binary_find_bound, then refined into axis-aligned slices by
    bound_find2v. Returns the de-duplicated list of 2-D boxes.
    """
    lc_bound = []
    for i in bound:
        lc_bound.append([])
    temp_bound = list(lc_bound)
    # print "inps_lst"
    # print inps_lst
    flag = 0
    lc_bds_lst = []
    final_bds_lst = []
    for i in inps_lst:
        # Skip points already covered by an accepted box.
        if lc_bound[0] != []:
            for lbi in final_bds_lst:
                flag = point_in_bound(i,lbi)
                if flag == 1:
                    break
        # print flag
        if flag==0:
            idx = 0
            fpb_i = []
            temp_inp = i
            for j in i:
                idx_bd = bound[idx]
                lc_bound[idx] = binary_find_bound(temp_inp[idx], idx_bd, temp_inp, fun_pu, idx)
                # print "idx"
                # print temp_bound
                # print lc_bound
                idx = idx + 1
            lc_bds_lst = bound_find2v(lc_bound, i, fun_pu, bound)
            final_bds_lst = final_bds_lst + lc_bds_lst
    # if temp_bound[0] != []:
    #     lc_bound = generate_mix_bound(lc_bound,temp_bound)
    # temp_bound = list(lc_bound)
    # for i in lc_bound:
    #     print i
    return bf.rm_dump_lst(final_bds_lst)
def localize4exceptions(fid, test_fun, detect_res,eva_bound_lst,limit_time):
    """Classify each evaluated bound of function ``fid`` and localize the
    exceptional sub-domains inside type-3 bounds.

    Returns ``(new_bounds, repair_bounds_final)``: the updated bound list
    (localized boxes tagged ``(0, [-2])``, clean type-3 bounds tagged
    ``(0, [-1])``) and, per repaired bound, the triggering inputs plus the
    localized boxes needed by the repair stage.
    """
    fun_pu, stat_fun = load_pure_fun(test_fun[0])
    i = detect_res
    print detect_res
    print eva_bound_lst
    # print test_fun
    # Collect the previously detected exception-triggering inputs (type 3).
    fpe_lst = []
    for j in range(0, len(i), 2):
        bt = i[j]
        res_lst = i[j + 1]
        if bt == 3:
            fpe_lst.append(res_lst[0][0] + res_lst[0][1] + res_lst[0][2])
    count = 0
    rf = lambda x: rf_f(fid, x)
    new_bounds = []
    print fpe_lst
    # new_bounds.append(fid)
    # new_bounds.append(test_fun[0])
    repair_bounds_final = []
    var_num = bf.get_var_num(test_fun)
    for i in eva_bound_lst:
        repair_bounds = []
        # print i
        ret_vals = i[0][1]
        num_excp = i[0][0]
        bt = get_bound_type(ret_vals, num_excp)
        bound = i[1]
        inp_lst = []
        loc_inps = []
        ty_f = 0
        # fid 97 is excluded from localization (see cal_exceptions).
        if (bt == 3) & (fid != 97):
            # inp_lst = FPexcption_detector_whole(fun_pu, stat_fun, bound)
            inp_lst = inp_lst + fpe_lst[count]
            count = count + 1
            for ti in inp_lst:
                res_p = fun_pu(*ti)
                res_rf = float(rf(ti))
                signal.alarm(limit_time)
                ty_f = classifer(res_p.val, res_rf, stat_fun())
                print "ty_f"
                print ty_f
                if ty_f != 0:
                    loc_inps.append(ti)
            if loc_inps == []:
                new_bounds.append([(0, [-1]), bound])
            else:
                repair_bounds.append(bound)
                repair_bounds.append(ty_f)
                repair_bounds.append(loc_inps)
                new_bounds.append(i)
                if var_num == 1:
                    loc_bound_lst = localize_inbound(bound, fun_pu, stat_fun, loc_inps)
                    for lcb in loc_bound_lst:
                        new_bounds.append([(0, [-2]), lcb])
                    repair_bounds.append(loc_bound_lst)
                else:
                    loc_bound_lst = localize_inbound2v(bound, fun_pu, stat_fun, loc_inps)
                    for lcb in loc_bound_lst:
                        new_bounds.append([(0, [-2]), lcb])
                    repair_bounds.append(loc_bound_lst)
                repair_bounds_final.append(repair_bounds)
        else:
            new_bounds.append(i)
    return new_bounds,repair_bounds_final
def cal_exceptions():
    """Scan every interpolated function and report those with at least one
    input that the classifier marks as a genuine FP exception (type != 0).

    Reads the pickled function index, evaluated bounds and detection
    results, and prints the count plus ``[fid, name]`` of each hit.
    """
    inter_funcs = bf.load_pickle('fun_index.pkl')
    eva_bound_lst = bf.load_pickle("eva_bound_lst.plk")
    sum_lst = bf.load_pickle("detect_res_lst.pkl")
    cal_num = 0
    name_lst = []
    for fid in range(0,len(inter_funcs)):
        # fid = 14
        flag = 0
        test_fun = inter_funcs[fid]
        fun_pu, stat_fun = load_pure_fun(test_fun[0])
        i = sum_lst[fid]
        print test_fun
        # Collect previously detected exception-triggering inputs (type 3).
        fpe_lst = []
        for j in range(0, len(i[2]), 2):
            bt = i[2][j]
            res_lst = i[2][j + 1]
            if bt == 3:
                fpe_lst.append(res_lst[0][0] + res_lst[0][1] + res_lst[0][2])
        count = 0
        rf = lambda x: rf_f(fid,x)
        new_bounds = []
        # new_bounds.append(fid)
        # new_bounds.append(test_fun[0])
        var_num = bf.get_var_num(test_fun)
        print eva_bound_lst[fid]
        for i in eva_bound_lst[fid][2:]:
            print i
            ret_vals = i[0][1]
            num_excp = i[0][0]
            bt = get_bound_type(ret_vals, num_excp)
            bound = i[1]
            inp_lst = []
            loc_inps = []
            # print bound
            if (bt == 3) & (fid != 97):
                inp_lst = FPexcption_detector_whole(fun_pu, stat_fun, bound)
                inp_lst = inp_lst + fpe_lst[count]
                count = count + 1
                for ti in inp_lst:
                    res_p = fun_pu(*ti)
                    res_rf = float(rf(ti))
                    ty_f = classifer(res_p.val,res_rf,stat_fun())
                    if ty_f!=0:
                        loc_inps.append(ti)
                        flag = 1
        if flag == 1:
            name_lst.append([fid,test_fun[1]])
            cal_num = cal_num + 1
            print cal_num
    print cal_num
    for nai in name_lst:
        print nai
def localize_exceptions():
    """Localize and plot the exceptional sub-domains of selected functions.

    Stand-alone variant of localize4exceptions: for each fid in the hard
    coded range it classifies the evaluated bounds, localizes exceptional
    boxes inside type-3 bounds, then plots the resulting 1-D or 2-D domain.
    """
    inter_funcs = bf.load_pickle('fun_index.pkl')
    eva_bound_lst = bf.load_pickle("eva_bound_lst.plk")
    sum_lst = bf.load_pickle("detect_res_lst.pkl")
    for fid in range(74,75):
        # fid = 14
        test_fun = inter_funcs[fid]
        fun_pu, stat_fun = load_pure_fun(test_fun[0])
        i = sum_lst[fid]
        print test_fun
        # Collect previously detected exception-triggering inputs (type 3).
        fpe_lst = []
        for j in range(0, len(i[2]), 2):
            bt = i[2][j]
            res_lst = i[2][j + 1]
            if bt == 3:
                fpe_lst.append(res_lst[0][0] + res_lst[0][1] + res_lst[0][2])
        count = 0
        rf = lambda x: rf_f(fid,x)
        new_bounds = []
        # new_bounds.append(fid)
        # new_bounds.append(test_fun[0])
        var_num = bf.get_var_num(test_fun)
        print eva_bound_lst[fid]
        for i in eva_bound_lst[fid][2:]:
            print i
            ret_vals = i[0][1]
            num_excp = i[0][0]
            bt = get_bound_type(ret_vals, num_excp)
            bound = i[1]
            inp_lst = []
            loc_inps = []
            # print bound
            if (bt == 3) & (fid != 97):
                inp_lst = FPexcption_detector_whole(fun_pu, stat_fun, bound)
                inp_lst = inp_lst + fpe_lst[count]
                count = count + 1
                for ti in inp_lst:
                    res_p = fun_pu(*ti)
                    res_rf = float(rf(ti))
                    ty_f = classifer(res_p.val,res_rf,stat_fun())
                    if ty_f!=0:
                        loc_inps.append(ti)
                if loc_inps == []:
                    new_bounds.append([(0,[-1]),bound])
                else:
                    new_bounds.append(i)
                    if var_num == 1:
                        loc_bound_lst = localize_inbound(bound, fun_pu, stat_fun, loc_inps)
                        print "loc_bound_lst"
                        print bound
                        print loc_bound_lst
                        for lcb in loc_bound_lst:
                            new_bounds.append([(0, [-2]), lcb])
                    else:
                        loc_bound_lst = localize_inbound2v(bound, fun_pu, stat_fun, loc_inps)
                        print "loc_bound_lst"
                        # print loc_bound_lst
                        print len(loc_bound_lst)
                        for lcb in loc_bound_lst:
                            new_bounds.append([(0, [-2]), lcb])
            else:
                new_bounds.append(i)
        if var_num == 1:
            plot_1func_domain(new_bounds)
        if var_num == 2:
            plot_2vfunc_domain(new_bounds)
        print new_bounds
if __name__ == "__main__":
    # Entry point: scan all functions and report which trigger FP exceptions.
    cal_exceptions()
| 15,101 | 22 | 457 |
06411224f4cb00347bca536557497780a0a47c31 | 4,403 | py | Python | server/common/utils.py | guiloga/scalade | fd59b239fb35e8a7028baea3ed6d4b23282c200d | [
"MIT"
] | 4 | 2021-12-22T18:07:10.000Z | 2021-12-29T09:22:44.000Z | server/common/utils.py | guiloga/scalade | fd59b239fb35e8a7028baea3ed6d4b23282c200d | [
"MIT"
] | null | null | null | server/common/utils.py | guiloga/scalade | fd59b239fb35e8a7028baea3ed6d4b23282c200d | [
"MIT"
] | null | null | null | import binascii
import os
from typing import Union
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import QuerySet
from django.db.models.base import Model
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from rest_framework.status import (
HTTP_403_FORBIDDEN,
HTTP_409_CONFLICT,
HTTP_422_UNPROCESSABLE_ENTITY,
)
from scalade.utils import BASE64_REGEX, decode_scalade_token
class DecoratorShipper:
"""
Ships common used decorators as static methods.
"""
@staticmethod
@staticmethod
def extract_job_from_token(func):
"""
Decorator used in 'runtime' api views
to extract job from token.
"""
return wrapper
@staticmethod
def with_permission(perm: str):
"""
Decorator used in 'resources' api views
that restricts content using permissions.
"""
return decorator
| 31.905797 | 79 | 0.56893 | import binascii
import os
from typing import Union
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import QuerySet
from django.db.models.base import Model
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from rest_framework.status import (
HTTP_403_FORBIDDEN,
HTTP_409_CONFLICT,
HTTP_422_UNPROCESSABLE_ENTITY,
)
from scalade.utils import BASE64_REGEX, decode_scalade_token
class ModelManager:
    """Dispatch ORM manager calls addressed by a dotted "app.model" path."""

    @classmethod
    def handle(
        cls, app_name_model: str, method: str, *args, **kwargs
    ) -> Union[Model, QuerySet]:
        """Resolve ``app_name_model`` and call
        ``model.objects.<method>(*args, **kwargs)``."""
        app_name, model_name = app_name_model.split(".")
        target = cls.get_model(app_name, model_name)
        return getattr(target.objects, method)(*args, **kwargs)

    @staticmethod
    def get_model(app_name: str, model_name: str) -> Model:
        """Look up a model class by case-insensitive name; a "model" suffix
        is appended unless the given name already contains it."""
        registry = {
            m.__name__.lower(): m
            for m in apps.get_app_config(app_name).get_models()
        }
        key = model_name.lower()
        if "model" not in key:
            key = f"{key}model"
        return registry[key]
def validate_b64_encoded(body: str):
    """Raise a 422 API error unless ``body`` matches the base64 pattern."""

    class APIError(APIException):
        status_code = HTTP_422_UNPROCESSABLE_ENTITY
        default_code = "unprocessable_entity"

    if BASE64_REGEX.search(str(body)):
        return
    raise APIError(detail={"body": "must be bytes encoded in base64."})
def get_hex_string(size=12):
    """Return a random lowercase hex string of ``2 * size`` characters."""
    return os.urandom(size).hex()
class DecoratorShipper:
    """
    Ships common used decorators as static methods.
    """

    @staticmethod
    def base_headers(func):
        # Merge the instance's base_headers into the call's ``headers``
        # kwarg (explicit headers win; dict union requires Python 3.9+).
        def wrapper(inst, *args, **kwargs):
            hd_ = kwargs.get("headers", {})
            kwargs["headers"] = hd_ | inst.base_headers
            return func(inst, *args, **kwargs)

        return wrapper

    @staticmethod
    def extract_job_from_token(func):
        """
        Decorator used in 'runtime' api views
        to extract job from token.

        Decodes the Bearer token, loads the matching streams.job record and
        attaches it as ``request.job`` before delegating to the view. Fails
        with 403 on missing/invalid tokens and 409 when the job is missing.
        """

        def _parse_bearer_token(request):
            # Returns the raw token string, or None when no Bearer header.
            auth_header = request.headers.get("Authorization", "")
            if not auth_header or "Bearer " not in auth_header:
                return None

            bearer_token = auth_header.split("Bearer ")[-1]
            return bearer_token

        def wrapper(inst, request, *args, **kwargs):
            try:
                token = _parse_bearer_token(request)
                if not token:
                    return Response(
                        data={
                            "error": "Unable to parse token: "
                            "Bearer Token not found."
                        },
                        status=HTTP_403_FORBIDDEN,
                    )
                decoded_json = decode_scalade_token(token)
                job = ModelManager.handle(
                    "streams.job", "get", uuid=decoded_json["job_uuid"]
                )
            except ObjectDoesNotExist:
                return Response(
                    data={"error": "Conflict: " "job resource doesn't exist."},
                    status=HTTP_409_CONFLICT,
                )
            except Exception as exc:
                # Any decode failure is reported as 403 with the error class.
                return Response(
                    data={
                        "error": "The provided token is not valid: "
                        "'%s'" % exc.__class__.__name__
                    },
                    status=HTTP_403_FORBIDDEN,
                )

            setattr(request, "job", job)
            return func(inst, request, *args, **kwargs)

        return wrapper

    @staticmethod
    def with_permission(perm: str):
        """
        Decorator used in 'resources' api views
        that restricts content using permissions.

        Authenticated accounts lacking ``perm`` receive 403; anonymous
        requests fall through to the wrapped view unchanged.
        """

        def decorator(func):
            def wrapper(inst, request, *args, **kwargs):
                account = request.user
                if account.is_authenticated and not account.has_perm(perm):
                    return Response(
                        data={"error": "Not enough privileges."},
                        status=HTTP_403_FORBIDDEN,
                    )

                return func(inst, request, *args, **kwargs)

            return wrapper

        return decorator
| 3,132 | 86 | 188 |
e6ab8bead5af415bf70651f3100284aa3bc4feec | 306 | py | Python | src/media/element.py | sffjunkie/media | 819912d776aa1440b533e7de0e9befb27f135ecc | [
"Apache-2.0"
] | null | null | null | src/media/element.py | sffjunkie/media | 819912d776aa1440b533e7de0e9befb27f135ecc | [
"Apache-2.0"
] | null | null | null | src/media/element.py | sffjunkie/media | 819912d776aa1440b533e7de0e9befb27f135ecc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Simon Kennedy <sffjunkie+code@gmail.com>
from collections import namedtuple
__all__ = ['Element']
_Element = namedtuple('Element', "title reader writer key log")
| 27.818182 | 65 | 0.738562 | # Copyright (c) 2015 Simon Kennedy <sffjunkie+code@gmail.com>
from collections import namedtuple
__all__ = ['Element']
_Element = namedtuple('Element', "title reader writer key log")


def Element(title, reader=None, writer=None, key=None, log=True):
    """Build an Element record; every field except ``title`` is optional."""
    return _Element(title=title, reader=reader, writer=writer,
                    key=key, log=log)
| 97 | 0 | 23 |
ca9083d28d5c0ecb71de3616780683f16db0f7eb | 361 | py | Python | tests/test_utils.py | ClementGautier/substra-tools | 5f0e302013ff51a2e9f3e8927102bbb8504fbe31 | [
"Apache-2.0"
] | null | null | null | tests/test_utils.py | ClementGautier/substra-tools | 5f0e302013ff51a2e9f3e8927102bbb8504fbe31 | [
"Apache-2.0"
] | null | null | null | tests/test_utils.py | ClementGautier/substra-tools | 5f0e302013ff51a2e9f3e8927102bbb8504fbe31 | [
"Apache-2.0"
] | null | null | null | from substratools import exceptions, Metrics
from substratools.utils import import_module, load_interface_from_module
import pytest
| 24.066667 | 72 | 0.761773 | from substratools import exceptions, Metrics
from substratools.utils import import_module, load_interface_from_module
import pytest
def test_invalid_interface():
    """A module that does not implement the Metrics interface is rejected."""
    source = """
def score():
    pass
"""
    import_module('score', source)
    with pytest.raises(exceptions.InvalidInterface):
        load_interface_from_module('score', interface_class=Metrics)
| 204 | 0 | 23 |
422e8e1bfdb262270d10d40463304bfd76f2fbee | 1,912 | py | Python | client/UIfile/chooseserver.no.py | xutongxin1/UnitAi-project | 226ccc7d73096fd3582a55bf76593756d8033892 | [
"MIT"
] | 5 | 2019-03-23T09:21:14.000Z | 2019-10-18T11:31:10.000Z | client/UIfile/chooseserver.no.py | xutongxin1/UnitAi-project | 226ccc7d73096fd3582a55bf76593756d8033892 | [
"MIT"
] | null | null | null | client/UIfile/chooseserver.no.py | xutongxin1/UnitAi-project | 226ccc7d73096fd3582a55bf76593756d8033892 | [
"MIT"
] | 2 | 2020-01-12T06:03:44.000Z | 2020-01-17T00:23:20.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'chooseserver.no.ui'
#
# Created by: PyQt5 UI code generator 5.13.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 39.020408 | 73 | 0.66318 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'chooseserver.no.ui'
#
# Created by: PyQt5 UI code generator 5.13.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """uic-generated layout for the server-selection dialog.

    NOTE: regenerate from chooseserver.no.ui instead of editing by hand —
    the file header warns that manual changes will be lost.
    """

    def setupUi(self, Dialog):
        # Build the widgets: server address / port inputs, a save button
        # and a combo box choosing between shared and private workstations.
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 300)
        self.ip = QtWidgets.QLineEdit(Dialog)
        self.ip.setGeometry(QtCore.QRect(100, 50, 201, 31))
        self.ip.setObjectName("ip")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(10, 60, 81, 20))
        self.label.setObjectName("label")
        self.port = QtWidgets.QLineEdit(Dialog)
        self.port.setGeometry(QtCore.QRect(100, 110, 201, 31))
        self.port.setObjectName("port")
        self.label_2 = QtWidgets.QLabel(Dialog)
        self.label_2.setGeometry(QtCore.QRect(40, 120, 54, 12))
        self.label_2.setObjectName("label_2")
        self.save = QtWidgets.QPushButton(Dialog)
        self.save.setGeometry(QtCore.QRect(320, 250, 75, 23))
        self.save.setObjectName("save")
        self.comboBox = QtWidgets.QComboBox(Dialog)
        self.comboBox.setGeometry(QtCore.QRect(40, 10, 141, 22))
        self.comboBox.setObjectName("comboBox")
        self.comboBox.addItem("")
        self.comboBox.addItem("")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        # Apply the translatable UI strings (labels are in Persian).
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label.setText(_translate("Dialog", "服务器地址"))
        self.label_2.setText(_translate("Dialog", "端口"))
        self.save.setText(_translate("Dialog", "保存"))
        self.comboBox.setItemText(0, _translate("Dialog", "共享工作站"))
        self.comboBox.setItemText(1, _translate("Dialog", "私有工作站"))
| 1,622 | 3 | 76 |
8e40c6b17a617f18c4af241232f51410d9518006 | 5,360 | py | Python | 1_basic_models/2_logistic_regression/train_mul.py | eubchain/tfdistributedtestcase | a81f99e051537fcd860de28587f0ab2bd9b1b5d4 | [
"MIT"
] | 6 | 2018-07-30T08:47:25.000Z | 2019-01-13T16:17:31.000Z | 1_basic_models/2_logistic_regression/train_mul.py | eubchain/tfdistributedtestcase | a81f99e051537fcd860de28587f0ab2bd9b1b5d4 | [
"MIT"
] | null | null | null | 1_basic_models/2_logistic_regression/train_mul.py | eubchain/tfdistributedtestcase | a81f99e051537fcd860de28587f0ab2bd9b1b5d4 | [
"MIT"
] | 2 | 2018-11-28T09:37:26.000Z | 2018-11-29T02:51:20.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 12:02:05 2018
@author: eub_hmy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import time
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import random
if __name__ == '__main__':
    # Command-line configuration for the distributed TF training job.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_dir',
        type=str,
        default='input/occupancy_data/datatest.txt',
        help="""\
      Where to download the speech training data to.
      """)
    parser.add_argument(
        '--training_steps',
        type=int,
        default=3000,
        help='How many training steps to run before ending.'
    )
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.001,
        help='How large a learning rate to use when training.'
    )
    parser.add_argument(
        '--train_batch_size',
        type=int,
        default=1000,
        help='How many lines to train on at a time.'
    )
    parser.add_argument(
        '--task_index',
        type=int,
        default=0,
        help="""\
      Index of task within the job.\
      """
    )
    # Bug fix: these are string flags whose values are later used as strings
    # (e.g. FLAGS.ps_hosts.split(",")); the original default=0 (an int) would
    # crash with AttributeError whenever a flag was omitted.
    parser.add_argument(
        '--ps_hosts',
        type=str,
        default='',
        help="""\
      Comma-separated list of hostname:port pairs.\
      """
    )
    parser.add_argument(
        '--worker_hosts',
        type=str,
        default='',
        help="""\
      Comma-separated list of hostname:port pairs.\
      """
    )
    parser.add_argument(
        '--job_name',
        type=str,
        default='',
        help="""\
      job name: worker or ps.\
      """
    )
    parser.add_argument(
        '--issync',
        type=int,
        default=0,
        help="""\
      between graph or not.\
      """
    )
    # Unknown flags are forwarded to tf.app.run / main unchanged.
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | 31.529412 | 110 | 0.584328 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 12:02:05 2018
@author: eub_hmy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import time
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import random
def main(_):
    """Run one task of a between-graph distributed TensorFlow training job.

    Builds a single-layer softmax (logistic-regression) classifier over
    the occupancy data set and trains it with gradient descent.  The
    role of this process -- parameter server ("ps") or "worker" -- and
    all hyper-parameters come from the command-line FLAGS parsed in
    __main__.

    Args:
        _: unused positional argument supplied by tf.app.run.
    """
    ps_hosts = FLAGS.ps_hosts.split(",")
    worker_hosts = FLAGS.worker_hosts.split(",")
    num_worker = len(worker_hosts)
    print("Number of worker = " + str(num_worker))
    print("ps_hosts = ")
    print(*ps_hosts)
    print("worker_hosts = ")
    print(*worker_hosts)
    cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
    print("After defining Cluster")
    print("Job name = " + FLAGS.job_name)
    print("task index = " + str(FLAGS.task_index))
    server = tf.train.Server(cluster, job_name=FLAGS.job_name,
                             task_index=FLAGS.task_index)
    print("After defining server")
    if FLAGS.job_name == "ps":
        # Parameter servers only host variables; block forever.
        server.join()
    elif FLAGS.job_name == "worker":
        with tf.device(tf.train.replica_device_setter(
                worker_device="/job:worker/task:%d" % FLAGS.task_index,
                cluster=cluster)):
            # Task 0 is the chief: it initializes variables and owns
            # checkpoints in the Supervisor below.
            is_chief = (FLAGS.task_index == 0)
            data = pd.read_csv(FLAGS.data_dir)
            X_train, X_test, y_train, y_test = train_test_split(
                data[["Temperature", "Humidity", "Light", "CO2",
                      "HumidityRatio"]].values,
                data["Occupancy"].values.reshape(-1, 1),
                random_state=42, test_size=0.25)
            # One-hot encode the binary label: [not occupied, occupied].
            y_train = np.concatenate((1 - y_train, y_train), axis=1)
            y_test = np.concatenate((1 - y_test, y_test), axis=1)
            training_steps = FLAGS.training_steps
            learning_rate = FLAGS.learning_rate
            batch_size = FLAGS.train_batch_size
            features = len(X_train[0])
            class_num = len(y_train[0])
            samples_num = len(y_train)
            batch_num = int(samples_num / batch_size)
            x = tf.placeholder(tf.float32, [None, features])
            y = tf.placeholder(tf.float32, [None, class_num])
            # Single linear layer: logits = x.w + b.
            w = tf.Variable(np.random.rand(features, class_num),
                            dtype=tf.float32)
            b = tf.Variable(np.random.rand(class_num), dtype=tf.float32)
            pred = tf.matmul(x, w) + b
            cost = tf.reduce_sum(
                tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred,
                                                           labels=y))
            global_step = tf.contrib.framework.get_or_create_global_step()
            train_step = tf.train.GradientDescentOptimizer(
                learning_rate).minimize(cost, global_step=global_step)
            correct = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
            init = tf.global_variables_initializer()
            sv = tf.train.Supervisor(
                is_chief=is_chief,
                init_op=init,
                global_step=global_step,
            )
            sess_config = tf.ConfigProto(allow_soft_placement=True)
            sess = sv.prepare_or_wait_for_session(server.target,
                                                  config=sess_config)
            step = 0
            # BUG FIX: the original looped on "not sv.stop()".
            # Supervisor.stop() *shuts the supervisor down* and returns
            # None, so the services were killed on the very first check;
            # should_stop() is the non-destructive polling predicate.
            while not sv.should_stop() and step <= training_steps:
                # Sample a random mini-batch from the training split.
                i = random.randrange(batch_num)
                feed = {x: X_train[i * batch_size:(i + 1) * batch_size],
                        y: y_train[i * batch_size:(i + 1) * batch_size]}
                _, loss, step = sess.run([train_step, cost, global_step],
                                         feed_dict=feed)
                if step % 100 == 0:
                    print("Step %d: accuracy=%f" % (
                        step,
                        sess.run(accuracy, feed_dict={x: X_test, y: y_test})))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_dir',
        type=str,
        default='input/occupancy_data/datatest.txt',
        help="""\
        Path to the occupancy-detection CSV data file.
        """)
    parser.add_argument(
        '--training_steps',
        type=int,
        default=3000,
        help='How many training steps to run before ending.'
    )
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.001,
        help='How large a learning rate to use when training.'
    )
    parser.add_argument(
        '--train_batch_size',
        type=int,
        default=1000,
        help='How many lines to train on at a time.'
    )
    parser.add_argument(
        '--task_index',
        type=int,
        default=0,
        help="""\
        Index of task within the job.\
        """
    )
    # The three cluster/role flags below are strings; the original code
    # used default=0 (an int), so FLAGS.ps_hosts.split(",") crashed with
    # AttributeError whenever a flag was omitted.  An empty string is the
    # type-correct default.
    parser.add_argument(
        '--ps_hosts',
        type=str,
        default='',
        help="""\
        Comma-separated list of hostname:port pairs.\
        """
    )
    parser.add_argument(
        '--worker_hosts',
        type=str,
        default='',
        help="""\
        Comma-separated list of hostname:port pairs.\
        """
    )
    parser.add_argument(
        '--job_name',
        type=str,
        default='',
        help="""\
        job name: worker or ps.\
        """
    )
    parser.add_argument(
        '--issync',
        type=int,
        default=0,
        help="""\
        between graph or not.\
        """
    )
    FLAGS, unparsed = parser.parse_known_args()
    # Forward unparsed flags to tf.app.run so TensorFlow's own flag
    # machinery still sees them; tf.app.run then invokes main(_).
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | 3,288 | 0 | 23 |
9eba321c0b82c5b907ce3ddd97d401fc4347af6f | 14,586 | py | Python | pysnmp-with-texts/FA-EXT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/FA-EXT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/FA-EXT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module FA-EXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FA-EXT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:11:49 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
connUnitPortEntry, = mibBuilder.importSymbols("FCMGMT-MIB", "connUnitPortEntry")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, ObjectIdentity, Counter64, Unsigned32, Integer32, Counter32, ModuleIdentity, IpAddress, NotificationType, Gauge32, TimeTicks, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "ObjectIdentity", "Counter64", "Unsigned32", "Integer32", "Counter32", "ModuleIdentity", "IpAddress", "NotificationType", "Gauge32", "TimeTicks", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso")
TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TextualConvention")
sw, = mibBuilder.importSymbols("SW-MIB", "sw")
faExt = ModuleIdentity((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28))
faExt.setRevisions(('2010-11-22 10:30', '2013-09-12 10:30', '2013-09-24 13:55', '2013-10-29 13:54',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: faExt.setRevisionsDescriptions(('Initial version of this module.', 'Added swConnUnitPortFECMode', 'Added swConnUnitPortFECState', 'Added notsupported value for swConnUnitPortFECState',))
if mibBuilder.loadTexts: faExt.setLastUpdated('201310291354Z')
if mibBuilder.loadTexts: faExt.setOrganization('Brocade Communications Systems, Inc.,')
if mibBuilder.loadTexts: faExt.setContactInfo('Customer Support Group Brocade Communications Systems, 1745 Technology Drive, San Jose, CA 95110 U.S.A Tel: +1-408-392-6061 Fax: +1-408-392-6656 Email: support@Brocade.COM WEB: www.brocade.com')
if mibBuilder.loadTexts: faExt.setDescription('The MIB module is Extension for FA-MIB. Copyright (c) 1996-2003 Brocade Communications Systems, Inc. All rights reserved.')
swSfpStatTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1), )
if mibBuilder.loadTexts: swSfpStatTable.setStatus('current')
if mibBuilder.loadTexts: swSfpStatTable.setDescription('This represents the diagnostic stats of SFPs.')
swFapwwnFeature = ObjectIdentity((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2))
if mibBuilder.loadTexts: swFapwwnFeature.setStatus('current')
if mibBuilder.loadTexts: swFapwwnFeature.setDescription('The OID sub-tree for Fapwwn feature. Using this feature user can configure virtual port WWN for a port.')
swPortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3), )
if mibBuilder.loadTexts: swPortConfigTable.setStatus('current')
if mibBuilder.loadTexts: swPortConfigTable.setDescription('This represents the configuration of encryption / compression feature on a port')
swConnUnitPortTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4), )
if mibBuilder.loadTexts: swConnUnitPortTable.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortTable.setDescription('This represents the Conn unit Port entry')
swSfpStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swSfpStatEntry"))
swSfpStatEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swSfpStatEntry.setStatus('current')
if mibBuilder.loadTexts: swSfpStatEntry.setDescription('This represents the diagnostic stats of SFPs')
swSfpTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('centigrade').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpTemperature.setStatus('current')
if mibBuilder.loadTexts: swSfpTemperature.setDescription('This object identifies the temperature of SFP')
swSfpVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('milli voltage').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpVoltage.setStatus('current')
if mibBuilder.loadTexts: swSfpVoltage.setDescription('This object identifies the voltage of SFP.')
swSfpCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('milli amphere').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpCurrent.setStatus('current')
if mibBuilder.loadTexts: swSfpCurrent.setDescription('This object identifies the current of SFP.')
swSfpRxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpRxPower.setStatus('current')
if mibBuilder.loadTexts: swSfpRxPower.setDescription('This object identifies the Rx power consumption of SFP.')
swSfpTxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpTxPower.setStatus('current')
if mibBuilder.loadTexts: swSfpTxPower.setDescription('This object identifies the Tx power consumption of SFP.')
swSfpPoweronHrs = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 6), Integer32()).setUnits('hours').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpPoweronHrs.setStatus('current')
if mibBuilder.loadTexts: swSfpPoweronHrs.setDescription('This object identifies the power on hours of SFP. This is applicable only to 16G SFPs.')
swSfpUnitId = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpUnitId.setStatus('current')
if mibBuilder.loadTexts: swSfpUnitId.setDescription('This object identifies unit ID of SFP. This is applicable only to QSFP.')
swPortFapwwnConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1), )
if mibBuilder.loadTexts: swPortFapwwnConfigTable.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigTable.setDescription('This represents the configuration of ports.')
swPortFapwwnConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swPortFapwwnConfigEntry"))
swPortFapwwnConfigEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swPortFapwwnConfigEntry.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigEntry.setDescription('This represents the configuration of ports.')
swPortFapwwnConfigEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1, 1), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortFapwwnConfigEnable.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigEnable.setDescription('Represents the Fapwwn status. This is for per port.')
swPortFapwwnConfigFapwwn = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(256, 256)).setFixedLength(256)).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortFapwwnConfigFapwwn.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigFapwwn.setDescription('Represents the Fapwwn. For AG it is range of WWNs. If Fapwwn feature is not enabled in a port this object value is NA(Not Applicable.')
swPortFapwwnConfigType = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1, 3), FapwwnType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortFapwwnConfigType.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigType.setDescription('Represents the Fapwwn type. ')
swPortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swPortConfigEntry"))
swPortConfigEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swPortConfigEntry.setStatus('current')
if mibBuilder.loadTexts: swPortConfigEntry.setDescription('This represents the configuration of encryption / compression feature on a port')
swPortEncrypt = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 1), EncryptCompressStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortEncrypt.setStatus('current')
if mibBuilder.loadTexts: swPortEncrypt.setDescription('Represents the encryption status on a port.')
swPortCompression = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 2), EncryptCompressStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortCompression.setStatus('current')
if mibBuilder.loadTexts: swPortCompression.setDescription('Represents the compression status on port.')
swPortCipherKeySize = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortCipherKeySize.setStatus('current')
if mibBuilder.loadTexts: swPortCipherKeySize.setDescription('Represents the Cipher key size. FOS supports 256 bytes key')
swPortCipherMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 4), CiperMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortCipherMode.setStatus('current')
if mibBuilder.loadTexts: swPortCipherMode.setDescription('Represents the Cipher mode. ')
swConnUnitPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swConnUnitPortEntry"))
swConnUnitPortEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swConnUnitPortEntry.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortEntry.setDescription('This represents the Conn unit Port Entry')
swConnUnitPortCapableSpeeds = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 1), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortCapableSpeeds.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortCapableSpeeds.setDescription('This represents the available speeds, that a port is capable of configuring')
swConnUnitPortSpeedMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("auto-neg", 1), ("static", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortSpeedMode.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortSpeedMode.setDescription('This represents the type of speed modes that can be configured for the particular port. The modes that can be configured are auto-negotiable and static speeds.')
swConnUnitPortFECMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("disabled", 2), ("enabled", 3), ("notsupported", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortFECMode.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortFECMode.setDescription('This represents the port Forward Error Correction Mode. FEC feature is only applicable to 10G/16G platforms.')
swConnUnitPortFECState = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("active", 1), ("inactive", 2), ("notsupported", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortFECState.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortFECState.setDescription('This represents FEC state of a port.If in link both sfp connected are of brocade vendor type then state is active, else it will be inactive.')
mibBuilder.exportSymbols("FA-EXT-MIB", CiperMode=CiperMode, swSfpCurrent=swSfpCurrent, swSfpTxPower=swSfpTxPower, swPortFapwwnConfigType=swPortFapwwnConfigType, swPortFapwwnConfigTable=swPortFapwwnConfigTable, faExt=faExt, swFapwwnFeature=swFapwwnFeature, swPortFapwwnConfigEntry=swPortFapwwnConfigEntry, swSfpVoltage=swSfpVoltage, swPortConfigEntry=swPortConfigEntry, swSfpRxPower=swSfpRxPower, FapwwnType=FapwwnType, swConnUnitPortCapableSpeeds=swConnUnitPortCapableSpeeds, swSfpPoweronHrs=swSfpPoweronHrs, swPortCompression=swPortCompression, swConnUnitPortEntry=swConnUnitPortEntry, PYSNMP_MODULE_ID=faExt, EncryptCompressStatus=EncryptCompressStatus, swPortEncrypt=swPortEncrypt, swSfpUnitId=swSfpUnitId, swSfpStatEntry=swSfpStatEntry, swConnUnitPortFECMode=swConnUnitPortFECMode, swPortCipherKeySize=swPortCipherKeySize, swPortFapwwnConfigFapwwn=swPortFapwwnConfigFapwwn, swConnUnitPortSpeedMode=swConnUnitPortSpeedMode, swPortCipherMode=swPortCipherMode, swConnUnitPortFECState=swConnUnitPortFECState, swSfpTemperature=swSfpTemperature, swSfpStatTable=swSfpStatTable, swConnUnitPortTable=swConnUnitPortTable, swPortFapwwnConfigEnable=swPortFapwwnConfigEnable, swPortConfigTable=swPortConfigTable)
| 111.343511 | 1,203 | 0.777321 | #
# PySNMP MIB module FA-EXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FA-EXT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:11:49 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
connUnitPortEntry, = mibBuilder.importSymbols("FCMGMT-MIB", "connUnitPortEntry")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, ObjectIdentity, Counter64, Unsigned32, Integer32, Counter32, ModuleIdentity, IpAddress, NotificationType, Gauge32, TimeTicks, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "ObjectIdentity", "Counter64", "Unsigned32", "Integer32", "Counter32", "ModuleIdentity", "IpAddress", "NotificationType", "Gauge32", "TimeTicks", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso")
TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TextualConvention")
sw, = mibBuilder.importSymbols("SW-MIB", "sw")
faExt = ModuleIdentity((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28))
faExt.setRevisions(('2010-11-22 10:30', '2013-09-12 10:30', '2013-09-24 13:55', '2013-10-29 13:54',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: faExt.setRevisionsDescriptions(('Initial version of this module.', 'Added swConnUnitPortFECMode', 'Added swConnUnitPortFECState', 'Added notsupported value for swConnUnitPortFECState',))
if mibBuilder.loadTexts: faExt.setLastUpdated('201310291354Z')
if mibBuilder.loadTexts: faExt.setOrganization('Brocade Communications Systems, Inc.,')
if mibBuilder.loadTexts: faExt.setContactInfo('Customer Support Group Brocade Communications Systems, 1745 Technology Drive, San Jose, CA 95110 U.S.A Tel: +1-408-392-6061 Fax: +1-408-392-6656 Email: support@Brocade.COM WEB: www.brocade.com')
if mibBuilder.loadTexts: faExt.setDescription('The MIB module is Extension for FA-MIB. Copyright (c) 1996-2003 Brocade Communications Systems, Inc. All rights reserved.')
swSfpStatTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1), )
if mibBuilder.loadTexts: swSfpStatTable.setStatus('current')
if mibBuilder.loadTexts: swSfpStatTable.setDescription('This represents the diagnostic stats of SFPs.')
swFapwwnFeature = ObjectIdentity((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2))
if mibBuilder.loadTexts: swFapwwnFeature.setStatus('current')
if mibBuilder.loadTexts: swFapwwnFeature.setDescription('The OID sub-tree for Fapwwn feature. Using this feature user can configure virtual port WWN for a port.')
swPortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3), )
if mibBuilder.loadTexts: swPortConfigTable.setStatus('current')
if mibBuilder.loadTexts: swPortConfigTable.setDescription('This represents the configuration of encryption / compression feature on a port')
swConnUnitPortTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4), )
if mibBuilder.loadTexts: swConnUnitPortTable.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortTable.setDescription('This represents the Conn unit Port entry')
swSfpStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swSfpStatEntry"))
swSfpStatEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swSfpStatEntry.setStatus('current')
if mibBuilder.loadTexts: swSfpStatEntry.setDescription('This represents the diagnostic stats of SFPs')
swSfpTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('centigrade').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpTemperature.setStatus('current')
if mibBuilder.loadTexts: swSfpTemperature.setDescription('This object identifies the temperature of SFP')
swSfpVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('milli voltage').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpVoltage.setStatus('current')
if mibBuilder.loadTexts: swSfpVoltage.setDescription('This object identifies the voltage of SFP.')
swSfpCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('milli amphere').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpCurrent.setStatus('current')
if mibBuilder.loadTexts: swSfpCurrent.setDescription('This object identifies the current of SFP.')
swSfpRxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpRxPower.setStatus('current')
if mibBuilder.loadTexts: swSfpRxPower.setDescription('This object identifies the Rx power consumption of SFP.')
swSfpTxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpTxPower.setStatus('current')
if mibBuilder.loadTexts: swSfpTxPower.setDescription('This object identifies the Tx power consumption of SFP.')
swSfpPoweronHrs = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 6), Integer32()).setUnits('hours').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpPoweronHrs.setStatus('current')
if mibBuilder.loadTexts: swSfpPoweronHrs.setDescription('This object identifies the power on hours of SFP. This is applicable only to 16G SFPs.')
swSfpUnitId = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpUnitId.setStatus('current')
if mibBuilder.loadTexts: swSfpUnitId.setDescription('This object identifies unit ID of SFP. This is applicable only to QSFP.')
swPortFapwwnConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1), )
if mibBuilder.loadTexts: swPortFapwwnConfigTable.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigTable.setDescription('This represents the configuration of ports.')
swPortFapwwnConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swPortFapwwnConfigEntry"))
swPortFapwwnConfigEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swPortFapwwnConfigEntry.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigEntry.setDescription('This represents the configuration of ports.')
class FapwwnType(Integer32):
    """Enumeration describing how a port's FA-PWWN (virtual WWN) was set.

    Values: unknown(1), auto(2), userConfigured(3).
    NOTE(review): unlike CiperMode and EncryptCompressStatus in this
    module, this generated class does not mix in TextualConvention and
    carries no description/status attributes -- presumably how pysmi
    emitted it from the ASN.1 source; confirm before "fixing".
    """
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("unknown", 1), ("auto", 2), ("userConfigured", 3))
swPortFapwwnConfigEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1, 1), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortFapwwnConfigEnable.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigEnable.setDescription('Represents the Fapwwn status. This is for per port.')
swPortFapwwnConfigFapwwn = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(256, 256)).setFixedLength(256)).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortFapwwnConfigFapwwn.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigFapwwn.setDescription('Represents the Fapwwn. For AG it is range of WWNs. If Fapwwn feature is not enabled in a port this object value is NA(Not Applicable.')
swPortFapwwnConfigType = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1, 3), FapwwnType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortFapwwnConfigType.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigType.setDescription('Represents the Fapwwn type. ')
swPortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swPortConfigEntry"))
swPortConfigEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swPortConfigEntry.setStatus('current')
if mibBuilder.loadTexts: swPortConfigEntry.setDescription('This represents the configuration of encryption / compression feature on a port')
class CiperMode(TextualConvention, Integer32):
    """Textual convention for a port's cipher mode.

    Values: none(1), allFrames(2), fcpAndNonFCP(3), onlyFCP(4).
    NOTE(review): "Ciper" (sic) is the spelling used throughout this
    generated module and in exportSymbols below; renaming it would break
    importers of the FA-EXT-MIB symbols.
    """
    description = 'Represents cipher mode.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
    namedValues = NamedValues(("none", 1), ("allFrames", 2), ("fcpAndNonFCP", 3), ("onlyFCP", 4))
class EncryptCompressStatus(TextualConvention, Integer32):
    """Textual convention for the encryption/compression feature state.

    Values: enabled(1), disabled(2), unknown(3).  Used by the
    swPortEncrypt and swPortCompression columns defined below.
    """
    description = 'Represents status of feature encryption or compression.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("enabled", 1), ("disabled", 2), ("unknown", 3))
swPortEncrypt = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 1), EncryptCompressStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortEncrypt.setStatus('current')
if mibBuilder.loadTexts: swPortEncrypt.setDescription('Represents the encryption status on a port.')
swPortCompression = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 2), EncryptCompressStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortCompression.setStatus('current')
if mibBuilder.loadTexts: swPortCompression.setDescription('Represents the compression status on port.')
swPortCipherKeySize = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortCipherKeySize.setStatus('current')
if mibBuilder.loadTexts: swPortCipherKeySize.setDescription('Represents the Cipher key size. FOS supports 256 bytes key')
swPortCipherMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 4), CiperMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortCipherMode.setStatus('current')
if mibBuilder.loadTexts: swPortCipherMode.setDescription('Represents the Cipher mode. ')
swConnUnitPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swConnUnitPortEntry"))
swConnUnitPortEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swConnUnitPortEntry.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortEntry.setDescription('This represents the Conn unit Port Entry')
swConnUnitPortCapableSpeeds = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 1), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortCapableSpeeds.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortCapableSpeeds.setDescription('This represents the available speeds, that a port is capable of configuring')
swConnUnitPortSpeedMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("auto-neg", 1), ("static", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortSpeedMode.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortSpeedMode.setDescription('This represents the type of speed modes that can be configured for the particular port. The modes that can be configured are auto-negotiable and static speeds.')
swConnUnitPortFECMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("disabled", 2), ("enabled", 3), ("notsupported", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortFECMode.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortFECMode.setDescription('This represents the port Forward Error Correction Mode. FEC feature is only applicable to 10G/16G platforms.')
swConnUnitPortFECState = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("active", 1), ("inactive", 2), ("notsupported", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortFECState.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortFECState.setDescription('This represents FEC state of a port.If in link both sfp connected are of brocade vendor type then state is active, else it will be inactive.')
mibBuilder.exportSymbols("FA-EXT-MIB", CiperMode=CiperMode, swSfpCurrent=swSfpCurrent, swSfpTxPower=swSfpTxPower, swPortFapwwnConfigType=swPortFapwwnConfigType, swPortFapwwnConfigTable=swPortFapwwnConfigTable, faExt=faExt, swFapwwnFeature=swFapwwnFeature, swPortFapwwnConfigEntry=swPortFapwwnConfigEntry, swSfpVoltage=swSfpVoltage, swPortConfigEntry=swPortConfigEntry, swSfpRxPower=swSfpRxPower, FapwwnType=FapwwnType, swConnUnitPortCapableSpeeds=swConnUnitPortCapableSpeeds, swSfpPoweronHrs=swSfpPoweronHrs, swPortCompression=swPortCompression, swConnUnitPortEntry=swConnUnitPortEntry, PYSNMP_MODULE_ID=faExt, EncryptCompressStatus=EncryptCompressStatus, swPortEncrypt=swPortEncrypt, swSfpUnitId=swSfpUnitId, swSfpStatEntry=swSfpStatEntry, swConnUnitPortFECMode=swConnUnitPortFECMode, swPortCipherKeySize=swPortCipherKeySize, swPortFapwwnConfigFapwwn=swPortFapwwnConfigFapwwn, swConnUnitPortSpeedMode=swConnUnitPortSpeedMode, swPortCipherMode=swPortCipherMode, swConnUnitPortFECState=swConnUnitPortFECState, swSfpTemperature=swSfpTemperature, swSfpStatTable=swSfpStatTable, swConnUnitPortTable=swConnUnitPortTable, swPortFapwwnConfigEnable=swPortFapwwnConfigEnable, swPortConfigTable=swPortConfigTable)
| 0 | 770 | 67 |
0a999005e044b4fd06bea86861d41cb6317f5fef | 29,957 | py | Python | research/object_detection/exporter_test.py | Zhangxu0501/models | 7c8ca1647926226556e05fdd6535a35abe3100eb | [
"Apache-2.0"
] | 3,326 | 2018-01-26T22:42:25.000Z | 2022-02-16T13:16:39.000Z | research/object_detection/exporter_test.py | wzy1510300a28/models | 42a3da72313b8814ef0ced8f425af90b57313b9f | [
"Apache-2.0"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | research/object_detection/exporter_test.py | wzy1510300a28/models | 42a3da72313b8814ef0ced8f425af90b57313b9f | [
"Apache-2.0"
] | 2,580 | 2017-05-14T14:33:41.000Z | 2022-03-31T15:04:14.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.export_inference_graph."""
import os
import numpy as np
import six
import tensorflow as tf
from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top
slim = tf.contrib.slim
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 49.515702 | 80 | 0.673198 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.export_inference_graph."""
import os
import numpy as np
import six
import tensorflow as tf
from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top
slim = tf.contrib.slim
class FakeModel(model.DetectionModel):
  """Minimal DetectionModel stub used by the exporter tests.

  Preprocessing is the identity, prediction adds a single conv layer (so the
  graph has trainable variables to checkpoint), and postprocessing emits a
  fixed batch of two canned detections, optionally with masks.
  """

  def __init__(self, add_detection_masks=False):
    # Whether postprocess() should also emit a 'detection_masks' tensor.
    self._add_detection_masks = add_detection_masks

  def preprocess(self, inputs):
    """Identity preprocessing; returns the inputs unchanged."""
    return tf.identity(inputs)

  def predict(self, preprocessed_inputs):
    """Applies one 3-filter 1x1 conv so the model owns variables."""
    return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)}

  def postprocess(self, prediction_dict):
    """Returns canned detections, gated on the prediction ops via control deps."""
    with tf.control_dependencies(prediction_dict.values()):
      canned_boxes = [[[0.0, 0.0, 0.5, 0.5],
                       [0.5, 0.5, 0.8, 0.8]],
                      [[0.5, 0.5, 1.0, 1.0],
                       [0.0, 0.0, 0.0, 0.0]]]
      canned_scores = [[0.7, 0.6],
                       [0.9, 0.0]]
      canned_classes = [[0, 1],
                        [1, 0]]
      postprocessed_tensors = {
          'detection_boxes': tf.constant(canned_boxes, tf.float32),
          'detection_scores': tf.constant(canned_scores, tf.float32),
          'detection_classes': tf.constant(canned_classes, tf.float32),
          'num_detections': tf.constant([2, 1], tf.float32)
      }
      if self._add_detection_masks:
        # 2 images x 2 detections x 4x4 masks with distinct values 0..63.
        postprocessed_tensors['detection_masks'] = tf.constant(
            np.arange(64).reshape([2, 2, 4, 4]), tf.float32)
      return postprocessed_tensors

  def restore_map(self, checkpoint_path, from_detection_checkpoint):
    """No-op; the tests never restore through the model."""
    pass

  def loss(self, prediction_dict):
    """No-op; the tests never compute a loss."""
    pass
class ExportInferenceGraphTest(tf.test.TestCase):
  """Tests for exporter.export_inference_graph.

  Each test checkpoints a mocked FakeModel, exports an inference artifact
  (frozen graph, SavedModel, or checkpoint + metagraph) from it, and where
  applicable reloads the artifact and verifies FakeModel's canned detections.
  """

  def _save_checkpoint_from_mock_model(self, checkpoint_path,
                                       use_moving_averages):
    """Builds a FakeModel graph and saves an initialized checkpoint for it."""
    g = tf.Graph()
    with g.as_default():
      mock_model = FakeModel()
      preprocessed_inputs = mock_model.preprocess(
          tf.placeholder(tf.float32, shape=[None, None, None, 3]))
      predictions = mock_model.predict(preprocessed_inputs)
      mock_model.postprocess(predictions)
      if use_moving_averages:
        tf.train.ExponentialMovingAverage(0.0).apply()
      slim.get_or_create_global_step()
      saver = tf.train.Saver()
      init = tf.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init)
        saver.save(sess, checkpoint_path)

  def _load_inference_graph(self, inference_graph_path):
    """Loads a frozen GraphDef from disk into a fresh tf.Graph."""
    od_graph = tf.Graph()
    with od_graph.as_default():
      od_graph_def = tf.GraphDef()
      # Open in binary mode: the frozen graph is a serialized protobuf and
      # ParseFromString needs bytes; text mode breaks under Python 3.
      with tf.gfile.GFile(inference_graph_path, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    return od_graph

  def _create_tf_example(self, image_array):
    """Returns a serialized tf.Example containing a JPEG-encoded image."""
    with self.test_session():
      encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval()
    def _bytes_feature(value):
      return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    # Bytes literals: BytesList requires bytes values under Python 3.
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': _bytes_feature(encoded_image),
        'image/format': _bytes_feature(b'jpg'),
        'image/source_id': _bytes_feature(b'image_id')
    })).SerializeToString()
    return example

  def test_export_graph_with_image_tensor_input(self):
    """Exports a SavedModel when the input type is image_tensor."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      output_directory = os.path.join(tmp_dir, 'output')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
      self.assertTrue(os.path.exists(os.path.join(
          output_directory, 'saved_model', 'saved_model.pb')))

  def test_export_graph_with_fixed_size_image_tensor_input(self):
    """Exports with a fixed input shape and checks the placeholder shape."""
    input_shape = [1, 320, 320, 3]
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(
        trained_checkpoint_prefix, use_moving_averages=False)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      output_directory = os.path.join(tmp_dir, 'output')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory,
          input_shape=input_shape)
      saved_model_path = os.path.join(output_directory, 'saved_model')
      self.assertTrue(
          os.path.exists(os.path.join(saved_model_path, 'saved_model.pb')))
    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)
        signature = meta_graph.signature_def['serving_default']
        input_tensor_name = signature.inputs['inputs'].name
        image_tensor = od_graph.get_tensor_by_name(input_tensor_name)
        self.assertSequenceEqual(image_tensor.get_shape().as_list(),
                                 input_shape)

  def test_export_graph_with_tf_example_input(self):
    """Exports a SavedModel when the input type is tf_example."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      output_directory = os.path.join(tmp_dir, 'output')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
      self.assertTrue(os.path.exists(os.path.join(
          output_directory, 'saved_model', 'saved_model.pb')))

  def test_export_graph_with_encoded_image_string_input(self):
    """Exports a SavedModel for encoded_image_string_tensor inputs."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      output_directory = os.path.join(tmp_dir, 'output')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='encoded_image_string_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
      self.assertTrue(os.path.exists(os.path.join(
          output_directory, 'saved_model', 'saved_model.pb')))

  def _get_variables_in_checkpoint(self, checkpoint_file):
    """Returns the set of variable names stored in a checkpoint."""
    return set([
        var_name
        for var_name, _ in tf.train.list_variables(checkpoint_file)])

  def test_replace_variable_values_with_moving_averages(self):
    """EMA values replace variables; EMA slots are dropped from the copy."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    new_checkpoint_prefix = os.path.join(tmp_dir, 'new.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    graph = tf.Graph()
    with graph.as_default():
      fake_model = FakeModel()
      preprocessed_inputs = fake_model.preprocess(
          tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3]))
      predictions = fake_model.predict(preprocessed_inputs)
      fake_model.postprocess(predictions)
      exporter.replace_variable_values_with_moving_averages(
          graph, trained_checkpoint_prefix, new_checkpoint_prefix)
    expected_variables = set(['conv2d/bias', 'conv2d/kernel'])
    variables_in_old_ckpt = self._get_variables_in_checkpoint(
        trained_checkpoint_prefix)
    self.assertIn('conv2d/bias/ExponentialMovingAverage',
                  variables_in_old_ckpt)
    self.assertIn('conv2d/kernel/ExponentialMovingAverage',
                  variables_in_old_ckpt)
    variables_in_new_ckpt = self._get_variables_in_checkpoint(
        new_checkpoint_prefix)
    self.assertTrue(expected_variables.issubset(variables_in_new_ckpt))
    self.assertNotIn('conv2d/bias/ExponentialMovingAverage',
                     variables_in_new_ckpt)
    self.assertNotIn('conv2d/kernel/ExponentialMovingAverage',
                     variables_in_new_ckpt)

  def test_export_graph_with_moving_averages(self):
    """Exports with EMA enabled and checks the exported variable set."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = True
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
      self.assertTrue(os.path.exists(os.path.join(
          output_directory, 'saved_model', 'saved_model.pb')))
    expected_variables = set(['conv2d/bias', 'conv2d/kernel', 'global_step'])
    actual_variables = set(
        [var_name for var_name, _ in tf.train.list_variables(output_directory)])
    self.assertTrue(expected_variables.issubset(actual_variables))

  def test_export_model_with_all_output_nodes(self):
    """Frozen graph exposes all detection tensors, including masks."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
    inference_graph = self._load_inference_graph(inference_graph_path)
    with self.test_session(graph=inference_graph):
      inference_graph.get_tensor_by_name('image_tensor:0')
      inference_graph.get_tensor_by_name('detection_boxes:0')
      inference_graph.get_tensor_by_name('detection_scores:0')
      inference_graph.get_tensor_by_name('detection_classes:0')
      inference_graph.get_tensor_by_name('detection_masks:0')
      inference_graph.get_tensor_by_name('num_detections:0')

  def test_export_model_with_detection_only_nodes(self):
    """Without masks enabled, the masks tensor must be absent."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=False)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
    inference_graph = self._load_inference_graph(inference_graph_path)
    with self.test_session(graph=inference_graph):
      inference_graph.get_tensor_by_name('image_tensor:0')
      inference_graph.get_tensor_by_name('detection_boxes:0')
      inference_graph.get_tensor_by_name('detection_scores:0')
      inference_graph.get_tensor_by_name('detection_classes:0')
      inference_graph.get_tensor_by_name('num_detections:0')
      with self.assertRaises(KeyError):
        inference_graph.get_tensor_by_name('detection_masks:0')

  def test_export_and_run_inference_with_image_tensor(self):
    """Runs the frozen graph on raw image tensors; classes are 1-offset."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
    inference_graph = self._load_inference_graph(inference_graph_path)
    with self.test_session(graph=inference_graph) as sess:
      image_tensor = inference_graph.get_tensor_by_name('image_tensor:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      masks = inference_graph.get_tensor_by_name('detection_masks:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      (boxes_np, scores_np, classes_np, masks_np, num_detections_np) = sess.run(
          [boxes, scores, classes, masks, num_detections],
          feed_dict={image_tensor: np.ones((2, 4, 4, 3)).astype(np.uint8)})
      self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                      [0.5, 0.5, 0.8, 0.8]],
                                     [[0.5, 0.5, 1.0, 1.0],
                                      [0.0, 0.0, 0.0, 0.0]]])
      self.assertAllClose(scores_np, [[0.7, 0.6],
                                      [0.9, 0.0]])
      self.assertAllClose(classes_np, [[1, 2],
                                       [2, 1]])
      self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
      self.assertAllClose(num_detections_np, [2, 1])

  def _create_encoded_image_string(self, image_array_np, encoding_format):
    """Encodes a numpy image as a JPEG or PNG byte string."""
    od_graph = tf.Graph()
    with od_graph.as_default():
      if encoding_format == 'jpg':
        encoded_string = tf.image.encode_jpeg(image_array_np)
      elif encoding_format == 'png':
        encoded_string = tf.image.encode_png(image_array_np)
      else:
        raise ValueError('Supports only the following formats: `jpg`, `png`')
    with self.test_session(graph=od_graph):
      return encoded_string.eval()

  def test_export_and_run_inference_with_encoded_image_string_tensor(self):
    """Runs inference on batches of JPEG- and PNG-encoded image strings."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='encoded_image_string_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
    inference_graph = self._load_inference_graph(inference_graph_path)
    jpg_image_str = self._create_encoded_image_string(
        np.ones((4, 4, 3)).astype(np.uint8), 'jpg')
    png_image_str = self._create_encoded_image_string(
        np.ones((4, 4, 3)).astype(np.uint8), 'png')
    with self.test_session(graph=inference_graph) as sess:
      image_str_tensor = inference_graph.get_tensor_by_name(
          'encoded_image_string_tensor:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      masks = inference_graph.get_tensor_by_name('detection_masks:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      for image_str in [jpg_image_str, png_image_str]:
        image_str_batch_np = np.hstack([image_str]* 2)
        (boxes_np, scores_np, classes_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, masks, num_detections],
             feed_dict={image_str_tensor: image_str_batch_np})
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])

  def test_raise_runtime_error_on_images_with_different_sizes(self):
    """Mixed-size encoded images in one batch must raise at run time."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='encoded_image_string_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
    inference_graph = self._load_inference_graph(inference_graph_path)
    large_image = self._create_encoded_image_string(
        np.ones((4, 4, 3)).astype(np.uint8), 'jpg')
    small_image = self._create_encoded_image_string(
        np.ones((2, 2, 3)).astype(np.uint8), 'jpg')
    image_str_batch_np = np.hstack([large_image, small_image])
    with self.test_session(graph=inference_graph) as sess:
      image_str_tensor = inference_graph.get_tensor_by_name(
          'encoded_image_string_tensor:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      masks = inference_graph.get_tensor_by_name('detection_masks:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   '^TensorArray has inconsistent shapes.'):
        sess.run([boxes, scores, classes, masks, num_detections],
                 feed_dict={image_str_tensor: image_str_batch_np})

  def test_export_and_run_inference_with_tf_example(self):
    """Runs the frozen graph on a serialized tf.Example batch."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
    inference_graph = self._load_inference_graph(inference_graph_path)
    tf_example_np = np.expand_dims(self._create_tf_example(
        np.ones((4, 4, 3)).astype(np.uint8)), axis=0)
    with self.test_session(graph=inference_graph) as sess:
      tf_example = inference_graph.get_tensor_by_name('tf_example:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      masks = inference_graph.get_tensor_by_name('detection_masks:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      (boxes_np, scores_np, classes_np, masks_np, num_detections_np) = sess.run(
          [boxes, scores, classes, masks, num_detections],
          feed_dict={tf_example: tf_example_np})
      self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                      [0.5, 0.5, 0.8, 0.8]],
                                     [[0.5, 0.5, 1.0, 1.0],
                                      [0.0, 0.0, 0.0, 0.0]]])
      self.assertAllClose(scores_np, [[0.7, 0.6],
                                      [0.9, 0.0]])
      self.assertAllClose(classes_np, [[1, 2],
                                       [2, 1]])
      self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
      self.assertAllClose(num_detections_np, [2, 1])

  def test_export_saved_model_and_run_inference(self):
    """Loads the exported SavedModel signature and runs inference."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    output_directory = os.path.join(tmp_dir, 'output')
    saved_model_path = os.path.join(output_directory, 'saved_model')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
    tf_example_np = np.hstack([self._create_tf_example(
        np.ones((4, 4, 3)).astype(np.uint8))] * 2)
    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)
        signature = meta_graph.signature_def['serving_default']
        input_tensor_name = signature.inputs['inputs'].name
        tf_example = od_graph.get_tensor_by_name(input_tensor_name)
        boxes = od_graph.get_tensor_by_name(
            signature.outputs['detection_boxes'].name)
        scores = od_graph.get_tensor_by_name(
            signature.outputs['detection_scores'].name)
        classes = od_graph.get_tensor_by_name(
            signature.outputs['detection_classes'].name)
        masks = od_graph.get_tensor_by_name(
            signature.outputs['detection_masks'].name)
        num_detections = od_graph.get_tensor_by_name(
            signature.outputs['num_detections'].name)
        (boxes_np, scores_np, classes_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, masks, num_detections],
             feed_dict={tf_example: tf_example_np})
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])

  def test_export_checkpoint_and_run_inference(self):
    """Restores the exported checkpoint + metagraph and runs inference."""
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    output_directory = os.path.join(tmp_dir, 'output')
    model_path = os.path.join(output_directory, 'model.ckpt')
    meta_graph_path = model_path + '.meta'
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
    tf_example_np = np.hstack([self._create_tf_example(
        np.ones((4, 4, 3)).astype(np.uint8))] * 2)
    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        new_saver = tf.train.import_meta_graph(meta_graph_path)
        new_saver.restore(sess, model_path)
        tf_example = od_graph.get_tensor_by_name('tf_example:0')
        boxes = od_graph.get_tensor_by_name('detection_boxes:0')
        scores = od_graph.get_tensor_by_name('detection_scores:0')
        classes = od_graph.get_tensor_by_name('detection_classes:0')
        masks = od_graph.get_tensor_by_name('detection_masks:0')
        num_detections = od_graph.get_tensor_by_name('num_detections:0')
        (boxes_np, scores_np, classes_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, masks, num_detections],
             feed_dict={tf_example: tf_example_np})
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 28,040 | 45 | 671 |
43d9be562b85f69da1f783c9b7eee32fc6351ce2 | 878 | py | Python | pane/reader_pane.py | ddmms/JournalManagementSystem | caed0464bdb8649cba8f4d1696690fb57952b66f | [
"Apache-2.0"
] | null | null | null | pane/reader_pane.py | ddmms/JournalManagementSystem | caed0464bdb8649cba8f4d1696690fb57952b66f | [
"Apache-2.0"
] | null | null | null | pane/reader_pane.py | ddmms/JournalManagementSystem | caed0464bdb8649cba8f4d1696690fb57952b66f | [
"Apache-2.0"
] | null | null | null | from PyQt5.Qt import *
from UI.reader_interface import Ui_MainWindow
import sys
# Manual smoke test: create the Qt application, show the reader pane,
# and enter the event loop until the window is closed.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = ReaderPane()
    window.show()
    sys.exit(app.exec_())
| 22.512821 | 45 | 0.677677 | from PyQt5.Qt import *
from UI.reader_interface import Ui_MainWindow
import sys
class ReaderPane(Ui_MainWindow):
    """Main window pane for the reader role of the journal management system.

    Navigation is decoupled from the UI: each action method only emits a
    signal, and the owning controller decides which pane to show next.
    """

    # Navigation signals consumed by the controller that owns this pane.
    log_out_signal = pyqtSignal(str)        # carries the current mode string
    to_journal_inquiry_signal = pyqtSignal()
    to_borrow_inquiry_signal = pyqtSignal()
    to_book_signal = pyqtSignal()

    def __init__(self):
        super().__init__()
        self.setWindowTitle("期刊管理系统")
        self.mode = "reader"
        self.setupUi(self)
        self.retranslateUi(self)

    def log_out(self):
        """Request a log-out, reporting this pane's mode."""
        self.log_out_signal.emit(self.mode)

    def journal_inquiry(self):
        """Request navigation to the journal-inquiry pane."""
        self.to_journal_inquiry_signal.emit()

    def borrow_inquiry(self):
        """Request navigation to the borrow-inquiry pane."""
        self.to_borrow_inquiry_signal.emit()

    def book_and_inquiry(self):
        """Request navigation to the booking pane."""
        self.to_book_signal.emit()
# Manual smoke test: create the Qt application, show the reader pane,
# and enter the event loop until the window is closed.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = ReaderPane()
    window.show()
    sys.exit(app.exec_())
| 346 | 306 | 23 |
6ef676221579b6e422f07297be23bebb0667aa7a | 671 | py | Python | TimeCallBack.py | PashaIanko/Covid19Classifier | ee75a2b17babb8c9701351dfaa6052afa083168f | [
"MIT"
] | null | null | null | TimeCallBack.py | PashaIanko/Covid19Classifier | ee75a2b17babb8c9701351dfaa6052afa083168f | [
"MIT"
] | 1 | 2022-01-27T13:30:38.000Z | 2022-01-27T13:30:38.000Z | TimeCallBack.py | PashaIanko/Covid19Classifier | ee75a2b17babb8c9701351dfaa6052afa083168f | [
"MIT"
] | null | null | null | import tensorflow as tf
import matplotlib.pyplot as plt
import time
| 30.5 | 73 | 0.639344 | import tensorflow as tf
import matplotlib.pyplot as plt
import time
class TimeCallBack(tf.keras.callbacks.Callback):
    """Keras callback that records cumulative wall-clock time per epoch.

    On training start it captures a reference timestamp; after each epoch it
    appends ``(epoch, seconds_since_start)`` to ``self.times``; on training
    end it plots cumulative time against epoch with matplotlib.
    """

    def __init__(self):
        # List of (epoch, seconds elapsed since on_train_begin) pairs.
        # Not reset between fits, so repeated fit() calls keep appending.
        self.times = []

    def on_train_begin(self, logs=None):
        # time.clock() (used originally) was removed in Python 3.8;
        # perf_counter() is the recommended monotonic replacement.
        self.timetaken = time.perf_counter()

    def on_epoch_end(self, epoch, logs=None):
        self.times.append((epoch, time.perf_counter() - self.timetaken))

    def on_train_end(self, logs=None):
        plt.xlabel('Epoch')
        plt.ylabel('Total time taken until an epoch in seconds')
        plt.plot(*zip(*self.times), marker='o')
        plt.show()
10aea4a51cd3f3d84a77572f0197b4a8121a3db8 | 4,090 | py | Python | com/vmware/vapi/provider/filter.py | sumitrsystems/Vmware | 7705d9979bee71f02c71d63890616409044cba08 | [
"MIT"
] | null | null | null | com/vmware/vapi/provider/filter.py | sumitrsystems/Vmware | 7705d9979bee71f02c71d63890616409044cba08 | [
"MIT"
] | null | null | null | com/vmware/vapi/provider/filter.py | sumitrsystems/Vmware | 7705d9979bee71f02c71d63890616409044cba08 | [
"MIT"
] | null | null | null | """
API Provider filter
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2015, 2017 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long
import abc
import six
from vmware.vapi.core import ApiProvider
from vmware.vapi.data.serializers.introspection import (
convert_data_def_to_data_value)
from vmware.vapi.lib.log import get_vapi_logger
from vmware.vapi.provider.lib import augment_method_result_with_errors
logger = get_vapi_logger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ApiProviderFilter(ApiProvider):
    """
    Base class for all ApiProvider filters.

    A filter wraps the next :class:`vmware.vapi.core.ApiProvider` in a chain,
    forwards invocations to it, and augments introspection results with the
    filter's own error definitions.

    :type next_provider: :class:`vmware.vapi.core.ApiProvider`
    :ivar next_provider: Next API Provider in the chain
    """
    def __init__(self, next_provider=None, errors_to_augment=None, name=None):
        """
        Initialize ApiProviderFilter

        :type next_provider: :class:`vmware.vapi.core.ApiProvider` or ``None``
        :param next_provider: API Provider to invoke the requests
        :type errors_to_augment: :class:`list` of
            :class:`vmware.vapi.data.definition.ErrorDefinition` or ``None``
        :param errors_to_augment: List of error definitions to be added to
            method definitions
        :type name: :class:`str`
        :param name: The name of the filter; defaults to the class name
        """
        ApiProvider.__init__(self)
        if name is None:
            name = self.__class__.__name__
        self.name = name
        self.next_provider = next_provider
        self._error_defs_to_augment = errors_to_augment or []
        # Pre-convert the error definitions once so invoke() can reuse them.
        error_values = []
        for error_def in self._error_defs_to_augment:
            error_values.append(convert_data_def_to_data_value(error_def))
        self._error_values_to_augment = error_values

    @abc.abstractmethod
    def invoke(self, service_id, operation_id, input_value, ctx):
        """
        Invoke an API request. Subclasses should delegate to this base
        implementation (ApiProviderFilter.invoke(self, ...)) to forward the
        call to the next provider in the chain. If the request targets the
        "get" operation of the vAPI Operation Introspection service, the
        filter's error definitions are added to the method result.

        :type service_id: :class:`str`
        :param service_id: Service identifier
        :type operation_id: :class:`str`
        :param operation_id: Operation identifier
        :type input_value: :class:`vmware.vapi.data.value.StructValue`
        :param input_value: Method input parameters
        :type ctx: :class:`vmware.vapi.core.ExecutionContext`
        :param ctx: Execution context for this method
        :rtype: :class:`vmware.vapi.core.MethodResult`
        :return: Result of the method invocation
        """
        result = self.next_provider.invoke(
            service_id, operation_id, input_value, ctx)
        return augment_method_result_with_errors(
            service_id, operation_id, result,
            self._error_values_to_augment)

    def get_api_provider(self):
        """
        Get the last provider in the chain.

        :rtype: :class:`vmware.vapi.core.ApiProvider`
        :return: Last provider in the provider chain which is not a filter
        """
        provider = self.next_provider
        while isinstance(provider, ApiProviderFilter):
            provider = provider.next_provider
        return provider

    def find_first_api_filter(self, name):
        """
        Get the first filter with the specified name in the provider chain.

        :type name: :class:`str`
        :param name: Filter name
        :rtype: :class:`vmware.vapi.core.ApiProviderFilter` or ``None``
        :return: First filter that matches the name, or ``None``
        """
        current = self
        while isinstance(current, ApiProviderFilter):
            if current.name == name:
                return current
            current = current.next_provider
        return None
| 38.584906 | 129 | 0.682885 | """
API Provider filter
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2015, 2017 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long
import abc
import six
from vmware.vapi.core import ApiProvider
from vmware.vapi.data.serializers.introspection import (
convert_data_def_to_data_value)
from vmware.vapi.lib.log import get_vapi_logger
from vmware.vapi.provider.lib import augment_method_result_with_errors
logger = get_vapi_logger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ApiProviderFilter(ApiProvider):
    """
    ApiProviderFilter is a base class for all ApiProvider filters.
    This handles all the common methods and also takes care of augmenting
    errors reported by an ApiProvider filter.

    :type next_provider: :class:`vmware.vapi.core.ApiProvider`
    :ivar next_provider: Next API Provider in the chain
    """

    def __init__(self, next_provider=None, errors_to_augment=None, name=None):
        """
        Initialize ApiProviderFilter

        :type  next_provider: :class:`vmware.vapi.core.ApiProvider` or ``None``
        :param next_provider: API Provider to invoke the requests
        :type  errors_to_augment: :class:`list` of
            :class:`vmware.vapi.data.definition.ErrorDefinition` or ``None``
        :param errors_to_augment: List of error definitions to be added to
            method definitions
        :type  name: :class:`str`
        :param name: The name of the filter
        """
        ApiProvider.__init__(self)
        # Default the filter's name to its class name so lookups by name
        # (see find_first_api_filter) work without explicit naming.
        self.name = name if name is not None else self.__class__.__name__
        self.next_provider = next_provider
        self._error_defs_to_augment = errors_to_augment or []
        # Convert error definitions to data values once at construction
        # time, so invoke() does not pay the conversion cost per request.
        self._error_values_to_augment = [
            convert_data_def_to_data_value(error_def)
            for error_def in self._error_defs_to_augment
        ]

    @abc.abstractmethod
    def invoke(self, service_id, operation_id, input_value, ctx):
        """
        Invoke an API request. Derived classes of ApiProviderFilter
        should call this method to invoke the request. This can be done
        by: ApiProviderFilter.invoke(self, ctx, method_id, input_value).
        This method calls the next API Provider. If the request is made to
        "get" operation of vAPI Operation Introspection service, errors are
        augmented to the method result.

        :type  service_id: :class:`str`
        :param service_id: Service identifier
        :type  operation_id: :class:`str`
        :param operation_id: Operation identifier
        :type  input_value: :class:`vmware.vapi.data.value.StructValue`
        :param input_value: Method input parameters
        :type  ctx: :class:`vmware.vapi.core.ExecutionContext`
        :param ctx: Execution context for this method
        :rtype:  :class:`vmware.vapi.core.MethodResult`
        :return: Result of the method invocation
        """
        # Abstract, yet provides a default implementation for subclasses
        # to call: delegate downstream, then augment reported errors.
        method_result = self.next_provider.invoke(
            service_id, operation_id, input_value, ctx)
        return augment_method_result_with_errors(
            service_id, operation_id, method_result,
            self._error_values_to_augment)

    def get_api_provider(self):
        """
        Get the last provider in the chain.

        :rtype:  :class:`vmware.vapi.core.ApiProvider`
        :return: Last provider in the provider chain which is not a filter
        """
        # Recurse while the next link is still a filter.
        if isinstance(self.next_provider, ApiProviderFilter):
            return self.next_provider.get_api_provider()
        return self.next_provider

    def find_first_api_filter(self, name):
        """
        Get the first filter with the specified name in the provider chain

        :type  name: :class:`str`
        :param name: Filter name
        :rtype:  :class:`vmware.vapi.core.ApiProviderFilter` or ``None``
        :return: First filter that matches the name
        """
        if self.name == name:
            return self
        # Continue searching down the chain until a non-filter provider
        # terminates it.
        if isinstance(self.next_provider, ApiProviderFilter):
            return self.next_provider.find_first_api_filter(name)
        return None
| 0 | 0 | 0 |
72a794e2f400b9940054a471ba93b1bf4b828fd0 | 3,998 | py | Python | WaveBlocksND/Interface/EnergiesWavefunction.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | 3 | 2016-09-01T21:13:54.000Z | 2020-03-23T15:45:32.000Z | WaveBlocksND/Interface/EnergiesWavefunction.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | null | null | null | WaveBlocksND/Interface/EnergiesWavefunction.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | 6 | 2016-03-16T15:22:01.000Z | 2021-03-13T14:06:54.000Z | """The WaveBlocks Project
Compute the kinetic and potential energies of a wavefunction.
@author: R. Bourquin
@copyright: Copyright (C) 2012, 2016 R. Bourquin
@license: Modified BSD License
"""
from numpy import zeros
from WaveBlocksND import BlockFactory
from WaveBlocksND import KineticOperator
from WaveBlocksND import WaveFunction
from WaveBlocksND import BasisTransformationWF
def compute_energy(iom, blockid=0, eigentrafo=True, iseigen=True):
    """
    Compute and store the kinetic and potential energies of a
    wavefunction for every saved timestep.

    :param iom: An :py:class:`IOManager` instance providing the simulation data.
    :param blockid: The data block from which the values are read. Default is `0`.
    :param eigentrafo: Whether to make a transformation into the eigenbasis.
    :type eigentrafo: Boolean, default is ``True``.
    :param iseigen: Whether the data is assumed to be in the eigenbasis.
    :type iseigen: Boolean, default is ``True``
    """
    parameters = iom.load_parameters()

    # Number of time steps we saved
    timesteps = iom.load_wavefunction_timegrid(blockid=blockid)
    nrtimesteps = timesteps.shape[0]

    # Construct grid from the parameters
    grid = BlockFactory().create_grid(parameters)

    # The potential used
    Potential = BlockFactory().create_potential(parameters)

    # The operators
    KO = KineticOperator(grid)
    KO.calculate_operator(parameters["eps"])
    opT = KO
    # NOTE: ``is True`` comparisons are kept on purpose to preserve the
    # original strict-boolean semantics for non-bool truthy arguments.
    if eigentrafo is True:
        opV = Potential.evaluate_at(grid)
    elif iseigen is True:
        opV = Potential.evaluate_eigenvalues_at(grid, as_matrix=True)
    else:
        opV = Potential.evaluate_at(grid, as_matrix=True)

    # Basis transformator (only needed when we project to the eigenbasis)
    if eigentrafo is True:
        BT = BasisTransformationWF(Potential)
        BT.set_grid(grid)

    # And two empty wavefunctions
    WF = WaveFunction(parameters)
    WF.set_grid(grid)
    WF2 = WaveFunction(parameters)
    WF2.set_grid(grid)

    # We want to save energies, thus add a data slot to the data file
    iom.add_energy(parameters, timeslots=nrtimesteps, blockid=blockid)

    nst = Potential.get_number_components()

    if eigentrafo is True:
        # Iterate over all timesteps
        for step in timesteps:
            print(" Computing energies of timestep %d" % step)
            # Retrieve simulation data
            values = iom.load_wavefunction(timestep=step, blockid=blockid)
            values = [values[j, ...] for j in range(parameters["ncomponents"])]
            WF.set_values(values)
            # Project wavefunction values to eigenbasis
            BT.transform_to_eigen(WF)
            ekinlist = []
            epotlist = []
            # For each component of |Psi>
            values = WF.get_values()
            for index, item in enumerate(values):
                # tmp is the Vector (0, 0, 0, \psi_i, 0, 0, ...)
                tmp = [zeros(item.shape) for _ in range(nst)]
                tmp[index] = item
                WF2.set_values(tmp)
                # Project this vector to the canonical basis
                BT.transform_to_canonical(WF2)
                # And calculate the energies of these components
                ekinlist.append(WF2.kinetic_energy(opT, summed=True))
                epotlist.append(WF2.potential_energy(opV, summed=True))
            iom.save_energy((ekinlist, epotlist), timestep=step, blockid=blockid)
    else:
        # Iterate over all timesteps
        for step in timesteps:
            print(" Computing energies of timestep %d" % step)
            # Retrieve simulation data
            values = iom.load_wavefunction(timestep=step, blockid=blockid)
            values = [values[j, ...] for j in range(parameters["ncomponents"])]
            WF.set_values(values)
            # And calculate the energies of these components
            ekinlist = WF.kinetic_energy(opT, summed=False)
            epotlist = WF.potential_energy(opV, summed=False)
            iom.save_energy((ekinlist, epotlist), timestep=step, blockid=blockid)
| 33.596639 | 82 | 0.650075 | """The WaveBlocks Project
Compute the kinetic and potential energies of a wavefunction.
@author: R. Bourquin
@copyright: Copyright (C) 2012, 2016 R. Bourquin
@license: Modified BSD License
"""
from numpy import zeros
from WaveBlocksND import BlockFactory
from WaveBlocksND import KineticOperator
from WaveBlocksND import WaveFunction
from WaveBlocksND import BasisTransformationWF
def compute_energy(iom, blockid=0, eigentrafo=True, iseigen=True):
    """
    Compute and store the kinetic and potential energies of a
    wavefunction for every saved timestep.

    :param iom: An :py:class:`IOManager` instance providing the simulation data.
    :param blockid: The data block from which the values are read. Default is `0`.
    :param eigentrafo: Whether to make a transformation into the eigenbasis.
    :type eigentrafo: Boolean, default is ``True``.
    :param iseigen: Whether the data is assumed to be in the eigenbasis.
    :type iseigen: Boolean, default is ``True``
    """
    parameters = iom.load_parameters()

    # Number of time steps we saved
    timesteps = iom.load_wavefunction_timegrid(blockid=blockid)
    nrtimesteps = timesteps.shape[0]

    # Construct grid from the parameters
    grid = BlockFactory().create_grid(parameters)

    # The potential used
    Potential = BlockFactory().create_potential(parameters)

    # The operators
    KO = KineticOperator(grid)
    KO.calculate_operator(parameters["eps"])
    opT = KO
    if eigentrafo is True:
        opV = Potential.evaluate_at(grid)
    else:
        if iseigen is True:
            opV = Potential.evaluate_eigenvalues_at(grid, as_matrix=True)
        else:
            opV = Potential.evaluate_at(grid, as_matrix=True)

    # Basis transformator (only needed when we project to the eigenbasis)
    if eigentrafo is True:
        BT = BasisTransformationWF(Potential)
        BT.set_grid(grid)

    # And two empty wavefunctions
    WF = WaveFunction(parameters)
    WF.set_grid(grid)
    WF2 = WaveFunction(parameters)
    WF2.set_grid(grid)

    # We want to save norms, thus add a data slot to the data file
    # (despite the comment wording, the slot added stores energies).
    iom.add_energy(parameters, timeslots=nrtimesteps, blockid=blockid)

    nst = Potential.get_number_components()

    if eigentrafo is True:
        # Iterate over all timesteps
        for i, step in enumerate(timesteps):  # NOTE: loop index ``i`` is unused
            print(" Computing energies of timestep %d" % step)
            # Retrieve simulation data
            values = iom.load_wavefunction(timestep=step, blockid=blockid)
            values = [values[j, ...] for j in range(parameters["ncomponents"])]
            WF.set_values(values)
            # Project wavefunction values to eigenbasis
            BT.transform_to_eigen(WF)
            ekinlist = []
            epotlist = []
            # For each component of |Psi>
            values = WF.get_values()
            for index, item in enumerate(values):
                # tmp is the Vector (0, 0, 0, \psi_i, 0, 0, ...)
                tmp = [zeros(item.shape) for z in range(nst)]
                tmp[index] = item
                WF2.set_values(tmp)
                # Project this vector to the canonical basis
                BT.transform_to_canonical(WF2)
                # And calculate the energies of these components
                ekinlist.append(WF2.kinetic_energy(opT, summed=True))
                epotlist.append(WF2.potential_energy(opV, summed=True))
            iom.save_energy((ekinlist, epotlist), timestep=step, blockid=blockid)
    else:
        # Iterate over all timesteps
        for i, step in enumerate(timesteps):  # NOTE: loop index ``i`` is unused
            print(" Computing energies of timestep %d" % step)
            # Retrieve simulation data
            values = iom.load_wavefunction(timestep=step, blockid=blockid)
            values = [values[j, ...] for j in range(parameters["ncomponents"])]
            WF.set_values(values)
            # And calculate the energies of these components
            ekinlist = WF.kinetic_energy(opT, summed=False)
            epotlist = WF.potential_energy(opV, summed=False)
            iom.save_energy((ekinlist, epotlist), timestep=step, blockid=blockid)
| 0 | 0 | 0 |
88be7ca8e683cde69ec490542e2e34689e718497 | 526 | py | Python | main.py | Truta446/tibia-crawler | b08f8ee7c4c9d5cd772d939b396938ebac68266e | [
"MIT"
] | null | null | null | main.py | Truta446/tibia-crawler | b08f8ee7c4c9d5cd772d939b396938ebac68266e | [
"MIT"
] | null | null | null | main.py | Truta446/tibia-crawler | b08f8ee7c4c9d5cd772d939b396938ebac68266e | [
"MIT"
] | null | null | null | import argparse
from importlib import import_module
if __name__ == "__main__":
main() | 26.3 | 70 | 0.71673 | import argparse
from importlib import import_module
def main():
    """Load the tibia crawler module and build a Character from CLI args."""
    crawler_module = _importModule()
    cli_args = _getFlag()
    return crawler_module.Character(cli_args.name)
def _importModule():
    """Import and return the ``tibia`` crawler module."""
    return import_module("tibia")
def _getFlag():
parser = argparse.ArgumentParser(description="Chamado Crawler")
subparser = parser.add_subparsers()
crawler = subparser.add_parser("crawler")
crawler.add_argument("--name", help="Nome do personagem do tibia")
return parser.parse_args()
# Run the crawler only when this file is executed as a script.
if __name__ == "__main__":
    main()
8eba5dac7af060048aa386fd729dad3519619fe6 | 41,528 | py | Python | dim.py | zmwangx/dim | 899f777eb892fc9d3d5a116bd54f6840b3923b33 | [
"WTFPL"
] | 7 | 2018-10-16T22:27:54.000Z | 2022-02-28T02:34:44.000Z | dim.py | zmwangx/dim | 899f777eb892fc9d3d5a116bd54f6840b3923b33 | [
"WTFPL"
] | null | null | null | dim.py | zmwangx/dim | 899f777eb892fc9d3d5a116bd54f6840b3923b33 | [
"WTFPL"
] | 1 | 2020-09-11T21:39:04.000Z | 2020-09-11T21:39:04.000Z | """
:mod:`dim` is an HTML parser and simple DOM implementation with CSS
selector support.
:mod:`dim`
- is a single module;
- has no dependency outside `PSL <https://docs.python.org/3/library/>`_;
- is not crazy long;
- supports Python 3.6 and forward,
so the file could be directly embedded in any Python 3.4+ application,
or even in a monolithic source file. :mod:`dim` was designed to ease the
development of `googler(1) <https://github.com/jarun/googler/>`_, which
itself promises to be a single Python script with zero third-party dep.
Simple example:
.. doctest::
>>> import dim
>>> html = '''
... <html>
... <body>
... <table id="primary">
... <thead>
... <tr><th class="bold">A</th><th>B</th></tr>
... </thead>
... <tbody>
... <tr class="highlight"><td class="bold">1</td><td>2</td></tr>
... <tr><td class="bold">3</td><td>4</td></tr>
... <tr><td class="bold">5</td><td>6</td></tr>
... <tr><td class="bold">7</td><td>8</td></tr>
... </tbody>
... </table>
... <table id="secondary">
... <thead>
... <tr><th class="bold">C</th><th>D</th></tr>
... </thead>
... <tbody></tbody>
... </table>
... </body>
... </html>'''
>>> root = dim.parse_html(html)
>>> [elem.text for elem in root.select_all('table#primary th.bold, '
... 'table#primary tr.highlight + tr > td.bold')]
['A', '3']
>>> [elem.text for elem in root.select_all('table#primary th.bold, '
... 'table#primary tr.highlight ~ tr > td.bold')]
['A', '3', '5', '7']
>>> [elem.text for elem in root.select_all('th.bold, tr.highlight ~ tr > td.bold')]
['A', '3', '5', '7', 'C']
"""
import html
import re
from collections import OrderedDict
from enum import Enum
from html.parser import HTMLParser
from typing import (
Any,
Dict,
Generator,
Iterable,
Iterator,
List,
Match,
Optional,
Sequence,
Tuple,
Union,
cast,
)
SelectorGroupLike = Union[str, "SelectorGroup", "Selector"]
class Node(object):
    """
    Represents a DOM node.

    Parts of JavaScript's DOM ``Node`` API and ``Element`` API are
    mirrored here, with extensions. In particular, ``querySelector`` and
    ``querySelectorAll`` are mirrored.

    Notable properties and methods: :meth:`attr()`, :attr:`classes`,
    :attr:`html`, :attr:`text`, :meth:`ancestors()`,
    :meth:`descendants()`, :meth:`select()`, :meth:`select_all()`,
    :meth:`matched_by()`,

    Attributes:
        tag (:class:`Optional`\\[:class:`str`])
        attrs (:class:`Dict`\\[:class:`str`, :class:`str`])
        parent (:class:`Optional`\\[:class:`Node`])
        children (:class:`List`\\[:class:`Node`])
    """

    # Meant to be reimplemented by subclasses.
    # HTML representation of the node. Meant to be implemented by
    # subclasses.
    # NOTE(review): the constructor and the ``_normalize_selector`` /
    # ``_select_all`` helpers used below are not visible in this chunk --
    # presumably defined in an omitted part of the class; confirm.

    def select(self, selector: SelectorGroupLike) -> Optional["Node"]:
        """DOM ``querySelector`` clone. Returns one match (if any)."""
        selector = self._normalize_selector(selector)
        # Take the first hit from the (presumably lazy) generator, if any.
        for node in self._select_all(selector):
            return node
        return None

    def query_selector(self, selector: SelectorGroupLike) -> Optional["Node"]:
        """Alias of :meth:`select`."""
        return self.select(selector)

    def select_all(self, selector: SelectorGroupLike) -> List["Node"]:
        """DOM ``querySelectorAll`` clone. Returns all matches in a list."""
        selector = self._normalize_selector(selector)
        return list(self._select_all(selector))

    def query_selector_all(self, selector: SelectorGroupLike) -> List["Node"]:
        """Alias of :meth:`select_all`."""
        return self.select_all(selector)

    def matched_by(
        self, selector: SelectorGroupLike, root: Optional["Node"] = None
    ) -> bool:
        """
        Checks whether this node is matched by `selector`.

        See :meth:`SelectorGroup.matches()`.
        """
        selector = self._normalize_selector(selector)
        return selector.matches(self, root=root)

    # NOTE(review): this ``@staticmethod`` decorator appears orphaned --
    # it decorates a method taking ``self``; the decorated function was
    # likely lost when the file was filtered. Confirm against upstream.
    @staticmethod
    def next_sibling(self) -> Optional["Node"]:
        """.. note:: Not O(1), use with caution."""
        # NOTE(review): relies on ``next_siblings()``, which is not
        # visible in this chunk.
        next_siblings = self.next_siblings()
        if next_siblings:
            return next_siblings[0]
        else:
            return None

    def next_element_sibling(self) -> Optional["ElementNode"]:
        """.. note:: Not O(1), use with caution."""
        # First following sibling that is an element (skips text nodes).
        for sibling in self.next_siblings():
            if isinstance(sibling, ElementNode):
                return sibling
        return None

    def previous_sibling(self) -> Optional["Node"]:
        """.. note:: Not O(1), use with caution."""
        previous_siblings = self.previous_siblings()
        if previous_siblings:
            return previous_siblings[0]
        else:
            return None

    def previous_siblings(self) -> List["Node"]:
        """
        Compared to the natural DOM order, the order of returned nodes
        are reversed. That is, the adjacent sibling (if any) is the
        first in the returned list.
        """
        parent = self.parent
        if not parent:
            return []
        try:
            index = parent.children.index(self)
            if index > 0:
                # Reverse slice: nearest sibling first.
                return parent.children[index - 1 :: -1]
            else:
                return []
        except ValueError:  # pragma: no cover
            raise ValueError("node is not found in children of its parent")

    def previous_element_sibling(self) -> Optional["ElementNode"]:
        """.. note:: Not O(1), use with caution."""
        for sibling in self.previous_siblings():
            if isinstance(sibling, ElementNode):
                return sibling
        return None

    def ancestors(
        self, *, root: Optional["Node"] = None
    ) -> Generator["Node", None, None]:
        """
        Ancestors are generated in reverse order of depth, stopping at
        `root`.

        A :class:`RuntimeException` is raised if `root` is not in the
        ancestral chain.
        """
        if self is root:
            return
        ancestor = self.parent
        while ancestor is not root:
            if ancestor is None:
                # Walked off the top of the tree without meeting root.
                raise RuntimeError("provided root node not found in ancestral chain")
            yield ancestor
            ancestor = ancestor.parent
        # root itself is included in the generated ancestors.
        if root:
            yield root

    def descendants(self) -> Generator["Node", None, None]:
        """Descendants are generated in depth-first order."""
        for child in self.children:
            yield child
            yield from child.descendants()

    def attr(self, attr: str) -> Optional[str]:
        """Returns the attribute if it exists on the node, otherwise ``None``."""
        return self.attrs.get(attr)

    @property
    def html(self) -> str:
        """
        HTML representation of the node.

        (For a :class:`TextNode`, :meth:`html` returns the escaped version of the
        text.)
        """
        return str(self)

    def outer_html(self) -> str:
        """Alias of :attr:`html`."""
        return self.html

    def inner_html(self) -> str:
        """HTML representation of the node's children."""
        return "".join(child.html for child in self.children)

    @property
    def text(self) -> str:  # pragma: no cover
        """This property is expected to be implemented by subclasses."""
        raise NotImplementedError

    def text_content(self) -> str:
        """Alias of :attr:`text`."""
        return self.text

    # NOTE(review): dangling decorator -- the property it decorated
    # (plausibly ``classes``, referenced in the class docstring) is
    # missing from this chunk. Confirm against upstream.
    @property
class ElementNode(Node):
    """
    Represents an element node.

    Note that tag and attribute names are case-insensitive; attribute
    values are case-sensitive.
    """

    # https://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html
    # NOTE(review): the constructor and the pretty-printing hook the URL
    # above refers to are not visible in this chunk.

    def __str__(self) -> str:
        """HTML representation of the node."""
        s = "<" + self.tag
        # Attribute values are HTML-escaped; attribute names are emitted
        # as stored.
        for attr, val in self.attrs.items():
            s += ' %s="%s"' % (attr, html.escape(val))
        if self.children:
            s += ">"
            s += "".join(str(child) for child in self.children)
            s += "</%s>" % self.tag
        else:
            # Empty element: self-close void tags (e.g. <hr/>), emit an
            # explicit closing tag otherwise.  ``_tag_is_void`` is a
            # helper not visible in this chunk.
            if _tag_is_void(self.tag):
                s += "/>"
            else:
                s += "></%s>" % self.tag
        return s

    @property
    def text(self) -> str:
        """The concatenation of all descendant text nodes."""
        return "".join(child.text for child in self.children)
class TextNode(str, Node):
    """
    Represents a text node.

    Subclasses :class:`Node` and :class:`str`.
    """

    # HTML-escaped form of the text node. use text() for unescaped
    # version.

    def __eq__(self, other: object) -> bool:
        """
        Two text nodes are equal if and only if they are the same node.

        For string comparison, use :attr:`text`.
        """
        # Identity, not string equality: distinct nodes with identical
        # text still compare unequal.
        return self is other

    # NOTE(review): defining __eq__ implicitly sets __hash__ to None,
    # making instances unhashable unless __hash__ is restored elsewhere
    # (e.g. ``__hash__ = str.__hash__`` in an omitted part). Confirm.
    def __ne__(self, other: object) -> bool:
        """
        Two text nodes are non-equal if they are not the same node.

        For string comparison, use :attr:`text`.
        """
        return self is not other

    # NOTE(review): dangling decorator -- the decorated property
    # (plausibly ``text``) is missing from this chunk.
    @property
class DOMBuilderException(Exception):
    """
    Exception raised when :class:`DOMBuilder` detects a bad state.

    Attributes:
        pos (:class:`Tuple`\\[:class:`int`, :class:`int`]):
            Line number and offset in HTML input.
        why (:class:`str`):
            Reason of the exception.
    """

    # NOTE(review): the constructor that sets ``pos`` and ``why`` is not
    # visible in this chunk.
class DOMBuilder(HTMLParser):
    """
    HTML parser / DOM builder.

    Subclasses :class:`html.parser.HTMLParser`.

    Consume HTML and builds a :class:`Node` tree. Once finished, use
    :attr:`root` to access the root of the tree.

    This parser cannot parse malformed HTML with tag mismatch.
    """

    # Make parser behavior for explicitly and implicitly void elements
    # (e.g., <hr> vs <hr/>) consistent. The former triggers
    # handle_starttag only, whereas the latter triggers
    # handle_startendtag (which by default triggers both handle_starttag
    # and handle_endtag). See https://bugs.python.org/issue25258.
    #
    # An exception is foreign elements, which aren't considered void
    # elements but can be explicitly marked as self-closing according to
    # the HTML spec (e.g. <path/> is valid but <path> is not).
    # Therefore, both handle_starttag and handle_endtag must be called,
    # and handle_endtag should not be triggered from within
    # handle_starttag in that case.
    #
    # Note that for simplicity we do not check whether the foreign
    # element in question is allowed to be self-closing by spec. (The
    # SVG spec unfortunately doesn't provide a readily available list of
    # such elements.)
    #
    # https://html.spec.whatwg.org/multipage/syntax.html#foreign-elements

    # NOTE(review): the ``handle_*`` callbacks and the ``_stack``
    # initialization this property relies on are not visible in this
    # chunk.

    @property
    def root(self) -> "Node":
        """
        Finishes processing and returns the root node.

        Raises :class:`DOMBuilderException` if there is no root tag or
        root tag is not closed yet.
        """
        if not self._stack:
            raise DOMBuilderException(self.getpos(), "no root tag")
        if self._stack[0]._partial:
            raise DOMBuilderException(self.getpos(), "root tag not closed yet")
        return self._stack[0]
def parse_html(html: str, *, ParserClass: type = DOMBuilder) -> "Node":
    """
    Parse an HTML string into a DOM tree and return the root node.

    The parser may raise :class:`DOMBuilderException`.

    Args:
        html: input HTML string
        ParserClass: :class:`DOMBuilder` or a subclass

    Returns:
        Root note of the parsed tree. If the HTML string contains
        multiple top-level elements, only the first is returned and the
        rest are lost.
    """
    dom_builder = ParserClass()  # type: DOMBuilder
    dom_builder.feed(html)
    dom_builder.close()
    return dom_builder.root
class SelectorParserException(Exception):
    """
    Exception raised when the selector parser fails to parse an input.

    Attributes:
        s (:class:`str`):
            The input string to be parsed.
        cursor (:class:`int`):
            Cursor position where the failure occurred.
        why (:class:`str`):
            Reason of the failure.
    """

    # NOTE(review): the constructor that sets ``s``, ``cursor`` and
    # ``why`` is not visible in this chunk.
class SelectorGroup:
    """
    Represents a group of CSS selectors.

    A group of CSS selectors is simply a comma-separated list of
    selectors. [#]_ See :class:`Selector` documentation for the scope of
    support.

    Typically, a :class:`SelectorGroup` is constructed from a string
    (e.g., ``th.center, td.center``) using the factory function
    :meth:`from_str`.

    .. [#] https://www.w3.org/TR/selectors-3/#grouping
    """

    # NOTE(review): the constructor and the iteration protocol used
    # below (``cls(selectors)``, ``for selector in self``) are not
    # visible in this chunk.

    @classmethod
    def from_str(cls, s: str) -> "SelectorGroup":
        """
        Parses input string into a group of selectors.

        :class:`SelectorParserException` is raised on invalid input. See
        :class:`Selector` documentation for the scope of support.

        Args:
            s: input string

        Returns:
            Parsed group of selectors.
        """
        i = 0
        selectors = []
        # Selector.from_str consumes one selector up to a comma (or end
        # of string) and returns the advanced cursor, so this loop walks
        # the whole comma-separated list.
        while i < len(s):
            selector, i = Selector.from_str(s, i)
            selectors.append(selector)
        if not selectors:
            raise SelectorParserException(s, i, "selector group is empty")
        return cls(selectors)

    def matches(self, node: "Node", root: Optional["Node"] = None) -> bool:
        """
        Decides whether the group of selectors matches `node`.

        The group of selectors matches `node` as long as one of the
        selectors matches `node`.

        If `root` is provided and child and/or descendant combinators
        are involved, parent/ancestor lookup terminates at `root`.
        """
        return any(selector.matches(node, root=root) for selector in self)
class Selector:
    """
    Represents a CSS selector.

    Recall that a CSS selector is a chain of one or more *sequences of
    simple selectors* separated by *combinators*. [#selectors-3]_ This
    concept is represented as a cons list of sequences of simple
    selectors (in right to left order). This class in fact holds a
    single sequence, with an optional combinator and reference to the
    previous sequence.

    For instance, ``main#main p.important.definition >
    a.term[id][href]`` would be parsed into (schematically) the
    following structure::

        ">" tag='a' classes=('term') attrs=([id], [href]) ~>
        " " tag='p' classes=('important', 'definition') ~>
        tag='main' id='main'

    Each line is held in a separate instance of :class:`Selector`,
    linked together by the :attr:`previous` attribute.

    Supported grammar (from selectors level 3 [#selectors-3]_):

    - Type selectors;
    - Universal selectors;
    - Class selectors;
    - ID selectors;
    - Attribute selectors;
    - Combinators.

    Unsupported grammar:

    - Pseudo-classes;
    - Pseudo-elements;
    - Namespace prefixes (``ns|``, ``*|``, ``|``) in any part of any
      selector.

    Rationale:

    - Pseudo-classes have too many variants, a few of which even
      complete with an admittedly not-so-complex minilanguage. These add
      up to a lot of code.
    - Pseudo-elements are useless outside rendering contexts, hence out of
      scope.
    - Namespace support is too niche to be worth the parsing headache.
      *Using namespace prefixes may confuse the parser!*

    Note that the parser only loosely follows the spec and prioritizes
    ease of parsing (which includes readability and *writability* of
    regexes), so some invalid selectors may be accepted (in fact, false
    positives abound, but accepting valid inputs is a much more
    important goal than rejecting invalid inputs for this library), and
    some valid selectors may be rejected (but as long as you stick to
    the scope outlined above and common sense you should be fine; the
    false negatives shouldn't be used by actual human beings anyway).

    In particular, whitespace character is simplified to ``\\s`` (ASCII
    mode) despite CSS spec not counting U+000B (VT) as whitespace,
    identifiers are simplified to ``[\\w-]+`` (ASCII mode), and strings
    (attribute selector values can be either identifiers or strings)
    allow escaped quotes (i.e., ``\\'`` inside single-quoted strings and
    ``\\"`` inside double-quoted strings) but everything else is
    interpreted literally. The exact specs for CSS identifiers and
    strings can be found at [#]_.

    Certain selectors and combinators may be implemented in the parser
    but not implemented in matching and/or selection APIs.

    .. [#selectors-3] https://www.w3.org/TR/selectors-3/
    .. [#] https://www.w3.org/TR/CSS21/syndata.html

    Attributes:
        tag (:class:`Optional`\\[:class:`str`]):
            Type selector.
        classes (:class:`List`\\[:class:`str`]):
            Class selectors.
        id (:class:`Optional`\\[:class:`str`]):
            ID selector.
        attrs (:class:`List`\\[:class:`AttributeSelector`]):
            Attribute selectors.
        combinator (:class:`Optional`\\[:class:`Combinator`]):
            Combinator with the previous sequence of simple selectors in
            chain.
        previous (:class:`Optional`\\[:class:`Selector`]):
            Reference to the previous sequence of simple selectors in
            chain.
    """

    # Format a single sequence of simple selectors, without combinator.
    # NOTE(review): the comment above likely described a formatting
    # helper that, along with the constructor, is missing from this
    # chunk.

    @classmethod
    def from_str(cls, s: str, cursor: int = 0) -> Tuple["Selector", int]:
        """
        Parses input string into selector.

        This factory function only parses out one selector (up to a
        comma or EOS), so partial consumption is allowed --- an optional
        `cursor` is taken as input (0 by default) and the moved cursor
        (either after the comma or at EOS) is returned as part of the
        output.

        :class:`SelectorParserException` is raised on invalid input. See
        :class:`Selector` documentation for the scope of support.

        If you need to completely consume a string representing
        (potentially) a group of selectors, use
        :meth:`SelectorGroup.from_str()`.

        Args:
            s: input string
            cursor: initial cursor position on `s`

        Returns:
            A tuple containing the parsed selector and the moved the
            cursor (either after a comma-delimiter, or at EOS).
        """
        # Simple selectors.
        TYPE_SEL = re.compile(r"[\w-]+", re.A)
        UNIVERSAL_SEL = re.compile(r"\*")
        # Verbose-mode (re.X) pattern, so internal whitespace in the raw
        # string is insignificant.
        ATTR_SEL = re.compile(
            r"""\[
            \s*(?P<attr>[\w-]+)\s*
            (
                (?P<op>[~|^$*]?=)\s*
                (
                    (?P<val_identifier>[\w-]+)|
                    (?P<val_string>
                        (?P<quote>['"])
                        (?P<val_string_inner>.*?)
                        (?<!\\)(?P=quote)
                    )
                )\s*
            )?
            \]""",
            re.A | re.X,
        )
        CLASS_SEL = re.compile(r"\.([\w-]+)", re.A)
        ID_SEL = re.compile(r"#([\w-]+)", re.A)
        PSEUDO_CLASS_SEL = re.compile(r":[\w-]+(\([^)]+\))?", re.A)
        PSEUDO_ELEM_SEL = re.compile(r"::[\w-]+", re.A)

        # Combinators
        DESCENDANT_COM = re.compile(r"\s+")
        CHILD_COM = re.compile(r"\s*>\s*")
        NEXT_SIB_COM = re.compile(r"\s*\+\s*")
        SUB_SIB_COM = re.compile(r"\s*~\s*")

        # Misc
        WHITESPACE = re.compile(r"\s*")
        END_OF_SELECTOR = re.compile(r"\s*($|,)")

        # Accumulators for the sequence of simple selectors currently
        # being parsed.
        tag = None
        classes = []
        id = None
        attrs = []
        combinator = None

        selector = None
        previous_combinator = None

        i = cursor

        # Skip leading whitespace
        m = WHITESPACE.match(s, i)
        if m:
            i = m.end()

        while i < len(s):
            # Parse one simple selector.
            #
            # PEP 572 (assignment expressions; the one that burned Guido
            # so much that he resigned as BDFL) would have been nice; it
            # would have saved us from all the regex match
            # reassignments, and worse still, the casts, since mypy
            # complains about getting Optional[Match[str]] instead of
            # Match[str].
            if TYPE_SEL.match(s, i):
                if tag:
                    raise SelectorParserException(s, i, "multiple type selectors found")
                m = cast(Match[str], TYPE_SEL.match(s, i))
                tag = m.group()
            elif UNIVERSAL_SEL.match(s, i):
                # Universal selector matches everything; nothing to record.
                m = cast(Match[str], UNIVERSAL_SEL.match(s, i))
            elif ATTR_SEL.match(s, i):
                m = cast(Match[str], ATTR_SEL.match(s, i))

                attr = m.group("attr")
                op = m.group("op")
                val_identifier = m.group("val_identifier")
                quote = m.group("quote")
                val_string_inner = m.group("val_string_inner")
                if val_identifier is not None:
                    val = val_identifier
                elif val_string_inner is not None:
                    # Unescape quotes of the string's own quoting style.
                    val = val_string_inner.replace("\\" + quote, quote)
                else:
                    val = None

                # NOTE: ``type`` shadows the builtin within this branch.
                if op is None:
                    type = AttributeSelectorType.BARE
                elif op == "=":
                    type = AttributeSelectorType.EQUAL
                elif op == "~=":
                    type = AttributeSelectorType.TILDE
                elif op == "|=":
                    type = AttributeSelectorType.PIPE
                elif op == "^=":
                    type = AttributeSelectorType.CARET
                elif op == "$=":
                    type = AttributeSelectorType.DOLLAR
                elif op == "*=":
                    type = AttributeSelectorType.ASTERISK
                else:  # pragma: no cover
                    raise SelectorParserException(
                        s,
                        i,
                        "unrecognized operator %s in attribute selector" % repr(op),
                    )
                attrs.append(AttributeSelector(attr, val, type))
            elif CLASS_SEL.match(s, i):
                m = cast(Match[str], CLASS_SEL.match(s, i))
                classes.append(m.group(1))
            elif ID_SEL.match(s, i):
                if id:
                    raise SelectorParserException(s, i, "multiple id selectors found")
                m = cast(Match[str], ID_SEL.match(s, i))
                id = m.group(1)
            elif PSEUDO_CLASS_SEL.match(s, i):
                raise SelectorParserException(s, i, "pseudo-classes not supported")
            elif PSEUDO_ELEM_SEL.match(s, i):
                raise SelectorParserException(s, i, "pseudo-elements not supported")
            else:
                raise SelectorParserException(
                    s, i, "expecting simple selector, found none"
                )
            i = m.end()

            # Try to parse a combinator, or end the selector.
            if CHILD_COM.match(s, i):
                m = cast(Match[str], CHILD_COM.match(s, i))
                combinator = Combinator.CHILD
            elif NEXT_SIB_COM.match(s, i):
                m = cast(Match[str], NEXT_SIB_COM.match(s, i))
                combinator = Combinator.NEXT_SIBLING
            elif SUB_SIB_COM.match(s, i):
                m = cast(Match[str], SUB_SIB_COM.match(s, i))
                combinator = Combinator.SUBSEQUENT_SIBLING
            elif END_OF_SELECTOR.match(s, i):
                m = cast(Match[str], END_OF_SELECTOR.match(s, i))
                combinator = None
            # Need to parse descendant combinator at the very end
            # because it could be a prefix to all previous cases.
            elif DESCENDANT_COM.match(s, i):
                m = cast(Match[str], DESCENDANT_COM.match(s, i))
                combinator = Combinator.DESCENDANT
            else:
                # No combinator and not end of selector: keep parsing
                # more simple selectors in the same sequence.
                continue
            i = m.end()

            if combinator and i == len(s):
                raise SelectorParserException(s, i, "unexpected end at combinator")

            # Close out the current sequence of simple selectors,
            # linking it to the previously parsed sequence.
            selector = cls(
                tag=tag,
                classes=classes,
                id=id,
                attrs=attrs,
                combinator=previous_combinator,
                previous=selector,
            )
            previous_combinator = combinator

            # End of selector.
            if combinator is None:
                break

            # Reset accumulators for the next sequence.
            tag = None
            classes = []
            id = None
            attrs = []
            combinator = None

        if not selector:
            raise SelectorParserException(s, i, "selector is empty")
        return selector, i

    def matches(self, node: "Node", root: Optional["Node"] = None) -> bool:
        """
        Decides whether the selector matches `node`.

        Each sequence of simple selectors in the selector's chain must
        be matched for a positive.

        If `root` is provided and child and/or descendant combinators
        are involved, parent/ancestor lookup terminates at `root`.
        """
        # First check this sequence's own simple selectors against node.
        if self.tag:
            if not node.tag or node.tag != self.tag:
                return False
        if self.id:
            if node.attrs.get("id") != self.id:
                return False
        if self.classes:
            classes = node.classes
            for class_ in self.classes:
                if class_ not in classes:
                    return False
        if self.attrs:
            for attr_selector in self.attrs:
                if not attr_selector.matches(node):
                    return False
        if not self.previous:
            return True
        # Then recurse into the previous sequence per the combinator.
        if self.combinator == Combinator.DESCENDANT:
            # NOTE(review): ``node.ancestors()`` is called without
            # ``root=root`` here while ``matches`` receives root --
            # confirm whether ancestor lookup should stop at root.
            return any(
                self.previous.matches(ancestor, root=root)
                for ancestor in node.ancestors()
            )
        elif self.combinator == Combinator.CHILD:
            if node is root or node.parent is None:
                return False
            else:
                # NOTE(review): root is not propagated in this recursive
                # call; confirm intent.
                return self.previous.matches(node.parent)
        elif self.combinator == Combinator.NEXT_SIBLING:
            sibling = node.previous_element_sibling()
            if not sibling:
                return False
            else:
                return self.previous.matches(sibling)
        elif self.combinator == Combinator.SUBSEQUENT_SIBLING:
            return any(
                self.previous.matches(sibling, root=root)
                for sibling in node.previous_siblings()
                if isinstance(sibling, ElementNode)
            )
        else:  # pragma: no cover
            raise RuntimeError("unimplemented combinator: %s" % repr(self.combinator))
class AttributeSelector:
    """
    Represents a single attribute selector, e.g. ``[href]`` or
    ``[id=main]``.

    The syntactic form of the selector (bare, ``=``, ``~=``, ``|=``,
    ``^=``, ``$=``, or ``*=``) is recorded in :attr:`type`; see
    :class:`AttributeSelectorType` for the variants.

    Attributes:
        attr (:class:`str`): the attribute name being tested
        val (:class:`Optional`\\[:class:`str`]): the value compared
            against; ``None`` for the bare form ``[attr]``
        type (:class:`AttributeSelectorType`): form of the selector
    """
# Enum standing in for a proper algebraic sum type.
class AttributeSelectorType(Enum):
    """
    Enumerates the forms an attribute selector can take.

    Each member corresponds to one syntactic variant:

    - :attr:`BARE`: ``[attr]``;
    - :attr:`EQUAL`: ``[attr=val]``;
    - :attr:`TILDE`: ``[attr~=val]``;
    - :attr:`PIPE`: ``[attr|=val]``;
    - :attr:`CARET`: ``[attr^=val]``;
    - :attr:`DOLLAR`: ``[attr$=val]``;
    - :attr:`ASTERISK`: ``[attr*=val]``.
    """

    BARE = 1  # [attr]
    EQUAL = 2  # [attr=val]
    TILDE = 3  # [attr~=val]
    PIPE = 4  # [attr|=val]
    CARET = 5  # [attr^=val]
    DOLLAR = 6  # [attr$=val]
    ASTERISK = 7  # [attr*=val]
class Combinator(Enum):
    """
    Enumerates the supported selector combinators.

    Each member corresponds to one combinator form:

    - :attr:`DESCENDANT`: ``A B``;
    - :attr:`CHILD`: ``A > B``;
    - :attr:`NEXT_SIBLING`: ``A + B``;
    - :attr:`SUBSEQUENT_SIBLING`: ``A ~ B``.
    """

    DESCENDANT = 1  # whitespace
    CHILD = 2  # >
    NEXT_SIBLING = 3  # +
    SUBSEQUENT_SIBLING = 4  # ~
def _tag_is_void(tag: str) -> bool:
"""
Checks whether the tag corresponds to a void element.
https://www.w3.org/TR/html5/syntax.html#void-elements
https://html.spec.whatwg.org/multipage/syntax.html#void-elements
"""
return tag.lower() in (
"area",
"base",
"br",
"col",
"embed",
"hr",
"img",
"input",
"link",
"meta",
"param",
"source",
"track",
"wbr",
)
def _tag_encloses_foreign_namespace(tag: str) -> bool:
"""
Checks whether the tag encloses a foreign namespace (MathML or SVG).
https://html.spec.whatwg.org/multipage/syntax.html#foreign-elements
"""
return tag.lower() in ("math", "svg")
| 33.707792 | 91 | 0.564149 | """
:mod:`dim` is an HTML parser and simple DOM implementation with CSS
selector support.
:mod:`dim`
- is a single module;
- has no dependency outside `PSL <https://docs.python.org/3/library/>`_;
- is not crazy long;
- supports Python 3.6 and forward,
so the file could be directly embedded in any Python 3.4+ application,
or even in a monolithic source file. :mod:`dim` was designed to ease the
development of `googler(1) <https://github.com/jarun/googler/>`_, which
itself promises to be a single Python script with zero third-party dep.
Simple example:
.. doctest::
>>> import dim
>>> html = '''
... <html>
... <body>
... <table id="primary">
... <thead>
... <tr><th class="bold">A</th><th>B</th></tr>
... </thead>
... <tbody>
... <tr class="highlight"><td class="bold">1</td><td>2</td></tr>
... <tr><td class="bold">3</td><td>4</td></tr>
... <tr><td class="bold">5</td><td>6</td></tr>
... <tr><td class="bold">7</td><td>8</td></tr>
... </tbody>
... </table>
... <table id="secondary">
... <thead>
... <tr><th class="bold">C</th><th>D</th></tr>
... </thead>
... <tbody></tbody>
... </table>
... </body>
... </html>'''
>>> root = dim.parse_html(html)
>>> [elem.text for elem in root.select_all('table#primary th.bold, '
... 'table#primary tr.highlight + tr > td.bold')]
['A', '3']
>>> [elem.text for elem in root.select_all('table#primary th.bold, '
... 'table#primary tr.highlight ~ tr > td.bold')]
['A', '3', '5', '7']
>>> [elem.text for elem in root.select_all('th.bold, tr.highlight ~ tr > td.bold')]
['A', '3', '5', '7', 'C']
"""
import html
import re
from collections import OrderedDict
from enum import Enum
from html.parser import HTMLParser
from typing import (
Any,
Dict,
Generator,
Iterable,
Iterator,
List,
Match,
Optional,
Sequence,
Tuple,
Union,
cast,
)
SelectorGroupLike = Union[str, "SelectorGroup", "Selector"]
class Node(object):
"""
Represents a DOM node.
Parts of JavaScript's DOM ``Node`` API and ``Element`` API are
mirrored here, with extensions. In particular, ``querySelector`` and
``querySelectorAll`` are mirrored.
Notable properties and methods: :meth:`attr()`, :attr:`classes`,
:attr:`html`, :attr:`text`, :meth:`ancestors()`,
:meth:`descendants()`, :meth:`select()`, :meth:`select_all()`,
:meth:`matched_by()`,
Attributes:
tag (:class:`Optional`\\[:class:`str`])
attrs (:class:`Dict`\\[:class:`str`, :class:`str`])
parent (:class:`Optional`\\[:class:`Node`])
children (:class:`List`\\[:class:`Node`])
"""
# Meant to be reimplemented by subclasses.
def __init__(self) -> None:
self.tag = None # type: Optional[str]
self.attrs = {} # type: Dict[str, str]
self.parent = None # type: Optional[Node]
self.children = [] # type: List[Node]
# Used in DOMBuilder.
self._partial = False
self._namespace = None # type: Optional[str]
# HTML representation of the node. Meant to be implemented by
# subclasses.
def __str__(self) -> str: # pragma: no cover
raise NotImplementedError
def select(self, selector: SelectorGroupLike) -> Optional["Node"]:
"""DOM ``querySelector`` clone. Returns one match (if any)."""
selector = self._normalize_selector(selector)
for node in self._select_all(selector):
return node
return None
def query_selector(self, selector: SelectorGroupLike) -> Optional["Node"]:
"""Alias of :meth:`select`."""
return self.select(selector)
def select_all(self, selector: SelectorGroupLike) -> List["Node"]:
"""DOM ``querySelectorAll`` clone. Returns all matches in a list."""
selector = self._normalize_selector(selector)
return list(self._select_all(selector))
def query_selector_all(self, selector: SelectorGroupLike) -> List["Node"]:
"""Alias of :meth:`select_all`."""
return self.select_all(selector)
def matched_by(
self, selector: SelectorGroupLike, root: Optional["Node"] = None
) -> bool:
"""
Checks whether this node is matched by `selector`.
See :meth:`SelectorGroup.matches()`.
"""
selector = self._normalize_selector(selector)
return selector.matches(self, root=root)
@staticmethod
def _normalize_selector(selector: SelectorGroupLike) -> "SelectorGroup":
if isinstance(selector, str):
return SelectorGroup.from_str(selector)
if isinstance(selector, SelectorGroup):
return selector
if isinstance(selector, Selector):
return SelectorGroup([selector])
raise ValueError("not a selector or group of selectors: %s" % repr(selector))
def _select_all(self, selector: "SelectorGroup") -> Generator["Node", None, None]:
for descendant in self.descendants():
if selector.matches(descendant, root=self):
yield descendant
def child_nodes(self) -> List["Node"]:
return self.children
def first_child(self) -> Optional["Node"]:
if self.children:
return self.children[0]
else:
return None
def first_element_child(self) -> Optional["Node"]:
for child in self.children:
if isinstance(child, ElementNode):
return child
return None
def last_child(self) -> Optional["Node"]:
if self.children:
return self.children[-1]
else:
return None
def last_element_child(self) -> Optional["Node"]:
for child in reversed(self.children):
if isinstance(child, ElementNode):
return child
return None
def next_sibling(self) -> Optional["Node"]:
""".. note:: Not O(1), use with caution."""
next_siblings = self.next_siblings()
if next_siblings:
return next_siblings[0]
else:
return None
def next_siblings(self) -> List["Node"]:
parent = self.parent
if not parent:
return []
try:
index = parent.children.index(self)
return parent.children[index + 1 :]
except ValueError: # pragma: no cover
raise ValueError("node is not found in children of its parent")
def next_element_sibling(self) -> Optional["ElementNode"]:
""".. note:: Not O(1), use with caution."""
for sibling in self.next_siblings():
if isinstance(sibling, ElementNode):
return sibling
return None
def previous_sibling(self) -> Optional["Node"]:
""".. note:: Not O(1), use with caution."""
previous_siblings = self.previous_siblings()
if previous_siblings:
return previous_siblings[0]
else:
return None
def previous_siblings(self) -> List["Node"]:
"""
Compared to the natural DOM order, the order of returned nodes
are reversed. That is, the adjacent sibling (if any) is the
first in the returned list.
"""
parent = self.parent
if not parent:
return []
try:
index = parent.children.index(self)
if index > 0:
return parent.children[index - 1 :: -1]
else:
return []
except ValueError: # pragma: no cover
raise ValueError("node is not found in children of its parent")
def previous_element_sibling(self) -> Optional["ElementNode"]:
""".. note:: Not O(1), use with caution."""
for sibling in self.previous_siblings():
if isinstance(sibling, ElementNode):
return sibling
return None
def ancestors(
self, *, root: Optional["Node"] = None
) -> Generator["Node", None, None]:
"""
Ancestors are generated in reverse order of depth, stopping at
`root`.
A :class:`RuntimeException` is raised if `root` is not in the
ancestral chain.
"""
if self is root:
return
ancestor = self.parent
while ancestor is not root:
if ancestor is None:
raise RuntimeError("provided root node not found in ancestral chain")
yield ancestor
ancestor = ancestor.parent
if root:
yield root
def descendants(self) -> Generator["Node", None, None]:
"""Descendants are generated in depth-first order."""
for child in self.children:
yield child
yield from child.descendants()
def attr(self, attr: str) -> Optional[str]:
"""Returns the attribute if it exists on the node, otherwise ``None``."""
return self.attrs.get(attr)
@property
def html(self) -> str:
"""
HTML representation of the node.
(For a :class:`TextNode`, :meth:`html` returns the escaped version of the
text.
"""
return str(self)
def outer_html(self) -> str:
"""Alias of :attr:`html`."""
return self.html
def inner_html(self) -> str:
"""HTML representation of the node's children."""
return "".join(child.html for child in self.children)
@property
def text(self) -> str: # pragma: no cover
"""This property is expected to be implemented by subclasses."""
raise NotImplementedError
def text_content(self) -> str:
"""Alias of :attr:`text`."""
return self.text
@property
def classes(self) -> List[str]:
return self.attrs.get("class", "").split()
def class_list(self) -> List[str]:
return self.classes
class ElementNode(Node):
"""
Represents an element node.
Note that tag and attribute names are case-insensitive; attribute
values are case-sensitive.
"""
def __init__(
self,
tag: str,
attrs: Iterable[Tuple[str, Optional[str]]],
*,
parent: Optional["Node"] = None,
children: Optional[Sequence["Node"]] = None
) -> None:
Node.__init__(self)
self.tag = tag.lower() # type: str
self.attrs = OrderedDict((attr.lower(), val or "") for attr, val in attrs)
self.parent = parent
self.children = list(children or [])
def __repr__(self) -> str:
s = "<" + self.tag
if self.attrs:
s += " attrs=%s" % repr(list(self.attrs.items()))
if self.children:
s += " children=%s" % repr(self.children)
s += ">"
return s
# https://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html
def _repr_pretty_(self, p: Any, cycle: bool) -> None: # pragma: no cover
if cycle:
raise RuntimeError("cycle detected in DOM tree")
p.text("<\x1b[1m%s\x1b[0m" % self.tag)
if self.attrs:
p.text(" attrs=%s" % repr(list(self.attrs.items())))
if self.children:
p.text(" children=[")
if len(self.children) == 1 and isinstance(self.first_child(), TextNode):
p.text("\x1b[4m%s\x1b[0m" % repr(self.first_child()))
else:
with p.indent(2):
for child in self.children:
p.break_()
if hasattr(child, "_repr_pretty_"):
child._repr_pretty_(p, False) # type: ignore
else:
p.text("\x1b[4m%s\x1b[0m" % repr(child))
p.text(",")
p.break_()
p.text("]")
p.text(">")
def __str__(self) -> str:
"""HTML representation of the node."""
s = "<" + self.tag
for attr, val in self.attrs.items():
s += ' %s="%s"' % (attr, html.escape(val))
if self.children:
s += ">"
s += "".join(str(child) for child in self.children)
s += "</%s>" % self.tag
else:
if _tag_is_void(self.tag):
s += "/>"
else:
s += "></%s>" % self.tag
return s
@property
def text(self) -> str:
"""The concatenation of all descendant text nodes."""
return "".join(child.text for child in self.children)
class TextNode(str, Node):
"""
Represents a text node.
Subclasses :class:`Node` and :class:`str`.
"""
def __new__(cls, text: str) -> "TextNode":
s = str.__new__(cls, text) # type: ignore
s.parent = None
return s # type: ignore
def __init__(self, text: str) -> None:
Node.__init__(self)
def __repr__(self) -> str:
return "<%s>" % str.__repr__(self)
# HTML-escaped form of the text node. use text() for unescaped
# version.
def __str__(self) -> str:
return html.escape(self)
def __eq__(self, other: object) -> bool:
"""
Two text nodes are equal if and only if they are the same node.
For string comparison, use :attr:`text`.
"""
return self is other
def __ne__(self, other: object) -> bool:
"""
Two text nodes are non-equal if they are not the same node.
For string comparison, use :attr:`text`.
"""
return self is not other
@property
def text(self) -> str:
return str.__str__(self)
class DOMBuilderException(Exception):
    """
    Raised when :class:`DOMBuilder` runs into an inconsistent state.

    Attributes:
        pos (:class:`Tuple`\\[:class:`int`, :class:`int`]):
            Line number and offset in the HTML input.
        why (:class:`str`):
            Human-readable reason for the failure.
    """

    def __init__(self, pos: Tuple[int, int], why: str) -> None:
        self.pos = pos
        self.why = why

    def __str__(self) -> str:  # pragma: no cover
        line, offset = self.pos
        return "DOM builder aborted at %d:%d: %s" % (line, offset, self.why)
class DOMBuilder(HTMLParser):
"""
HTML parser / DOM builder.
Subclasses :class:`html.parser.HTMLParser`.
Consume HTML and builds a :class:`Node` tree. Once finished, use
:attr:`root` to access the root of the tree.
This parser cannot parse malformed HTML with tag mismatch.
"""
def __init__(self) -> None:
super().__init__(convert_charrefs=True)
# _stack is the stack for nodes. Each node is pushed to the
# stack when its start tag is processed, and remains on the
# stack until its parent node is completed (end tag processed),
# at which point the node is attached to the parent node as a
# child and popped from the stack.
self._stack = [] # type: List[Node]
# _namespace_stack is another stack tracking the parsing
# context, which is generally the default namespace (None) but
# changes when parsing foreign objects (e.g. 'svg' when parsing
# an <svg>). The top element is always the current parsing
# context, so popping works differently from _stack: an element
# is popped as soon as the corresponding end tag is processed.
self._namespace_stack = [None] # type: List[Optional[str]]
def handle_starttag(
self, tag: str, attrs: Sequence[Tuple[str, Optional[str]]]
) -> None:
node = ElementNode(tag, attrs)
node._partial = True
self._stack.append(node)
namespace = (
tag.lower()
if _tag_encloses_foreign_namespace(tag)
else self._namespace_stack[-1] # Inherit parent namespace
)
node._namespace = namespace
self._namespace_stack.append(namespace)
# For void elements (not in a foreign context), immediately
# invoke the end tag handler (see handle_startendtag()).
if not namespace and _tag_is_void(tag):
self.handle_endtag(tag)
def handle_endtag(self, tag: str) -> None:
tag = tag.lower()
children = []
while self._stack and not self._stack[-1]._partial:
children.append(self._stack.pop())
if not self._stack:
raise DOMBuilderException(self.getpos(), "extra end tag: %s" % repr(tag))
parent = self._stack[-1]
if parent.tag != tag:
raise DOMBuilderException(
self.getpos(),
"expecting end tag %s, got %s" % (repr(parent.tag), repr(tag)),
)
parent.children = list(reversed(children))
parent._partial = False
for child in children:
child.parent = parent
self._namespace_stack.pop()
# Make parser behavior for explicitly and implicitly void elements
# (e.g., <hr> vs <hr/>) consistent. The former triggers
# handle_starttag only, whereas the latter triggers
# handle_startendtag (which by default triggers both handle_starttag
# and handle_endtag). See https://bugs.python.org/issue25258.
#
# An exception is foreign elements, which aren't considered void
# elements but can be explicitly marked as self-closing according to
# the HTML spec (e.g. <path/> is valid but <path> is not).
# Therefore, both handle_starttag and handle_endtag must be called,
# and handle_endtag should not be triggered from within
# handle_starttag in that case.
#
# Note that for simplicity we do not check whether the foreign
# element in question is allowed to be self-closing by spec. (The
# SVG spec unfortunately doesn't provide a readily available list of
# such elements.)
#
# https://html.spec.whatwg.org/multipage/syntax.html#foreign-elements
def handle_startendtag(
self, tag: str, attrs: Sequence[Tuple[str, Optional[str]]]
) -> None:
if self._namespace_stack[-1] or _tag_encloses_foreign_namespace(tag):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
else:
self.handle_starttag(tag, attrs)
def handle_data(self, text: str) -> None:
if not self._stack:
# Ignore text nodes before the first tag.
return
self._stack.append(TextNode(text))
@property
def root(self) -> "Node":
"""
Finishes processing and returns the root node.
Raises :class:`DOMBuilderException` if there is no root tag or
root tag is not closed yet.
"""
if not self._stack:
raise DOMBuilderException(self.getpos(), "no root tag")
if self._stack[0]._partial:
raise DOMBuilderException(self.getpos(), "root tag not closed yet")
return self._stack[0]
def parse_html(html: str, *, ParserClass: type = DOMBuilder) -> "Node":
    """
    Parses an HTML string into a DOM tree and returns its root node.

    The underlying parser may raise :class:`DOMBuilderException` on
    malformed input.

    Args:
        html: input HTML string
        ParserClass: :class:`DOMBuilder` or a subclass thereof

    Returns:
        Root node of the parsed tree. When the HTML string contains
        several top-level elements, only the first one is returned;
        the rest are discarded.
    """
    parser = ParserClass()  # type: DOMBuilder
    parser.feed(html)
    parser.close()
    return parser.root
class SelectorParserException(Exception):
    """
    Raised when the selector parser cannot make sense of its input.

    Attributes:
        s (:class:`str`):
            The input string being parsed.
        cursor (:class:`int`):
            Position in `s` at which parsing failed.
        why (:class:`str`):
            Human-readable reason for the failure.
    """

    def __init__(self, s: str, cursor: int, why: str) -> None:
        self.s = s
        self.cursor = cursor
        self.why = why

    def __str__(self) -> str:  # pragma: no cover
        return (
            f"selector parser aborted at character {self.cursor} "
            f"of {self.s!r}: {self.why}"
        )
class SelectorGroup:
"""
Represents a group of CSS selectors.
A group of CSS selectors is simply a comma-separated list of
selectors. [#]_ See :class:`Selector` documentation for the scope of
support.
Typically, a :class:`SelectorGroup` is constructed from a string
(e.g., ``th.center, td.center``) using the factory function
:meth:`from_str`.
.. [#] https://www.w3.org/TR/selectors-3/#grouping
"""
def __init__(self, selectors: Iterable["Selector"]) -> None:
self._selectors = list(selectors)
def __repr__(self) -> str:
return "<SelectorGroup %s>" % repr(str(self))
def __str__(self) -> str:
return ", ".join(str(selector) for selector in self._selectors)
def __len__(self) -> int:
return len(self._selectors)
def __getitem__(self, index: int) -> "Selector":
return self._selectors[index]
def __iter__(self) -> Iterator["Selector"]:
return iter(self._selectors)
@classmethod
def from_str(cls, s: str) -> "SelectorGroup":
"""
Parses input string into a group of selectors.
:class:`SelectorParserException` is raised on invalid input. See
:class:`Selector` documentation for the scope of support.
Args:
s: input string
Returns:
Parsed group of selectors.
"""
i = 0
selectors = []
while i < len(s):
selector, i = Selector.from_str(s, i)
selectors.append(selector)
if not selectors:
raise SelectorParserException(s, i, "selector group is empty")
return cls(selectors)
def matches(self, node: "Node", root: Optional["Node"] = None) -> bool:
"""
Decides whether the group of selectors matches `node`.
The group of selectors matches `node` as long as one of the
selectors matches `node`.
If `root` is provided and child and/or descendant combinators
are involved, parent/ancestor lookup terminates at `root`.
"""
return any(selector.matches(node, root=root) for selector in self)
class Selector:
"""
Represents a CSS selector.
Recall that a CSS selector is a chain of one or more *sequences of
simple selectors* separated by *combinators*. [#selectors-3]_ This
concept is represented as a cons list of sequences of simple
selectors (in right to left order). This class in fact holds a
single sequence, with an optional combinator and reference to the
previous sequence.
For instance, ``main#main p.important.definition >
a.term[id][href]`` would be parsed into (schematically) the
following structure::
">" tag='a' classes=('term') attrs=([id], [href]) ~>
" " tag='p' classes=('important', 'definition') ~>
tag='main' id='main'
Each line is held in a separate instance of :class:`Selector`,
linked together by the :attr:`previous` attribute.
Supported grammar (from selectors level 3 [#selectors-3]_):
- Type selectors;
- Universal selectors;
- Class selectors;
- ID selectors;
- Attribute selectors;
- Combinators.
Unsupported grammar:
- Pseudo-classes;
- Pseudo-elements;
- Namespace prefixes (``ns|``, ``*|``, ``|``) in any part of any
selector.
Rationale:
- Pseudo-classes have too many variants, a few of which even
complete with an admittedly not-so-complex minilanguage. These add
up to a lot of code.
- Pseudo-elements are useless outside rendering contexts, hence out of
scope.
- Namespace support is too niche to be worth the parsing headache.
*Using namespace prefixes may confuse the parser!*
Note that the parser only loosely follows the spec and priotizes
ease of parsing (which includes readability and *writability* of
regexes), so some invalid selectors may be accepted (in fact, false
positives abound, but accepting valid inputs is a much more
important goal than rejecting invalid inputs for this library), and
some valid selectors may be rejected (but as long as you stick to
the scope outlined above and common sense you should be fine; the
false negatives shouldn't be used by actual human beings anyway).
In particular, whitespace character is simplified to ``\\s`` (ASCII
mode) despite CSS spec not counting U+000B (VT) as whitespace,
identifiers are simplified to ``[\\w-]+`` (ASCII mode), and strings
(attribute selector values can be either identifiers or strings)
allow escaped quotes (i.e., ``\\'`` inside single-quoted strings and
``\\"`` inside double-quoted strings) but everything else is
interpreted literally. The exact specs for CSS identifiers and
strings can be found at [#]_.
Certain selectors and combinators may be implemented in the parser
but not implemented in matching and/or selection APIs.
.. [#selectors-3] https://www.w3.org/TR/selectors-3/
.. [#] https://www.w3.org/TR/CSS21/syndata.html
Attributes:
tag (:class:`Optional`\\[:class:`str`]):
Type selector.
classes (:class:`List`\\[:class:`str`]):
Class selectors.
id (:class:`Optional`\\[:class:`str`]):
ID selector.
attrs (:class:`List`\\[:class:`AttributeSelector`]):
Attribute selectors.
combinator (:class:`Optional`\\[:class:`Combinator`]):
Combinator with the previous sequence of simple selectors in
chain.
previous (:class:`Optional`\\[:class:`Selector`]):
Reference to the previous sequence of simple selectors in
chain.
"""
def __init__(
self,
*,
tag: Optional[str] = None,
classes: Optional[Sequence[str]] = None,
id: Optional[str] = None,
attrs: Optional[Sequence["AttributeSelector"]] = None,
combinator: Optional["Combinator"] = None,
previous: Optional["Selector"] = None
) -> None:
self.tag = tag.lower() if tag else None
self.classes = list(classes or [])
self.id = id
self.attrs = list(attrs or [])
self.combinator = combinator
self.previous = previous
def __repr__(self) -> str:
return "<Selector %s>" % repr(str(self))
def __str__(self) -> str:
sequences = []
delimiters = []
seq = self
while True:
sequences.append(seq._sequence_str_())
if seq.previous:
if seq.combinator == Combinator.DESCENDANT:
delimiters.append(" ")
elif seq.combinator == Combinator.CHILD:
delimiters.append(" > ")
elif seq.combinator == Combinator.NEXT_SIBLING:
delimiters.append(" + ")
elif seq.combinator == Combinator.SUBSEQUENT_SIBLING:
delimiters.append(" ~ ")
else: # pragma: no cover
raise RuntimeError(
"unimplemented combinator: %s" % repr(self.combinator)
)
seq = seq.previous
else:
delimiters.append("")
break
return "".join(
delimiter + sequence
for delimiter, sequence in zip(reversed(delimiters), reversed(sequences))
)
# Format a single sequence of simple selectors, without combinator.
def _sequence_str_(self) -> str:
s = ""
if self.tag:
s += self.tag
if self.classes:
s += "".join(".%s" % class_ for class_ in self.classes)
if self.id:
s += "#%s" % self.id
if self.attrs:
s += "".join(str(attr) for attr in self.attrs)
return s if s else "*"
@classmethod
def from_str(cls, s: str, cursor: int = 0) -> Tuple["Selector", int]:
"""
Parses input string into selector.
This factory function only parses out one selector (up to a
comma or EOS), so partial consumption is allowed --- an optional
`cursor` is taken as input (0 by default) and the moved cursor
(either after the comma or at EOS) is returned as part of the
output.
:class:`SelectorParserException` is raised on invalid input. See
:class:`Selector` documentation for the scope of support.
If you need to completely consume a string representing
(potentially) a group of selectors, use
:meth:`SelectorGroup.from_str()`.
Args:
s: input string
cursor: initial cursor position on `s`
Returns:
A tuple containing the parsed selector and the moved the
cursor (either after a comma-delimiter, or at EOS).
"""
# Simple selectors.
TYPE_SEL = re.compile(r"[\w-]+", re.A)
UNIVERSAL_SEL = re.compile(r"\*")
ATTR_SEL = re.compile(
r"""\[
\s*(?P<attr>[\w-]+)\s*
(
(?P<op>[~|^$*]?=)\s*
(
(?P<val_identifier>[\w-]+)|
(?P<val_string>
(?P<quote>['"])
(?P<val_string_inner>.*?)
(?<!\\)(?P=quote)
)
)\s*
)?
\]""",
re.A | re.X,
)
CLASS_SEL = re.compile(r"\.([\w-]+)", re.A)
ID_SEL = re.compile(r"#([\w-]+)", re.A)
PSEUDO_CLASS_SEL = re.compile(r":[\w-]+(\([^)]+\))?", re.A)
PSEUDO_ELEM_SEL = re.compile(r"::[\w-]+", re.A)
# Combinators
DESCENDANT_COM = re.compile(r"\s+")
CHILD_COM = re.compile(r"\s*>\s*")
NEXT_SIB_COM = re.compile(r"\s*\+\s*")
SUB_SIB_COM = re.compile(r"\s*~\s*")
# Misc
WHITESPACE = re.compile(r"\s*")
END_OF_SELECTOR = re.compile(r"\s*($|,)")
tag = None
classes = []
id = None
attrs = []
combinator = None
selector = None
previous_combinator = None
i = cursor
# Skip leading whitespace
m = WHITESPACE.match(s, i)
if m:
i = m.end()
while i < len(s):
# Parse one simple selector.
#
# PEP 572 (assignment expressions; the one that burned Guido
# so much that he resigned as BDFL) would have been nice; it
# would have saved us from all the regex match
# reassignments, and worse still, the casts, since mypy
# complains about getting Optional[Match[str]] instead of
# Match[str].
if TYPE_SEL.match(s, i):
if tag:
raise SelectorParserException(s, i, "multiple type selectors found")
m = cast(Match[str], TYPE_SEL.match(s, i))
tag = m.group()
elif UNIVERSAL_SEL.match(s, i):
m = cast(Match[str], UNIVERSAL_SEL.match(s, i))
elif ATTR_SEL.match(s, i):
m = cast(Match[str], ATTR_SEL.match(s, i))
attr = m.group("attr")
op = m.group("op")
val_identifier = m.group("val_identifier")
quote = m.group("quote")
val_string_inner = m.group("val_string_inner")
if val_identifier is not None:
val = val_identifier
elif val_string_inner is not None:
val = val_string_inner.replace("\\" + quote, quote)
else:
val = None
if op is None:
type = AttributeSelectorType.BARE
elif op == "=":
type = AttributeSelectorType.EQUAL
elif op == "~=":
type = AttributeSelectorType.TILDE
elif op == "|=":
type = AttributeSelectorType.PIPE
elif op == "^=":
type = AttributeSelectorType.CARET
elif op == "$=":
type = AttributeSelectorType.DOLLAR
elif op == "*=":
type = AttributeSelectorType.ASTERISK
else: # pragma: no cover
raise SelectorParserException(
s,
i,
"unrecognized operator %s in attribute selector" % repr(op),
)
attrs.append(AttributeSelector(attr, val, type))
elif CLASS_SEL.match(s, i):
m = cast(Match[str], CLASS_SEL.match(s, i))
classes.append(m.group(1))
elif ID_SEL.match(s, i):
if id:
raise SelectorParserException(s, i, "multiple id selectors found")
m = cast(Match[str], ID_SEL.match(s, i))
id = m.group(1)
elif PSEUDO_CLASS_SEL.match(s, i):
raise SelectorParserException(s, i, "pseudo-classes not supported")
elif PSEUDO_ELEM_SEL.match(s, i):
raise SelectorParserException(s, i, "pseudo-elements not supported")
else:
raise SelectorParserException(
s, i, "expecting simple selector, found none"
)
i = m.end()
# Try to parse a combinator, or end the selector.
if CHILD_COM.match(s, i):
m = cast(Match[str], CHILD_COM.match(s, i))
combinator = Combinator.CHILD
elif NEXT_SIB_COM.match(s, i):
m = cast(Match[str], NEXT_SIB_COM.match(s, i))
combinator = Combinator.NEXT_SIBLING
elif SUB_SIB_COM.match(s, i):
m = cast(Match[str], SUB_SIB_COM.match(s, i))
combinator = Combinator.SUBSEQUENT_SIBLING
elif END_OF_SELECTOR.match(s, i):
m = cast(Match[str], END_OF_SELECTOR.match(s, i))
combinator = None
# Need to parse descendant combinator at the very end
# because it could be a prefix to all previous cases.
elif DESCENDANT_COM.match(s, i):
m = cast(Match[str], DESCENDANT_COM.match(s, i))
combinator = Combinator.DESCENDANT
else:
continue
i = m.end()
if combinator and i == len(s):
raise SelectorParserException(s, i, "unexpected end at combinator")
selector = cls(
tag=tag,
classes=classes,
id=id,
attrs=attrs,
combinator=previous_combinator,
previous=selector,
)
previous_combinator = combinator
# End of selector.
if combinator is None:
break
tag = None
classes = []
id = None
attrs = []
combinator = None
if not selector:
raise SelectorParserException(s, i, "selector is empty")
return selector, i
def matches(self, node: "Node", root: Optional["Node"] = None) -> bool:
"""
Decides whether the selector matches `node`.
Each sequence of simple selectors in the selector's chain must
be matched for a positive.
If `root` is provided and child and/or descendant combinators
are involved, parent/ancestor lookup terminates at `root`.
"""
if self.tag:
if not node.tag or node.tag != self.tag:
return False
if self.id:
if node.attrs.get("id") != self.id:
return False
if self.classes:
classes = node.classes
for class_ in self.classes:
if class_ not in classes:
return False
if self.attrs:
for attr_selector in self.attrs:
if not attr_selector.matches(node):
return False
if not self.previous:
return True
if self.combinator == Combinator.DESCENDANT:
return any(
self.previous.matches(ancestor, root=root)
for ancestor in node.ancestors()
)
elif self.combinator == Combinator.CHILD:
if node is root or node.parent is None:
return False
else:
return self.previous.matches(node.parent)
elif self.combinator == Combinator.NEXT_SIBLING:
sibling = node.previous_element_sibling()
if not sibling:
return False
else:
return self.previous.matches(sibling)
elif self.combinator == Combinator.SUBSEQUENT_SIBLING:
return any(
self.previous.matches(sibling, root=root)
for sibling in node.previous_siblings()
if isinstance(sibling, ElementNode)
)
else: # pragma: no cover
raise RuntimeError("unimplemented combinator: %s" % repr(self.combinator))
class AttributeSelector:
    """
    Represents an attribute selector.
    Attributes:
        attr (:class:`str`)
        val (:class:`Optional`\\[:class:`str`])
        type (:class:`AttributeSelectorType`)
    """

    def __init__(
        self, attr: str, val: Optional[str], type: "AttributeSelectorType"
    ) -> None:
        # Attribute names are matched case-insensitively; normalize once.
        self.attr = attr.lower()
        self.val = val
        self.type = type

    def __repr__(self) -> str:
        return "<AttributeSelector %s>" % repr(str(self))

    def __str__(self) -> str:
        """Render the selector back into CSS-like source form."""
        if self.type == AttributeSelectorType.BARE:
            # `{val:.0}` formats the (string) value with zero precision,
            # i.e. as an empty string -- a bare selector shows no value.
            fmt = "[{attr}{val:.0}]"
        elif self.type == AttributeSelectorType.EQUAL:
            fmt = "[{attr}={val}]"
        elif self.type == AttributeSelectorType.TILDE:
            fmt = "[{attr}~={val}]"
        elif self.type == AttributeSelectorType.PIPE:
            fmt = "[{attr}|={val}]"
        elif self.type == AttributeSelectorType.CARET:
            fmt = "[{attr}^={val}]"
        elif self.type == AttributeSelectorType.DOLLAR:
            fmt = "[{attr}$={val}]"
        elif self.type == AttributeSelectorType.ASTERISK:
            fmt = "[{attr}*={val}]"
        else:  # pragma: no cover
            # BUG FIX: an unknown selector type previously fell through and
            # raised UnboundLocalError on `fmt`; fail loudly and consistently
            # with `matches` below instead.
            raise RuntimeError(
                "unimplemented attribute selector: %s" % repr(self.type)
            )
        return fmt.format(attr=self.attr, val=repr(self.val))

    def matches(self, node: "Node") -> bool:
        """Decides whether this attribute selector matches `node`."""
        val = node.attrs.get(self.attr)
        if val is None:
            # Attribute absent: nothing to match against.
            return False
        if self.type == AttributeSelectorType.BARE:
            return True
        elif self.type == AttributeSelectorType.EQUAL:
            return val == self.val
        elif self.type == AttributeSelectorType.TILDE:
            # Whitespace-separated word match ([attr~=val]).
            return self.val in val.split()
        elif self.type == AttributeSelectorType.PIPE:
            # Exact match or a "val-" prefix ([attr|=val], e.g. lang subtags).
            return val == self.val or val.startswith("%s-" % self.val)
        elif self.type == AttributeSelectorType.CARET:
            return bool(self.val and val.startswith(self.val))
        elif self.type == AttributeSelectorType.DOLLAR:
            return bool(self.val and val.endswith(self.val))
        elif self.type == AttributeSelectorType.ASTERISK:
            return bool(self.val and self.val in val)
        else:  # pragma: no cover
            raise RuntimeError("unimplemented attribute selector: %s" % repr(self.type))
# Enum: basis for poor man's algebraic data type.
class AttributeSelectorType(Enum):
    """
    Attribute selector types.
    Members correspond to the following forms of attribute selector:
    - :attr:`BARE`: ``[attr]``;
    - :attr:`EQUAL`: ``[attr=val]``;
    - :attr:`TILDE`: ``[attr~=val]``;
    - :attr:`PIPE`: ``[attr|=val]``;
    - :attr:`CARET`: ``[attr^=val]``;
    - :attr:`DOLLAR`: ``[attr$=val]``;
    - :attr:`ASTERISK`: ``[attr*=val]``.
    """
    # The numeric values are arbitrary but stable identifiers.
    # [attr] -- attribute is present, value ignored
    BARE = 1
    # [attr=val] -- exact value match
    EQUAL = 2
    # [attr~=val] -- whitespace-separated word match
    TILDE = 3
    # [attr|=val] -- exact match or "val-" prefix
    PIPE = 4
    # [attr^=val] -- value prefix match
    CARET = 5
    # [attr$=val] -- value suffix match
    DOLLAR = 6
    # [attr*=val] -- value substring match
    ASTERISK = 7
class Combinator(Enum):
    """
    Combinator types.
    Members correspond to the following combinators:
    - :attr:`DESCENDANT`: ``A B``;
    - :attr:`CHILD`: ``A > B``;
    - :attr:`NEXT_SIBLING`: ``A + B``;
    - :attr:`SUBSEQUENT_SIBLING`: ``A ~ B``.
    """
    # ' ' -- B anywhere below A
    DESCENDANT = 1
    # > -- B is a direct child of A
    CHILD = 2
    # + -- B immediately follows A as an element sibling
    NEXT_SIBLING = 3
    # ~ -- B follows A (not necessarily immediately) as an element sibling
    SUBSEQUENT_SIBLING = 4
def _tag_is_void(tag: str) -> bool:
"""
Checks whether the tag corresponds to a void element.
https://www.w3.org/TR/html5/syntax.html#void-elements
https://html.spec.whatwg.org/multipage/syntax.html#void-elements
"""
return tag.lower() in (
"area",
"base",
"br",
"col",
"embed",
"hr",
"img",
"input",
"link",
"meta",
"param",
"source",
"track",
"wbr",
)
def _tag_encloses_foreign_namespace(tag: str) -> bool:
"""
Checks whether the tag encloses a foreign namespace (MathML or SVG).
https://html.spec.whatwg.org/multipage/syntax.html#foreign-elements
"""
return tag.lower() in ("math", "svg")
| 11,368 | 0 | 1,152 |
abcfacef7496538b1858c2631383079cdf3a039a | 3,654 | py | Python | projects/migrations/0011_auto_20180925_1227.py | CobwebOrg/cobweb-django | 14241326860620dbaa64f7eefc6d4b393f80d23c | [
"MIT"
] | 7 | 2017-09-14T18:52:58.000Z | 2020-05-18T21:01:20.000Z | projects/migrations/0011_auto_20180925_1227.py | CobwebOrg/cobweb-django | 14241326860620dbaa64f7eefc6d4b393f80d23c | [
"MIT"
] | 151 | 2017-09-14T18:46:02.000Z | 2022-02-10T09:18:44.000Z | projects/migrations/0011_auto_20180925_1227.py | CobwebOrg/cobweb-django | 14241326860620dbaa64f7eefc6d4b393f80d23c | [
"MIT"
] | 1 | 2017-10-29T19:37:29.000Z | 2017-10-29T19:37:29.000Z | # Generated by Django 2.1.1 on 2018-09-25 19:27
import itertools
from django.db import migrations, models
forward_map = {
'Daily': "daily",
'Weekly': "weekly",
'Monthly': "monthly",
}
reverse_map = {v: k for k, v in forward_map.items()}
| 37.285714 | 261 | 0.593596 | # Generated by Django 2.1.1 on 2018-09-25 19:27
import itertools
from django.db import migrations, models
# Mapping from the legacy capitalized crawl-frequency labels to the
# lower-case choice values introduced by this migration.
forward_map = {
    'Daily': "daily",
    'Weekly': "weekly",
    'Monthly': "monthly",
}
# Inverse mapping, used when the migration is rolled back.
reverse_map = {v: k for k, v in forward_map.items()}
def migrate_data_forward(apps, schema_editor):
    """Rewrite legacy capitalized crawl_frequency values ('Daily', ...) to
    the new lower-case choice values on both Claim and Nomination rows.
    """
    # BUG FIX: the original chained the Nomination queryset twice, so Claim
    # rows were never migrated even though this migration also alters
    # Claim.crawl_frequency.
    instances = itertools.chain(
        apps.get_model('projects', 'Claim').objects.all(),
        apps.get_model('projects', 'Nomination').objects.all(),
    )
    for instance in instances:
        if instance.crawl_frequency:
            # NOTE: values absent from forward_map become None.
            instance.crawl_frequency = forward_map.get(instance.crawl_frequency)
            instance.save()
def migrate_data_backward(apps, schema_editor):
    """Reverse of migrate_data_forward: restore the legacy capitalized
    crawl_frequency values on both Claim and Nomination rows.
    """
    # BUG FIX: the original chained the Nomination queryset twice, so Claim
    # rows were never reverted; iterate both affected models.
    instances = itertools.chain(
        apps.get_model('projects', 'Claim').objects.all(),
        apps.get_model('projects', 'Nomination').objects.all(),
    )
    for instance in instances:
        if instance.crawl_frequency:
            # NOTE: values absent from reverse_map become None.
            instance.crawl_frequency = reverse_map.get(instance.crawl_frequency)
            instance.save()
class Migration(migrations.Migration):
    """Adds robots.txt / rights fields to Claim and Nomination and widens
    the crawl_frequency / follow_links / page_scope choice sets, then runs
    the crawl_frequency data rewrite (migrate_data_forward/backward)."""
    dependencies = [
        ('projects', '0010_nomination_author'),
    ]
    operations = [
        # New opt-out flag and free-text rights notes on both models.
        migrations.AddField(
            model_name='claim',
            name='ignore_robots_txt',
            field=models.BooleanField(default=False, verbose_name="ignore 'robots.txt'"),
        ),
        migrations.AddField(
            model_name='claim',
            name='rights_considerations',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='nomination',
            name='ignore_robots_txt',
            field=models.BooleanField(default=False, verbose_name="ignore 'robots.txt'"),
        ),
        migrations.AddField(
            model_name='nomination',
            name='rights_considerations',
            field=models.TextField(blank=True, null=True),
        ),
        # Lower-case crawl-frequency choices replace the old capitalized ones.
        migrations.AlterField(
            model_name='claim',
            name='crawl_frequency',
            field=models.CharField(blank=True, choices=[('one time', 'one time'), ('twice daily', 'twice daily'), ('daily', 'daily'), ('weekly', 'weekly'), ('monthly', 'monthly'), ('quarterly', 'quarterly'), ('annually', 'annually')], max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='claim',
            name='follow_links',
            field=models.IntegerField(blank=True, choices=[(1, 1), (2, 2)], null=True),
        ),
        migrations.AlterField(
            model_name='claim',
            name='page_scope',
            field=models.CharField(blank=True, choices=[('Page', 'Page'), ('Domain', 'Domain')], max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='nomination',
            name='crawl_frequency',
            field=models.CharField(blank=True, choices=[('one time', 'one time'), ('twice daily', 'twice daily'), ('daily', 'daily'), ('weekly', 'weekly'), ('monthly', 'monthly'), ('quarterly', 'quarterly'), ('annually', 'annually')], max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='nomination',
            name='follow_links',
            field=models.IntegerField(blank=True, choices=[(1, 1), (2, 2)], null=True),
        ),
        migrations.AlterField(
            model_name='nomination',
            name='page_scope',
            field=models.CharField(blank=True, choices=[('Page', 'Page'), ('Domain', 'Domain')], max_length=50, null=True),
        ),
        # Data rewrite must run after the choice sets are in place.
        migrations.RunPython(
            migrate_data_forward,
            migrate_data_backward,
        ),
    ]
| 739 | 2,592 | 69 |
4c2a2d97017ac28bdc292954f6578d55154df15b | 570 | py | Python | task_assignment/migrations/0003_auto_20201117_1929.py | resourceidea/resourceideaapi | 4cc7db98f981d8f2011c1995e23e8a8655e31f75 | [
"MIT"
] | 1 | 2020-05-30T22:27:59.000Z | 2020-05-30T22:27:59.000Z | task_assignment/migrations/0003_auto_20201117_1929.py | resourceidea/resourceideaapi | 4cc7db98f981d8f2011c1995e23e8a8655e31f75 | [
"MIT"
] | 15 | 2020-02-11T21:53:08.000Z | 2021-11-02T21:20:03.000Z | task_assignment/migrations/0003_auto_20201117_1929.py | resourceidea/resourceideaapi | 4cc7db98f981d8f2011c1995e23e8a8655e31f75 | [
"MIT"
] | 1 | 2020-08-27T10:57:47.000Z | 2020-08-27T10:57:47.000Z | # Generated by Django 3.1.3 on 2020-11-17 19:29
from django.db import migrations, models
| 30 | 194 | 0.596491 | # Generated by Django 3.1.3 on 2020-11-17 19:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Constrains TaskAssignment.status to a fixed set of workflow choices."""
    dependencies = [
        ('task_assignment', '0002_taskassignment_engagement'),
    ]
    operations = [
        migrations.AlterField(
            model_name='taskassignment',
            name='status',
            field=models.CharField(choices=[('NOT STARTED', 'NOT STARTED'), ('RUNNING', 'RUNNING'), ('IN REVIEW', 'IN REVIEW'), ('REVIEWED', 'REVIEWED'), ('CLOSED', 'CLOSED')], max_length=100),
        ),
    ]
| 0 | 450 | 25 |
1aefd89580fd01e530de03fa9ef48356f341c8fd | 2,111 | py | Python | app/token_test_very.py | jarryliu/queue-sim | b21c39ab1a73b57ca4b1e2045ace0878afde3ee5 | [
"MIT"
] | null | null | null | app/token_test_very.py | jarryliu/queue-sim | b21c39ab1a73b57ca4b1e2045ace0878afde3ee5 | [
"MIT"
] | null | null | null | app/token_test_very.py | jarryliu/queue-sim | b21c39ab1a73b57ca4b1e2045ace0878afde3ee5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from scheduler import Scheduler
from queue import Queue
from fixjob import FixJob
from tokenbucket import TokenBucket
from server import Server
import logging, sys
import numpy as np
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
### one token bucket
numJob = 1
testDuration = 500.0
bRate = 1000.0
bucketSize = 20.0
distrNameA = "bst1"
pRate = 900.0
fileName = "test.out"
if len(sys.argv) >= 2:
testDuration = float(sys.argv[1])
if len(sys.argv) >= 3:
pRate = float(sys.argv[2]) # arrive rate
if len(sys.argv) >= 4:
bRate = float(sys.argv[3]) # bucket limiting rate
if len(sys.argv) >= 5:
bucketSize = int(sys.argv[4]) # bucket size
if len(sys.argv) >= 6:
distrNameA = sys.argv[5]
if len(sys.argv) >= 7:
fileName = sys.argv[6]
#serviceDistr = ["wei", [1.0/mu]]
# 1000 per second
gRate = pRate / numJob
testInterval = []
#for j in xrange(60000):
testInterval += [0.05 for i in xrange(20*600)]
gen = []
queue = []
bucket = []
server = []
for i in xrange(numJob) :
gen.append(FixJob(i))
queue.append(Queue(i))
gen[-1].setOutput(queue[-1])
gen[-1].setIntDistr("bst", [50])
gen[-1].setIntList(testInterval)
#gen[-1].setSizeDistr("binorm", [1000.0])
gen[-1].setSizeDistr("cst", [1])
bucket.append(TokenBucket(i))
queue[-1].setOutput(bucket[-1])
#bucket[-1].setParameters(pRate/numJob, bucketSize/numJob)
bucket[-1].setParameters(bRate/numJob, bucketSize/numJob)
time = 0
while time < testDuration:
nextTimeList = []
itemList = []
for b in bucket:
nextTime, item = b.getNextTime()
nextTimeList.append(nextTime)
itemList.append(item)
index = [i for i in xrange(len(nextTimeList)) if nextTimeList[i] == min(nextTimeList)]
time = nextTimeList[index[0]]
logging.debug("Simulation time %f", time)
for i in index :
itemList[i].whoAmI()
itemList[i].runTime(time)
f = open(fileName, "a")
for q in queue:
#q.showStatistic(testDuration/2)
deq, enq = q.showQueueingTime(int(testDuration)*int(pRate)/2)
np.savetxt(f, (enq, deq))
f.close()
| 25.433735 | 90 | 0.657035 | #!/usr/bin/python
# Python 2 script (uses `xrange`): drives a token-bucket queueing
# simulation and appends enqueue/dequeue timing samples to an output file.
from scheduler import Scheduler
from queue import Queue
from fixjob import FixJob
from tokenbucket import TokenBucket
from server import Server
import logging, sys
import numpy as np
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
### one token bucket
# Default simulation parameters (overridable via argv below).
numJob = 1
testDuration = 500.0
bRate = 1000.0
bucketSize = 20.0
distrNameA = "bst1"
pRate = 900.0
fileName = "test.out"
# Positional CLI overrides: duration, arrival rate, bucket limiting rate,
# bucket size, arrival distribution name, output file name.
if len(sys.argv) >= 2:
    testDuration = float(sys.argv[1])
if len(sys.argv) >= 3:
    pRate = float(sys.argv[2]) # arrive rate
if len(sys.argv) >= 4:
    bRate = float(sys.argv[3]) # bucket limiting rate
if len(sys.argv) >= 5:
    bucketSize = int(sys.argv[4]) # bucket size
if len(sys.argv) >= 6:
    distrNameA = sys.argv[5]
if len(sys.argv) >= 7:
    fileName = sys.argv[6]
#serviceDistr = ["wei", [1.0/mu]]
# 1000 per second
gRate = pRate / numJob
# Fixed 50 ms inter-arrival schedule (20 * 600 arrivals).
testInterval = []
#for j in xrange(60000):
testInterval += [0.05 for i in xrange(20*600)]
# Build one generator -> queue -> token-bucket pipeline per job.
gen = []
queue = []
bucket = []
server = []
for i in xrange(numJob) :
    gen.append(FixJob(i))
    queue.append(Queue(i))
    gen[-1].setOutput(queue[-1])
    gen[-1].setIntDistr("bst", [50])
    gen[-1].setIntList(testInterval)
    #gen[-1].setSizeDistr("binorm", [1000.0])
    gen[-1].setSizeDistr("cst", [1])
    bucket.append(TokenBucket(i))
    queue[-1].setOutput(bucket[-1])
    #bucket[-1].setParameters(pRate/numJob, bucketSize/numJob)
    bucket[-1].setParameters(bRate/numJob, bucketSize/numJob)
# Event loop: advance simulated time to the earliest pending bucket event.
time = 0
while time < testDuration:
    nextTimeList = []
    itemList = []
    for b in bucket:
        nextTime, item = b.getNextTime()
        nextTimeList.append(nextTime)
        itemList.append(item)
    # All components tied for the earliest next-event time fire together.
    index = [i for i in xrange(len(nextTimeList)) if nextTimeList[i] == min(nextTimeList)]
    time = nextTimeList[index[0]]
    logging.debug("Simulation time %f", time)
    for i in index :
        itemList[i].whoAmI()
        itemList[i].runTime(time)
# Dump the second half of the queueing-time samples for each queue
# (warm-up half discarded).
f = open(fileName, "a")
for q in queue:
    #q.showStatistic(testDuration/2)
    deq, enq = q.showQueueingTime(int(testDuration)*int(pRate)/2)
    np.savetxt(f, (enq, deq))
f.close()
| 0 | 0 | 0 |
a994e9a75a5f37fe1146d458441a725bac15aca9 | 4,991 | py | Python | scripts/hummingbird_gt_eval_baseline.py | GOMTAE/Hummingbird_PPO | e25646587b050d507febd20d9b11ef86c321d7e0 | [
"MIT"
] | null | null | null | scripts/hummingbird_gt_eval_baseline.py | GOMTAE/Hummingbird_PPO | e25646587b050d507febd20d9b11ef86c321d7e0 | [
"MIT"
] | null | null | null | scripts/hummingbird_gt_eval_baseline.py | GOMTAE/Hummingbird_PPO | e25646587b050d507febd20d9b11ef86c321d7e0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# ROS packages required
import rospy
import rospkg
# Dependencies required
import gym
import os
import numpy as np
import pandas as pd
import time
# from stable_baselines.common.policies import MlpPolicy, MlpLstmPolicy, MlpLnLstmPolicy
# from stable_baselines.common.vec_env import DummyVecEnv
# from stable_baselines import A2C, ACKTR, DDPG, PPO1, PPO2, SAC, TRPO, TD3, HER
# from stable_baselines.deepq.policies import MlpPolicy as mlp_dqn
# from stable_baselines.sac.policies import MlpPolicy as mlp_sac
# from stable_baselines.ddpg.policies import MlpPolicy as mlp_ddpg
# from stable_baselines.td3.policies import MlpPolicy as mlp_td3
# from stable_baselines.ddpg.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines3.common.utils import get_linear_fn
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines3 import DDPG, PPO, A2C, TD3, SAC
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import VecCheckNan
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.callbacks import CheckpointCallback
# For scheduler
from typing import Callable
# import our task environment
import hummingbird_hover_task_gt_env_ppo_baseline
# from openai_ros.task_envs.cartpole_stay_up import stay_up
# ROS ENV gets started automatically before the training
# from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment -- This has to be solved at the end
# change the directory
os.chdir('/home/ubuntu/catkin_ws/src/hummingbird_pkg/') #change directory
rospy.init_node('hummingbird_gt_eval_baseline', anonymous=True, log_level=rospy.FATAL)
# Create the Gym environment
environment_name = rospy.get_param('/hummingbird/task_and_robot_environment_name')
env = gym.make(environment_name)
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_0/" # Noisy / baseline gt
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_21/" # Noisy / baseline 1factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_29/" # Noisy / baseline 5factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_26/" # Noisy / baseline 10factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_28/" # Noisy / baseline 20factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_27/" # Noisy / baseline 100factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_0/" # GT / baseline / -0.5
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_1/" # GT / baseline / -1
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_3/" # GT / baseline / -2
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_20/" # Noisy / 3rotors
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_2/" # GT / 3rotors
###### versatile ######
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_5/" # 100 / 50
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_8/" # 350 / 100
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_10/" # 150 / 100
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_12/" # 250 / 100
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_13/" # 350 / 100
### noisy 3r ###
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_10/" # gt
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_20/" # f1
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_23/" # f5
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_24/" # f10
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_27/" # f10
log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_Enjoy/" # enjoy
# When calling model, make sure which env your drone is trained from
# model = PPO.load(log_dir + "PPO_hummingbird_hover")
model = PPO.load(log_dir + "PPO_hummingbird_hover_3rotor")
env = DummyVecEnv([lambda: Monitor(env)])
# env = VecNormalize.load(log_dir + "PPO_hummingbird_hover_vec_normalize.pkl", env)
env = VecNormalize.load(log_dir + "PPO_hummingbird_hover_vec_normalize_3rotor.pkl", env)
env.training = False
obs = env.reset()
for i in range(10000):
if i % 1000 == 0:
print(i)
action, _states = model.predict(obs)
obs, reward, done, info = env.step(action) | 54.25 | 125 | 0.791425 | #!/usr/bin/env python
# ROS packages required
import rospy
import rospkg
# Dependencies required
import gym
import os
import numpy as np
import pandas as pd
import time
# from stable_baselines.common.policies import MlpPolicy, MlpLstmPolicy, MlpLnLstmPolicy
# from stable_baselines.common.vec_env import DummyVecEnv
# from stable_baselines import A2C, ACKTR, DDPG, PPO1, PPO2, SAC, TRPO, TD3, HER
# from stable_baselines.deepq.policies import MlpPolicy as mlp_dqn
# from stable_baselines.sac.policies import MlpPolicy as mlp_sac
# from stable_baselines.ddpg.policies import MlpPolicy as mlp_ddpg
# from stable_baselines.td3.policies import MlpPolicy as mlp_td3
# from stable_baselines.ddpg.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines3.common.utils import get_linear_fn
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines3 import DDPG, PPO, A2C, TD3, SAC
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import VecCheckNan
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.callbacks import CheckpointCallback
# For scheduler
from typing import Callable
# import our task environment
import hummingbird_hover_task_gt_env_ppo_baseline
# from openai_ros.task_envs.cartpole_stay_up import stay_up
# ROS ENV gets started automatically before the training
# from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment -- This has to be solved at the end
# change the directory
os.chdir('/home/ubuntu/catkin_ws/src/hummingbird_pkg/') #change directory
rospy.init_node('hummingbird_gt_eval_baseline', anonymous=True, log_level=rospy.FATAL)
# Create the Gym environment
environment_name = rospy.get_param('/hummingbird/task_and_robot_environment_name')
env = gym.make(environment_name)
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_0/" # Noisy / baseline gt
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_21/" # Noisy / baseline 1factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_29/" # Noisy / baseline 5factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_26/" # Noisy / baseline 10factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_28/" # Noisy / baseline 20factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_27/" # Noisy / baseline 100factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_0/" # GT / baseline / -0.5
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_1/" # GT / baseline / -1
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_3/" # GT / baseline / -2
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_20/" # Noisy / 3rotors
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_2/" # GT / 3rotors
###### versatile ######
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_5/" # 100 / 50
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_8/" # 350 / 100
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_10/" # 150 / 100
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_12/" # 250 / 100
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_13/" # 350 / 100
### noisy 3r ###
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_10/" # gt
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_20/" # f1
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_23/" # f5
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_24/" # f10
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_27/" # f10
log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_Enjoy/" # enjoy
# When calling model, make sure which env your drone is trained from
# model = PPO.load(log_dir + "PPO_hummingbird_hover")
model = PPO.load(log_dir + "PPO_hummingbird_hover_3rotor")
env = DummyVecEnv([lambda: Monitor(env)])
# env = VecNormalize.load(log_dir + "PPO_hummingbird_hover_vec_normalize.pkl", env)
env = VecNormalize.load(log_dir + "PPO_hummingbird_hover_vec_normalize_3rotor.pkl", env)
env.training = False
obs = env.reset()
for i in range(10000):
if i % 1000 == 0:
print(i)
action, _states = model.predict(obs)
obs, reward, done, info = env.step(action) | 0 | 0 | 0 |
d93847f6ef3af83244a97a9210f3dd648304dbad | 188 | py | Python | allauth/socialaccount/providers/openstreetmap_provider/urls.py | Fuzzwah/django-allauth | 071cbef1388bb61a563d3e41197bd5b7c26664d2 | [
"MIT"
] | null | null | null | allauth/socialaccount/providers/openstreetmap_provider/urls.py | Fuzzwah/django-allauth | 071cbef1388bb61a563d3e41197bd5b7c26664d2 | [
"MIT"
] | null | null | null | allauth/socialaccount/providers/openstreetmap_provider/urls.py | Fuzzwah/django-allauth | 071cbef1388bb61a563d3e41197bd5b7c26664d2 | [
"MIT"
] | null | null | null | from allauth.socialaccount.providers.oauth_provider.urls import default_urlpatterns
from .provider import OpenStreetMapProvider
urlpatterns = default_urlpatterns(OpenStreetMapProvider)
| 26.857143 | 83 | 0.882979 | from allauth.socialaccount.providers.oauth_provider.urls import default_urlpatterns
from .provider import OpenStreetMapProvider
urlpatterns = default_urlpatterns(OpenStreetMapProvider)
| 0 | 0 | 0 |
18eb6d976ec53b622aad01ece05a68e4c90f0b6b | 1,309 | py | Python | tests/test_pdf_argus.py | ryuwd/zfit-physics | 595eb68add00ab367aed5885c48eb78d15dbda0e | [
"BSD-3-Clause"
] | null | null | null | tests/test_pdf_argus.py | ryuwd/zfit-physics | 595eb68add00ab367aed5885c48eb78d15dbda0e | [
"BSD-3-Clause"
] | null | null | null | tests/test_pdf_argus.py | ryuwd/zfit-physics | 595eb68add00ab367aed5885c48eb78d15dbda0e | [
"BSD-3-Clause"
] | null | null | null | """Example test for a pdf or function"""
import numpy as np
import pytest
import tensorflow as tf
import zfit
import zfit_physics as zphys
# Important, do the imports below
from zfit.core.testing import tester
# specify globals here. Do NOT add any TensorFlow but just pure python
param1_true = 0.3
param2_true = 1.2
# register the pdf here and provide sets of working parameter configurations
tester.register_pdf(pdf_class=zphys.pdf.Argus, params_factories=argus_params_factory)
| 31.166667 | 91 | 0.70741 | """Example test for a pdf or function"""
import numpy as np
import pytest
import tensorflow as tf
import zfit
import zfit_physics as zphys
# Important, do the imports below
from zfit.core.testing import tester
# specify globals here. Do NOT add any TensorFlow but just pure python
param1_true = 0.3
param2_true = 1.2
def test_standard():
# test special properties here
obs = zfit.Space('obs1', (-2, 6))
argus = zphys.pdf.Argus(m0=5., c=-3., p=0.5, obs=obs)
assert not any(np.isnan(argus.pdf(tf.linspace(0.1, 15., 100))))
lower = 0.
upper = 5.
argus_pdf = argus.pdf(tf.linspace(lower, upper, 1000001))
assert pytest.approx(zfit.run(tf.reduce_mean(argus_pdf) * (upper - lower)), 4e-2) == 1.
analytic_integral = zfit.run(argus.analytic_integrate(obs, norm_range=False))
numeric_integral = zfit.run(argus.numeric_integrate(obs, norm_range=False))
assert pytest.approx(analytic_integral, 4e-2) == numeric_integral
# register the pdf here and provide sets of working parameter configurations
def argus_params_factory():
m0 = zfit.Parameter('m0', 4.5)
c = zfit.Parameter('c', -2.3)
p = zfit.param.ConstantParameter('p', 0.5)
return {'m0':m0, 'c': c, 'p': p}
tester.register_pdf(pdf_class=zphys.pdf.Argus, params_factories=argus_params_factory)
| 775 | 0 | 46 |
ab32d8c50972507fc693927cba054c42c855b55a | 865 | py | Python | 2019/day15/day15p2.py | darkterbear/advent-of-code-2015 | 543d5a70c4b4c84081602cfa3d0ba05fe0693e54 | [
"MIT"
] | null | null | null | 2019/day15/day15p2.py | darkterbear/advent-of-code-2015 | 543d5a70c4b4c84081602cfa3d0ba05fe0693e54 | [
"MIT"
] | 2 | 2019-12-01T20:03:18.000Z | 2021-05-11T22:41:00.000Z | 2019/day15/day15p2.py | darkterbear/advent-of-code-2015 | 543d5a70c4b4c84081602cfa3d0ba05fe0693e54 | [
"MIT"
] | null | null | null | file = open('./map')
m = [[c for c in line[:-1]] for line in file]
start = findStart()
dist = 0
paths = {
start: []
}
queue = [start, None]
while queue:
loc = queue.pop(0)
if not loc:
if (len(queue) == 0 or not queue[0]):
break
dist += 1
queue.append(None)
continue
# print(loc, len(paths[loc]))
u = (loc[0] - 1, loc[1])
d = (loc[0] + 1, loc[1])
l = (loc[0], loc[1] - 1)
r = (loc[0], loc[1] + 1)
for target in [u, d, l, r]:
if m[target[0]][target[1]] != '#' and target not in paths:
paths[target] = paths[loc] + [loc]
queue.append(target)
# print(queue)
for loc in paths:
print(len(paths[loc]))
| 20.116279 | 66 | 0.473988 | file = open('./map')
m = [[c for c in line[:-1]] for line in file]
def findStart():
    """Scan the global grid `m` for the 'O' cell; return its (y, x)."""
    for y, row in enumerate(m):
        for x, cell in enumerate(row):
            if cell == 'O':
                return (y, x)
# BFS flood-fill from the oxygen source over every reachable open tile,
# recording the path back to the source for each tile.
from collections import deque

start = findStart()
dist = 0  # depth of the current BFS level (None sentinels mark level ends)
paths = {
    start: []
}
# PERF FIX: deque gives O(1) pops from the left; list.pop(0) is O(n) each.
queue = deque([start, None])
while queue:
    loc = queue.popleft()
    if not loc:
        # Level separator reached; stop when no real nodes remain.
        if (len(queue) == 0 or not queue[0]):
            break
        dist += 1
        queue.append(None)
        continue
    # print(loc, len(paths[loc]))
    u = (loc[0] - 1, loc[1])
    d = (loc[0] + 1, loc[1])
    l = (loc[0], loc[1] - 1)
    r = (loc[0], loc[1] + 1)
    for target in [u, d, l, r]:
        if m[target[0]][target[1]] != '#' and target not in paths:
            paths[target] = paths[loc] + [loc]
            queue.append(target)
# print(queue)
# Print the path length from the source to every reachable tile
# (same traversal order, hence identical output, as the list version).
for loc in paths:
    print(len(paths[loc]))
| 119 | 0 | 23 |
1f08fa9ec314c1ab38545e061e2c627705f2f624 | 141 | py | Python | config.py | Jackqu/keypoints | f0cbc9616c595e1f2b5a36d726cdb4790cfb7df9 | [
"MIT"
] | 111 | 2018-01-18T08:35:53.000Z | 2022-02-28T19:05:25.000Z | config.py | Jackqu/keypoints | f0cbc9616c595e1f2b5a36d726cdb4790cfb7df9 | [
"MIT"
] | 7 | 2018-04-28T10:06:25.000Z | 2021-09-16T13:31:30.000Z | config.py | Jackqu/keypoints | f0cbc9616c595e1f2b5a36d726cdb4790cfb7df9 | [
"MIT"
] | 28 | 2018-03-28T05:29:26.000Z | 2022-03-29T20:36:47.000Z |
NUM_CLASSES = 14
IMG_HEIGHT = 353
IMG_WIDTH = 257
IMG_SMALL_HEIGHT = 120
IMG_SMALL_WIDTH = 96
RADIUS = 25
epochs = 10
batch_size = 64 | 11.75 | 22 | 0.723404 |
# Keypoint-model configuration constants.
NUM_CLASSES = 14  # number of output classes -- presumably keypoint types; confirm
IMG_HEIGHT = 353  # full-resolution image height
IMG_WIDTH = 257  # full-resolution image width
IMG_SMALL_HEIGHT = 120  # downscaled image height
IMG_SMALL_WIDTH = 96  # downscaled image width
RADIUS = 25  # NOTE(review): usage not visible here -- confirm semantics
epochs = 10  # training epochs
batch_size = 64  # training batch size
bca9d203e6c0118420fb7bee7c4662f7c19c4a71 | 1,420 | py | Python | API/app.py | hoops92/MyDish-DS | e6a7c7214a3614febc78e0ba973439d976cad1c0 | [
"MIT"
] | null | null | null | API/app.py | hoops92/MyDish-DS | e6a7c7214a3614febc78e0ba973439d976cad1c0 | [
"MIT"
] | null | null | null | API/app.py | hoops92/MyDish-DS | e6a7c7214a3614febc78e0ba973439d976cad1c0 | [
"MIT"
] | null | null | null | import os
from flask import Flask, json, jsonify, request
from flask_cors import CORS
from flask_caching import Cache
from decouple import config
import logging
# setting up for local testing, wnat to be able to log the database
"""
We want to be able to test locally and log information for debugging
"""
# Local sqlite3 database
local_db_name = 'test.sqlite3'
def create_app(test_config=None):
"""
Creates app
"""
app = Flask()
app.config.from_mapping(
# Make sure to change debug to False in production env
DEBUG=config('DEBUG', default=False),
SECRET_KEY=config('SECRET_KEY', default='dev'), # CHANGE THIS!!!!
# For in-memory db: default='sqlite:///:memory:'),
DATABASE_URI=config('DATABASE_URI', 'sqlite:///' + \
os.path.join(os.getcwd(), local_db_name)),
LOGFILE=config('LOGFILE', os.path.join(
app.instance_path, 'logs/debug.log')),
CACHE_TYPE=config('CACHE_TYPE', 'simple'), # Configure caching
# Long cache times probably ok for ML api
CACHE_DEFAULT_TIMEOUT=config('CACHE_DEFAULT_TIMEOUT', 300),
TESTING=config('TESTING', default='TRUE')
)
# Enable CORS header support
CORS(app)
# Enable caching
cache = Cache(app)
# Blueprints: Connecting all the routes(Endpoints) we create.
app.register_blueprints(vision_routes)
return app
| 28.979592 | 74 | 0.661268 | import os
from flask import Flask, json, jsonify, request
from flask_cors import CORS
from flask_caching import Cache
from decouple import config
import logging
# setting up for local testing, wnat to be able to log the database
"""
We want to be able to test locally and log information for debugging
"""
# Local sqlite3 database
local_db_name = 'test.sqlite3'
def create_app(test_config=None):
    """Application factory: build and configure the Flask app.

    Args:
        test_config: optional mapping of overriding settings
            (accepted for the factory convention; currently unused).

    Returns:
        The configured Flask application instance.
    """
    # BUG FIX: Flask requires the application's import name as its first
    # argument; the bare Flask() call raised TypeError at startup.
    app = Flask(__name__)
    app.config.from_mapping(
        # Make sure to change debug to False in production env
        DEBUG=config('DEBUG', default=False),
        SECRET_KEY=config('SECRET_KEY', default='dev'),  # CHANGE THIS!!!!
        # For in-memory db: default='sqlite:///:memory:'),
        DATABASE_URI=config('DATABASE_URI', 'sqlite:///' +
                            os.path.join(os.getcwd(), local_db_name)),
        LOGFILE=config('LOGFILE', os.path.join(
            app.instance_path, 'logs/debug.log')),
        CACHE_TYPE=config('CACHE_TYPE', 'simple'),  # Configure caching
        # Long cache times probably ok for ML api
        CACHE_DEFAULT_TIMEOUT=config('CACHE_DEFAULT_TIMEOUT', 300),
        TESTING=config('TESTING', default='TRUE')
    )
    # Enable CORS header support
    CORS(app)
    # Enable caching (binding initializes the extension; handle kept local)
    cache = Cache(app)
    # Blueprints: connecting all the routes (endpoints) we create.
    # BUG FIX: the Flask method is register_blueprint (singular).
    # NOTE(review): `vision_routes` is not defined or imported in this
    # module -- it must be imported before this call can succeed.
    app.register_blueprint(vision_routes)
    return app
| 0 | 0 | 0 |
57b7cada042045057489fa6c29279ae9ba54f57f | 16,560 | py | Python | wokkel/test/test_xmppim.py | dustin/wokkel | 2dae171b77adb790fda54e5f66e4bfc40c3b28ac | [
"MIT"
] | 1 | 2015-11-04T10:33:54.000Z | 2015-11-04T10:33:54.000Z | wokkel/test/test_xmppim.py | dustin/wokkel | 2dae171b77adb790fda54e5f66e4bfc40c3b28ac | [
"MIT"
] | null | null | null | wokkel/test/test_xmppim.py | dustin/wokkel | 2dae171b77adb790fda54e5f66e4bfc40c3b28ac | [
"MIT"
] | null | null | null | # Copyright (c) 2003-2009 Ralph Meijer
# See LICENSE for details
"""
Tests for L{wokkel.xmppim}.
"""
from twisted.internet import defer
from twisted.trial import unittest
from twisted.words.protocols.jabber.jid import JID
from twisted.words.protocols.jabber.xmlstream import toResponse
from twisted.words.xish import domish, utility
from wokkel import xmppim
from wokkel.generic import ErrorStanza, parseXml
from wokkel.test.helpers import XmlStreamStub
NS_XML = 'http://www.w3.org/XML/1998/namespace'
NS_ROSTER = 'jabber:iq:roster'
class PresenceProtocolTest(unittest.TestCase):
    """
    Tests for L{xmppim.PresenceProtocol}

    NOTE(review): this copy of the test case is incomplete — it has no
    setUp() (so ``self.protocol`` / ``self.output`` are never created) and
    the ``*Received`` tests assign local callback functions
    (``errorReceived`` .. ``probeReceived``) that are never defined here.
    Every test raises NameError/AttributeError as written; the complete
    version of this module defines setUp() plus one inner callback per
    test. Restore them before running.
    """
    def test_errorReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type="error"/>"""
        d = defer.Deferred()
        # NOTE(review): `errorReceived` is undefined in this copy (the inner
        # callback definition was stripped); same pattern in the tests below.
        self.protocol.errorReceived = errorReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_availableReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence/>"""
        d = defer.Deferred()
        self.protocol.availableReceived = availableReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_unavailableReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='unavailable'/>"""
        d = defer.Deferred()
        self.protocol.unavailableReceived = unavailableReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_subscribeReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='subscribe'/>"""
        d = defer.Deferred()
        self.protocol.subscribeReceived = subscribeReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_unsubscribeReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='unsubscribe'/>"""
        d = defer.Deferred()
        self.protocol.unsubscribeReceived = unsubscribeReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_subscribedReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='subscribed'/>"""
        d = defer.Deferred()
        self.protocol.subscribedReceived = subscribedReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_unsubscribedReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='unsubscribed'/>"""
        d = defer.Deferred()
        self.protocol.unsubscribedReceived = unsubscribedReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_probeReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='probe'/>"""
        d = defer.Deferred()
        self.protocol.probeReceived = probeReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_available(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.available(JID('user@example.com'),
                                show=u'chat',
                                status=u'Talk to me!',
                                priority=50)
        # The most recently "sent" stanza is recorded by the test fixture.
        element = self.output[-1]
        self.assertEquals("user@example.com", element.getAttribute('to'))
        self.assertIdentical(None, element.getAttribute('type'))
        self.assertEquals(u'chat', unicode(element.show))
        self.assertEquals(u'Talk to me!', unicode(element.status))
        self.assertEquals(u'50', unicode(element.priority))
    def test_availableLanguages(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.available(JID('user@example.com'),
                                show=u'chat',
                                statuses={None: u'Talk to me!',
                                          'nl': u'Praat met me!'},
                                priority=50)
        element = self.output[-1]
        self.assertEquals("user@example.com", element.getAttribute('to'))
        self.assertIdentical(None, element.getAttribute('type'))
        self.assertEquals(u'chat', unicode(element.show))
        # Collect the per-language <status/> children keyed by xml:lang.
        statuses = {}
        for status in element.elements():
            if status.name == 'status':
                lang = status.getAttribute((NS_XML, 'lang'))
                statuses[lang] = unicode(status)
        self.assertIn(None, statuses)
        self.assertEquals(u'Talk to me!', statuses[None])
        self.assertIn('nl', statuses)
        self.assertEquals(u'Praat met me!', statuses['nl'])
        self.assertEquals(u'50', unicode(element.priority))
    def test_availableSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.available(JID('user@example.com'),
                                sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_unavailableDirected(self):
        """
        Test sending of directed unavailable presence broadcast.
        """
        self.protocol.unavailable(JID('user@example.com'))
        element = self.output[-1]
        self.assertEquals("presence", element.name)
        self.assertEquals(None, element.uri)
        self.assertEquals("user@example.com", element.getAttribute('to'))
        self.assertEquals("unavailable", element.getAttribute('type'))
    def test_unavailableWithStatus(self):
        """
        Test sending of directed unavailable presence broadcast with status.
        """
        self.protocol.unavailable(JID('user@example.com'),
                                  {None: 'Disconnected'})
        element = self.output[-1]
        self.assertEquals("presence", element.name)
        self.assertEquals(None, element.uri)
        self.assertEquals("user@example.com", element.getAttribute('to'))
        self.assertEquals("unavailable", element.getAttribute('type'))
        self.assertEquals("Disconnected", unicode(element.status))
    def test_unavailableBroadcast(self):
        """
        Test sending of unavailable presence broadcast.
        """
        self.protocol.unavailable(None)
        element = self.output[-1]
        self.assertEquals("presence", element.name)
        self.assertEquals(None, element.uri)
        self.assertEquals(None, element.getAttribute('to'))
        self.assertEquals("unavailable", element.getAttribute('type'))
    def test_unavailableBroadcastNoRecipientParameter(self):
        """
        Test sending of unavailable presence broadcast by not passing entity.
        """
        self.protocol.unavailable()
        element = self.output[-1]
        self.assertEquals("presence", element.name)
        self.assertEquals(None, element.uri)
        self.assertEquals(None, element.getAttribute('to'))
        self.assertEquals("unavailable", element.getAttribute('type'))
    def test_unavailableSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.unavailable(JID('user@example.com'),
                                  sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_subscribeSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.subscribe(JID('user@example.com'),
                                sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_unsubscribeSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.unsubscribe(JID('user@example.com'),
                                  sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_subscribedSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.subscribed(JID('user@example.com'),
                                 sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_unsubscribedSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.unsubscribed(JID('user@example.com'),
                                   sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_probeSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.probe(JID('user@example.com'),
                            sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
class RosterClientProtocolTest(unittest.TestCase):
    """
    Tests for L{xmppim.RosterClientProtocol}.
    """
    def setUp(self):
        """
        Set up a stubbed XML stream with an initialized roster protocol.

        Fix: this copy of the class lacked setUp() entirely, so
        ``self.protocol`` and ``self.stub`` raised AttributeError in
        test_removeItem. Restored from the complete version of this module.
        """
        self.stub = XmlStreamStub()
        self.protocol = xmppim.RosterClientProtocol()
        self.protocol.xmlstream = self.stub.xmlstream
        self.protocol.connectionInitialized()
    def test_removeItem(self):
        """
        Removing a roster item is setting an item with subscription C{remove}.
        """
        d = self.protocol.removeItem(JID('test@example.org'))
        # Inspect outgoing iq request
        iq = self.stub.output[-1]
        self.assertEquals('set', iq.getAttribute('type'))
        self.assertNotIdentical(None, iq.query)
        self.assertEquals(NS_ROSTER, iq.query.uri)
        children = list(domish.generateElementsQNamed(iq.query.children,
                                                      'item', NS_ROSTER))
        self.assertEquals(1, len(children))
        child = children[0]
        self.assertEquals('test@example.org', child['jid'])
        self.assertEquals('remove', child['subscription'])
        # Fake successful response
        response = toResponse(iq, 'result')
        self.stub.send(response)
        return d
| 33.865031 | 80 | 0.601027 | # Copyright (c) 2003-2009 Ralph Meijer
# See LICENSE for details
"""
Tests for L{wokkel.xmppim}.
"""
from twisted.internet import defer
from twisted.trial import unittest
from twisted.words.protocols.jabber.jid import JID
from twisted.words.protocols.jabber.xmlstream import toResponse
from twisted.words.xish import domish, utility
from wokkel import xmppim
from wokkel.generic import ErrorStanza, parseXml
from wokkel.test.helpers import XmlStreamStub
NS_XML = 'http://www.w3.org/XML/1998/namespace'
NS_ROSTER = 'jabber:iq:roster'
class PresenceClientProtocolTest(unittest.TestCase):
    """
    Tests for L{xmppim.PresenceClientProtocol}.
    """
    def setUp(self):
        # The protocol's parent is this test case, so stanzas "sent" by the
        # protocol are recorded in self.output via send() below.
        self.output = []
        self.protocol = xmppim.PresenceClientProtocol()
        self.protocol.parent = self
    def send(self, obj):
        # Stand-in for the stream manager's send(): capture outgoing stanzas.
        self.output.append(obj)
    def test_unavailableDirected(self):
        """
        Test sending of directed unavailable presence broadcast.
        """
        self.protocol.unavailable(JID('user@example.com'))
        presence = self.output[-1]
        self.assertEquals("presence", presence.name)
        self.assertEquals(None, presence.uri)
        self.assertEquals("user@example.com", presence.getAttribute('to'))
        self.assertEquals("unavailable", presence.getAttribute('type'))
    def test_unavailableWithStatus(self):
        """
        Test sending of directed unavailable presence broadcast with status.
        """
        self.protocol.unavailable(JID('user@example.com'),
                                  {None: 'Disconnected'})
        presence = self.output[-1]
        self.assertEquals("presence", presence.name)
        self.assertEquals(None, presence.uri)
        self.assertEquals("user@example.com", presence.getAttribute('to'))
        self.assertEquals("unavailable", presence.getAttribute('type'))
        self.assertEquals("Disconnected", unicode(presence.status))
    def test_unavailableBroadcast(self):
        """
        Test sending of unavailable presence broadcast.
        """
        self.protocol.unavailable(None)
        presence = self.output[-1]
        self.assertEquals("presence", presence.name)
        self.assertEquals(None, presence.uri)
        self.assertEquals(None, presence.getAttribute('to'))
        self.assertEquals("unavailable", presence.getAttribute('type'))
    def test_unavailableBroadcastNoEntityParameter(self):
        """
        Test sending of unavailable presence broadcast by not passing entity.
        """
        self.protocol.unavailable()
        presence = self.output[-1]
        self.assertEquals("presence", presence.name)
        self.assertEquals(None, presence.uri)
        self.assertEquals(None, presence.getAttribute('to'))
        self.assertEquals("unavailable", presence.getAttribute('type'))
class AvailabilityPresenceTest(unittest.TestCase):
    """
    Tests for L{xmppim.AvailabilityPresence}.
    """
    def test_fromElement(self):
        """
        Parsing a full available-presence stanza yields sender, recipient,
        availability, show, statuses and priority.
        """
        xml = """<presence from='user@example.org' to='user@example.com'>
             <show>chat</show>
             <status>Let's chat!</status>
             <priority>50</priority>
           </presence>
        """
        presence = xmppim.AvailabilityPresence.fromElement(parseXml(xml))
        self.assertEquals(JID('user@example.org'), presence.sender)
        self.assertEquals(JID('user@example.com'), presence.recipient)
        self.assertTrue(presence.available)
        self.assertEquals('chat', presence.show)
        self.assertEquals({None: "Let's chat!"}, presence.statuses)
        self.assertEquals(50, presence.priority)
class PresenceProtocolTest(unittest.TestCase):
"""
Tests for L{xmppim.PresenceProtocol}
"""
def setUp(self):
self.output = []
self.protocol = xmppim.PresenceProtocol()
self.protocol.parent = self
self.protocol.xmlstream = utility.EventDispatcher()
self.protocol.connectionInitialized()
def send(self, obj):
self.output.append(obj)
def test_errorReceived(self):
"""
Incoming presence stanzas are parsed and dispatched.
"""
xml = """<presence type="error"/>"""
def errorReceived(error):
xmppim.PresenceProtocol.errorReceived(self.protocol, error)
try:
self.assertIsInstance(error, ErrorStanza)
except:
d.errback()
else:
d.callback(None)
d = defer.Deferred()
self.protocol.errorReceived = errorReceived
self.protocol.xmlstream.dispatch(parseXml(xml))
return d
def test_availableReceived(self):
"""
Incoming presence stanzas are parsed and dispatched.
"""
xml = """<presence/>"""
def availableReceived(presence):
xmppim.PresenceProtocol.availableReceived(self.protocol, presence)
try:
self.assertIsInstance(presence, xmppim.AvailabilityPresence)
except:
d.errback()
else:
d.callback(None)
d = defer.Deferred()
self.protocol.availableReceived = availableReceived
self.protocol.xmlstream.dispatch(parseXml(xml))
return d
def test_unavailableReceived(self):
"""
Incoming presence stanzas are parsed and dispatched.
"""
xml = """<presence type='unavailable'/>"""
def unavailableReceived(presence):
xmppim.PresenceProtocol.unavailableReceived(self.protocol, presence)
try:
self.assertIsInstance(presence, xmppim.AvailabilityPresence)
except:
d.errback()
else:
d.callback(None)
d = defer.Deferred()
self.protocol.unavailableReceived = unavailableReceived
self.protocol.xmlstream.dispatch(parseXml(xml))
return d
def test_subscribeReceived(self):
"""
Incoming presence stanzas are parsed and dispatched.
"""
xml = """<presence type='subscribe'/>"""
def subscribeReceived(presence):
xmppim.PresenceProtocol.subscribeReceived(self.protocol, presence)
try:
self.assertIsInstance(presence, xmppim.SubscriptionPresence)
except:
d.errback()
else:
d.callback(None)
d = defer.Deferred()
self.protocol.subscribeReceived = subscribeReceived
self.protocol.xmlstream.dispatch(parseXml(xml))
return d
def test_unsubscribeReceived(self):
"""
Incoming presence stanzas are parsed and dispatched.
"""
xml = """<presence type='unsubscribe'/>"""
def unsubscribeReceived(presence):
xmppim.PresenceProtocol.unsubscribeReceived(self.protocol, presence)
try:
self.assertIsInstance(presence, xmppim.SubscriptionPresence)
except:
d.errback()
else:
d.callback(None)
d = defer.Deferred()
self.protocol.unsubscribeReceived = unsubscribeReceived
self.protocol.xmlstream.dispatch(parseXml(xml))
return d
def test_subscribedReceived(self):
"""
Incoming presence stanzas are parsed and dispatched.
"""
xml = """<presence type='subscribed'/>"""
def subscribedReceived(presence):
xmppim.PresenceProtocol.subscribedReceived(self.protocol, presence)
try:
self.assertIsInstance(presence, xmppim.SubscriptionPresence)
except:
d.errback()
else:
d.callback(None)
d = defer.Deferred()
self.protocol.subscribedReceived = subscribedReceived
self.protocol.xmlstream.dispatch(parseXml(xml))
return d
def test_unsubscribedReceived(self):
"""
Incoming presence stanzas are parsed and dispatched.
"""
xml = """<presence type='unsubscribed'/>"""
def unsubscribedReceived(presence):
xmppim.PresenceProtocol.unsubscribedReceived(self.protocol,
presence)
try:
self.assertIsInstance(presence, xmppim.SubscriptionPresence)
except:
d.errback()
else:
d.callback(None)
d = defer.Deferred()
self.protocol.unsubscribedReceived = unsubscribedReceived
self.protocol.xmlstream.dispatch(parseXml(xml))
return d
def test_probeReceived(self):
"""
Incoming presence stanzas are parsed and dispatched.
"""
xml = """<presence type='probe'/>"""
def probeReceived(presence):
xmppim.PresenceProtocol.probeReceived(self.protocol, presence)
try:
self.assertIsInstance(presence, xmppim.ProbePresence)
except:
d.errback()
else:
d.callback(None)
d = defer.Deferred()
self.protocol.probeReceived = probeReceived
self.protocol.xmlstream.dispatch(parseXml(xml))
return d
def test_available(self):
"""
It should be possible to pass a sender address.
"""
self.protocol.available(JID('user@example.com'),
show=u'chat',
status=u'Talk to me!',
priority=50)
element = self.output[-1]
self.assertEquals("user@example.com", element.getAttribute('to'))
self.assertIdentical(None, element.getAttribute('type'))
self.assertEquals(u'chat', unicode(element.show))
self.assertEquals(u'Talk to me!', unicode(element.status))
self.assertEquals(u'50', unicode(element.priority))
def test_availableLanguages(self):
"""
It should be possible to pass a sender address.
"""
self.protocol.available(JID('user@example.com'),
show=u'chat',
statuses={None: u'Talk to me!',
'nl': u'Praat met me!'},
priority=50)
element = self.output[-1]
self.assertEquals("user@example.com", element.getAttribute('to'))
self.assertIdentical(None, element.getAttribute('type'))
self.assertEquals(u'chat', unicode(element.show))
statuses = {}
for status in element.elements():
if status.name == 'status':
lang = status.getAttribute((NS_XML, 'lang'))
statuses[lang] = unicode(status)
self.assertIn(None, statuses)
self.assertEquals(u'Talk to me!', statuses[None])
self.assertIn('nl', statuses)
self.assertEquals(u'Praat met me!', statuses['nl'])
self.assertEquals(u'50', unicode(element.priority))
def test_availableSender(self):
"""
It should be possible to pass a sender address.
"""
self.protocol.available(JID('user@example.com'),
sender=JID('user@example.org'))
element = self.output[-1]
self.assertEquals("user@example.org", element.getAttribute('from'))
def test_unavailableDirected(self):
"""
Test sending of directed unavailable presence broadcast.
"""
self.protocol.unavailable(JID('user@example.com'))
element = self.output[-1]
self.assertEquals("presence", element.name)
self.assertEquals(None, element.uri)
self.assertEquals("user@example.com", element.getAttribute('to'))
self.assertEquals("unavailable", element.getAttribute('type'))
def test_unavailableWithStatus(self):
"""
Test sending of directed unavailable presence broadcast with status.
"""
self.protocol.unavailable(JID('user@example.com'),
{None: 'Disconnected'})
element = self.output[-1]
self.assertEquals("presence", element.name)
self.assertEquals(None, element.uri)
self.assertEquals("user@example.com", element.getAttribute('to'))
self.assertEquals("unavailable", element.getAttribute('type'))
self.assertEquals("Disconnected", unicode(element.status))
def test_unavailableBroadcast(self):
"""
Test sending of unavailable presence broadcast.
"""
self.protocol.unavailable(None)
element = self.output[-1]
self.assertEquals("presence", element.name)
self.assertEquals(None, element.uri)
self.assertEquals(None, element.getAttribute('to'))
self.assertEquals("unavailable", element.getAttribute('type'))
def test_unavailableBroadcastNoRecipientParameter(self):
"""
Test sending of unavailable presence broadcast by not passing entity.
"""
self.protocol.unavailable()
element = self.output[-1]
self.assertEquals("presence", element.name)
self.assertEquals(None, element.uri)
self.assertEquals(None, element.getAttribute('to'))
self.assertEquals("unavailable", element.getAttribute('type'))
def test_unavailableSender(self):
"""
It should be possible to pass a sender address.
"""
self.protocol.unavailable(JID('user@example.com'),
sender=JID('user@example.org'))
element = self.output[-1]
self.assertEquals("user@example.org", element.getAttribute('from'))
def test_subscribeSender(self):
"""
It should be possible to pass a sender address.
"""
self.protocol.subscribe(JID('user@example.com'),
sender=JID('user@example.org'))
element = self.output[-1]
self.assertEquals("user@example.org", element.getAttribute('from'))
def test_unsubscribeSender(self):
"""
It should be possible to pass a sender address.
"""
self.protocol.unsubscribe(JID('user@example.com'),
sender=JID('user@example.org'))
element = self.output[-1]
self.assertEquals("user@example.org", element.getAttribute('from'))
def test_subscribedSender(self):
"""
It should be possible to pass a sender address.
"""
self.protocol.subscribed(JID('user@example.com'),
sender=JID('user@example.org'))
element = self.output[-1]
self.assertEquals("user@example.org", element.getAttribute('from'))
def test_unsubscribedSender(self):
"""
It should be possible to pass a sender address.
"""
self.protocol.unsubscribed(JID('user@example.com'),
sender=JID('user@example.org'))
element = self.output[-1]
self.assertEquals("user@example.org", element.getAttribute('from'))
def test_probeSender(self):
"""
It should be possible to pass a sender address.
"""
self.protocol.probe(JID('user@example.com'),
sender=JID('user@example.org'))
element = self.output[-1]
self.assertEquals("user@example.org", element.getAttribute('from'))
class RosterClientProtocolTest(unittest.TestCase):
    """
    Tests for L{xmppim.RosterClientProtocol}.
    """
    def setUp(self):
        # Bind the protocol to a stubbed XML stream so outgoing stanzas can
        # be inspected via self.stub.output.
        self.stub = XmlStreamStub()
        self.protocol = xmppim.RosterClientProtocol()
        self.protocol.xmlstream = self.stub.xmlstream
        self.protocol.connectionInitialized()
    def test_removeItem(self):
        """
        Removing a roster item is setting an item with subscription C{remove}.
        """
        d = self.protocol.removeItem(JID('test@example.org'))
        # Inspect outgoing iq request
        iq = self.stub.output[-1]
        self.assertEquals('set', iq.getAttribute('type'))
        self.assertNotIdentical(None, iq.query)
        self.assertEquals(NS_ROSTER, iq.query.uri)
        children = list(domish.generateElementsQNamed(iq.query.children,
                                                      'item', NS_ROSTER))
        self.assertEquals(1, len(children))
        child = children[0]
        self.assertEquals('test@example.org', child['jid'])
        self.assertEquals('remove', child['subscription'])
        # Fake successful response
        response = toResponse(iq, 'result')
        self.stub.send(response)
        # Trial waits on the returned deferred, which fires on the result.
        return d
| 3,539 | 2,090 | 402 |
4d3b4899389408db330a5f97848edb14f5d87298 | 2,177 | py | Python | server/views/Mail/mail_config.py | gan-qi/auto-report-server | 788896a0b902244c6f2ddb9766d77f0836d15a31 | [
"MIT"
] | null | null | null | server/views/Mail/mail_config.py | gan-qi/auto-report-server | 788896a0b902244c6f2ddb9766d77f0836d15a31 | [
"MIT"
] | null | null | null | server/views/Mail/mail_config.py | gan-qi/auto-report-server | 788896a0b902244c6f2ddb9766d77f0836d15a31 | [
"MIT"
] | null | null | null | from server import api, db
from flask_restful import Resource
from flask import request, g
from server.models import MailConfig
class MailConfigClass(Resource):
    """Mail settings resource: stores the mail configuration sent by the
    frontend in the database and returns it on request.
    """
    def get(self):
        """Return the stored mail configuration for the current user,
        or empty fields when none has been saved yet.
        """
        # g.userId is expected to be set by an auth layer before dispatch.
        mail_config = MailConfig.query.filter_by(ownerId = g.userId).first()
        if mail_config:
            data = {
                'fromName': mail_config.fromName,
                'toName': mail_config.toName,
                'fromEmail': mail_config.fromEmail,
                'fromEmailKey': mail_config.fromEmailKey,
                'toEmail': mail_config.toEmail
            }
        else:
            # No saved configuration: return blank fields so the frontend
            # can render an empty form.
            data = {
                'fromName': '',
                'toName': '',
                'fromEmail': '',
                'fromEmailKey': '',
                'toEmail': ''
            }
        return {
            'code': 20000,
            'data': data
        }
    def post(self):
        """Create or update the caller's mail configuration.

        Expected JSON body::

            {
                'fromName': '',
                'toName': '',
                'fromEmail': '',
                'fromEmailKey': '',
                'toEmail': ''
            }
        """
        data = request.get_json(force = True)
        # Check whether a configuration already exists for this user:
        # update it in place if so, otherwise insert a new row.
        check = MailConfig.query.filter_by(ownerId = g.userId).first()
        if check:
            check.fromName = data.get('fromName')
            check.toName = data.get('toName')
            check.fromEmail = data.get('fromEmail')
            check.fromEmailKey = data.get('fromEmailKey')
            check.toEmail = data.get('toEmail')
        else:
            config = MailConfig(
                fromName = data.get('fromName'),
                toName = data.get('toName'),
                fromEmail = data.get('fromEmail'),
                fromEmailKey = data.get('fromEmailKey'),
                toEmail = data.get('toEmail'),
                ownerId = g.userId
            )
            db.session.add(config)
        # Commit covers both the update and the insert branch.
        db.session.commit()
        return {
            'code': 20000
        }
# Expose the mail-configuration resource at /mailconfig.
api.add_resource(MailConfigClass, '/mailconfig')
| 28.272727 | 76 | 0.468075 | from server import api, db
from flask_restful import Resource
from flask import request, g
from server.models import MailConfig
class MailConfigClass(Resource):
    """Mail settings resource: stores the mail configuration sent by the
    frontend in the database and returns it on request.
    """
    def options(self):
        """Answer CORS preflight (OPTIONS) requests with a success code.

        Fix: Flask-RESTful dispatches HTTP verbs to the lower-cased method
        name, so an OPTIONS request maps to ``options`` — the original
        ``option`` spelling was never reachable.
        """
        return {
            'code': 20000
        }
    def get(self):
        """Return the stored mail configuration for the current user,
        or empty fields when none has been saved yet.
        """
        # g.userId is expected to be set by an auth layer before dispatch.
        mail_config = MailConfig.query.filter_by(ownerId = g.userId).first()
        if mail_config:
            data = {
                'fromName': mail_config.fromName,
                'toName': mail_config.toName,
                'fromEmail': mail_config.fromEmail,
                'fromEmailKey': mail_config.fromEmailKey,
                'toEmail': mail_config.toEmail
            }
        else:
            # No saved configuration: return blank fields so the frontend
            # can render an empty form.
            data = {
                'fromName': '',
                'toName': '',
                'fromEmail': '',
                'fromEmailKey': '',
                'toEmail': ''
            }
        return {
            'code': 20000,
            'data': data
        }
    def post(self):
        """Create or update the caller's mail configuration.

        Expected JSON body::

            {
                'fromName': '',
                'toName': '',
                'fromEmail': '',
                'fromEmailKey': '',
                'toEmail': ''
            }
        """
        data = request.get_json(force = True)
        # Check whether a configuration already exists for this user:
        # update it in place if so, otherwise insert a new row.
        check = MailConfig.query.filter_by(ownerId = g.userId).first()
        if check:
            check.fromName = data.get('fromName')
            check.toName = data.get('toName')
            check.fromEmail = data.get('fromEmail')
            check.fromEmailKey = data.get('fromEmailKey')
            check.toEmail = data.get('toEmail')
        else:
            config = MailConfig(
                fromName = data.get('fromName'),
                toName = data.get('toName'),
                fromEmail = data.get('fromEmail'),
                fromEmailKey = data.get('fromEmailKey'),
                toEmail = data.get('toEmail'),
                ownerId = g.userId
            )
            db.session.add(config)
        # Commit covers both the update and the insert branch.
        db.session.commit()
        return {
            'code': 20000
        }
api.add_resource(MailConfigClass, '/mailconfig')
| 49 | 0 | 27 |
d0f30f7247554a65f9b9ef7e72b9c2a4f47e7e48 | 272 | py | Python | chef.py | LukaIgnjatovic/freeCodeCamp.org---Learn-Python---Full-Course-for-Beginners | 6e4fc54203da15921bd6562c1fa7a915b45b4ad1 | [
"MIT"
] | 12 | 2018-11-05T10:52:19.000Z | 2022-03-08T07:12:44.000Z | chef.py | LukaIgnjatovic/freeCodeCamp.org---Learn-Python---Full-Course-for-Beginners | 6e4fc54203da15921bd6562c1fa7a915b45b4ad1 | [
"MIT"
] | null | null | null | chef.py | LukaIgnjatovic/freeCodeCamp.org---Learn-Python---Full-Course-for-Beginners | 6e4fc54203da15921bd6562c1fa7a915b45b4ad1 | [
"MIT"
] | 17 | 2019-01-13T08:09:45.000Z | 2022-03-14T22:51:44.000Z | # "Chef" class is defined with 3 functions.
| 22.666667 | 43 | 0.636029 | # "Chef" class is defined with 3 functions.
class Chef:
    """A cook that announces each dish as it is prepared."""

    def _announce(self, dish):
        # Shared formatter so every method emits the same sentence shape.
        print("The chef makes " + dish)

    def make_chicken(self):
        """Prepare a chicken."""
        self._announce("a chicken.")

    def make_salad(self):
        """Prepare a salad."""
        self._announce("a salad.")

    def make_special_dish(self):
        """Prepare the house special: BBQ ribs."""
        self._announce("BBQ ribs.")
| 135 | -10 | 103 |
91d44195f45f45dbdd55a2d82d7522fefaba269d | 686 | py | Python | max_ai/nodes/cmd1.py | mat-heim/max_ros | e01e4f5b2db96d94865d80452d41b8dcf1412232 | [
"Apache-2.0"
] | null | null | null | max_ai/nodes/cmd1.py | mat-heim/max_ros | e01e4f5b2db96d94865d80452d41b8dcf1412232 | [
"Apache-2.0"
] | null | null | null | max_ai/nodes/cmd1.py | mat-heim/max_ros | e01e4f5b2db96d94865d80452d41b8dcf1412232 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from nlp import NlpClass
import sqlite3
import os
# import time
from datetime import datetime
dir = os.path.dirname(os.path.abspath(__file__))
RM = sqlite3.connect(dir + '/data/robbie_memory.db')
# Script entry point.
# NOTE(review): `TerminalInput` is not defined in this (filtered) copy of
# the module; the complete version defines the class above this guard.
if __name__ == '__main__':
    st = TerminalInput()
| 24.5 | 82 | 0.604956 | #!/usr/bin/env python
from nlp import NlpClass
import sqlite3
import os
# import time
from datetime import datetime
dir = os.path.dirname(os.path.abspath(__file__))
RM = sqlite3.connect(dir + '/data/robbie_memory.db')
class TerminalInput:
    """Interactive console loop that feeds typed sentences to the NLP parser.

    NOTE(review): Python 2 code (`print` statement, `raw_input`); the loop
    only terminates via Ctrl-C/EOF. The database logging below is disabled.
    """
    def __init__(self):
        a = NlpClass()  # parser instance; nlpParse() is called per input line
        debug = True  # forwarded to nlpParse for verbose output
        print "ENTERING TERMINAL INPUT"
        while True:
            s = raw_input("Enter speech: ")
            a.nlpParse(s, debug)
            dt = datetime.now()  # timestamp for the (disabled) DB insert below
            #RM.execute("insert into RAW_INPUT (RAW, DATE) values (?, ?)",(s, dt))
            #RM.commit()
            # print sentenceAnalysisClass(s)
# Script entry point: start the blocking terminal input loop.
if __name__ == '__main__':
    st = TerminalInput()
| 364 | -1 | 49 |
fb468cac07d94d44b17a43a3dfe4feaf910c3c49 | 2,495 | py | Python | examples/navierstokes/convergence/liddrivencavity2dRe100_30/scripts/order_convergence.py | barbagroup/petibm-examples | 794de3613967c14750c750aed386602c988cff05 | [
"BSD-3-Clause"
] | 2 | 2020-08-08T13:37:32.000Z | 2021-12-01T03:22:32.000Z | examples/navierstokes/convergence/liddrivencavity2dRe100_30/scripts/order_convergence.py | barbagroup/petibm-examples | 794de3613967c14750c750aed386602c988cff05 | [
"BSD-3-Clause"
] | null | null | null | examples/navierstokes/convergence/liddrivencavity2dRe100_30/scripts/order_convergence.py | barbagroup/petibm-examples | 794de3613967c14750c750aed386602c988cff05 | [
"BSD-3-Clause"
] | 2 | 2019-12-22T08:49:01.000Z | 2021-12-01T03:22:44.000Z | """Compute the observed order of convergence for the velocity and pressure."""
import numpy
import pathlib
import petibmpy
def observed_order_convergence(fields, grids, grid_ref, ratio):
    """Estimate the observed order of convergence from three solutions.

    The three fields (coarse, medium, fine — consistently refined by
    `ratio`) are interpolated onto a common reference grid; the order is
    obtained from the L2 norms of successive differences.

    Parameters
    ----------
    fields : tuple of numpy.ndarray objects
        Field values on three consistently refined grids.
    grids : tuple of tuple of numpy.ndarray objects
        Gridline locations of the three grids.
    grid_ref : tuple of numpy.ndarray objects
        Reference gridlines used for the interpolation.
    ratio : float
        Grid refinement ratio.

    Returns
    -------
    float
        The observed order of convergence.
    """
    # Interpolate each solution onto the shared reference grid.
    coarse, medium, fine = (petibmpy.interpolate2d(field, grid, grid_ref)
                            for field, grid in zip(fields, grids))
    norm = numpy.linalg.norm
    # order = log(||medium - coarse|| / ||fine - medium||) / log(ratio)
    return numpy.log(norm(medium - coarse) / norm(fine - medium)) \
        / numpy.log(ratio)
# Set parameters.
rootdir = pathlib.Path(__file__).absolute().parents[1]  # two levels up from this script
timestep = 500  # solution time-step index
field_names = ['p', 'u', 'v']  # name of the fields
ncells = [30, 90, 270, 810]  # number of cells in each direction
ratio = 3  # refinement ratio between two consecutive grids
# Load the grid and field from files.
data = {}
for name in field_names:
    subdata = {'grids': [], 'fields': []}
    for n in ncells:
        # Each grid size has its own simulation folder, e.g. <root>/270/output.
        simudir = rootdir / str(n)
        datadir = simudir / 'output'
        grid = petibmpy.read_grid_hdf5(datadir / 'grid.h5', name)
        filepath = datadir / f'{timestep:0>7}.h5'  # zero-padded, e.g. 0000500.h5
        field = petibmpy.read_field_hdf5(filepath, name)
        subdata['grids'].append(grid)
        subdata['fields'].append(field)
    data[name] = subdata
# Compute the observed orders of convergence.
alphas = {}
for name in field_names:
    grids, fields = data[name]['grids'], data[name]['fields']
    # Compute order of convergence using the three coarsest grids.
    # Fields are interpolated on the first grid.
    alpha1 = observed_order_convergence(fields[:3], grids[:3], grids[0], ratio)
    # Compute order of convergence using the three finest grids.
    # Fields are interpolated on the first grid.
    alpha2 = observed_order_convergence(fields[1:], grids[1:], grids[0], ratio)
    alphas[name] = (alpha1, alpha2)
print(alphas)
| 34.178082 | 79 | 0.670541 | """Compute the observed order of convergence for the velocity and pressure."""
import numpy
import pathlib
import petibmpy
def observed_order_convergence(fields, grids, grid_ref, ratio):
"""Compute the observed order of convergence.
Parameters
----------
fields : tuple of numpy.ndarray objects
The field values on three consistently refined grids.
grids : tuple of tuple of numpy.ndarray objects
The gridline locations for three consistently refined grids.
grid_ref : tuple of numpy.ndarray objects
The reference gridlines used for interpolation.
ratio : float
The grid refinement ratio.
Returns
-------
float
The observed order of convergence.
"""
coarse = petibmpy.interpolate2d(fields[0], grids[0], grid_ref)
medium = petibmpy.interpolate2d(fields[1], grids[1], grid_ref)
fine = petibmpy.interpolate2d(fields[2], grids[2], grid_ref)
alpha = (numpy.log(numpy.linalg.norm(medium - coarse, ord=None) /
numpy.linalg.norm(fine - medium, ord=None)) /
numpy.log(ratio))
return alpha
# Set parameters.
rootdir = pathlib.Path(__file__).absolute().parents[1]
timestep = 500 # solution time-step index
field_names = ['p', 'u', 'v'] # name of the fields
ncells = [30, 90, 270, 810] # number of cells in each direction
ratio = 3 # refinement ratio between two consecutive grids
# Load the grid and field from files.
data = {}
for name in field_names:
subdata = {'grids': [], 'fields': []}
for n in ncells:
simudir = rootdir / str(n)
datadir = simudir / 'output'
grid = petibmpy.read_grid_hdf5(datadir / 'grid.h5', name)
filepath = datadir / f'{timestep:0>7}.h5'
field = petibmpy.read_field_hdf5(filepath, name)
subdata['grids'].append(grid)
subdata['fields'].append(field)
data[name] = subdata
# Compute the observed orders of convergence.
alphas = {}
for name in field_names:
grids, fields = data[name]['grids'], data[name]['fields']
# Compute order of convergence using the three coarsest grids.
# Fields are interpolated on the first grid.
alpha1 = observed_order_convergence(fields[:3], grids[:3], grids[0], ratio)
# Compute order of convergence using the three finest grids.
# Fields are interpolated on the first grid.
alpha2 = observed_order_convergence(fields[1:], grids[1:], grids[0], ratio)
alphas[name] = (alpha1, alpha2)
print(alphas)
| 0 | 0 | 0 |
0d0bbaa0b10a0cda93dd365c9f9a310823d37682 | 3,840 | py | Python | virtual/lib/python3.7/site-packages/l/project.py | candycrushpro/pitch-project | 8f850153fa3debcd858c5a58e7296d7cbda3918b | [
"MIT"
] | null | null | null | virtual/lib/python3.7/site-packages/l/project.py | candycrushpro/pitch-project | 8f850153fa3debcd858c5a58e7296d7cbda3918b | [
"MIT"
] | null | null | null | virtual/lib/python3.7/site-packages/l/project.py | candycrushpro/pitch-project | 8f850153fa3debcd858c5a58e7296d7cbda3918b | [
"MIT"
] | null | null | null | import errno
import os
import subprocess
from bp import abstract, generic
from bp.filepath import FilePath
from characteristic import Attribute, attributes
from zope.interface import implementer
# TODO: Really betterpath should have a separate interface for like,
# file systems, or listable things.
@implementer(abstract.IFilePath)
@attributes(
[
Attribute(name="_git_dir", default_value=None, exclude_from_repr=True),
Attribute(name="_path", exclude_from_repr=True),
Attribute(name="path", exclude_from_init=True),
],
)
@implementer(abstract.IFilePath)
@attributes(
[
Attribute(name="_hg_dir", default_value=None, exclude_from_repr=True),
Attribute(name="_path", exclude_from_repr=True),
Attribute(name="path", exclude_from_init=True),
],
)
for attribute in [
"basename",
"changed",
"createDirectory",
"exists",
"getAccessTime",
"getContent",
"getModificationTime",
"getStatusChangeTime",
"getsize",
"isdir",
"isfile",
"islink",
"open",
"path",
"realpath",
"remove",
"sep", # Apparently not in IFilePath
"setContent",
"sibling",
]:
proxy = _proxy_for_attribute(name=attribute)
setattr(GitPath, attribute, proxy)
setattr(HgPath, attribute, proxy)
| 26.666667 | 79 | 0.621094 | import errno
import os
import subprocess
from bp import abstract, generic
from bp.filepath import FilePath
from characteristic import Attribute, attributes
from zope.interface import implementer
def from_path(path):
    """Wrap *path* in a VCS-aware path object when it lives in a repository.

    Returns a ``GitPath`` when *path* contains a ``.git`` directory or when
    ``git rev-parse`` reports it is inside a work tree, an ``HgPath`` when it
    contains a ``.hg`` directory, and the original *path* otherwise.
    """
    git_dir = path.child(".git")
    if git_dir.isdir():
        return GitPath(git_dir=git_dir, path=path)
    try:
        git = subprocess.Popen(
            ["git", "rev-parse", "--is-inside-work-tree"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=path.path,
        )
    except OSError as error:
        # The cwd didn't exist
        if error.errno != errno.ENOENT:
            raise
    else:
        stdout, _ = git.communicate()
        # NOTE(review): compared against a str; on Python 3 communicate()
        # returns bytes, so this branch could never match -- confirm the
        # supported interpreter version.
        if stdout == "true\n":
            return GitPath(path=path)
    hg_dir = path.child(".hg")
    if hg_dir.isdir():
        return HgPath(hg_dir=hg_dir, path=path)  # XXX
    return path
# TODO: Really betterpath should have a separate interface for like,
# file systems, or listable things.
@implementer(abstract.IFilePath)
@attributes(
[
Attribute(name="_git_dir", default_value=None, exclude_from_repr=True),
Attribute(name="_path", exclude_from_repr=True),
Attribute(name="path", exclude_from_init=True),
],
)
class GitPath(object):
    """An ``IFilePath`` whose directory listings come from ``git ls-tree``.

    Non-listing operations are proxied to the wrapped ``_path`` through the
    properties attached by the module-level loop below.
    """

    # Generic IFilePath helpers from bp, built on child()/listdir().
    children = generic.genericChildren
    segmentsFrom = generic.genericSegmentsFrom
    walk = generic.genericWalk

    def clonePath(self, path):
        # Preserve the repository root when deriving new paths.
        return GitPath(git_dir=self._git_dir, path=path)

    def child(self, name):
        child = self._path.child(name)
        return GitPath(git_dir=self._git_dir, path=child)

    def parent(self):
        # Never escape above the repository root.
        if self._path == self._git_dir:
            return self
        return self.clonePath(path=self._path.parent())

    def listdir(self):
        """List the entry names tracked by git at this path (HEAD revision)."""
        argv = ["git"]
        if self._git_dir is not None:
            argv.extend(["--git-dir", self._git_dir.path])
        # The trailing slash asks ls-tree for the directory's contents.
        path = self.path + "/" if self.isdir() else ""
        argv.extend(["ls-tree", "--name-only", "HEAD", path])
        listdir = subprocess.check_output(argv).splitlines()
        return [outputted.rpartition("/")[2] for outputted in listdir]
@implementer(abstract.IFilePath)
@attributes(
[
Attribute(name="_hg_dir", default_value=None, exclude_from_repr=True),
Attribute(name="_path", exclude_from_repr=True),
Attribute(name="path", exclude_from_init=True),
],
)
class HgPath(object):
    """An ``IFilePath`` whose directory listings come from ``hg files``.

    Non-listing operations are proxied to the wrapped ``_path`` through the
    properties attached by the module-level loop below.
    """

    # Generic IFilePath helpers from bp, built on child()/listdir().
    children = generic.genericChildren
    segmentsFrom = generic.genericSegmentsFrom
    walk = generic.genericWalk

    def __init__(self):
        # Default the repository root to the wrapped path itself.
        # NOTE(review): presumably invoked by characteristic's generated
        # initializer after the attributes are set -- confirm.
        if self._hg_dir is None:
            self._hg_dir = self._path

    def clonePath(self, path):
        return HgPath(hg_dir=self._hg_dir, path=path)

    def child(self, name):
        return self.clonePath(path=self._path.child(name))

    def parent(self):
        # Never escape above the repository root.
        if self._path == self._hg_dir:
            return self
        return self.clonePath(path=self._path.parent())

    def listdir(self):
        """List tracked entry names at this path (non-recursive)."""
        paths = subprocess.check_output(
            [
                "hg", "--repository", self.path,
                "files", "--include", "*", "--exclude", "*/*",
            ],
        )
        return (os.path.basename(path) for path in paths.splitlines())
def _proxy_for_attribute(name):
return property(lambda self: getattr(self._path, name))
for attribute in [
"basename",
"changed",
"createDirectory",
"exists",
"getAccessTime",
"getContent",
"getModificationTime",
"getStatusChangeTime",
"getsize",
"isdir",
"isfile",
"islink",
"open",
"path",
"realpath",
"remove",
"sep", # Apparently not in IFilePath
"setContent",
"sibling",
]:
proxy = _proxy_for_attribute(name=attribute)
setattr(GitPath, attribute, proxy)
setattr(HgPath, attribute, proxy)
| 1,954 | 480 | 90 |
d3bd3cf751b1a7d5708bf0b721922a9d0c969f7a | 1,210 | py | Python | tests/test_main.py | EmitKiwi/home-assistant | 0999e2ddc476f4bddf710005168b082f03a7cdc0 | [
"Apache-2.0"
] | 13 | 2017-02-01T13:25:34.000Z | 2022-01-26T01:30:39.000Z | tests/test_main.py | EmitKiwi/home-assistant | 0999e2ddc476f4bddf710005168b082f03a7cdc0 | [
"Apache-2.0"
] | 9 | 2017-07-26T18:05:32.000Z | 2021-12-05T14:16:34.000Z | tests/test_main.py | EmitKiwi/home-assistant | 0999e2ddc476f4bddf710005168b082f03a7cdc0 | [
"Apache-2.0"
] | 21 | 2017-07-26T17:09:40.000Z | 2022-03-27T22:37:22.000Z | """Test methods in __main__."""
from unittest.mock import patch, PropertyMock
from homeassistant import __main__ as main
@patch('sys.exit')
def test_validate_python(mock_exit):
    """Test validate Python version method."""
    # (version_info, should_exit): anything below 3.4.2 must bail out.
    cases = [
        ((2, 7, 8), True),
        ((3, 2, 0), True),
        ((3, 4, 1), True),
        ((3, 4, 2), False),
        ((3, 5, 1), False),
    ]
    for version, should_exit in cases:
        with patch('sys.version_info',
                   new_callable=PropertyMock(return_value=version)):
            main.validate_python()
        assert mock_exit.called is should_exit
        mock_exit.reset_mock()
| 28.809524 | 66 | 0.657025 | """Test methods in __main__."""
from unittest.mock import patch, PropertyMock
from homeassistant import __main__ as main
@patch('sys.exit')
def test_validate_python(mock_exit):
    """Test validate Python version method."""
    # (version_info, should_exit): anything below 3.4.2 must bail out.
    cases = [
        ((2, 7, 8), True),
        ((3, 2, 0), True),
        ((3, 4, 1), True),
        ((3, 4, 2), False),
        ((3, 5, 1), False),
    ]
    for version, should_exit in cases:
        with patch('sys.version_info',
                   new_callable=PropertyMock(return_value=version)):
            main.validate_python()
        assert mock_exit.called is should_exit
        mock_exit.reset_mock()
| 0 | 0 | 0 |
ed174d67719326e48b3e7fcaef1218c9469caa7b | 2,420 | py | Python | src/pytest_print/__init__.py | pytest-dev/pytest-print | 0866aa32927fcace66480913d9d740d45c92fdac | [
"MIT"
] | 40 | 2018-08-23T08:44:30.000Z | 2022-02-03T12:09:13.000Z | src/pytest_print/__init__.py | pytest-dev/pytest-print | 0866aa32927fcace66480913d9d740d45c92fdac | [
"MIT"
] | 6 | 2018-08-23T09:26:36.000Z | 2021-08-03T10:10:32.000Z | src/pytest_print/__init__.py | pytest-dev/pytest-print | 0866aa32927fcace66480913d9d740d45c92fdac | [
"MIT"
] | 5 | 2018-11-14T20:30:17.000Z | 2021-08-03T05:56:31.000Z | from datetime import datetime
from typing import Callable, Optional
import pytest
from _pytest.config.argparsing import Parser
from _pytest.fixtures import SubRequest
from _pytest.terminal import TerminalReporter
from .version import __version__
@pytest.fixture(name="printer")
def printer(request: SubRequest) -> Callable[[str], None]:
"""pytest plugin to print test progress steps in verbose mode"""
return create_printer(request)
@pytest.fixture(scope="session", name="printer_session")
def no_op(msg: str) -> None: # noqa: U100
"""Do nothing"""
__all__ = [
"__version__",
]
| 31.428571 | 110 | 0.683471 | from datetime import datetime
from typing import Callable, Optional
import pytest
from _pytest.config.argparsing import Parser
from _pytest.fixtures import SubRequest
from _pytest.terminal import TerminalReporter
from .version import __version__
def pytest_addoption(parser: Parser) -> None:
    """Register the plugin's command line flags under pytest's *general* group."""
    group = parser.getgroup("general")
    group.addoption(
        "--print-relative-time",
        action="store_true",
        dest="pytest_print_relative_time",
        default=False,
        help="Time in milliseconds when the print was invoked, relative to the time the fixture was created.",
    )
    group.addoption(
        "--print",
        action="store_true",
        dest="pytest_print_on",
        default=False,
        help="By default the plugins if verbosity is greater than zero (-v flag), this forces on",
    )
@pytest.fixture(name="printer")
def printer(request: SubRequest) -> Callable[[str], None]:
    """pytest plugin to print test progress steps in verbose mode"""
    # Function-scoped fixture; yields a no-op unless printing is enabled.
    return create_printer(request)
@pytest.fixture(scope="session", name="printer_session")
def printer_session(request: SubRequest) -> Callable[[str], None]:
    """Session-scoped variant of the ``printer`` fixture."""
    return create_printer(request)
def create_printer(request: SubRequest) -> Callable[[str], None]:
    """Return a progress-printing callable, or ``no_op`` when printing is off.

    Printing is active when ``--print`` was given or verbosity is above zero,
    and the terminal reporter plugin is available.
    """
    if request.config.getoption("pytest_print_on") or request.config.getoption("verbose") > 0:
        terminal_reporter = request.config.pluginmanager.getplugin("terminalreporter")
        if terminal_reporter is not None:  # pragma: no branch
            state = State(request.config.getoption("pytest_print_relative_time"), terminal_reporter)
            return state.printer
    return no_op
def no_op(msg: str) -> None:  # noqa: U100
    """Do nothing (sink printer used when printing is disabled)."""
class State:
    """Holds the terminal reporter and fixture creation time for printing."""

    def __init__(self, print_relative: bool, reporter: TerminalReporter) -> None:
        self._reporter = reporter
        # Record the creation time only when relative timestamps are wanted.
        self._start = datetime.now() if print_relative else None
        self._print_relative = print_relative

    @property
    def elapsed(self) -> Optional[float]:
        # Seconds since fixture creation (note: the CLI help text says
        # milliseconds, but this computes seconds).
        if self._start is None:
            return None  # pragma: no cover
        return (datetime.now() - self._start).total_seconds()

    def printer(self, msg: str) -> None:
        """Write *msg* to the terminal, prefixed with elapsed time if enabled."""
        msg = "\t{}{}".format(f"{self.elapsed}\t" if self._print_relative else "", msg)
        self._reporter.write_line(msg)

    __slots__ = ("_start", "_print_relative", "_reporter")
__all__ = [
"__version__",
]
| 1,576 | 145 | 91 |
55f92793a22bd3a2267c7fffd32a635f6f8059f0 | 547 | py | Python | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine-dwh/db/__init__.py | sandrobonazzola/ovirt-dwh | f1c930df2274085eff324d20ddc526860a5d6f75 | [
"Apache-2.0"
] | 5 | 2016-01-23T06:05:16.000Z | 2022-02-05T13:58:47.000Z | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine-dwh/db/__init__.py | sandrobonazzola/ovirt-dwh | f1c930df2274085eff324d20ddc526860a5d6f75 | [
"Apache-2.0"
] | 17 | 2022-01-03T16:56:32.000Z | 2022-03-30T12:56:04.000Z | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine-dwh/db/__init__.py | sandrobonazzola/ovirt-dwh | f1c930df2274085eff324d20ddc526860a5d6f75 | [
"Apache-2.0"
] | 22 | 2015-01-04T06:28:08.000Z | 2022-02-14T07:42:25.000Z | #
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
from otopi import util
from . import connection
from . import engine_connection
from . import dbmsupgrade
from . import schema
from . import vacuum
@util.export
# vim: expandtab tabstop=4 shiftwidth=4
| 18.233333 | 45 | 0.758684 | #
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
from otopi import util
from . import connection
from . import engine_connection
from . import dbmsupgrade
from . import schema
from . import vacuum
@util.export
def createPlugins(context):
    """Instantiate every DB-related setup plugin in the otopi *context*."""
    connection.Plugin(context=context)
    engine_connection.Plugin(context=context)
    dbmsupgrade.Plugin(context=context)
    schema.Plugin(context=context)
    vacuum.Plugin(context=context)
# vim: expandtab tabstop=4 shiftwidth=4
| 201 | 0 | 22 |
5600876fcd2306f2741007deb9e93dbb2d82b505 | 516 | py | Python | manage.py | michael-muga/MyBlog | d4821d16279ce063aef29d3cc39bdf9643f9a893 | [
"MIT"
] | null | null | null | manage.py | michael-muga/MyBlog | d4821d16279ce063aef29d3cc39bdf9643f9a893 | [
"MIT"
] | null | null | null | manage.py | michael-muga/MyBlog | d4821d16279ce063aef29d3cc39bdf9643f9a893 | [
"MIT"
] | null | null | null | from app import create_app,db
from app.models import User
from flask_script import Manager,Server
from flask_migrate import Migrate,MigrateCommand
from app.models import User, Blog, Comment, Quote
#creating app instance
app= create_app('production')
manager= Manager(app)
manager.add_command('server',Server)
migrate = Migrate(app,db)
manager.add_command('db',MigrateCommand)
@manager.shell
if __name__ == '__main__':
manager.run() | 24.571429 | 49 | 0.76938 | from app import create_app,db
from app.models import User
from flask_script import Manager,Server
from flask_migrate import Migrate,MigrateCommand
from app.models import User, Blog, Comment, Quote
#creating app instance
app= create_app('production')
manager= Manager(app)
manager.add_command('server',Server)
migrate = Migrate(app,db)
manager.add_command('db',MigrateCommand)
@manager.shell
def make_shell_context():
    """Expose the app, the database handle, and the User model in the shell."""
    shell_context = {"app": app, "db": db, "User": User}
    return shell_context
if __name__ == '__main__':
manager.run() | 51 | 0 | 22 |
8943582321b54c449e92f79f4ebf0696a907b98d | 9,766 | py | Python | lib/model/interface_disc.py | Flyhigh2017/cs230_project | 93b9fa4452224b02dd336c69beab27d27a9e1579 | [
"MIT"
] | null | null | null | lib/model/interface_disc.py | Flyhigh2017/cs230_project | 93b9fa4452224b02dd336c69beab27d27a9e1579 | [
"MIT"
] | null | null | null | lib/model/interface_disc.py | Flyhigh2017/cs230_project | 93b9fa4452224b02dd336c69beab27d27a9e1579 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import model.vertical_disc as vc
import cv2
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import math
import tensorflow as tf
from utils.timer import Timer
from utils.cython_nms import nms, nms_new
from utils.boxes_grid import get_boxes_grid
from utils.blob import im_list_to_blob
from nets.vgg16 import vgg16
from model.config import cfg
from model.bbox_transform import clip_boxes, bbox_transform_inv
CLASSES = ('__background__', 'bone', 's')
net = vgg16(batch_size=1)
model = "/Users/anekisei/Documents/tf-faster-rcnn2/output/default/vertical/default/vgg16_faster_rcnn_iter_1500.ckpt"
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth=True
# init session
sess = tf.Session(config=tfconfig)
net.create_architecture(sess, "TEST", len(CLASSES), tag='',
anchor_scales=cfg.ANCHOR_SCALES,
anchor_ratios=cfg.ANCHOR_RATIOS)
saver = tf.train.Saver()
saver.restore(sess, model)
def _get_image_blob(im):
    """Converts an image into a network input.

    Arguments:
        im (ndarray): a color image in BGR order

    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    # Subtract the configured mean pixel value (BGR order).
    im_orig -= cfg.PIXEL_MEANS
    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    processed_ims = []
    im_scale_factors = []
    for target_size in cfg.TEST.SCALES:
        # Scale so the shorter side matches target_size.
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)
    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)
    return blob, np.array(im_scale_factors)
def _get_blobs(im):
    """Convert an image and RoIs within that image into network inputs."""
    # Only the image pyramid is built here; RoIs come from the RPN later.
    blobs = {}
    blobs['data'], im_scale_factors = _get_image_blob(im)
    return blobs, im_scale_factors
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
return boxes
def apply_nms(all_boxes, thresh):
    """Apply non-maximum suppression to all predicted boxes output by the
    test_net method.

    all_boxes[cls][img] is an (N, 5) array of (x1, y1, x2, y2, score); the
    returned structure has the same layout with degenerate, low-confidence,
    and suppressed boxes removed.
    """
    num_classes = len(all_boxes)
    num_images = len(all_boxes[0])
    nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    for cls_ind in range(num_classes):
        for im_ind in range(num_images):
            dets = all_boxes[cls_ind][im_ind]
            # BUG FIX: `dets == []` is an element-wise comparison on numpy
            # arrays (never a scalar True); test emptiness explicitly.
            if len(dets) == 0:
                continue
            x1 = dets[:, 0]
            y1 = dets[:, 1]
            x2 = dets[:, 2]
            y2 = dets[:, 3]
            scores = dets[:, 4]
            # Keep only non-degenerate boxes above the detection threshold.
            inds = np.where((x2 > x1) & (y2 > y1) & (scores > cfg.TEST.DET_THRESHOLD))[0]
            dets = dets[inds, :]
            if len(dets) == 0:
                continue
            keep = nms(dets, thresh)
            if len(keep) == 0:
                continue
            nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
    return nms_boxes
def vis_detections(im, class_name, dets, thresh=0.5):
    """Collect box centers for detections scoring above *thresh*.

    Despite the name, nothing is drawn: returns the (x, y) center of every
    detection in *dets* (rows of x1, y1, x2, y2, score) whose score is at
    least *thresh*.  *im* and *class_name* are unused and kept for interface
    compatibility.  (x runs along image width, y along height.)
    """
    # Removed unused locals `store` and `area` from the original.
    vertical_points = []
    for i in range(dets.shape[0]):
        score = dets[i, -1]
        if score < thresh:
            continue
        x1 = int(dets[i, 0])
        y1 = int(dets[i, 1])
        x2 = int(dets[i, 2])
        y2 = int(dets[i, 3])
        center_x = int((x1 + x2) / 2.0)
        center_y = int((y1 + y2) / 2.0)
        vertical_points.append((center_x, center_y))
    return vertical_points
def test_vertical(img, thresh=0.05, sess=sess):
    """Detect vertebra centers in *img* and return two annotated images.

    Runs the module-level Faster R-CNN (`net`) on a contrast-enhanced copy,
    applies per-class NMS, sorts centers by y, drops horizontal outliers,
    then draws the spine contour and bone labels on both the original image
    and the contrast-enhanced one.  Returns (annotated_original,
    annotated_contrast).
    """
    # all detections are collected into:
    # all_boxes[cls][image] = N x 5 array of detections in
    # (x1, y1, x2, y2, score)
    # timers
    im = contrast(img)
    _t = {'im_detect' : Timer(), 'misc' : Timer()}
    _t['im_detect'].tic()
    scores, boxes = im_detect(sess, net, im)
    _t['im_detect'].toc()
    _t['misc'].tic()
    # skip j = 0, because it's the background class
    vertical_points = []
    for j, cls in enumerate(CLASSES[1:]):
        j += 1
        inds = np.where(scores[:, j] > thresh)[0]
        cls_scores = scores[inds, j]
        cls_boxes = boxes[inds, j*4:(j+1)*4]
        cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
          .astype(np.float32, copy=False)
        keep = nms(cls_dets, cfg.TEST.NMS)
        cls_dets = cls_dets[keep, :]
        # Only centers of detections scoring above 0.7 are kept.
        vertical_points += vis_detections(im, cls, cls_dets, thresh=0.7)
    # sort verticals
    vertical_points = sorted(vertical_points, key=lambda vertical_points: vertical_points[1], reverse=False)#[:7]
    #res_image, flag, disease_bone = vc.spine_contour(im, vertical_points)
    vertical_points = filter_outliers(vertical_points)
    res_image = vc.spine_contour(img, vertical_points)
    res_image = Draw_bone(res_image,vertical_points)
    res2 = vc.spine_contour(im, vertical_points)#contrast img
    res2 = Draw_bone(res2,vertical_points)
    _t['misc'].toc()
    return res_image, res2#, flag, disease_bone
'''
if cv2.waitKey(1) & 0xff == 27:
break
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time))
#for cls_ind, cls in enumerate(CLASSES[1:]):
#vis_detections(im, class_name, dets, thresh=0.5)
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
'''
| 34.631206 | 132 | 0.64571 | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import model.vertical_disc as vc
import cv2
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import math
import tensorflow as tf
from utils.timer import Timer
from utils.cython_nms import nms, nms_new
from utils.boxes_grid import get_boxes_grid
from utils.blob import im_list_to_blob
from nets.vgg16 import vgg16
from model.config import cfg
from model.bbox_transform import clip_boxes, bbox_transform_inv
CLASSES = ('__background__', 'bone', 's')
net = vgg16(batch_size=1)
model = "/Users/anekisei/Documents/tf-faster-rcnn2/output/default/vertical/default/vgg16_faster_rcnn_iter_1500.ckpt"
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth=True
# init session
sess = tf.Session(config=tfconfig)
net.create_architecture(sess, "TEST", len(CLASSES), tag='',
anchor_scales=cfg.ANCHOR_SCALES,
anchor_ratios=cfg.ANCHOR_RATIOS)
saver = tf.train.Saver()
saver.restore(sess, model)
def _get_image_blob(im):
    """Converts an image into a network input.

    Arguments:
        im (ndarray): a color image in BGR order

    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    # Subtract the configured mean pixel value (BGR order).
    im_orig -= cfg.PIXEL_MEANS
    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    processed_ims = []
    im_scale_factors = []
    for target_size in cfg.TEST.SCALES:
        # Scale so the shorter side matches target_size.
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)
    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)
    return blob, np.array(im_scale_factors)
def _get_blobs(im):
    """Convert an image and RoIs within that image into network inputs."""
    # Only the image pyramid is built here; RoIs come from the RPN later.
    blobs = {}
    blobs['data'], im_scale_factors = _get_image_blob(im)
    return blobs, im_scale_factors
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
return boxes
def im_detect(sess, net, im):
    """Run the network on one image and return (scores, pred_boxes).

    scores: (num_rois, num_classes) class scores.
    pred_boxes: (num_rois, 4 * num_classes) boxes in original-image coords.
    """
    blobs, im_scales = _get_blobs(im)
    assert len(im_scales) == 1, "Only single-image batch implemented"
    im_blob = blobs['data']
    # seems to have height, width, and image scales
    # still not sure about the scale, maybe full image it is 1.
    blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
    _, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])
    # Map the RoIs back to the original image scale.
    boxes = rois[:, 1:5] / im_scales[0]
    # print(scores.shape, bbox_pred.shape, rois.shape, boxes.shape)
    scores = np.reshape(scores, [scores.shape[0], -1])
    bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
    if cfg.TEST.BBOX_REG:
        # Apply bounding-box regression deltas
        box_deltas = bbox_pred
        pred_boxes = bbox_transform_inv(boxes, box_deltas)
        pred_boxes = _clip_boxes(pred_boxes, im.shape)
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes = np.tile(boxes, (1, scores.shape[1]))
    return scores, pred_boxes
def apply_nms(all_boxes, thresh):
    """Apply non-maximum suppression to all predicted boxes output by the
    test_net method.

    all_boxes[cls][img] is an (N, 5) array of (x1, y1, x2, y2, score); the
    returned structure has the same layout with degenerate, low-confidence,
    and suppressed boxes removed.
    """
    num_classes = len(all_boxes)
    num_images = len(all_boxes[0])
    nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    for cls_ind in range(num_classes):
        for im_ind in range(num_images):
            dets = all_boxes[cls_ind][im_ind]
            # BUG FIX: `dets == []` is an element-wise comparison on numpy
            # arrays (never a scalar True); test emptiness explicitly.
            if len(dets) == 0:
                continue
            x1 = dets[:, 0]
            y1 = dets[:, 1]
            x2 = dets[:, 2]
            y2 = dets[:, 3]
            scores = dets[:, 4]
            # Keep only non-degenerate boxes above the detection threshold.
            inds = np.where((x2 > x1) & (y2 > y1) & (scores > cfg.TEST.DET_THRESHOLD))[0]
            dets = dets[inds, :]
            if len(dets) == 0:
                continue
            keep = nms(dets, thresh)
            if len(keep) == 0:
                continue
            nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
    return nms_boxes
def distance(point1, point2):
    """Euclidean distance between two (x, y) points."""
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return np.sqrt(dx * dx + dy * dy)
def vis_detections(im, class_name, dets, thresh=0.5):
    """Collect box centers for detections scoring above *thresh*.

    Despite the name, nothing is drawn: returns the (x, y) center of every
    detection in *dets* (rows of x1, y1, x2, y2, score) whose score is at
    least *thresh*.  *im* and *class_name* are unused and kept for interface
    compatibility.  (x runs along image width, y along height.)
    """
    # Removed unused locals `store` and `area` from the original.
    vertical_points = []
    for i in range(dets.shape[0]):
        score = dets[i, -1]
        if score < thresh:
            continue
        x1 = int(dets[i, 0])
        y1 = int(dets[i, 1])
        x2 = int(dets[i, 2])
        y2 = int(dets[i, 3])
        center_x = int((x1 + x2) / 2.0)
        center_y = int((y1 + y2) / 2.0)
        vertical_points.append((center_x, center_y))
    return vertical_points
def contrast(img):
    """Boost local contrast of a BGR image using CLAHE on the LAB L-channel."""
    clahe = cv2.createCLAHE(clipLimit=3., tileGridSize=(4,4))
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB) # convert from BGR to LAB color space
    l, a, b = cv2.split(lab) # split on 3 different channels
    l2 = clahe.apply(l) # apply CLAHE to the L-channel
    lab = cv2.merge((l2,a,b)) # merge channels
    img2 = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR) # convert from LAB to BGR
    return img2
def filter_outliers(vertical_points):
    """Drop points whose x-coordinate jumps >= 50 px from the last kept point.

    The list is scanned bottom-up (reverse order); offending points are
    removed from *vertical_points* in place, and the list is also returned.
    """
    bottom_up = vertical_points[::-1]
    outliers = []
    anchor, probe = 0, 1
    while anchor < len(bottom_up) - 1 and probe < len(bottom_up):
        lower = bottom_up[anchor]
        upper = bottom_up[probe]
        if abs(lower[0] - upper[0]) >= 50:
            # Too far horizontally from the current anchor: mark it and keep probing.
            outliers.append(upper)
            probe += 1
            continue
        anchor = probe
        probe += 1
    for point in outliers:
        vertical_points.remove(point)
    return vertical_points
def Draw_bone(img, vertical_points):
    """Annotate *img* with vertebra labels at each detected center point.

    Points are consumed bottom-up and labelled S, L5..L1, T12, T11; extra
    points beyond the label list are left unlabelled.  *img* is modified in
    place and returned.
    """
    vertical_points = vertical_points[::-1] #down to up
    bones = ['S','L5','L4','L3','L2','L1','T12','T11']
    length = min(len(bones), len(vertical_points))
    for i in range(length):
        classification = bones[i]
        center_x,center_y = vertical_points[i]
        cv2.circle(img, (center_x, center_y), 2, (0, 255, 255), -1)
        # NOTE(review): cv2.CV_AA is the OpenCV 2.x constant; OpenCV 3+
        # renamed it to cv2.LINE_AA -- confirm the installed cv2 version.
        cv2.putText(img, classification , (center_x - 10, center_y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1, cv2.CV_AA)
    return img
def test_vertical(img, thresh=0.05, sess=sess):
    """Detect vertebra centers in *img* and return two annotated images.

    Runs the module-level Faster R-CNN (`net`) on a contrast-enhanced copy,
    applies per-class NMS, sorts centers by y, drops horizontal outliers,
    then draws the spine contour and bone labels on both the original image
    and the contrast-enhanced one.  Returns (annotated_original,
    annotated_contrast).
    """
    # all detections are collected into:
    # all_boxes[cls][image] = N x 5 array of detections in
    # (x1, y1, x2, y2, score)
    # timers
    im = contrast(img)
    _t = {'im_detect' : Timer(), 'misc' : Timer()}
    _t['im_detect'].tic()
    scores, boxes = im_detect(sess, net, im)
    _t['im_detect'].toc()
    _t['misc'].tic()
    # skip j = 0, because it's the background class
    vertical_points = []
    for j, cls in enumerate(CLASSES[1:]):
        j += 1
        inds = np.where(scores[:, j] > thresh)[0]
        cls_scores = scores[inds, j]
        cls_boxes = boxes[inds, j*4:(j+1)*4]
        cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
          .astype(np.float32, copy=False)
        keep = nms(cls_dets, cfg.TEST.NMS)
        cls_dets = cls_dets[keep, :]
        # Only centers of detections scoring above 0.7 are kept.
        vertical_points += vis_detections(im, cls, cls_dets, thresh=0.7)
    # sort verticals
    vertical_points = sorted(vertical_points, key=lambda vertical_points: vertical_points[1], reverse=False)#[:7]
    #res_image, flag, disease_bone = vc.spine_contour(im, vertical_points)
    vertical_points = filter_outliers(vertical_points)
    res_image = vc.spine_contour(img, vertical_points)
    res_image = Draw_bone(res_image,vertical_points)
    res2 = vc.spine_contour(im, vertical_points)#contrast img
    res2 = Draw_bone(res2,vertical_points)
    _t['misc'].toc()
    return res_image, res2#, flag, disease_bone
'''
if cv2.waitKey(1) & 0xff == 27:
break
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time))
#for cls_ind, cls in enumerate(CLASSES[1:]):
#vis_detections(im, class_name, dets, thresh=0.5)
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
'''
| 2,464 | 0 | 114 |
15a8d9d82682eaada8124b0a8aa712cde5d41ac3 | 8,908 | py | Python | Sketchbots/sw/labqueue/support/imaging.py | rlugojr/ChromeWebLab | 60f964b3f283c15704b7a04b7bb50cb15791e2e4 | [
"Apache-2.0"
] | 306 | 2015-01-09T14:03:44.000Z | 2017-09-16T13:03:35.000Z | Sketchbots/sw/labqueue/support/imaging.py | rlugojr/ChromeWebLab | 60f964b3f283c15704b7a04b7bb50cb15791e2e4 | [
"Apache-2.0"
] | 90 | 2019-03-26T05:36:00.000Z | 2021-07-28T05:30:16.000Z | Sketchbots/sw/labqueue/support/imaging.py | rlugojr/ChromeWebLab | 60f964b3f283c15704b7a04b7bb50cb15791e2e4 | [
"Apache-2.0"
] | 119 | 2015-01-26T15:04:33.000Z | 2017-09-13T09:30:53.000Z | # Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenient base class for imaging
"""
from google.appengine.api import images
import logging
def generate_thumbnail(image_data, min_source_height, max_source_height, min_source_width, max_source_width, content_type, width, height, overlay_path, valign, top_crop_pct=None, bottom_crop_pct=None, left_crop_pct=None, right_crop_pct=None, crop_x=None, crop_y=None, post_crop_uniform_scale_pct=None):
    """ Generate a thumbnail and return the image data as a
    binary string. If unable to create the
    thumbnail, will return None.
    :min_source_height:
    If specified, a thumbnail will only be generated if the incoming image
    is at least this high.
    :min_source_width:
    If specified, a thumbnail will only be generated if the incoming image
    is at least this wide.
    :max_source_height:
    If specified, a thumbnail will only be generated if the incoming image
    is less than this many pixels high.
    :max_source_width:
    If specified, a thumbnail will only be generated if the incoming image
    is less than this many pixels wide.
    :image_data:
    Image data, as a bytestring
    :content_type:
    The MIME content type of the image.
    :width:
    Width of the thumbnail
    :height:
    Height of the thumbnail
    :overlay_path:
    Full path to an image file to overlay on top of the image data, or None
    to not use an overlay.
    :valign:
    A string, one of:
    "top"
    "bottom"
    "middle"
    describing how the image should be aligned along the
    Y-axis when cropping.
    :top_crop_pct:
    :bottom_crop_pct:
    Optional. Floats indicating how much from the top and bottom of the
    original image to crop in before rescaling. Numbers between 0 and 1.0 incl.
    :crop_x:
    :crop_y:
    Optional. If specified with width and height, will simply cut out a rectangle
    of the incoming image which is width x height and has its upper-left corner
    pegged to (crop_x, cropy_y).
    NOTE: For crop_x and crop_y to work, the following other options must be None:
    valign, top_crop_pct, bottom_crop_pct
    :post_crop_uniform_scale_pct:
    If not None, will scale image after cropping by the indicated percent. Should be None or a
    float between 0.0 and 1.0
    """
    image = images.Image(image_data)
    # Size gate: refuse sources that fall outside the configured window.
    if min_source_height is not None and image.height < min_source_height:
        return None
    if max_source_height is not None and image.height > max_source_height:
        return None
    if min_source_width is not None and image.width < min_source_width:
        return None
    if max_source_width is not None and image.width > max_source_width:
        return None
    # Keep PNG as PNG (preserves transparency); everything else becomes JPEG.
    if content_type == 'image/png':
        output_encoding = images.PNG
    else:
        output_encoding = images.JPEG
    if crop_x is not None and crop_y is not None and valign is None and top_crop_pct is None and bottom_crop_pct is None and (image.width >= crop_x + width) and (image.height >= crop_y + height):
        # Exact rectangular cut: the GAE crop API takes coordinates as
        # fractions of the source dimensions, so convert from pixels.
        fw = float(image.width)
        fh = float(image.height)
        try:
            output = images.crop(image_data, float(crop_x) / fw, float(crop_y) / fh, float(crop_x + width) / fw, float(crop_y + height) / fh, output_encoding=output_encoding)
        except Exception:
            # Best-effort fallback: keep the original bytes if the crop fails.
            # (Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            output = image_data
    else:
        if width > image.width and height > image.height:
            # Scaling up would degrade quality; hand back the original data.
            output = image_data
        else:
            output = rescale(image, width, height, halign='middle', valign=valign, top_crop_pct=top_crop_pct, bottom_crop_pct=bottom_crop_pct, left_crop_pct=left_crop_pct, right_crop_pct=right_crop_pct)
    if post_crop_uniform_scale_pct is not None:
        output = images.resize(output, width=int(width * post_crop_uniform_scale_pct), output_encoding=output_encoding)
    if overlay_path is not None:
        # Composite the overlay centered on top of the rescaled output.
        # Read as binary and close promptly (was ``open(path, 'r')`` with the
        # handle never closed).
        with open(overlay_path, 'rb') as overlay_file:
            overlay_data = overlay_file.read()
        output = images.composite(
            inputs=[
                (output, 0, 0, 1.0, images.CENTER_CENTER),
                (overlay_data, 0, 0, 1.0, images.CENTER_CENTER),
            ],
            width=width,
            height=height,
            output_encoding=output_encoding
        )
    return output
# # Get a "file name" for a blobstore entity
# blob_file_name = files.blobstore.create(mime_type=content_type)
# # Open the blob and write the contents of the trasnfered file to it
# with files.open(blob_file_name, 'a') as target:
# target.write(output)
# # all done, let blobstore finish up
# files.finalize(blob_file_name)
#
# # retain a reference to the key
# k = files.blobstore.get_blob_key(blob_file_name)
# # logging.info(k)
# return k
def rescale(image, width, height, halign='middle', valign='middle', top_crop_pct=None, bottom_crop_pct=None, left_crop_pct=None, right_crop_pct=None, post_crop_uniform_scale_pct=None):
    """
    From http://stackoverflow.com/questions/1944112/app-engine-cropping-to-a-specific-width-and-height
    Resize then optionally crop a given image.
    Attributes:
    image: The image
    width: The desired width
    height: The desired height
    halign: Acts like photoshop's 'Canvas Size' function, horizontally
    aligning the crop to left, middle or right
    valign: Verticallly aligns the crop to top, middle or bottom
    :post_crop_uniform_scale_pct:
    If not None, will scale image after cropping by the indicated percent. Should be None or a
    float between 0.0 and 1.0
    """
    # NOTE(review): post_crop_uniform_scale_pct is accepted (and documented)
    # but never used in this function body -- confirm intent with the caller.
    #image = images.Image(img_data)
    #logging.info(left_crop_pct)
    # Optional pre-crop: all crop arguments are fractions of the source size.
    if top_crop_pct is not None and bottom_crop_pct is not None:
        if left_crop_pct is not None and right_crop_pct is not None:
            image.crop(left_crop_pct,top_crop_pct,right_crop_pct,bottom_crop_pct)
        else:
            image.crop(0.0,top_crop_pct,1.0,bottom_crop_pct)
    elif left_crop_pct is not None and right_crop_pct is not None:
        image.crop(left_crop_pct,0.0,right_crop_pct,1.0)
    # Compare aspect ratios to decide which axis to fit first.
    desired_wh_ratio = float(width) / float(height)
    wh_ratio = float(image.width) / float(image.height)
    if desired_wh_ratio > wh_ratio:
        # resize to width, then crop to height
        image.resize(width=width)
        output = image.execute_transforms()
        # If the resized image is still smaller than the target box, return it
        # as-is without cropping.
        if image.width < width or image.height < height:
            return output
        # Fraction to trim from each side of the Y axis to reach the target height.
        trim_y = (float(image.height - height) / 2) / image.height
        if valign == 'top':
            image.crop(0.0, 0.0, 1.0, 1 - (2 * trim_y))
        elif valign == 'bottom':
            # logging.info('----------')
            # logging.info(image.height)
            # logging.info(image.width)
            # logging.info(height)
            # logging.info(trim_y)
            image.crop(0.0, (2 * trim_y), 1.0, 1.0)
        else:
            image.crop(0.0, trim_y, 1.0, 1 - trim_y)
    else:
        # resize to height, then crop to width
        image.resize(height=height)
        output = image.execute_transforms()
        if image.width < width or image.height < height:
            return output
        # Fraction to trim from each side of the X axis to reach the target width.
        trim_x = (float(image.width - width) / 2) / image.width
        if halign == 'left':
            image.crop(0.0, 0.0, 1 - (2 * trim_x), 1.0)
        elif halign == 'right':
            image.crop((2 * trim_x), 0.0, 1.0, 1.0)
        else:
            image.crop(trim_x, 0.0, 1 - trim_x, 1.0)
    return image.execute_transforms()
| 38.396552 | 302 | 0.661652 | # Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenient base class for imaging
"""
from google.appengine.api import images
import logging
def generate_thumbnail(image_data, min_source_height, max_source_height, min_source_width, max_source_width, content_type, width, height, overlay_path, valign, top_crop_pct=None, bottom_crop_pct=None, left_crop_pct=None, right_crop_pct=None, crop_x=None, crop_y=None, post_crop_uniform_scale_pct=None):
    """ Generate a thumbnail and return the image data as a
    binary string. If unable to create the
    thumbnail, will return None.
    :min_source_height:
    If specified, a thumbnail will only be generated if the incoming image
    is at least this high.
    :min_source_width:
    If specified, a thumbnail will only be generated if the incoming image
    is at least this wide.
    :max_source_height:
    If specified, a thumbnail will only be generated if the incoming image
    is less than this many pixels high.
    :max_source_width:
    If specified, a thumbnail will only be generated if the incoming image
    is less than this many pixels wide.
    :image_data:
    Image data, as a bytestring
    :content_type:
    The MIME content type of the image.
    :width:
    Width of the thumbnail
    :height:
    Height of the thumbnail
    :overlay_path:
    Full path to an image file to overlay on top of the image data, or None
    to not use an overlay.
    :valign:
    A string, one of:
    "top"
    "bottom"
    "middle"
    describing how the image should be aligned along the
    Y-axis when cropping.
    :top_crop_pct:
    :bottom_crop_pct:
    Optional. Floats indicating how much from the top and bottom of the
    original image to crop in before rescaling. Numbers between 0 and 1.0 incl.
    :crop_x:
    :crop_y:
    Optional. If specified with width and height, will simply cut out a rectangle
    of the incoming image which is width x height and has its upper-left corner
    pegged to (crop_x, cropy_y).
    NOTE: For crop_x and crop_y to work, the following other options must be None:
    valign, top_crop_pct, bottom_crop_pct
    :post_crop_uniform_scale_pct:
    If not None, will scale image after cropping by the indicated percent. Should be None or a
    float between 0.0 and 1.0
    """
    # figure out the width/height of the image from the datastore
    # img = images.Image(image_data=image_data)
    # img.crop(left_x=0.25, top_y=0.25, right_x=0.25, bottom_y=0.25)
    # img.resize(width=width, height=height)
    # logging.info('(b) w=%i, h=%i' % (img.width, img.height))
    # output = img.execute_transforms(output_encoding=img.format)
    image = images.Image(image_data)
    # Size gate: refuse sources outside the configured min/max window.
    if min_source_height is not None and image.height < min_source_height:
        return None
    if max_source_height is not None and image.height > max_source_height:
        return None
    if min_source_width is not None and image.width < min_source_width:
        return None
    if max_source_width is not None and image.width > max_source_width:
        return None
    # Keep PNG as PNG (preserves transparency); everything else becomes JPEG.
    if content_type == 'image/png':
        output_encoding = images.PNG
    else:
        output_encoding = images.JPEG
    if crop_x is not None and crop_y is not None and valign is None and top_crop_pct is None and bottom_crop_pct is None and (image.width >= crop_x + width) and (image.height >= crop_y + height):
        # Exact rectangular cut: the crop API takes fractional coordinates.
        fw = float(image.width)
        fh = float(image.height)
        try:
            output = images.crop(image_data, float(crop_x) / fw, float(crop_y) / fh, float(crop_x + width) / fw, float(crop_y + height) / fh, output_encoding=output_encoding)
        # NOTE(review): bare except silently falls back to the original bytes
        # and also catches SystemExit/KeyboardInterrupt -- consider narrowing.
        except:
            output = image_data
    else:
        if width > image.width and height > image.height:
            # Target box is larger than the source; never scale up.
            output = image_data
            # # this would result in scaling the image UP, that's no good
            # if image.width > image.height:
            #     width = image.width
            # else:
            #     height = image.height
            #
            # output = images.resize(image_data, width, height, output_encoding)
        else:
            output = rescale(image, width, height, halign='middle', valign=valign, top_crop_pct=top_crop_pct, bottom_crop_pct=bottom_crop_pct, left_crop_pct=left_crop_pct, right_crop_pct=right_crop_pct)
    if post_crop_uniform_scale_pct is not None:
        output = images.resize(output, width=int(width * post_crop_uniform_scale_pct), output_encoding=output_encoding)
    if overlay_path is not None:
        # read the overlay into memory
        # NOTE(review): opened in text mode and never closed -- 'rb' inside a
        # context manager would be safer for binary image data.
        overlay_data = open(overlay_path,'r').read()
        # composite the overlay onto the rescaled output
        # NOTE(review): output_encoding is recomputed here with the same value
        # as above; the recomputation is redundant.
        if content_type == 'image/png':
            output_encoding = images.PNG
        else:
            output_encoding = images.JPEG
        output = images.composite(
            inputs=[
                (output,0,0,1.0,images.CENTER_CENTER),
                (overlay_data,0,0,1.0,images.CENTER_CENTER),
            ],
            width=width,
            height=height,
            output_encoding=output_encoding
        )
    return output
# # Get a "file name" for a blobstore entity
# blob_file_name = files.blobstore.create(mime_type=content_type)
# # Open the blob and write the contents of the trasnfered file to it
# with files.open(blob_file_name, 'a') as target:
# target.write(output)
# # all done, let blobstore finish up
# files.finalize(blob_file_name)
#
# # retain a reference to the key
# k = files.blobstore.get_blob_key(blob_file_name)
# # logging.info(k)
# return k
def rescale(image, width, height, halign='middle', valign='middle', top_crop_pct=None, bottom_crop_pct=None, left_crop_pct=None, right_crop_pct=None, post_crop_uniform_scale_pct=None):
    """
    From http://stackoverflow.com/questions/1944112/app-engine-cropping-to-a-specific-width-and-height
    Resize then optionally crop a given image.
    Attributes:
    image: The image
    width: The desired width
    height: The desired height
    halign: Acts like photoshop's 'Canvas Size' function, horizontally
    aligning the crop to left, middle or right
    valign: Verticallly aligns the crop to top, middle or bottom
    :post_crop_uniform_scale_pct:
    If not None, will scale image after cropping by the indicated percent. Should be None or a
    float between 0.0 and 1.0
    """
    # NOTE(review): post_crop_uniform_scale_pct is accepted (and documented)
    # but never used in this function body -- confirm intent with the caller.
    #image = images.Image(img_data)
    #logging.info(left_crop_pct)
    # Optional pre-crop: all crop arguments are fractions of the source size.
    if top_crop_pct is not None and bottom_crop_pct is not None:
        if left_crop_pct is not None and right_crop_pct is not None:
            image.crop(left_crop_pct,top_crop_pct,right_crop_pct,bottom_crop_pct)
        else:
            image.crop(0.0,top_crop_pct,1.0,bottom_crop_pct)
    elif left_crop_pct is not None and right_crop_pct is not None:
        image.crop(left_crop_pct,0.0,right_crop_pct,1.0)
    # Compare aspect ratios to decide which axis to fit first.
    desired_wh_ratio = float(width) / float(height)
    wh_ratio = float(image.width) / float(image.height)
    if desired_wh_ratio > wh_ratio:
        # resize to width, then crop to height
        image.resize(width=width)
        output = image.execute_transforms()
        # If still smaller than the target box, return the resized data uncropped.
        if image.width < width or image.height < height:
            return output
        # Fraction to trim from each side of the Y axis to reach the target height.
        trim_y = (float(image.height - height) / 2) / image.height
        if valign == 'top':
            image.crop(0.0, 0.0, 1.0, 1 - (2 * trim_y))
        elif valign == 'bottom':
            # logging.info('----------')
            # logging.info(image.height)
            # logging.info(image.width)
            # logging.info(height)
            # logging.info(trim_y)
            image.crop(0.0, (2 * trim_y), 1.0, 1.0)
        else:
            image.crop(0.0, trim_y, 1.0, 1 - trim_y)
    else:
        # resize to height, then crop to width
        image.resize(height=height)
        output = image.execute_transforms()
        if image.width < width or image.height < height:
            return output
        # Fraction to trim from each side of the X axis to reach the target width.
        trim_x = (float(image.width - width) / 2) / image.width
        if halign == 'left':
            image.crop(0.0, 0.0, 1 - (2 * trim_x), 1.0)
        elif halign == 'right':
            image.crop((2 * trim_x), 0.0, 1.0, 1.0)
        else:
            image.crop(trim_x, 0.0, 1 - trim_x, 1.0)
    return image.execute_transforms()
| 0 | 0 | 0 |
6a8bec8f64b142433ee0c87fe869fc80aad24942 | 14,784 | py | Python | apps/cifar10/cifar10_train.py | AwakerMhy/moment_neural_network | 0889f9c0ca045605ff1e88035a7bb4698d9a9d1c | [
"MIT"
] | 1 | 2021-03-02T13:46:14.000Z | 2021-03-02T13:46:14.000Z | apps/cifar10/cifar10_train.py | AwakerMhy/moment_neural_network | 0889f9c0ca045605ff1e88035a7bb4698d9a9d1c | [
"MIT"
] | null | null | null | apps/cifar10/cifar10_train.py | AwakerMhy/moment_neural_network | 0889f9c0ca045605ff1e88035a7bb4698d9a9d1c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
from Mnn_Core.mnn_pytorch import *
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter('runs/cifar10_experiment_1')
# NOTE(review): this cell is a filtered/truncated copy -- the class body and
# the Train_Cifar10_Model definition it uses appear only in the full copy.
class Mnn_Classic(torch.nn.Module):
    """Some Information about Net"""
if __name__ == "__main__":
    # Script entry point: evaluate previously trained/saved models.
    test = Train_Cifar10_Model()
    test.test_model()
| 36.684864 | 118 | 0.547551 | # -*- coding: utf-8 -*-
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
from Mnn_Core.mnn_pytorch import *
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter('runs/cifar10_experiment_1')
class Mnn_Classic(torch.nn.Module):
    """Convolutional classifier whose head is built from moment layers.

    A conventional conv/ReLU/max-pool stack extracts features, after which
    each activation is treated as a (mean, std) pair and propagated through
    Mnn_* layers down to 10 class outputs.
    """
    def __init__(self):
        super(Mnn_Classic, self).__init__()
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(3, 16, 3, padding=1),   # 3*32*32 -> 16*32*32
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2, 2)                # 16*32*32 -> 16*16*16
        )
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(16, 32, 3, padding=1),  # 16*16*16 -> 32*16*16
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2, 2)                # 32*16*16 -> 32*8*8
        )
        self.conv3 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, 3, padding=1),  # 32*8*8 -> 64*8*8
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2, 2)                # 64*8*8 -> 64*4*4
        )
        # Moment-network head: assumes Mnn_* layers map (mean, std) -> (mean, std).
        self.layer1 = Mnn_Layer_without_Rho(64 * 4 * 4, 64*4)
        self.layer2 = Mnn_Layer_without_Rho(64*4, 10)
        self.layer3 = Mnn_Linear_without_Corr(10, 10, bias=True)
        # Affine terms applied when synthesising the initial std channel.
        self.add_noise = 0.0
        self.mul_noise = 1.0
    def forward(self, x):
        # Feature extraction, then flatten to a vector per sample.
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = x.view(-1, 64 * 4 * 4)
        # Initial std channel derived from the features: sqrt(|x|)*mul + add.
        y = torch.sqrt(torch.abs(x.clone()))*self.mul_noise + self.add_noise
        x, y = self.layer1(x, y)
        x, y = self.layer2(x, y)
        x, y = self.layer3(x, y)
        # Returns the (mean, std) pair; callers use index [0] as the logits.
        return x, y
class Mnn_FC(torch.nn.Module):
    """Fully-connected moment network: flattened 3*32*32 input -> 10 outputs."""
    def __init__(self):
        super(Mnn_FC, self).__init__()
        # Hidden widths shrink from one image plane (32*32) down to 100 units.
        self.layer1 = Mnn_Layer_without_Rho(3 * 32 * 32, 32 * 32)
        self.layer2 = Mnn_Layer_without_Rho(32 * 32, 500)
        self.layer3 = Mnn_Layer_without_Rho(500, 500)
        self.layer4 = Mnn_Layer_without_Rho(500, 100)
        self.output = Mnn_Linear_without_Corr(100, 10, bias=True)
    def forward(self, ubar, sbar):
        # Propagate the (mean, std) pair through every stage in order.
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4, self.output):
            ubar, sbar = stage(ubar, sbar)
        return ubar, sbar
class Tradition_Net(torch.nn.Module):
    """Conventional VGG-flavoured CNN baseline for 10-class 32x32 images."""
    def __init__(self):
        super(Tradition_Net, self).__init__()
        # Stage 1: 3 -> 64 channels.
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv2 = nn.Conv2d(64, 64, 3, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU()
        # Stage 2: 64 -> 128 channels (padded pooling).
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv4 = nn.Conv2d(128, 128, 3, padding=1)
        self.pool2 = nn.MaxPool2d(2, 2, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.relu2 = nn.ReLU()
        # Stage 3: three convs at 128 channels (last one is 1x1).
        self.conv5 = nn.Conv2d(128, 128, 3, padding=1)
        self.conv6 = nn.Conv2d(128, 128, 3, padding=1)
        self.conv7 = nn.Conv2d(128, 128, 1, padding=1)
        self.pool3 = nn.MaxPool2d(2, 2, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ReLU()
        # Stage 4: 128 -> 256 channels.
        self.conv8 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv9 = nn.Conv2d(256, 256, 3, padding=1)
        self.conv10 = nn.Conv2d(256, 256, 1, padding=1)
        self.pool4 = nn.MaxPool2d(2, 2, padding=1)
        self.bn4 = nn.BatchNorm2d(256)
        self.relu4 = nn.ReLU()
        # Stage 5: 256 -> 512 channels.
        self.conv11 = nn.Conv2d(256, 512, 3, padding=1)
        self.conv12 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv13 = nn.Conv2d(512, 512, 1, padding=1)
        self.pool5 = nn.MaxPool2d(2, 2, padding=1)
        self.bn5 = nn.BatchNorm2d(512)
        self.relu5 = nn.ReLU()
        # Classifier head with dropout between the fully-connected layers.
        self.fc14 = nn.Linear(512 * 4 * 4, 1024)
        self.drop1 = nn.Dropout2d()
        self.fc15 = nn.Linear(1024, 1024)
        self.drop2 = nn.Dropout2d()
        self.fc16 = nn.Linear(1024, 10)
    def forward(self, x):
        # Each stage: conv(s) -> pool -> batch-norm -> ReLU, same order as before.
        x = self.relu1(self.bn1(self.pool1(self.conv2(self.conv1(x)))))
        x = self.relu2(self.bn2(self.pool2(self.conv4(self.conv3(x)))))
        x = self.relu3(self.bn3(self.pool3(self.conv7(self.conv6(self.conv5(x))))))
        x = self.relu4(self.bn4(self.pool4(self.conv10(self.conv9(self.conv8(x))))))
        x = self.relu5(self.bn5(self.pool5(self.conv13(self.conv12(self.conv11(x))))))
        # The padded pools leave a 4x4 spatial map at 512 channels.
        x = x.view(-1, 512 * 4 * 4)
        x = self.drop1(F.relu(self.fc14(x)))
        x = self.drop2(F.relu(self.fc15(x)))
        return self.fc16(x)
class Mnn_Auto_Encoder(nn.Module):
    """Autoencoder over moment layers; reconstructs flattened 3x32x32 images."""
    def __init__(self):
        super(Mnn_Auto_Encoder, self).__init__()
        self.encoder_layer1 = Mnn_Layer_without_Rho(3 * 32 * 32, 32 * 32)
        self.encoder_layer2 = Mnn_Layer_without_Rho(32 * 32, 500)
        self.encoder_layer3 = Mnn_Layer_without_Rho(500, 100)
        self.decoder_layer1 = Mnn_Layer_without_Rho(100, 500)
        self.decoder_layer2 = Mnn_Layer_without_Rho(500, 32 * 32)
        # Final stage is an ordinary linear map squashed into [0, 1] pixel range.
        self.decoder_layer3 = nn.Sequential(nn.Linear(32 * 32, 3 * 32 * 32),
                                            nn.Sigmoid())
    def encoder(self, ubar, sbar):
        """Compress the (mean, std) pair down to a 100-dimensional code."""
        for stage in (self.encoder_layer1, self.encoder_layer2, self.encoder_layer3):
            ubar, sbar = stage(ubar, sbar)
        return ubar, sbar
    def decoder(self, ubar, sbar):
        """Expand the code back to a flat image; only the mean feeds the head."""
        ubar, sbar = self.decoder_layer1(ubar, sbar)
        ubar, sbar = self.decoder_layer2(ubar, sbar)
        return self.decoder_layer3(ubar)
    def forward(self, ubar, sbar):
        n_samples = ubar.size()[0]
        code_mean, code_std = self.encoder(ubar, sbar)
        flat = self.decoder(code_mean, code_std)
        return flat.view(n_samples, 3, 32, 32)
class Train_Cifar10_Model:
    """Driver for training and evaluating the CIFAR-10 models defined above."""
    def __init__(self):
        # Training hyper-parameters and dataset location.
        self.BATCH = 512
        # NOTE(review): hard-coded Windows path; '\D' and '\C' happen to be
        # valid escapes here, but a raw string (r'...') would be safer.
        self.data_path = 'D:\Data_repos\Cifar10'
        self.EPOCHS = 50
        # NOTE(review): 0.1 is unusually high for Adam -- confirm intentional.
        self.LR = 0.1
        self.classes = ('plane', 'car', 'bird', 'cat',
                        'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    def train_mnn_classic(self, model, criterion, optimizer, trainloader, model_name, epochs=10, log_interval=50):
        """Train Mnn_Classic-style models (image tensor in, (mean, std) out)."""
        print('----- Train Start -----')
        count = 0
        for epoch in range(epochs):
            running_loss = 0.0
            for step, (batch_x, batch_y) in enumerate(trainloader):
                batch_x = batch_x.type(torch.float64)
                # Index [0] selects the mean channel as the logits.
                output = model(batch_x)[0]
                optimizer.zero_grad()
                loss = criterion(output, batch_y)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                if step % log_interval == (log_interval - 1):
                    print('[%d, %5d] loss: %.4f' %
                          (epoch + 1, step + 1, running_loss / log_interval))
                    writer.add_scalar("loss/"+model_name, running_loss / log_interval, count)
                    running_loss = 0.0
                    # Accuracy is computed on the current batch only.
                    correct = 0
                    total = 0
                    _, predict = torch.max(output.data, 1)
                    total += batch_y.size(0)
                    correct += (predict == batch_y).sum().item()
                    writer.add_scalar("accuracy/"+model_name, 100.0 * correct / total, count)
                    count += 1
                    print('Accuracy of the network on the %d tran images: %.3f %%' % (total, 100.0 * correct / total))
        print('----- Train Finished -----')
    def train_mnn_fc(self, model, criterion, optimizer, trainloader, model_name, epochs=10, log_interval=50):
        """Train Mnn_FC-style models: flattens images and feeds (mean, std)."""
        print('----- Train Start -----')
        count = 0
        for epoch in range(epochs):
            running_loss = 0.0
            for step, (batch_x, batch_y) in enumerate(trainloader):
                batch_x = batch_x.type(torch.float64)
                batch_x = batch_x.view(-1, 3*32*32)
                # std channel is initialised as sqrt(|x|), matching the models.
                output = model(batch_x, torch.sqrt(torch.abs(batch_x.clone())))[0]
                optimizer.zero_grad()
                loss = criterion(output, batch_y)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                if step % log_interval == (log_interval - 1):
                    print('[%d, %5d] loss: %.4f' %
                          (epoch + 1, step + 1, running_loss / log_interval))
                    writer.add_scalar("loss/"+model_name, running_loss / log_interval, count)
                    running_loss = 0.0
                    correct = 0
                    total = 0
                    _, predict = torch.max(output.data, 1)
                    total += batch_y.size(0)
                    correct += (predict == batch_y).sum().item()
                    writer.add_scalar("accuracy/"+model_name, 100.0 * correct / total, count)
                    count += 1
                    print('Accuracy of the network on the %d tran images: %.3f %%' % (total, 100.0 * correct / total))
        print('----- Train Finished -----')
    def train_mnn_ae(self, model, criterion, optimizer, trainloader, model_name, epochs=10, log_interval=50):
        """Train the autoencoder; the reconstruction target is the input batch."""
        print('----- Train Start -----')
        count = 0
        for epoch in range(epochs):
            running_loss = 0.0
            for step, (batch_x, batch_y) in enumerate(trainloader):
                batch_x = batch_x.type(torch.float64)
                flatten_x = batch_x.view(-1, 3*32*32)
                output = model(flatten_x, torch.sqrt(torch.abs(flatten_x.clone())))
                optimizer.zero_grad()
                loss = criterion(output, batch_x)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                if step % log_interval == (log_interval - 1):
                    print('[%d, %5d] loss: %.4f' %
                          (epoch + 1, step + 1, running_loss / log_interval))
                    writer.add_scalar("loss/" + model_name, running_loss / log_interval, count)
                    count += 1
                    running_loss = 0.0
        print('----- Train Finished -----')
    def select_n(self, data, labels, start=10):
        '''
        Selects n random datapoints and their corresponding labels from a dataset
        '''
        # NOTE(review): despite the docstring, this is a deterministic slice of
        # 10 consecutive items starting at `start`, not a random selection.
        assert len(data) == len(labels)
        return data[start: start + 10], labels[start: start+10]
    def test_classic(self, model, testloader, model_name="Mnn_classic"):
        """Evaluate an image-tensor model; returns accuracy as a percentage."""
        print('------ Test {} Start -----'.format(model_name))
        correct = 0
        total = 0
        with torch.no_grad():
            for test_x, test_y in testloader:
                images, labels = test_x, test_y
                images = images.type(torch.float64)
                output = model(images)[0]
                _, predicted = torch.max(output.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        accuracy = 100 * correct / total
        print("total samples: {:}, num of correct: {:}".format(total, correct))
        print('Accuracy of the network is: %.4f %%' % accuracy)
        return accuracy
    def test_fc(self, model, testloader, model_name="Mnn_Fc"):
        """Evaluate a flattened-input moment model; returns accuracy in percent."""
        print('------ Test {:} Start -----'.format(model_name))
        correct = 0
        total = 0
        with torch.no_grad():
            for test_x, test_y in testloader:
                images, labels = test_x, test_y
                images = images.type(torch.float64)
                images = images.view(-1, 3*32*32)
                sbar = torch.sqrt(torch.abs(images.clone()))
                output = model(images, sbar)[0]
                _, predicted = torch.max(output.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        accuracy = 100 * correct / total
        print("total samples: {:}, num of correct: {:}".format(total, correct))
        print('Accuracy of the network is: %.4f %%' % accuracy)
        return accuracy
    def train_model(self):
        """Train all three model variants in sequence and save each to disk."""
        EPOCHS = self.EPOCHS
        BATCH_SIZE = self.BATCH
        data_path = self.data_path
        LR = self.LR
        transform_train = transforms.Compose([
            transforms.ToTensor()
        ])
        trainset = torchvision.datasets.CIFAR10(
            root=data_path,
            train=True,
            download=False,
            transform=transform_train
        )
        trainloader = torch.utils.data.DataLoader(
            trainset, batch_size=BATCH_SIZE, shuffle=True)
        net = Mnn_Classic()
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(net.parameters(), lr=LR)
        self.train_mnn_classic(net, criterion, optimizer, trainloader, epochs=EPOCHS, model_name="Mnn_Classic")
        # NOTE(review): torch.save of the whole module pickles the class; a
        # state_dict save would be more portable.
        torch.save(net, "mnn_classic.pt")
        net = Mnn_FC()
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(net.parameters(), lr=LR)
        self.train_mnn_fc(net, criterion, optimizer, trainloader, epochs=EPOCHS, model_name="Mnn_FC")
        torch.save(net, "mnn_fc.pt")
        net = Mnn_Auto_Encoder()
        criterion = torch.nn.BCELoss()
        optimizer = torch.optim.Adam(net.parameters(), lr=LR)
        self.train_mnn_ae(net, criterion, optimizer, trainloader, epochs=EPOCHS, model_name="Mnn_AE")
        torch.save(net, "mnn_ae.pt")
    def test_model(self):
        """Load the saved models and report accuracy on train and test splits."""
        BATCH_SIZE = self.BATCH
        data_path = self.data_path
        transform = transforms.Compose([
            transforms.ToTensor()
        ])
        testset = torchvision.datasets.CIFAR10(
            root=data_path,
            train=False,
            download=False,
            transform=transform
        )
        trainset = torchvision.datasets.CIFAR10(
            root=data_path,
            train=True,
            download=False,
            transform=transform
        )
        trainloader = torch.utils.data.DataLoader(
            trainset, batch_size=BATCH_SIZE, shuffle=True)
        testloader = torch.utils.data.DataLoader(
            testset, batch_size=BATCH_SIZE, shuffle=True)
        # Class labels
        net = torch.load("mnn_classic.pt")
        net.eval()
        self.test_classic(net, trainloader)
        self.test_classic(net, testloader)
        net = torch.load("mnn_fc.pt")
        net.eval()
        self.test_fc(net, trainloader)
        self.test_fc(net, testloader)
if __name__ == "__main__":
    # Script entry point: evaluate the previously trained and saved models.
    test = Train_Cifar10_Model()
    test.test_model()
| 13,505 | 517 | 359 |
c471b84933da4ef5831111a3071096d0a46c2389 | 2,030 | py | Python | userbot/utils/git_api.py | HitaloSama/PaperplaneMinimal | 5cf45ca4ae90ad4a52ee6d6dc679053a69fbed32 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 9 | 2020-06-11T18:47:48.000Z | 2021-11-08T18:05:37.000Z | userbot/utils/git_api.py | HitaloSama/PaperplaneMinimal | 5cf45ca4ae90ad4a52ee6d6dc679053a69fbed32 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3 | 2020-08-28T18:37:46.000Z | 2020-09-25T15:32:29.000Z | userbot/utils/git_api.py | HitaloSama/PaperplaneMinimal | 5cf45ca4ae90ad4a52ee6d6dc679053a69fbed32 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 8 | 2020-06-14T02:08:41.000Z | 2020-12-15T13:25:15.000Z | # Copyright 2020 nunopenim @github
#
# Licensed under the PEL (Penim Enterprises License), v1.0
#
# You may not use this file or any of the content within it, unless in
# compliance with the PE License
"""
used to get github api information
for github module
"""
import json
import urllib.request as url
VERSION = "1.1.0"
APIURL = "http://api.github.com/repos/"
# Repo-wise stuff
# Release-wise stuff
# Asset-wise stuff
| 18.454545 | 70 | 0.679803 | # Copyright 2020 nunopenim @github
#
# Licensed under the PEL (Penim Enterprises License), v1.0
#
# You may not use this file or any of the content within it, unless in
# compliance with the PE License
"""
used to get github api information
for github module
"""
import json
import urllib.request as url
VERSION = "1.1.0"
APIURL = "http://api.github.com/repos/"
def vercheck() -> str:
    """Return this module's version as a string."""
    return "{}".format(VERSION)
# Repo-wise stuff
def getData(repoURL):
    """Fetch the GitHub releases list for ``repoURL`` (``owner/repo``).

    Returns the decoded JSON (a list of release dicts) or None on any
    network or decoding failure.
    """
    try:
        with url.urlopen(APIURL + repoURL + "/releases") as data_raw:
            repoData = json.loads(data_raw.read().decode())
        return repoData
    except (OSError, ValueError):
        # urllib's URLError/HTTPError subclass OSError and
        # json.JSONDecodeError subclasses ValueError, so this replaces the
        # previous bare ``except:`` without swallowing SystemExit etc.
        return None
def getReleaseData(repoData, index):
    """Return the release record at ``index``, or None when out of range."""
    return repoData[index] if index < len(repoData) else None
# Release-wise stuff
def getAuthor(releaseData):
    """Login name of the release author, or None."""
    return None if releaseData is None else releaseData["author"]["login"]
def getAuthorUrl(releaseData):
    """Profile URL of the release author, or None."""
    return None if releaseData is None else releaseData["author"]["html_url"]
def getReleaseName(releaseData):
    """Human-readable release title, or None."""
    return None if releaseData is None else releaseData["name"]
def getReleaseTag(releaseData):
    """Git tag the release points at, or None."""
    return None if releaseData is None else releaseData["tag_name"]
def getReleaseDate(releaseData):
    """Publication timestamp string, or None."""
    return None if releaseData is None else releaseData["published_at"]
def getAssetsSize(releaseData):
    """Number of downloadable assets, or None."""
    return None if releaseData is None else len(releaseData["assets"])
def getAssets(releaseData):
    """List of asset records, or None."""
    return None if releaseData is None else releaseData["assets"]
def getBody(releaseData):
    """Release body text (changelog), or None."""
    return None if releaseData is None else releaseData["body"]
# Asset-wise stuff
def getReleaseFileName(asset):
    """File name of a single asset record."""
    return asset["name"]
def getReleaseFileURL(asset):
    """Direct download URL of a single asset record."""
    return asset["browser_download_url"]
def getDownloadCount(asset):
    """How many times this asset has been downloaded."""
    return asset["download_count"]
def getSize(asset):
    """Size of the asset in bytes."""
    return asset["size"]
| 1,242 | 0 | 345 |
a6a257af762cb06d3666ac0d9f3c574a0a39aa80 | 6,423 | py | Python | hillfit/fitting.py | himoto/hillfit | 3648f39116ebf16c831596221bd19a2ffa4f8482 | [
"MIT"
] | null | null | null | hillfit/fitting.py | himoto/hillfit | 3648f39116ebf16c831596221bd19a2ffa4f8482 | [
"MIT"
] | 14 | 2021-12-01T06:35:07.000Z | 2022-03-31T05:51:38.000Z | hillfit/fitting.py | himoto/hillfit | 3648f39116ebf16c831596221bd19a2ffa4f8482 | [
"MIT"
] | 1 | 2022-01-15T02:56:12.000Z | 2022-01-15T02:56:12.000Z | import os
import re
from typing import List, Optional, Union
import numpy as np
from matplotlib import pyplot as plt
from pandas import DataFrame
from scipy.optimize import curve_fit
from sigfig import round
from sklearn.metrics import r2_score
| 33.984127 | 242 | 0.536665 | import os
import re
from typing import List, Optional, Union
import numpy as np
from matplotlib import pyplot as plt
from pandas import DataFrame
from scipy.optimize import curve_fit
from sigfig import round
from sklearn.metrics import r2_score
class HillFit(object):
    """Fit the four-parameter Hill equation to (x, y) dose-response data.

    Fitted parameters are exposed as attributes after ``fitting()``:
    ``top``, ``bottom``, ``ec50``, ``nH``, plus ``r_2``, ``x_fit``/``y_fit``
    and a printable ``equation`` string.
    """
    def __init__(
        self,
        x_data: Union[List[float], np.ndarray],
        y_data: Union[List[float], np.ndarray],
        *,
        bottom_param: bool = True,
    ) -> None:
        self.x_data = np.array(x_data)
        self.y_data = np.array(y_data)
        # When False, the bottom asymptote is pinned to 0.
        self.bottom_param = bottom_param
        if self.x_data[0] > self.x_data[-1]:
            # Fix: this message was previously a plain string, so the literal
            # text "{self.x_data[0]}" was printed instead of the values.
            raise ValueError(
                f"The first point {self.x_data[0]} and the last point {self.x_data[-1]} are not amenable with the scipy.curvefit function of HillFit."
            )
    def _equation(self, x: np.ndarray, *params) -> np.ndarray:
        """Hill equation; also records the current parameters on ``self``."""
        self.top = params[0]
        self.bottom = params[1] if self.bottom_param else 0
        self.ec50 = params[2]
        self.nH = params[3]
        return self.bottom + (self.top - self.bottom) * x**self.nH / (
            self.ec50**self.nH + x**self.nH
        )
    def _get_param(self) -> List[float]:
        """Run curve_fit with data-driven initial guesses and bounds."""
        min_data = np.amin(self.y_data)
        max_data = np.amax(self.y_data)
        h = abs(max_data - min_data)
        param_initial = [max_data, min_data, 0.5 * (self.x_data[-1] - self.x_data[0]), 1]
        param_bounds = (
            [max_data - 0.5 * h, min_data - 0.5 * h, self.x_data[0] * 0.1, 0.01],
            [max_data + 0.5 * h, min_data + 0.5 * h, self.x_data[-1] * 10, 100],
        )
        popt, _ = curve_fit(
            self._equation,
            self.x_data,
            self.y_data,
            p0=param_initial,
            bounds=param_bounds,
        )
        if not self.bottom_param:
            popt[1] = 0
        return [float(param) for param in popt]
    def regression(
        self,
        x_fit,
        y_fit,
        x_label,
        y_label,
        title,
        sigfigs,
        log_x,
        print_r_sqr,
        view_figure,
        *params,
    ) -> None:
        """Compute R^2 for the fit and (optionally) draw the regression plot."""
        corrected_y_data = self._equation(self.x_data, *params)
        self.r_2 = r2_score(self.y_data, corrected_y_data)
        # define the regression plot
        if self.generate_figure:
            plt.rcParams["figure.figsize"] = (11, 7)
            plt.rcParams["figure.dpi"] = 150
            self.figure, self.ax = plt.subplots()
            self.ax.plot(x_fit, y_fit, label="Hill fit")
            self.ax.scatter(self.x_data, self.y_data, label="raw_data")
            if log_x:
                self.ax.set_xscale("log")
            self.ax.set_xlabel(x_label)
            self.ax.set_ylabel(y_label)
            self.ax.set_title(title)
            self.ax.legend(loc="lower right")
            if print_r_sqr:
                # define the coordinates location of the printed R^2 on the figure
                y_coordinate = 0.7 * y_fit[-1]
                if y_coordinate < y_fit[0]:
                    y_coordinate = 1 * y_fit[0]
                x_coordinate = 0.8 * x_fit[-1]
                if x_coordinate < x_fit[0]:
                    x_coordinate = 2 * x_fit[0]
                self.ax.text(
                    x_coordinate,
                    y_coordinate,
                    "R\N{superscript two}: " + f"{round(self.r_2, sigfigs)}",
                )
            if view_figure:
                self.figure.show()
    def fitting(
        self,
        x_label: str = "x",
        y_label: str = "y",
        title: str = "Fitted Hill equation",
        sigfigs: int = 6,
        log_x: bool = False,
        print_r_sqr: bool = True,
        generate_figure: bool = True,
        view_figure: bool = True,
    ):
        """Fit the data, build the readable equation string and the figure."""
        self.generate_figure = generate_figure
        # Evaluate the fitted curve on a log-spaced grid over the data range.
        self.x_fit = np.logspace(
            np.log10(self.x_data[0]), np.log10(self.x_data[-1]), len(self.y_data)
        )
        params = self._get_param()
        self.y_fit = self._equation(self.x_fit, *params)
        self.equation = f"{round(self.bottom, sigfigs)} + ({round(self.top, sigfigs)}-{round(self.bottom, sigfigs)})*x**{(round(self.nH, sigfigs))} / ({round(self.ec50, sigfigs)}**{(round(self.nH, sigfigs))} + x**{(round(self.nH, sigfigs))})"
        self.regression(
            self.x_fit,
            self.y_fit,
            x_label,
            y_label,
            title,
            sigfigs,
            log_x,
            print_r_sqr,
            view_figure,
            *params,
        )
    def export(
        self, export_directory: Optional[str] = None, export_name: Optional[str] = None
    ) -> None:
        """Write the figure, raw/fitted data and the equation to a new folder."""
        # define the unique export path
        if export_directory is None:
            export_directory = os.getcwd()
        if export_name is None:
            export_name = "-".join([re.sub(" ", "_", str(x)) for x in ["Hillfit", "reg"]])
        count = 0
        export_path = os.path.join(export_directory, export_name)
        # Append/bump a numeric suffix until the directory name is unused.
        while os.path.exists(export_path):
            count += 1
            export_name = re.sub("([0-9]+)$", str(count), export_name)
            if not re.search("(-[0-9]+$)", export_name):
                export_name += f"-{count}"
            export_path = os.path.join(export_directory, export_name)
        os.mkdir(export_path)
        # export the figure
        if self.generate_figure:
            self.figure.savefig(os.path.join(export_path, "regression.svg"))
        # export the raw data
        df = DataFrame(index=range(len(self.x_data)))
        df["x"] = self.x_data
        df["y"] = self.y_data
        df.to_csv(os.path.join(export_path, "raw_data.csv"))
        # export the fitted data
        df2 = DataFrame(index=range(len(self.x_fit)))
        df2["x_fit"] = self.x_fit
        df2["y_fit"] = self.y_fit
        df2.to_csv(os.path.join(export_path, "fitted_data.csv"))
        # export the fitted equation (raw string fixes the former "\*" escape
        # which raises a SyntaxWarning on Python 3.12+)
        formatted_equation = re.sub(r"\*\*", "^", self.equation)
        string = "\n".join(
            [
                f"Fitted Hill equation (R\N{superscript two} of {round(self.r_2, 6)}): {formatted_equation}",
                f"top = {self.top}",
                f"bottom = {self.bottom}",
                f"ec50 = {self.ec50}",
                f"nH = {self.nH}",
            ],
        )
        with open(os.path.join(export_path, "equation.txt"), "w") as output:
            output.writelines(string)
| 5,991 | 1 | 184 |
aa40ed7131c116cb4901c177d85fea163da84b4f | 14,035 | py | Python | torchbiggraph/operators.py | stillmatic/PyTorch-BigGraph | d7d6576281faa54ec5850e204ffc07b1268fdb04 | [
"BSD-3-Clause"
] | 3,189 | 2019-04-01T23:25:40.000Z | 2022-03-29T10:26:22.000Z | torchbiggraph/operators.py | KonstantinKlepikov/PyTorch-BigGraph | db9d1478211dcf74a24b88dae1348588c5f645fb | [
"BSD-3-Clause"
] | 238 | 2019-04-02T07:19:55.000Z | 2022-03-22T11:03:06.000Z | torchbiggraph/operators.py | KonstantinKlepikov/PyTorch-BigGraph | db9d1478211dcf74a24b88dae1348588c5f645fb | [
"BSD-3-Clause"
] | 457 | 2019-04-01T23:50:09.000Z | 2022-03-28T14:48:12.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Dict, List, NamedTuple, Optional, Sequence, Union
import torch
import torch.nn as nn
from torchbiggraph.plugin import PluginRegistry
from torchbiggraph.types import FloatTensorType, LongTensorType, Side
from torchbiggraph.util import match_shape
class AbstractOperator(nn.Module, ABC):
"""Perform the same operation on many vectors.
Given a tensor containing a set of vectors, perform the same operation on
all of them, with a common set of parameters. The dimension of these vectors
will be given at initialization (so that any parameter can be initialized).
The input will be a tensor with at least one dimension. The last dimension
will contain the vectors. The output is a tensor that will have the same
size as the input.
"""
@abstractmethod
OPERATORS = PluginRegistry[AbstractOperator]()
@OPERATORS.register_as("none")
@OPERATORS.register_as("diagonal")
@OPERATORS.register_as("translation")
@OPERATORS.register_as("linear")
@OPERATORS.register_as("affine")
# FIXME This adapts from the pre-D14024710 format; remove eventually.
@OPERATORS.register_as("complex_diagonal")
class AbstractDynamicOperator(nn.Module, ABC):
"""Perform different operations on many vectors.
The inputs are a tensor containing a set of vectors and another tensor
specifying, for each vector, which operation to apply to it. The output has
the same size as the first input and contains the outputs of the operations
applied to the input vectors. The different operations are identified by
integers in a [0, N) range. They are all of the same type (say, translation)
but each one has its own set of parameters. The dimension of the vectors and
the total number of operations that need to be supported are provided at
initialization. The first tensor can have any number of dimensions (>= 1).
"""
@abstractmethod
DYNAMIC_OPERATORS = PluginRegistry[AbstractDynamicOperator]()
@DYNAMIC_OPERATORS.register_as("none")
@DYNAMIC_OPERATORS.register_as("diagonal")
@DYNAMIC_OPERATORS.register_as("translation")
@DYNAMIC_OPERATORS.register_as("linear")
@DYNAMIC_OPERATORS.register_as("affine")
# FIXME This adapts from the pre-D14024710 format; remove eventually.
@DYNAMIC_OPERATORS.register_as("complex_diagonal")
| 39.313725 | 88 | 0.678803 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Dict, List, NamedTuple, Optional, Sequence, Union
import torch
import torch.nn as nn
from torchbiggraph.plugin import PluginRegistry
from torchbiggraph.types import FloatTensorType, LongTensorType, Side
from torchbiggraph.util import match_shape
class AbstractOperator(nn.Module, ABC):
    """Perform the same operation on many vectors.
    Given a tensor containing a set of vectors, perform the same operation on
    all of them, with a common set of parameters. The dimension of these vectors
    will be given at initialization (so that any parameter can be initialized).
    The input will be a tensor with at least one dimension. The last dimension
    will contain the vectors. The output is a tensor that will have the same
    size as the input.
    """
    def __init__(self, dim: int):
        # dim is the size of the last (vector) dimension the operator acts on.
        super().__init__()
        self.dim = dim
    @abstractmethod
    def forward(self, embeddings: FloatTensorType) -> FloatTensorType:
        """Apply the operator to every vector along the last dimension."""
        pass
    def get_operator_params_for_reg(self) -> Optional[FloatTensorType]:
        """Return the operator parameters to regularize, or None.
        Subclasses that support regularization override this; the default
        signals that no regularizer is available for this operator.
        """
        raise NotImplementedError("Regularizer not implemented for this operator")
    def prepare_embs_for_reg(self, embs: FloatTensorType) -> FloatTensorType:
        """Transform embeddings before regularization (default: elementwise abs)."""
        return embs.abs()
OPERATORS = PluginRegistry[AbstractOperator]()
@OPERATORS.register_as("none")
class IdentityOperator(AbstractOperator):
    """No-op operator: returns its input unchanged."""
    def forward(self, embeddings: FloatTensorType) -> FloatTensorType:
        # Only validate that the trailing dimension matches the configured one.
        match_shape(embeddings, ..., self.dim)
        return embeddings
    def get_operator_params_for_reg(self) -> Optional[FloatTensorType]:
        # No parameters, hence nothing to regularize.
        return None
@OPERATORS.register_as("diagonal")
class DiagonalOperator(AbstractOperator):
    """Scale each coordinate of every vector by a learned per-dimension factor."""

    def __init__(self, dim: int):
        super().__init__(dim)
        # All-ones initialization, i.e. starts out as the identity scaling.
        self.diagonal = nn.Parameter(torch.ones((self.dim,)))

    def forward(self, embeddings: FloatTensorType) -> FloatTensorType:
        match_shape(embeddings, ..., self.dim)
        scale = self.diagonal.to(device=embeddings.device)
        return scale * embeddings

    def get_operator_params_for_reg(self) -> Optional[FloatTensorType]:
        return self.diagonal.abs()
@OPERATORS.register_as("translation")
class TranslationOperator(AbstractOperator):
    """Add a single learned offset vector to every input vector."""

    def __init__(self, dim: int):
        super().__init__(dim)
        # Zero initialization, i.e. starts out as the identity operator.
        self.translation = nn.Parameter(torch.zeros((self.dim,)))

    def forward(self, embeddings: FloatTensorType) -> FloatTensorType:
        match_shape(embeddings, ..., self.dim)
        offset = self.translation.to(device=embeddings.device)
        return embeddings + offset

    def get_operator_params_for_reg(self) -> Optional[FloatTensorType]:
        return self.translation.abs()
@OPERATORS.register_as("linear")
class LinearOperator(AbstractOperator):
    """Multiply every input vector by a single learned square matrix."""

    def __init__(self, dim: int):
        super().__init__(dim)
        # Identity-initialized transformation matrix.
        self.linear_transformation = nn.Parameter(torch.eye(self.dim))

    def forward(self, embeddings: FloatTensorType) -> FloatTensorType:
        match_shape(embeddings, ..., self.dim)
        matrix = self.linear_transformation.to(device=embeddings.device)
        # Append a singleton dim so that matmul does a matrix-vector product.
        columns = embeddings.unsqueeze(-1)
        return torch.matmul(matrix, columns).squeeze(-1)
@OPERATORS.register_as("affine")
class AffineOperator(AbstractOperator):
    """Affine map: a learned linear transformation followed by a translation."""
    def __init__(self, dim: int):
        super().__init__(dim)
        # Identity matrix + zero offset: starts out as a no-op.
        self.linear_transformation = nn.Parameter(torch.eye(self.dim))
        self.translation = nn.Parameter(torch.zeros((self.dim,)))
    def forward(self, embeddings: FloatTensorType) -> FloatTensorType:
        match_shape(embeddings, ..., self.dim)
        # We add a dimension so that matmul performs a matrix-vector product.
        return (
            torch.matmul(
                self.linear_transformation.to(device=embeddings.device),
                embeddings.unsqueeze(-1),
            ).squeeze(-1)
            + self.translation.to(device=embeddings.device)
        )
    # FIXME This adapts from the pre-D14024710 format; remove eventually.
    def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
        """Rename the legacy 'rotation' parameter (stored transposed) on load."""
        param_key = "%slinear_transformation" % prefix
        old_param_key = "%srotation" % prefix
        if old_param_key in state_dict:
            # Old checkpoints stored the transpose of the current layout.
            state_dict[param_key] = (
                state_dict.pop(old_param_key).transpose(-1, -2).contiguous()
            )
        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
@OPERATORS.register_as("complex_diagonal")
class ComplexDiagonalOperator(AbstractOperator):
    """Elementwise complex multiplication by a learned complex vector.
    The first half of each embedding holds the real parts, the second half
    the imaginary parts.
    """
    def __init__(self, dim: int):
        super().__init__(dim)
        if dim % 2 != 0:
            raise ValueError(
                "Need even dimension as 1st half is real "
                "and 2nd half is imaginary coordinates"
            )
        # Initialized to 1 + 0j per coordinate, i.e. the identity.
        self.real = nn.Parameter(torch.ones((self.dim // 2,)))
        self.imag = nn.Parameter(torch.zeros((self.dim // 2,)))
    def forward(self, embeddings: FloatTensorType) -> FloatTensorType:
        match_shape(embeddings, ..., self.dim)
        real_a = embeddings[..., : self.dim // 2]
        imag_a = embeddings[..., self.dim // 2 :]
        real_b = self.real.to(device=embeddings.device)
        imag_b = self.imag.to(device=embeddings.device)
        # (a_r + i*a_i) * (b_r + i*b_i), written out over the two halves.
        prod = torch.empty_like(embeddings)
        prod[..., : self.dim // 2] = real_a * real_b - imag_a * imag_b
        prod[..., self.dim // 2 :] = real_a * imag_b + imag_a * real_b
        return prod
    def get_operator_params_for_reg(self) -> Optional[FloatTensorType]:
        # Regularize the modulus of each complex parameter.
        return torch.sqrt(self.real ** 2 + self.imag ** 2)
    def prepare_embs_for_reg(self, embs: FloatTensorType) -> FloatTensorType:
        assert embs.shape[-1] == self.dim
        real, imag = embs[..., : self.dim // 2], embs[..., self.dim // 2 :]
        return torch.sqrt(real ** 2 + imag ** 2)
class AbstractDynamicOperator(nn.Module, ABC):
    """Perform different operations on many vectors.
    The inputs are a tensor containing a set of vectors and another tensor
    specifying, for each vector, which operation to apply to it. The output has
    the same size as the first input and contains the outputs of the operations
    applied to the input vectors. The different operations are identified by
    integers in a [0, N) range. They are all of the same type (say, translation)
    but each one has its own set of parameters. The dimension of the vectors and
    the total number of operations that need to be supported are provided at
    initialization. The first tensor can have any number of dimensions (>= 1).
    """
    def __init__(self, dim: int, num_operations: int):
        # dim: size of the vector dimension; num_operations: number of
        # distinct parameter sets (one per operation id).
        super().__init__()
        self.dim = dim
        self.num_operations = num_operations
    @abstractmethod
    def forward(
        self, embeddings: FloatTensorType, operator_idxs: LongTensorType
    ) -> FloatTensorType:
        """Apply, to each vector, the operation selected by operator_idxs."""
        pass
    def get_operator_params_for_reg(
        self, operator_idxs: LongTensorType
    ) -> Optional[FloatTensorType]:
        """Return the selected operations' parameters to regularize, or None."""
        raise NotImplementedError("Regularizer not implemented for this operator")
    def prepare_embs_for_reg(self, embs: FloatTensorType) -> FloatTensorType:
        """Transform embeddings before regularization (default: elementwise abs)."""
        return embs.abs()
DYNAMIC_OPERATORS = PluginRegistry[AbstractDynamicOperator]()
@DYNAMIC_OPERATORS.register_as("none")
class IdentityDynamicOperator(AbstractDynamicOperator):
    """No-op dynamic operator: ignores the operator indices entirely."""
    def forward(
        self, embeddings: FloatTensorType, operator_idxs: LongTensorType
    ) -> FloatTensorType:
        # Validate shapes only; the indices select nothing here.
        match_shape(embeddings, ..., self.dim)
        match_shape(operator_idxs, *embeddings.size()[:-1])
        return embeddings
    def get_operator_params_for_reg(
        self, operator_idxs: LongTensorType
    ) -> Optional[FloatTensorType]:
        # No parameters, hence nothing to regularize.
        return None
@DYNAMIC_OPERATORS.register_as("diagonal")
class DiagonalDynamicOperator(AbstractDynamicOperator):
    """Per-operation diagonal scaling: each operation owns its own scale vector."""

    def __init__(self, dim: int, num_operations: int):
        super().__init__(dim, num_operations)
        # One all-ones (identity) scaling row per operation.
        self.diagonals = nn.Parameter(torch.ones((self.num_operations, self.dim)))

    def forward(
        self, embeddings: FloatTensorType, operator_idxs: LongTensorType
    ) -> FloatTensorType:
        match_shape(embeddings, ..., self.dim)
        match_shape(operator_idxs, *embeddings.size()[:-1])
        scales = self.diagonals.to(device=embeddings.device)
        return scales[operator_idxs] * embeddings

    def get_operator_params_for_reg(
        self, operator_idxs: LongTensorType
    ) -> Optional[FloatTensorType]:
        selected = self.diagonals.to(device=operator_idxs.device)[operator_idxs]
        return selected.abs()
@DYNAMIC_OPERATORS.register_as("translation")
class TranslationDynamicOperator(AbstractDynamicOperator):
    """Per-operation translation: each operation owns its own offset vector."""

    def __init__(self, dim: int, num_operations: int):
        super().__init__(dim, num_operations)
        # One zero (identity) offset row per operation.
        self.translations = nn.Parameter(torch.zeros((self.num_operations, self.dim)))

    def forward(
        self, embeddings: FloatTensorType, operator_idxs: LongTensorType
    ) -> FloatTensorType:
        match_shape(embeddings, ..., self.dim)
        match_shape(operator_idxs, *embeddings.size()[:-1])
        offsets = self.translations.to(device=embeddings.device)
        return embeddings + offsets[operator_idxs]

    def get_operator_params_for_reg(
        self, operator_idxs: LongTensorType
    ) -> Optional[FloatTensorType]:
        selected = self.translations.to(device=operator_idxs.device)[operator_idxs]
        return selected.abs()
@DYNAMIC_OPERATORS.register_as("linear")
class LinearDynamicOperator(AbstractDynamicOperator):
    """Per-operation linear map: each operation owns its own square matrix."""
    def __init__(self, dim: int, num_operations: int):
        super().__init__(dim, num_operations)
        # diag_embed over an expanded ones tensor yields a stack of
        # (dim x dim) identity matrices, one per operation.
        self.linear_transformations = nn.Parameter(
            torch.diag_embed(torch.ones(()).expand(num_operations, dim))
        )
    def forward(
        self, embeddings: FloatTensorType, operator_idxs: LongTensorType
    ) -> FloatTensorType:
        match_shape(embeddings, ..., self.dim)
        match_shape(operator_idxs, *embeddings.size()[:-1])
        # We add a dimension so that matmul performs a matrix-vector product.
        return torch.matmul(
            self.linear_transformations.to(device=embeddings.device)[operator_idxs],
            embeddings.unsqueeze(-1),
        ).squeeze(-1)
@DYNAMIC_OPERATORS.register_as("affine")
class AffineDynamicOperator(AbstractDynamicOperator):
    """Per-operation affine map: per-operation matrix plus per-operation offset."""
    def __init__(self, dim: int, num_operations: int):
        super().__init__(dim, num_operations)
        # One identity matrix and one zero offset per operation.
        self.linear_transformations = nn.Parameter(
            torch.diag_embed(torch.ones(()).expand(num_operations, dim))
        )
        self.translations = nn.Parameter(torch.zeros((self.num_operations, self.dim)))
    def forward(
        self, embeddings: FloatTensorType, operator_idxs: LongTensorType
    ) -> FloatTensorType:
        match_shape(embeddings, ..., self.dim)
        match_shape(operator_idxs, *embeddings.size()[:-1])
        # We add a dimension so that matmul performs a matrix-vector product.
        return (
            torch.matmul(
                self.linear_transformations.to(device=embeddings.device)[operator_idxs],
                embeddings.unsqueeze(-1),
            ).squeeze(-1)
            + self.translations.to(device=embeddings.device)[operator_idxs]
        )
    # FIXME This adapts from the pre-D14024710 format; remove eventually.
    def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
        """Rename the legacy 'rotations' parameter (stored transposed) on load."""
        param_key = "%slinear_transformations" % prefix
        old_param_key = "%srotations" % prefix
        if old_param_key in state_dict:
            # Old checkpoints stored the transpose of the current layout.
            state_dict[param_key] = (
                state_dict.pop(old_param_key).transpose(-1, -2).contiguous()
            )
        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
@DYNAMIC_OPERATORS.register_as("complex_diagonal")
class ComplexDiagonalDynamicOperator(AbstractDynamicOperator):
    """Per-operation complex diagonal: each operation owns a complex vector.
    The first half of each embedding holds the real parts, the second half
    the imaginary parts.
    """
    def __init__(self, dim: int, num_operations: int):
        super().__init__(dim, num_operations)
        if dim % 2 != 0:
            raise ValueError(
                "Need even dimension as 1st half is real "
                "and 2nd half is imaginary coordinates"
            )
        # Initialized to 1 + 0j per coordinate (identity), per operation.
        self.real = nn.Parameter(torch.ones((self.num_operations, self.dim // 2)))
        self.imag = nn.Parameter(torch.zeros((self.num_operations, self.dim // 2)))
    def forward(
        self, embeddings: FloatTensorType, operator_idxs: LongTensorType
    ) -> FloatTensorType:
        match_shape(embeddings, ..., self.dim)
        match_shape(operator_idxs, *embeddings.size()[:-1])
        real_a = embeddings[..., : self.dim // 2]
        imag_a = embeddings[..., self.dim // 2 :]
        real_b = self.real.to(device=embeddings.device)[operator_idxs]
        imag_b = self.imag.to(device=embeddings.device)[operator_idxs]
        # (a_r + i*a_i) * (b_r + i*b_i), written out over the two halves.
        prod = torch.empty_like(embeddings)
        prod[..., : self.dim // 2] = real_a * real_b - imag_a * imag_b
        prod[..., self.dim // 2 :] = real_a * imag_b + imag_a * real_b
        return prod
    def get_operator_params_for_reg(self, operator_idxs: LongTensorType) -> Optional[FloatTensorType]:
        # Regularize the modulus of the selected complex parameters.
        return torch.sqrt(self.real[operator_idxs] ** 2 + self.imag[operator_idxs] ** 2)
    def prepare_embs_for_reg(self, embs: FloatTensorType) -> FloatTensorType:
        assert embs.shape[-1] == self.dim
        real, imag = embs[..., : self.dim // 2], embs[..., self.dim // 2 :]
        return torch.sqrt(real ** 2 + imag ** 2)
def instantiate_operator(
    operator: str, side: Side, num_dynamic_rels: int, dim: int
) -> Optional[Union[AbstractOperator, AbstractDynamicOperator]]:
    """Build the operator instance registered under ``operator``.

    With dynamic relations a dynamic operator is built for either side;
    otherwise only the right-hand side gets an operator and the left-hand
    side gets None.
    """
    if num_dynamic_rels > 0:
        return DYNAMIC_OPERATORS.get_class(operator)(dim, num_dynamic_rels)
    if side is Side.LHS:
        return None
    return OPERATORS.get_class(operator)(dim)
| 9,688 | 336 | 1,405 |
e38ac759da7b0376eef33a8032c272eb47b30f0e | 1,359 | py | Python | purchase/admin.py | rajeshr188/one | 05f580a019dfda7dc45db2b49f526926914799ca | [
"MIT"
] | null | null | null | purchase/admin.py | rajeshr188/one | 05f580a019dfda7dc45db2b49f526926914799ca | [
"MIT"
] | 10 | 2020-02-11T23:31:15.000Z | 2022-03-11T23:34:34.000Z | purchase/admin.py | rajeshr188/one | 05f580a019dfda7dc45db2b49f526926914799ca | [
"MIT"
] | null | null | null | from django.contrib import admin
from django import forms
from .models import Invoice, InvoiceItem, Payment
admin.site.register(Invoice, InvoiceAdmin)
admin.site.register(InvoiceItem, InvoiceItemAdmin)
admin.site.register(Payment, PaymentAdmin)
| 27.18 | 116 | 0.693157 | from django.contrib import admin
from django import forms
from .models import Invoice, InvoiceItem, Payment
class InvoiceAdminForm(forms.ModelForm):
    """Admin form exposing every field of Invoice."""
    class Meta:
        model = Invoice
        fields = '__all__'
class InvoiceAdmin(admin.ModelAdmin):
    """Read-only Invoice admin: every displayed column is also non-editable."""
    form = InvoiceAdminForm
    list_display = ['slug', 'created', 'last_updated', 'rate', 'balancetype', 'paymenttype', 'balance', 'status']
    readonly_fields = ['slug', 'created', 'last_updated', 'rate', 'balancetype', 'paymenttype', 'balance', 'status']
admin.site.register(Invoice, InvoiceAdmin)
class InvoiceItemAdminForm(forms.ModelForm):
    """Admin form exposing every field of InvoiceItem."""
    class Meta:
        model = InvoiceItem
        fields = '__all__'
class InvoiceItemAdmin(admin.ModelAdmin):
    """Read-only InvoiceItem admin: every displayed column is non-editable."""
    form = InvoiceItemAdminForm
    list_display = ['weight', 'touch', 'total', 'is_return', 'quantity']
    readonly_fields = ['weight', 'touch', 'total', 'is_return', 'quantity']
admin.site.register(InvoiceItem, InvoiceItemAdmin)
class PaymentAdminForm(forms.ModelForm):
    """Admin form exposing every field of Payment."""
    class Meta:
        model = Payment
        fields = '__all__'
class PaymentAdmin(admin.ModelAdmin):
    """Read-only Payment admin: every displayed column is non-editable."""
    form = PaymentAdminForm
    list_display = ['slug', 'created', 'last_updated', 'type', 'total', 'description']
    readonly_fields = ['slug', 'created', 'last_updated', 'type', 'total', 'description']
admin.site.register(Payment, PaymentAdmin)
| 0 | 966 | 138 |
a8963e370013a12fd378aa5d0c0f8696d3ca16bd | 879 | py | Python | input/get_input.py | mesielepush/Demi | c108d52c8e44949bc8bb67c0aef733a8772015f0 | [
"MIT"
] | null | null | null | input/get_input.py | mesielepush/Demi | c108d52c8e44949bc8bb67c0aef733a8772015f0 | [
"MIT"
] | null | null | null | input/get_input.py | mesielepush/Demi | c108d52c8e44949bc8bb67c0aef733a8772015f0 | [
"MIT"
] | null | null | null | import os
import pandas as pd
import joblib
base_accounts_dir = os.path.join(os.path.abspath('TweetSuite'),'accounts')
base_accounts = [account for account in os.listdir(base_accounts_dir) if account != 'vault']
accounts_dir = os.path.abspath('accounts')
getInput(base_accounts) | 32.555556 | 96 | 0.614334 | import os
import pandas as pd
import joblib
base_accounts_dir = os.path.join(os.path.abspath('TweetSuite'),'accounts')
base_accounts = [account for account in os.listdir(base_accounts_dir) if account != 'vault']
accounts_dir = os.path.abspath('accounts')
def getInput(base_accounts):
    """Clean each account's tweet pickle and save it under ``accounts_dir``.

    For every pickled DataFrame named in ``base_accounts``, rows whose text
    starts with "RT" (retweets) or "http" (bare links) are dropped, and the
    cleaned frame is dumped as ``<account-stem>_tweets.pkl``.
    """
    for account in base_accounts:
        print('#################### ', account)
        data = joblib.load(os.path.join(base_accounts_dir, account))
        id_to_drop = [
            data.index[ind]
            for ind, text in enumerate(data.text)
            if text.startswith(('RT', 'http'))
        ]
        # Drop every flagged row. The previous `len(id_to_drop) > 1` check
        # silently kept a lone retweet/link tweet.
        if id_to_drop:
            data = data.drop(id_to_drop, axis=0)
        # [:-17] strips a fixed-length suffix from the pickle's filename —
        # presumably a timestamp; confirm against the file producer.
        joblib.dump(data, os.path.join(accounts_dir, account[:-17] + '_tweets.pkl'))
getInput(base_accounts) | 571 | 0 | 23 |
a869cf5fceb7b1eb3ee0126a1b8d83f5d914c1c7 | 469 | py | Python | test/test_hcunit_fail.py | codedsk/hubcheck | 2ff506eb56ba00f035300862f8848e4168452a17 | [
"MIT"
] | 1 | 2016-02-13T13:42:23.000Z | 2016-02-13T13:42:23.000Z | test/test_hcunit_fail.py | codedsk/hubcheck | 2ff506eb56ba00f035300862f8848e4168452a17 | [
"MIT"
] | null | null | null | test/test_hcunit_fail.py | codedsk/hubcheck | 2ff506eb56ba00f035300862f8848e4168452a17 | [
"MIT"
] | null | null | null | import unittest
import pytest
import sys
import hubcheck
pytestmark = [ pytest.mark.fail]
if __name__ == '__main__':
# unittest.main(verbosity=0)
tr = unittest.TextTestRunner(stream=sys.stdout,verbosity=0)
unittest.main(testRunner=tr,exit=False)
| 18.76 | 69 | 0.686567 | import unittest
import pytest
import sys
import hubcheck
pytestmark = [ pytest.mark.fail]
class hcunit_failure(hubcheck.testcase.TestCase):
    """Sanity-check case: verifies the harness reports failing tests."""
    def test_failure(self):
        """
        a test that is expected to fail, exercising failure reporting
        """
        self.assertFalse(True,"this is an example of a test failure")
if __name__ == '__main__':
    # unittest.main(verbosity=0)
    # Quiet runner writing to stdout; exit=False avoids sys.exit() afterwards.
    tr = unittest.TextTestRunner(stream=sys.stdout,verbosity=0)
    unittest.main(testRunner=tr,exit=False)
| 0 | 182 | 23 |
1c49fb62426836b9756f2971c833979c6b552fae | 2,657 | py | Python | tests/test_cdtw.py | dizcza/cdtw-python | a83fffd6fc222a1691f07421fd4dbf46dc19e0aa | [
"MIT"
] | null | null | null | tests/test_cdtw.py | dizcza/cdtw-python | a83fffd6fc222a1691f07421fd4dbf46dc19e0aa | [
"MIT"
] | null | null | null | tests/test_cdtw.py | dizcza/cdtw-python | a83fffd6fc222a1691f07421fd4dbf46dc19e0aa | [
"MIT"
] | null | null | null | import unittest
import math
import numpy as np
from cdtw.dtw import *
from numpy.testing import assert_array_equal, assert_array_almost_equal
try:
import dtaidistance
DTAIDISTANCE_INSTALLED = True
except ImportError:
DTAIDISTANCE_INSTALLED = False
if __name__ == '__main__':
unittest.main()
| 33.2125 | 78 | 0.596161 | import unittest
import math
import numpy as np
from cdtw.dtw import *
from numpy.testing import assert_array_equal, assert_array_almost_equal
try:
import dtaidistance
DTAIDISTANCE_INSTALLED = True
except ImportError:
DTAIDISTANCE_INSTALLED = False
class TestCDTW(unittest.TestCase):
    """Unit tests for the cdtw dynamic-time-warping bindings."""
    def test_empty(self):
        # Empty inputs must be rejected with ValueError by both APIs.
        self.assertRaises(ValueError, dtw_mat, [], [1.0, 2.0])
        self.assertRaises(ValueError, dtw_dist, [], [1.0, 2.0])
    def test_one_point(self):
        # 1x1 problem: the cost is just the pointwise distance.
        self.assertEqual(dtw_dist([1.0], [5.0]), 4.0)
        cost_mat = dtw_mat([1.0], [5.0])
        assert_array_equal(cost_mat, [[4.0]])
        assert_array_equal(dtw_path(cost_mat), [(0, 0)])
    def test_simple(self):
        x = [1, 2, 3, 4, 5]
        y = [2, 3, 4]
        # Hand-computed accumulated-cost matrix (sqrt of accumulated
        # squared differences).
        cost_mat_expected = np.sqrt([
            [1, 5, 14],
            [1, 2, 6],
            [2, 1, 2],
            [6, 2, 1],
            [15, 6, 2]
        ])
        path_expected = [(0, 0), (1, 0), (2, 1), (3, 2), (4, 2)]
        cost_mat = dtw_mat(x, y)
        self.assertAlmostEqual(dtw_dist(x, y), math.sqrt(2.0), places=6)
        assert_array_almost_equal(cost_mat, cost_mat_expected)
        assert_array_equal(dtw_path(cost_mat), path_expected)
    def test_order_does_not_matter(self):
        # DTW is symmetric: swapping the inputs transposes the cost matrix.
        np.random.seed(0)
        x = np.random.randn(100)
        y = np.random.randn(300)
        assert_array_almost_equal(dtw_mat(x, y), dtw_mat(y, x).T)
        self.assertAlmostEqual(dtw_dist(x, y), dtw_dist(y, x))
    def test_dtw_distance_path(self):
        np.random.seed(0)
        x = np.random.randn(10)
        y = np.random.randn(30)
        cost_mat = dtw_mat(x, y)
        # The bottom-right cell of the accumulated matrix is the distance.
        self.assertAlmostEqual(cost_mat[-1, -1], dtw_dist(x, y), places=6)
        path = dtw_path(cost_mat)
        # A valid warping path runs corner to corner.
        assert_array_equal(path[0], (0, 0))
        assert_array_equal(path[-1], (len(x) - 1, len(y) - 1))
    @unittest.skipUnless(DTAIDISTANCE_INSTALLED, "dtaidistance not installed")
    def test_dtaidistance(self):
        # Cross-check distance, matrix and path against dtaidistance.
        np.random.seed(0)
        x = np.random.randn(100).astype(np.float32)
        y = np.random.randn(30).astype(np.float32)
        self.assertAlmostEqual(dtw_dist(x, y),
                               dtaidistance.dtw.distance(x, y),
                               places=6)
        _, cost_mat_expected = dtaidistance.dtw.warping_paths(x, y)
        cost_mat = dtw_mat(x, y)
        # dtaidistance pads an extra leading row/column; slice it off.
        assert_array_almost_equal(cost_mat, cost_mat_expected[1:, 1:],
                                  decimal=5)
        path_expected = dtaidistance.dtw.best_path(cost_mat_expected)
        assert_array_equal(dtw_path(cost_mat), path_expected)
if __name__ == '__main__':
unittest.main()
| 2,068 | 254 | 23 |
79b5dbd16445a2100f888cc4a828e85f56c7c609 | 15,108 | py | Python | okcupyd/details.py | sphericalcow/okcupyd | ae0a99d248c515eea9a6d21a9c89f51e299b33f5 | [
"MIT"
] | null | null | null | okcupyd/details.py | sphericalcow/okcupyd | ae0a99d248c515eea9a6d21a9c89f51e299b33f5 | [
"MIT"
] | null | null | null | okcupyd/details.py | sphericalcow/okcupyd | ae0a99d248c515eea9a6d21a9c89f51e299b33f5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import inspect
import logging
import re
from six import string_types
from . import helpers
from . import magicnumbers
from . import util
from .magicnumbers import maps, language_map
from .xpath import xpb
log = logging.getLogger(__name__)
class Detail(object):
"""Represent a detail belonging to an okcupid.com profile."""
NO_DEFAULT = object()
@classmethod
@classmethod
@classmethod
@classmethod
@staticmethod
@property
_doc_format = 'The {0} detail of an okcupid.com user\'s profile.'
@id_name.setter
class Details(object):
"""Represent the details belonging to an okcupid.com profile."""
@classmethod
_profile_details_xpb = xpb.div(id='profile_details').dl
_basics = [maps.orientation,
maps.gender,
maps.status,
util.IndexedREMap(),
maps.bodytype]
_backgrounds = [maps.ethnicities,
util.REMap.from_string_pairs([(a.replace('+',r'\+'),b)
for a, b in language_map.iteritems()]),
maps.education_level,
maps.religion]
_misc = [util.IndexedREMap('smokes'),
util.IndexedREMap('drink'),
util.IndexedREMap('drugs'),
maps.diet,
util.IndexedREMap('kid'),
util.IndexedREMap('dogs','cats'),
maps.sign]
@classmethod
@util.cached_property
@property
bodytype = Detail.mapping_updater(maps.bodytype)
gender = Detail.mapping_updater(maps.gender)
orientation = Detail.mapping_updater(maps.orientation)
smokes = Detail.mapping_updater(maps.smokes, id_name='smoking')
drugs = Detail.mapping_updater(maps.drugs)
drinks = Detail.mapping_updater(maps.drinks, id_name='drinking')
job = Detail.mapping_updater(maps.job)
status = Detail.mapping_updater(maps.status)
monogamy = Detail(id_name='monogamous', updater=lambda id_name, value: {
'monogamous': maps.monogamy[value],
'monogamyflex': maps.strictness[value]
})
children = Detail(updater=lambda id_name, value: {
'children': maps.has_kids[value],
'children2': maps.wants_kids[value]
})
education = Detail(updater=lambda id_name, value: {
'educationstatus': maps.education_status[value],
'educationlevel': maps.education_level[value]
})
pets = Detail(updater=lambda id_name, value: {
'cats': maps.cats[value],
'dogs': maps.dogs[value]
})
diet = Detail(updater=lambda id_name, value: {
'diet': maps.diet[value],
'dietserious': maps.diet_strictness[value]
})
religion = Detail(updater=lambda id_name, value: {
'religion': maps.religion[value],
'religionserious': maps.seriousness[value]
})
sign = Detail(updater=lambda id_name, value: {
'sign': maps.sign[value],
'sign_status': maps.importance[value]
})
height = Detail(updater=lambda id_name, value: {
'centimeters': int(round(magicnumbers.parse_height_string(value)))
})
for id_name, detail in Details.name_detail_pairs():
if detail.id_name is None:
detail.id_name = id_name
is_declarative_detail = lambda x: (isinstance(x, type) and
issubclass(x, DeclarativeDetail))
for id_name, declarative_detail in inspect.getmembers(
Details, is_declarative_detail
):
detail = Detail(presenter=declarative_detail.presenter,
updater=declarative_detail.updater,
id_name=id_name)
setattr(Details, id_name, detail)
| 36.938875 | 155 | 0.51668 | # -*- coding: utf-8 -*-
import inspect
import logging
import re
from six import string_types
from . import helpers
from . import magicnumbers
from . import util
from .magicnumbers import maps, language_map
from .xpath import xpb
log = logging.getLogger(__name__)
class Detail(object):
    """Represent a detail belonging to an okcupid.com profile.
    A Detail is a data descriptor: reading it presents the scraped display
    value; assigning it converts the value with its updater and posts the
    update through the owning Details instance.
    """
    NO_DEFAULT = object()  # sentinel distinguishing "no default" from None
    @classmethod
    def comma_separated_presenter(cls, text):
        """Present a comma-separated display string as a list of items."""
        return text.strip().split(', ')
    @classmethod
    def mapping_multi_updater(cls, mapping):
        """Build an updater translating each item of a multi-valued detail."""
        def updater(id_name, value):
            if value is None:
                value = ()
            return {id_name: [mapping[item.lower()] for item in value]}
        return updater
    @classmethod
    def auto_indexed_updater(cls, *options):
        """Build an updater mapping each option to its 1-based position.
        NOTE(review): passes default=0 to mapping_updater, which takes no
        ``default`` keyword here — looks like it would raise TypeError when
        called; confirm against magicnumbers.MappingUpdater.
        """
        return cls.mapping_updater({
            option: index
            for index, option in enumerate(options, 1)
        }, default=0)
    @classmethod
    def mapping_updater(cls, mapping, id_name=None):
        """Build a Detail whose updater translates values via the mapping."""
        return cls(id_name=id_name,
                   updater=magicnumbers.MappingUpdater(mapping))
    @staticmethod
    def default_updater(id_name, value):
        # Pass the value through unchanged under the detail's id.
        return {id_name: value}
    def __init__(self, id_name=None, presenter=None, updater=None):
        self.id_name = id_name
        # Default presenter: an em dash means "no value"; otherwise clean up.
        self.presenter = presenter or (lambda x: None if u'\u2014' in x
                                       else helpers.replace_chars(x.strip()))
        self.updater = updater or self.default_updater
    @property
    def id_name(self):
        return self._id_name
    _doc_format = 'The {0} detail of an okcupid.com user\'s profile.'
    @id_name.setter
    def id_name(self, value):
        # Keep the instance docstring in sync with the detail's name.
        self._id_name = value
        self.__doc__ = self._doc_format.format(self.id_name)
    def update(self, value):
        """Return the server-bound {id: value} mapping for the given value."""
        if isinstance(value, string_types):
            value = value.lower()
        return self.updater(self.id_name, value)
    def __get__(self, details, klass):
        # Accessed on the class itself: return the descriptor, per convention.
        if details is None:
            return self
        return self.presenter(details.id_to_display_name_value.get(self.id_name, u'\u2014'))
    def __set__(self, details, value):
        details.update(self.update(value))
class DeclarativeDetail(object):
    """Marker base for details declared as nested classes on Details.
    Subclasses override ``updater``/``presenter``; they are later converted
    into Detail descriptors.
    """
    updater = None
    presenter = None
class Details(object):
"""Represent the details belonging to an okcupid.com profile."""
@classmethod
def name_detail_pairs(cls):
is_detail = lambda x: isinstance(x, Detail)
return inspect.getmembers(cls, is_detail)
    def __init__(self, profile):
        # The profile whose scraped details this object exposes.
        self.profile = profile
_profile_details_xpb = xpb.div(id='profile_details').dl
    def refresh(self):
        """Drop cached scraped values so the next access re-parses the profile."""
        util.cached_property.bust_caches(self)
_basics = [maps.orientation,
maps.gender,
maps.status,
util.IndexedREMap(),
maps.bodytype]
_backgrounds = [maps.ethnicities,
util.REMap.from_string_pairs([(a.replace('+',r'\+'),b)
for a, b in language_map.iteritems()]),
maps.education_level,
maps.religion]
_misc = [util.IndexedREMap('smokes'),
util.IndexedREMap('drink'),
util.IndexedREMap('drugs'),
maps.diet,
util.IndexedREMap('kid'),
util.IndexedREMap('dogs','cats'),
maps.sign]
    @classmethod
    def _parse(cls, details, section):
        """Split one 2015-style details table's text into id -> value pairs.
        ``details`` is the raw text of a section table; ``section`` is one of
        'basics', 'background' or 'misc'. Tokens are matched in order against
        the ordered matcher lists (_basics/_backgrounds/_misc), since each
        section lists its fields in a fixed order and some fields
        (orientation, gender, ethnicity, languages, pets) can be lists.
        """
        if section == 'basics':
            # Ordered subset of Orientation, Gender, Status, Height, Bodytype
            # Orientation and Gender can be lists
            output = {0: [], # orientation
                      1: [], # gender
                      2: [], # status
                      3: [], # height
                      4: []} # bodytype
            details = details.split(', ')
            current = 0
            for D in details:
                DL = D.lower()
                for n in xrange(current, len(cls._basics)):
                    try:
                        found = cls._basics[n]._get_nodefault(DL)
                        if found:
                            current = n
                            output[n].append(D)
                            break
                    except KeyError:
                        pass
                else:
                    # Height has no keyword: an unmatched token containing a
                    # digit (before later fields were seen) is taken as height.
                    if current <= 3 and any(char.isdigit() for char in D):
                        output[3].append(D)
                        current = 3
                    else:
                        raise RuntimeError(u"Parsing error in basics section: %s not recognized"%(D))
            return {'orientation' : ', '.join(output[0]),
                    'gender' : ', '.join(output[1]),
                    'status' : ', '.join(output[2]),
                    'height' : ', '.join(output[3]),
                    'bodytype' : ', '.join(output[4])}
        elif section == 'background':
            # Ordered subset of Ethnicity, Languages, Education, Religion
            # Ethnicity and Languages can be lists
            output = {0: [], # ethnicity
                      1: [], # languages
                      2: [], # education
                      3: []} # religion
            details = details.replace(' and',',').split(', ')
            current = 0
            for D in details:
                D = D.replace('Speaks','').strip()
                # Strip fluency/education qualifiers before matching.
                DL = D.replace('some','').replace('fluently','').\
                    replace('Working on','').replace('Attended','').\
                    replace('Dropped out of','').lower().strip()
                if not DL: continue
                for n in xrange(current, len(cls._backgrounds)):
                    try:
                        found = cls._backgrounds[n]._get_nodefault(DL)
                        if found:
                            current = n
                            output[n].append(D)
                            break
                    except KeyError:
                        pass
                else:
                    raise RuntimeError(u"Parsing error in background section: %s not recognized"%(D))
            return {'ethnicities' : ', '.join(output[0]),
                    'languages' : ', '.join(output[1]),
                    'education' : ', '.join(output[2]),
                    'religion' : ', '.join(output[3])}
        elif section == 'misc':
            # Smokes, Drinks, Drugs, Diet, Kids, Pets, Sign
            # Pets can be a list
            output = {0: [], # smokes
                      1: [], # drinks
                      2: [], # drugs
                      3: [], # diet
                      4: [], # kids
                      5: [], # pets
                      6: []} # sign
            details = details.split(', ')
            current = 0
            for D in details:
                DL = D.lower().strip()
                for n in xrange(current, len(cls._misc)):
                    try:
                        found = cls._misc[n]._get_nodefault(DL)
                        if found:
                            current = n
                            output[n].append(D)
                            break
                    except KeyError:
                        pass
                else:
                    # NOTE(review): unrecognized misc tokens are silently
                    # dropped — the raise below was deliberately commented out.
                    pass
                    #raise RuntimeError(u"Parsing error in misc section: %s not recognized"%(D))
            return {'smoking' : ', '.join(output[0]),
                    'drinking' : ', '.join(output[1]),
                    'drugs' : ', '.join(output[2]),
                    'diet' : ', '.join(output[3]),
                    'children' : ', '.join(output[4]),
                    'pets' : ', '.join(output[5]),
                    'sign' : ', '.join(output[6])}
@util.cached_property
def id_to_display_name_value(self):
    """Map each detail id (e.g. ``bodytype``) to its displayed text value.

    Logged-in profiles expose per-field ``dd`` elements whose ids look like
    ``ajax_<name>``; other profiles are scraped from the 2015 details tables
    and parsed section by section via ``_parse``.
    """
    output = {}
    if self.profile.is_logged_in_user:
        for element in self._profile_details_xpb.apply_(
            self.profile.profile_tree
        ):
            value_element = xpb.dd.one_(element)
            if not 'id' in value_element.attrib:
                continue
            # ids look like "ajax_bodytype"; strip the prefix to get the name.
            id_name = value_element.attrib['id'].replace('ajax_', '')
            value = value_element.text_content()
            output[id_name] = value
    else:
        for section in ('basics','background','misc'):
            try:
                output.update(self._parse(xpb.table.with_classes('details2015-section',section).get_text_(self.profile.profile_tree).strip(), section))
            except IndexError:
                # section might not be present
                pass
    # Python 2 dict API (iteritems); replace empty values with an em dash,
    # matching what the site displays for unanswered details.
    for k,v in output.iteritems():
        if not v:
            output[k] = u"\u2014"
    return output
@property
def as_dict(self):
    """Snapshot of every declared detail as a plain ``{name: value}`` dict."""
    values = {}
    for name, _detail in self.name_detail_pairs():
        values[name] = getattr(self, name)
    return values
def convert_and_update(self, data):
    """Translate presenter-style values into server form fields and push them.

    Each key in ``data`` names a Detail on this class; the Detail's own
    ``update`` produces the server-bound fields, which are merged and sent
    in a single request via :meth:`update`.
    """
    cls = type(self)
    server_payload = {}
    for name, value in data.items():
        server_payload.update(getattr(cls, name).update(value))
    return self.update(server_payload)
def update(self, data):
    """POST ``data`` to the profileedit2 endpoint and refresh cached state.

    :param data: mapping of server-side form field names to values.
    :returns: the HTTP response returned by ``authcode_post``.
    """
    log.debug(data)
    response = self.profile.authcode_post('profileedit2', data=data)
    # Both the profile tree and this object's cached properties are stale
    # after a successful edit.
    self.profile.refresh()
    self.refresh()
    return response
# Simple one-to-one details: a single displayed value maps to a single
# server field through a lookup table in `maps`.
bodytype = Detail.mapping_updater(maps.bodytype)
gender = Detail.mapping_updater(maps.gender)
orientation = Detail.mapping_updater(maps.orientation)
smokes = Detail.mapping_updater(maps.smokes, id_name='smoking')
drugs = Detail.mapping_updater(maps.drugs)
drinks = Detail.mapping_updater(maps.drinks, id_name='drinking')
job = Detail.mapping_updater(maps.job)
status = Detail.mapping_updater(maps.status)
# Compound details: one displayed value expands into several server fields
# (e.g. a value plus a strictness/seriousness qualifier).
monogamy = Detail(id_name='monogamous', updater=lambda id_name, value: {
    'monogamous': maps.monogamy[value],
    'monogamyflex': maps.strictness[value]
})
children = Detail(updater=lambda id_name, value: {
    'children': maps.has_kids[value],
    'children2': maps.wants_kids[value]
})
education = Detail(updater=lambda id_name, value: {
    'educationstatus': maps.education_status[value],
    'educationlevel': maps.education_level[value]
})
pets = Detail(updater=lambda id_name, value: {
    'cats': maps.cats[value],
    'dogs': maps.dogs[value]
})
diet = Detail(updater=lambda id_name, value: {
    'diet': maps.diet[value],
    'dietserious': maps.diet_strictness[value]
})
religion = Detail(updater=lambda id_name, value: {
    'religion': maps.religion[value],
    'religionserious': maps.seriousness[value]
})
sign = Detail(updater=lambda id_name, value: {
    'sign': maps.sign[value],
    'sign_status': maps.importance[value]
})
# Height is free text (e.g. 5'11"); parsed to centimeters for the server.
height = Detail(updater=lambda id_name, value: {
    'centimeters': int(round(magicnumbers.parse_height_string(value)))
})
class ethnicities(DeclarativeDetail):
    """Multi-valued ethnicity detail (comma separated in the display text)."""

    @staticmethod
    def presenter(text):
        """Split the displayed text, keeping only entries containing letters."""
        candidates = Detail.comma_separated_presenter(text)
        return [candidate for candidate in candidates
                if any(character.isalpha() for character in candidate)]

    @staticmethod
    def updater(id_name, value):
        """Translate displayed ethnicity names to server codes."""
        if value is None:
            value = ()
        codes = [maps.ethnicities[item.lower()] for item in value]
        if len(codes) < 1:
            # An empty selection is sent as the scalar 10 — presumably the
            # server's "no answer" code; TODO confirm against maps.
            codes = 10
        return {id_name: codes}
class income(DeclarativeDetail):
    """Income detail: converts a displayed income (number or display string)
    into a 1-based server level code; 0 means "no answer".
    """
    # (index, upper bound) pairs; the selected level is the first whose bound
    # strictly exceeds the value.
    levels = list(enumerate(
        (20000, 30000, 40000, 50000, 60000, 70000,
         80000, 100000, 150000, 250000, 500000, 1000000)
    ))
    # One to three digits, optionally followed by comma-separated triples.
    comma_sep_number = '([0-9]{1,3}(?:,[0-9]{3})*)'
    range_matcher = re.compile(u'\$?{0}-\$?{0}'.format(comma_sep_number),
                               flags=re.UNICODE)
    lt_matcher = re.compile("less than \$?{0}".format(comma_sep_number),
                            flags=re.UNICODE)
    gt_matcher = re.compile("more than \$?{0}".format(comma_sep_number),
                            flags=re.UNICODE)
    @classmethod
    def updater(cls, id_name, value):
        """Build the income form field from a number or a display string."""
        if value is None:
            return {'income': 0}
        if isinstance(value, string_types):
            # Convert a (lowercase) "less than ...", "more than ..." or
            # "$a-$b" range string into a number slightly inside the range,
            # so the level lookup below lands in the right bucket.
            for matcher, sign in ((cls.range_matcher, 1),
                                  (cls.lt_matcher, -1),
                                  (cls.gt_matcher, 1)):
                match = matcher.match(value)
                if match:
                    matched_income = int(match.group(1).replace(',', ''))
                    value = matched_income + 100 * sign
                    break
        for index, level in cls.levels:
            if value < level:
                break
        else:
            # value met or exceeded every bound: take one past the last level.
            index += 1
        update = index + 1  # level codes are 1-based (0 is "no answer" above)
        return {'income': update}
class languages(DeclarativeDetail):
    """Languages detail: the server stores up to five (language, level) slots."""
    # "English (Fluently)" -> group 1 = language name, group 2 = fluency word.
    language_matcher = re.compile('(.*?) \((.*?)\)')
    language_to_number = magicnumbers.language_map_2
    # Fluency words mapped to integer codes by their position in this list.
    level = util.IndexedREMap('fluently', 'okay', 'poorly')
    @classmethod
    def presenter(cls, value):
        """Parse the display string into ``[(language, level_or_None), ...]``."""
        language_strings = value.split(',')
        languages = []
        for language_string in language_strings:
            match = cls.language_matcher.match(language_string.strip())
            if match:
                languages.append((match.group(1).lower(),
                                  match.group(2).lower()))
            else:
                # No "(level)" suffix present in the display text.
                languages.append((language_string.strip(), None))
        return languages
    @classmethod
    def updater(cls, id_name, languages):
        """Build ``cont_lang_N`` / ``languageNstatus`` form fields (N = 1..5),
        blanking any slots beyond the provided languages."""
        data = {}
        number = 0
        for number, (language, level) in enumerate(languages, 1):
            language_number = cls.language_to_number[language.lower()]
            level = level or ''
            level_number = cls.level[level.lower()]
            data['cont_lang_{0}'.format(number)] = language_number
            data['language{0}status'.format(number)] = level_number
        # First unused slot: len(languages) + 1 (slots are 1-based).
        number += 1
        for i in range(number, 6):
            data['cont_lang_{0}'.format(i)] = ''
            data['language{0}status'.format(i)] = ''
        return data
# Fill in the id_name of plain Detail instances from their attribute name.
for id_name, detail in Details.name_detail_pairs():
    if detail.id_name is None:
        detail.id_name = id_name

def is_declarative_detail(x):
    """True for class attributes that are DeclarativeDetail subclasses."""
    return isinstance(x, type) and issubclass(x, DeclarativeDetail)

# Replace each DeclarativeDetail class on Details with an equivalent Detail
# instance built from its presenter/updater.
for id_name, declarative_detail in inspect.getmembers(Details, is_declarative_detail):
    setattr(Details, id_name,
            Detail(presenter=declarative_detail.presenter,
                   updater=declarative_detail.updater,
                   id_name=id_name))
| 9,660 | 1,140 | 606 |
7c5874f2c4499dce85163728826381b6e68c8416 | 7,654 | py | Python | env/lib/python3.9/site-packages/spline/components/bash.py | AdnanKhan27/nicstestbed | d3136e23fda8bd09706eb55d9a8c44ff0ad90730 | [
"MIT"
] | 30 | 2017-12-05T11:12:06.000Z | 2021-11-06T18:27:58.000Z | env/lib/python3.9/site-packages/spline/components/bash.py | AdnanKhan27/nicstestbed | d3136e23fda8bd09706eb55d9a8c44ff0ad90730 | [
"MIT"
] | 112 | 2017-10-15T12:13:38.000Z | 2021-01-12T22:29:58.000Z | env/lib/python3.9/site-packages/spline/components/bash.py | AdnanKhan27/nicstestbed | d3136e23fda8bd09706eb55d9a8c44ff0ad90730 | [
"MIT"
] | 6 | 2018-08-12T17:01:52.000Z | 2021-08-17T06:05:24.000Z | """Executing a bash script."""
# Copyright (c) 2017 Thomas Lehmann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# pylint: disable=too-many-instance-attributes
import contextlib
import sys
import os
import shlex
import subprocess # nosec
import tempfile
from spline.tools.filters import render
from spline.tools.logger import Logger
from spline.tools.event import Event
@contextlib.contextmanager
def managed_process(process):
    """Context manager around ``subprocess.Popen``: on exit, close every open
    standard stream and wait for the process (portable across Python versions
    where Popen itself cannot be used in a ``with`` statement)."""
    try:
        yield process
    finally:
        # Close whichever streams were opened as pipes, then reap the child.
        for pipe in (process.stdout, process.stdin, process.stderr):
            if pipe:
                pipe.close()
        process.wait()
class Bash(object):
    """Wrapper for Bash execution: renders a script, writes it to a temporary
    executable file, runs it and yields its output line by line."""

    def __init__(self, config):
        """
        Initialize with Bash code and optional environment variables.

        Args:
            config (ShellConfig): options for configuring Bash environment and behavior
        """
        self.event = Event.create(__name__)
        self.logger = Logger.get_logger(__name__)
        self.config = config
        self.success = True
        # Script environment: configured variables, later augmented with a
        # copy of os.environ (see update_environment_variables).
        self.env = {}
        self.env.update(config.env)
        self.stdout = subprocess.PIPE
        self.stderr = subprocess.STDOUT  # merge stderr into stdout
        self.shell = False
        self.exit_code = 0

    @staticmethod
    def creator(_, config):
        """
        Creator function for creating an instance of a Bash.

        Args:
            config (ShellConfig): options for configuring Bash environment and behavior

        Returns:
            Bash: instance of class Bash
        """
        return Bash(config)

    def update_environment_variables(self, filename):
        """Updating OS environment variables and current script path and filename."""
        self.env.update(os.environ.copy())
        self.env.update({'PIPELINE_BASH_FILE': filename})

    def get_temporary_scripts_path(self):
        """
        Get path for temporary scripts.

        Returns:
            str: path for temporary scripts or None if not set
        """
        result = None
        if len(self.config.temporary_scripts_path) > 0:
            if os.path.isdir(self.config.temporary_scripts_path):
                result = self.config.temporary_scripts_path
        return result

    def create_file_for(self, script):
        """
        Create a temporary, executable bash file.

        It also does render given script (string) with the model and
        the provided environment variables and optional also an item
        when using the B{with} field.

        Args:
            script (str): either path and filename or Bash code.

        Returns:
            str: path and filename of a temporary file, or None when
            rendering failed (``self.success`` is set to False).
        """
        temp = tempfile.NamedTemporaryFile(
            prefix="pipeline-script-", mode='w+t', suffix=".sh", delete=False,
            dir=self.get_temporary_scripts_path())
        self.update_environment_variables(temp.name)
        rendered_script = render(script, model=self.config.model, env=self.env, item=self.config.item,
                                 variables=self.config.variables)
        if rendered_script is None:
            self.success = False
            temp.close()
            os.remove(temp.name)
            return None
        # Python 2 requires explicit utf-8 encoding before writing text.
        to_file_map = {2: lambda s: s.encode('utf-8'), 3: lambda s: s}
        # ASCII guard: only probe the filesystem when the rendered value can
        # plausibly be a path; otherwise treat it as inline Bash code.
        if all(ord(ch) < 128 for ch in rendered_script) and os.path.isfile(rendered_script):
            with open(rendered_script) as handle:
                content = str(handle.read())
                # bugfix: write() instead of writelines(); writelines() on a
                # str iterated it character by character.
                temp.write(content)
        else:
            temp.write(u"#!/bin/bash\n%s" % self.render_bash_options())
            temp.write(to_file_map[sys.version_info.major](rendered_script))
        temp.close()
        # make Bash script executable
        os.chmod(temp.name, 0o700)
        return temp.name

    def render_bash_options(self):
        """Rendering Bash options (trace / strict mode) for the script header."""
        options = ''
        if self.config.debug:
            options += "set -x\n"
        if self.config.strict:
            options += "set -euo pipefail\n"
        return options

    def process_script(self, filename):
        """Run the Bash script, yielding its output line by line (utf-8)."""
        try:
            with managed_process(subprocess.Popen(shlex.split("bash %s" % filename),
                                                  stdout=self.stdout, stderr=self.stderr,
                                                  shell=self.shell, env=self.env)) as process:  # nosec
                # bugfix: readline() returns bytes, so the EOF sentinel must
                # be b''; the previous str sentinel ' ' could never match and
                # the loop relied on a separate falsy check to terminate.
                for line in iter(process.stdout.readline, b''):
                    yield line[0:-1].decode('utf-8')
                process.wait()
                self.exit_code = process.returncode
                self.success = (process.returncode == 0)
                if not self.config.internal:
                    if process.returncode == 0:
                        self.logger.info("Exit code has been %d", process.returncode)
                    else:
                        self.logger.error("Exit code has been %d", process.returncode)
        except OSError as exception:
            self.exit_code = 1
            self.success = False
            yield str(exception)

    def process_file(self, filename):
        """Process one script file: echo its lines (dry run) or execute it."""
        if self.config.dry_run:
            if not self.config.internal:
                self.logger.info("Dry run mode for script %s", filename)
            with open(filename) as handle:
                for line in handle:
                    yield line[0:-1] if line[-1] == '\n' else line
        else:
            if not self.config.internal:
                self.logger.info("Running script %s", filename)
            for line in self.process_script(filename):
                yield line

    def process(self):
        """Render and run the Bash code, yielding output lines."""
        temp_filename = self.create_file_for(self.config.script)
        if len(self.config.title) > 0:
            self.logger.info(render(self.config.title, model=self.config.model, env=self.env,
                                    item=self.config.item, variables=self.config.variables))
        if temp_filename is not None:
            try:
                for line in self.process_file(temp_filename):
                    yield line
            finally:
                # removing script
                os.remove(temp_filename)
            if not self.config.internal:
                if self.exit_code == 0:
                    self.event.succeeded()
                else:
                    self.event.failed(exit_code=self.exit_code)
| 37.336585 | 106 | 0.605304 | """Executing a bash script."""
# Copyright (c) 2017 Thomas Lehmann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# pylint: disable=too-many-instance-attributes
import contextlib
import sys
import os
import shlex
import subprocess # nosec
import tempfile
from spline.tools.filters import render
from spline.tools.logger import Logger
from spline.tools.event import Event
@contextlib.contextmanager
def managed_process(process):
    """Yield a Popen object; on exit close its pipes and wait for it.

    Portable replacement for using Popen directly in a ``with`` statement
    on older Python versions.
    """
    try:
        yield process
    finally:
        streams = [process.stdout, process.stdin, process.stderr]
        for stream in streams:
            if stream:
                stream.close()
        # Reap the child so no zombie process is left behind.
        process.wait()
class Bash(object):
    """Wrapper for Bash execution."""

    def __init__(self, config):
        """
        Initialize with Bash code and optional environment variables.

        Args:
            config(ShellConfig): options for configuring Bash environment and behavior
        """
        self.event = Event.create(__name__)
        self.logger = Logger.get_logger(__name__)
        self.config = config
        self.success = True
        # Script environment: configured variables, later augmented with a
        # copy of os.environ (see update_environment_variables).
        self.env = {}
        self.env.update(config.env)
        self.stdout = subprocess.PIPE
        self.stderr = subprocess.STDOUT  # merge stderr into stdout
        self.shell = False
        self.exit_code = 0

    @staticmethod
    def creator(_, config):
        """
        Creator function for creating an instance of a Bash.

        Args:
            config (ShellConfig): options for configuring Bash environment and behavior

        Returns:
            Bash: instance of class Bash
        """
        return Bash(config)

    def update_environment_variables(self, filename):
        """Updating OS environment variables and current script path and filename."""
        self.env.update(os.environ.copy())
        self.env.update({'PIPELINE_BASH_FILE': filename})

    def get_temporary_scripts_path(self):
        """
        Get path for temporary scripts.

        Returns:
            str: path for temporary scripts or None if not set
        """
        result = None
        if len(self.config.temporary_scripts_path) > 0:
            if os.path.isdir(self.config.temporary_scripts_path):
                result = self.config.temporary_scripts_path
        return result

    def create_file_for(self, script):
        """
        Create a temporary, executable bash file.

        It also does render given script (string) with the model and
        the provided environment variables and optional also an item
        when using the B{with} field.

        Args:
            script (str): either pather and filename or Bash code.

        Returns:
            str: path and filename of a temporary file.
        """
        temp = tempfile.NamedTemporaryFile(
            prefix="pipeline-script-", mode='w+t', suffix=".sh", delete=False,
            dir=self.get_temporary_scripts_path())
        self.update_environment_variables(temp.name)
        rendered_script = render(script, model=self.config.model, env=self.env, item=self.config.item,
                                 variables=self.config.variables)
        if rendered_script is None:
            # Rendering failed: clean up the temp file and signal failure.
            self.success = False
            temp.close()
            os.remove(temp.name)
            return None
        # Python 2 requires explicit utf-8 encoding before writing text.
        to_file_map = {2: lambda s: s.encode('utf-8'), 3: lambda s: s}
        if all(ord(ch) < 128 for ch in rendered_script) and os.path.isfile(rendered_script):
            with open(rendered_script) as handle:
                content = str(handle.read())
                # NOTE(review): writelines() on a str writes it character by
                # character; temp.write(content) is the intended API.
                temp.writelines(content)
        else:
            temp.write(u"#!/bin/bash\n%s" % self.render_bash_options())
            temp.write(to_file_map[sys.version_info.major](rendered_script))
        temp.close()
        # make Bash script executable
        os.chmod(temp.name, 0o700)
        return temp.name

    def render_bash_options(self):
        """Rendering Bash options."""
        options = ''
        if self.config.debug:
            options += "set -x\n"
        if self.config.strict:
            options += "set -euo pipefail\n"
        return options

    def process_script(self, filename):
        """Running the Bash code."""
        try:
            with managed_process(subprocess.Popen(shlex.split("bash %s" % filename),
                                                  stdout=self.stdout, stderr=self.stderr,
                                                  shell=self.shell, env=self.env)) as process:  # nosec
                # NOTE(review): readline() returns bytes while the sentinel
                # ' ' is str, so it can never match; termination relies solely
                # on the falsy check below (b'' at EOF).
                for line in iter(process.stdout.readline, ' '):
                    if not line:
                        break
                    yield line[0:-1].decode('utf-8')
                process.wait()
                self.exit_code = process.returncode
                self.success = (process.returncode == 0)
                if not self.config.internal:
                    if process.returncode == 0:
                        self.logger.info("Exit code has been %d", process.returncode)
                    else:
                        self.logger.error("Exit code has been %d", process.returncode)
        except OSError as exception:
            self.exit_code = 1
            self.success = False
            yield str(exception)

    def process_file(self, filename):
        """Processing one file."""
        if self.config.dry_run:
            if not self.config.internal:
                self.logger.info("Dry run mode for script %s", filename)
            # Dry run: echo the rendered script without executing it.
            with open(filename) as handle:
                for line in handle:
                    yield line[0:-1] if line[-1] == '\n' else line
        else:
            if not self.config.internal:
                self.logger.info("Running script %s", filename)
            for line in self.process_script(filename):
                yield line

    def process(self):
        """Running the Bash code."""
        temp_filename = self.create_file_for(self.config.script)
        if len(self.config.title) > 0:
            self.logger.info(render(self.config.title, model=self.config.model, env=self.env,
                                    item=self.config.item, variables=self.config.variables))
        if temp_filename is not None:
            try:
                for line in self.process_file(temp_filename):
                    yield line
            finally:
                # removing script
                os.remove(temp_filename)
            if not self.config.internal:
                if self.exit_code == 0:
                    self.event.succeeded()
                else:
                    self.event.failed(exit_code=self.exit_code)
| 0 | 0 | 0 |
06f76399a4d1b48754d402871a82545a21ae40c5 | 4,565 | py | Python | lab4.py | UnstoppableGuy/Optimization-and-control-methods | 1c690023aeb11c575e9737b407fb7c848aa3a2a8 | [
"MIT"
] | null | null | null | lab4.py | UnstoppableGuy/Optimization-and-control-methods | 1c690023aeb11c575e9737b407fb7c848aa3a2a8 | [
"MIT"
] | null | null | null | lab4.py | UnstoppableGuy/Optimization-and-control-methods | 1c690023aeb11c575e9737b407fb7c848aa3a2a8 | [
"MIT"
] | null | null | null | import sys
import numpy as np
from lab1 import input_matrix, input_vector
# from lab4 import first_and_second_step_simplex_method
def double_simplex(c, b, a_matrix, j_vector):
    """Dual simplex method: starts from a dual-feasible basic plan and
    iterates until the primal plan becomes feasible, preserving optimality.

    Args:
        c (np.array): objective coefficients, one per column of a_matrix
        b (np.array): right-hand side vector
        a_matrix (np.array): constraint coefficient matrix (m x n)
        j_vector (np.array): 1-based indices of the initial basis
            (mutated in place: shifted to 0-based and updated per pivot)

    Returns:
        any: feasible plan (list) or message (str) when the feasible set
        is empty.
    """
    _, n = a_matrix.shape
    j_vector -= 1  # convert to 0-based column indices
    # Initial dual plan y = c_B · B^{-1}; previously delegated to a
    # get_initial_y helper that is not defined in this module (NameError).
    y = c[j_vector].dot(np.linalg.inv(a_matrix[:, j_vector]))
    x_0 = [0 for _ in range(n)]
    while True:
        not_J = np.delete(np.arange(n), j_vector)
        B = np.linalg.inv(a_matrix[:, j_vector])
        kappa = B.dot(b)  # basic components of the pseudo-plan
        if all(kappa >= 0):
            # Pseudo-plan is primal-feasible: fill in basic components.
            for j, _kappa in zip(j_vector, kappa):
                x_0[j] = _kappa
            print(str(list(map(lambda _x: round(float(_x), 3), list(x_0)))
                      ).replace('[', '').replace(']', ''), "- план")
            print(f"План: \t{' '.join(map(str,list(x_0)))}")
            return x_0
        # Pivot row: most negative basic component leaves the basis.
        k = np.argmin(kappa)
        delta_y = B[k]
        mu = delta_y.dot(a_matrix)
        sigma = []
        for i in not_J:
            if mu[i] >= 0:
                sigma.append(np.inf)
            else:
                sigma.append((c[i] - a_matrix[:, i].dot(y)) / mu[i])
        sigma_0_ind = not_J[np.argmin(sigma)]
        sigma_0 = min(sigma)
        if sigma_0 == np.inf:
            # No entering column exists: primal feasible set is empty.
            print("Задача не имеет решения, т.к. пусто множество ее\
допустимых планов.")
            return "Задача не имеет решения"
        y += sigma_0 * delta_y
        j_vector[k] = sigma_0_ind
def test1():
    """Test case 1: a 2x5 dual-feasible problem.

    Returns:
        tuple: (A matrix, b vector, c vector, J basis indices) as np.array.
    """
    matrix = np.array([[-2, -1, -4, 1, 0],
                       [-2, -2, -2, 0, 1]])
    rhs = np.array([-1, -1.5])
    costs = np.array([-4, -3, -7, 0, 0])
    basis = np.array([4, 5])
    double_simplex(c=costs, b=rhs, a_matrix=matrix, j_vector=basis)
    return matrix, rhs, costs, basis
def test2():
    """Test case 2: a 3x8 dual-feasible problem.

    Returns:
        tuple: (A matrix, b vector, c vector, J basis indices) as np.array.
    """
    matrix = np.array([[-2, -1, 1, -7, 0, 0, 0, 2],
                       [4, 2, 1, 0, 1, 5, -1, -5],
                       [1, 1, 0, -1, 0, 3, -1, 1]])
    rhs = np.array([-2, 4, 3])
    costs = np.array([2, 2, 1, -10, 1, 4, -2, -3])
    basis = np.array([2, 5, 7])
    double_simplex(c=costs, b=rhs, a_matrix=matrix, j_vector=basis)
    return matrix, rhs, costs, basis
def test3():
    """Test case 3: a 3x8 dual-feasible problem.

    Returns:
        tuple: (A matrix, b vector, c vector, J basis indices) as np.array.
    """
    matrix = np.array([[-2, -1, 1, -7, 0, 0, 0, 2],
                       [-4, 2, 1, 0, 1, 5, -1, 5],
                       [1, 1, 0, 1, 4, 3, 1, 1]])
    rhs = np.array([-2, 8, -2])
    costs = np.array([12, -2, -6, 20, -18, -5, -7, -20])
    basis = np.array([2, 4, 6])
    double_simplex(c=costs, b=rhs, a_matrix=matrix, j_vector=basis)
    return matrix, rhs, costs, basis
def test4():
    """Test case 4: a 3x8 dual-feasible problem.

    Returns:
        tuple: (A matrix, b vector, c vector, J basis indices) as np.array.
    """
    matrix = np.array([[-2, -1, 10, -7, 1, 0, 0, 2],
                       [-4, 2, 3, 0, 5, 1, -1, 0],
                       [1, 1, 0, 1, -4, 3, -1, 1]])
    rhs = np.array([-2, -5, 2])
    costs = np.array([10, -2, -38, 16, -9, -9, -5, -7])
    basis = np.array([2, 8, 5])
    double_simplex(c=costs, b=rhs, a_matrix=matrix, j_vector=basis)
    return matrix, rhs, costs, basis
if __name__ == "__main__":
simplex()
| 26.540698 | 77 | 0.500767 | import sys
import numpy as np
from lab1 import input_matrix, input_vector
# from lab4 import first_and_second_step_simplex_method
def double_simplex(c, b, a_matrix, j_vector):
    """Dual simplex method: creates an optimal unfeasible plan and then
    converts it to a feasible one without violating optimality.

    Args:
        c (np.array): objective coefficients, one per column of a_matrix
        b (np.array): right-hand side vector
        a_matrix (np.array): matrix composed of the coefficients
            of the original system
        j_vector (np.array): 1-based indices of the initial basis
            (mutated in place: shifted to 0-based and updated per pivot)

    Returns:
        any: feasible plan (list) or message (str)
    """
    m, n = a_matrix.shape
    j_vector -= 1  # convert to 0-based column indices
    # Initial dual plan y = c_B · B^{-1}.
    y = get_initial_y(c, a_matrix, j_vector)
    x_0 = [0 for _ in range(n)]
    while True:
        not_J = np.delete(np.arange(n), j_vector)
        B = np.linalg.inv(a_matrix[:, j_vector])
        kappa = B.dot(b)  # basic components of the pseudo-plan
        if all(kappa >= 0):
            # Pseudo-plan became primal-feasible: fill basic components.
            for j, _kappa in zip(j_vector, kappa):
                x_0[j] = _kappa
            print(str(list(map(lambda _x: round(float(_x), 3), list(x_0)))
                      ).replace('[', '').replace(']', ''), "- план")
            print(f"План: \t{' '.join(map(str,list(x_0)))}")
            return x_0
        # Pivot row: most negative basic component leaves the basis.
        k = np.argmin(kappa)
        delta_y = B[k]
        mu = delta_y.dot(a_matrix)
        sigma = []
        for i in not_J:
            if mu[i] >= 0:
                sigma.append(np.inf)
            else:
                sigma.append((c[i] - a_matrix[:, i].dot(y)) / mu[i])
        sigma_0_ind = not_J[np.argmin(sigma)]
        sigma_0 = min(sigma)
        if sigma_0 == np.inf:
            # No entering column exists: primal feasible set is empty.
            print("Задача не имеет решения, т.к. пусто множество ее\
допустимых планов.")
            return "Задача не имеет решения"
        y += sigma_0 * delta_y
        j_vector[k] = sigma_0_ind
def get_initial_y(c, a_matrix, j_vector):
    """Initial dual plan: y = c_B · B^{-1} over the basis columns j_vector."""
    basis_inverse = np.linalg.inv(a_matrix[:, j_vector])
    return c[j_vector].dot(basis_inverse)
def test1():
    """Test case 1: run the dual simplex on a 2x5 problem.

    Returns:
        tuple: (A matrix, b vector, c vector, J basis indices) as np.array.
    """
    A_mat = np.array([[-2, -1, -4, 1, 0],
                      [-2, -2, -2, 0, 1]])
    b_vec = np.array([-1, -1.5])
    c_vec = np.array([-4, -3, -7, 0, 0])
    J_vec = np.array([4, 5])
    double_simplex(c=c_vec, b=b_vec, a_matrix=A_mat, j_vector=J_vec)
    return A_mat, b_vec, c_vec, J_vec
def test2():
    """Test case 2: run the dual simplex on a 3x8 problem.

    Returns:
        tuple: (A matrix, b vector, c vector, J basis indices) as np.array.
    """
    A_mat = np.array([[-2, -1, 1, -7, 0, 0, 0, 2],
                      [4, 2, 1, 0, 1, 5, -1, -5],
                      [1, 1, 0, -1, 0, 3, -1, 1]])
    b_vec = np.array([-2, 4, 3])
    c_vec = np.array([2, 2, 1, -10, 1, 4, -2, -3])
    J_vec = np.array([2, 5, 7])
    double_simplex(c=c_vec, b=b_vec, a_matrix=A_mat, j_vector=J_vec)
    return A_mat, b_vec, c_vec, J_vec
def test3():
    """Test case 3: run the dual simplex on a 3x8 problem.

    Returns:
        tuple: (A matrix, b vector, c vector, J basis indices) as np.array.
    """
    A_mat = np.array([[-2, -1, 1, -7, 0, 0, 0, 2],
                      [-4, 2, 1, 0, 1, 5, -1, 5],
                      [1, 1, 0, 1, 4, 3, 1, 1]])
    b_vec = np.array([-2, 8, -2])
    c_vec = np.array([12, -2, -6, 20, -18, -5, -7, -20])
    J_vec = np.array([2, 4, 6])
    double_simplex(c=c_vec, b=b_vec, a_matrix=A_mat, j_vector=J_vec)
    return A_mat, b_vec, c_vec, J_vec
def test4():
    """Test case 4: run the dual simplex on a 3x8 problem.

    Returns:
        tuple: (A matrix, b vector, c vector, J basis indices) as np.array.
    """
    A_mat = np.array([[-2, -1, 10, -7, 1, 0, 0, 2],
                      [-4, 2, 3, 0, 5, 1, -1, 0],
                      [1, 1, 0, 1, -4, 3, -1, 1]])
    b_vec = np.array([-2, -5, 2])
    c_vec = np.array([10, -2, -38, 16, -9, -9, -5, -7])
    J_vec = np.array([2, 8, 5])
    double_simplex(c=c_vec, b=b_vec, a_matrix=A_mat, j_vector=J_vec)
    return A_mat, b_vec, c_vec, J_vec
def simplex():
    """CLI entry point: run the bundled test cases when ``test`` is in argv,
    otherwise read an LP problem interactively and solve it with the dual
    simplex method."""
    if 'test' in sys.argv:
        test1()
        test2()
        test3()
        test4()
    else:
        # NOTE(review): n is read but never used, and c/J are sized with m —
        # c should normally have one entry per column; confirm against
        # lab1.input_vector before relying on the interactive path.
        m, n = map(int, input('Введите количество строк и столбцов').split())
        matrix_a = input_matrix(m)
        vector_b = input_vector(m)
        vector_c = input_vector(m)
        vector_j = input_vector(m)
        double_simplex(c=vector_c, a_matrix=matrix_a,
                       b=vector_b, j_vector=vector_j)
        # vector_x = first_and_second_step_simplex_method(
        #     matrix_a, vector_b, vector_c)
        # if vector_x is None:
        #     print("Unbounded")
        # elif len(vector_x) == 0:
        #     print("No solution")
        # else:
        #     print(f"Bounded\n{' '.join(map(str, vector_x))}")
if __name__ == "__main__":
simplex()
| 857 | 0 | 46 |
72c6c0ad298834b4519f5b56809e20963f45a9f1 | 46 | py | Python | raw_input.py | Lana-Pa/Getting-Started-with-Python | c4822755a579b6723cc966412bd06496870d118b | [
"Apache-2.0"
] | null | null | null | raw_input.py | Lana-Pa/Getting-Started-with-Python | c4822755a579b6723cc966412bd06496870d118b | [
"Apache-2.0"
] | null | null | null | raw_input.py | Lana-Pa/Getting-Started-with-Python | c4822755a579b6723cc966412bd06496870d118b | [
"Apache-2.0"
] | null | null | null | name = raw_input("Enter")
print"Hello " + name | 23 | 25 | 0.695652 | name = raw_input("Enter")
print"Hello " + name | 0 | 0 | 0 |
93436186b71e85441eb5cb8c2426ffbf75b102a4 | 373 | py | Python | my_module/metadata.py | SurekaStoeckigt/Mac | f9f8be4b331cb19ff36ec640650687b6da7693bd | [
"MIT"
] | 1 | 2015-11-27T14:29:04.000Z | 2015-11-27T14:29:04.000Z | my_module/metadata.py | SurekaStoeckigt/Mac | f9f8be4b331cb19ff36ec640650687b6da7693bd | [
"MIT"
] | null | null | null | my_module/metadata.py | SurekaStoeckigt/Mac | f9f8be4b331cb19ff36ec640650687b6da7693bd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'airport challenge'
project = "Airport Challenge in Python"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'Just some experimentation!'
authors = ['George Maddocks']
license = 'MIT'
| 24.866667 | 66 | 0.707775 | # -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'airport challenge'
project = "Airport Challenge in Python"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'Just some experimentation!'
authors = ['George Maddocks']
license = 'MIT'
| 0 | 0 | 0 |
f45dc59f60c2f5ae1b6440a570f3de2f51094880 | 24,327 | py | Python | synthesize_with_latent_space.py | noetits/ophelia | 49f4b1495bbe6c768806cf3f1b0415f73e06008c | [
"Apache-2.0"
] | 26 | 2020-05-18T15:49:55.000Z | 2022-03-30T16:38:56.000Z | synthesize_with_latent_space.py | noetits/ophelia | 49f4b1495bbe6c768806cf3f1b0415f73e06008c | [
"Apache-2.0"
] | null | null | null | synthesize_with_latent_space.py | noetits/ophelia | 49f4b1495bbe6c768806cf3f1b0415f73e06008c | [
"Apache-2.0"
] | 8 | 2020-08-27T03:37:00.000Z | 2022-02-19T08:03:04.000Z | from utils import *
from data_load import load_data
import pandas as pd
import sys
import os
import glob
from argparse import ArgumentParser
import imp
import numpy as np
from utils import spectrogram2wav
# from scipy.io.wavfile import write
import soundfile as sf
import tqdm
from concurrent.futures import ProcessPoolExecutor
import tensorflow as tf
from architectures import Text2MelGraph, SSRNGraph, Graph_style_unsupervised
from synthesize import make_mel_batch, split_batch, synth_mel2mag
from configuration import load_config
import logger_setup
from logging import info
import logging
logging.getLogger('matplotlib.font_manager').disabled = True
from data_load import *
import numpy as np
from synthesize import *
import pickle
import matplotlib.pyplot as plt
def mi_regression_feat_embed(X, feat_df):
    """Mutual information between each acoustic feature and each latent dim.

    X holds the latent embeddings; feat_df holds the acoustic features.
    High MI indicates the embeddings carry information about expressiveness.
    Returns a DataFrame indexed by feature name, one column per latent dim.
    """
    from sklearn.feature_selection import mutual_info_regression
    targets = feat_df.values
    n_features = targets.shape[-1]
    rows = np.zeros((n_features, X.shape[-1]))
    for feature_idx in range(n_features):
        rows[feature_idx, :] = mutual_info_regression(X, targets[:, feature_idx])
    mi_table = pd.DataFrame(rows)
    mi_table.index = feat_df.columns
    return mi_table
def corr_feat_embed(embed_dfs, feat_df, titles=None):
    """Absolute correlations between acoustic features and embedding dims.

    For every embedding DataFrame in ``embed_dfs``, appends its dimensions
    (under string column names '0', '1', ...) to a copy of ``feat_df``,
    computes the absolute Pearson correlation matrix, and extracts the
    feature-rows x embedding-columns block.

    Args:
        embed_dfs: list of DataFrames, one per embedding set.
        feat_df: DataFrame of acoustic features (rows aligned with embeds).
        titles: unused; kept for interface compatibility (was a mutable
            default ``[]``, now None).

    Returns:
        tuple: (list of |corr| DataFrames, empty list kept for the legacy
        mutual-information slot of the return interface).
    """
    corr_embeds = []
    mi_embeds = []  # always empty; MI computation was removed but callers
                    # may still unpack two values
    for embed_df in embed_dfs:
        embed_size = embed_df.shape[-1]
        # Append embedding dims to the feature frame under string names so
        # they are distinguishable from the feature columns.
        feat_embed = feat_df.copy()
        for dim in range(embed_size):
            feat_embed[str(dim)] = embed_df.iloc[:, dim]
        corr = feat_embed.astype(float).corr().abs()
        # Block of |corr|: features as rows, embedding dims as columns.
        corr_embed = corr.iloc[:-embed_size, -embed_size:].abs()
        for dim in range(embed_size):
            print('max corr ' + str(dim) + ' : ' + str(np.max(corr_embed.iloc[:, dim])))
        corr_embeds.append(corr_embed)
    return corr_embeds, mi_embeds
if __name__=="__main__":
main_work()
| 40.209917 | 191 | 0.66272 | from utils import *
from data_load import load_data
import pandas as pd
import sys
import os
import glob
from argparse import ArgumentParser
import imp
import numpy as np
from utils import spectrogram2wav
# from scipy.io.wavfile import write
import soundfile as sf
import tqdm
from concurrent.futures import ProcessPoolExecutor
import tensorflow as tf
from architectures import Text2MelGraph, SSRNGraph, Graph_style_unsupervised
from synthesize import make_mel_batch, split_batch, synth_mel2mag
from configuration import load_config
import logger_setup
from logging import info
import logging
logging.getLogger('matplotlib.font_manager').disabled = True
from data_load import *
import numpy as np
from synthesize import *
import pickle
import matplotlib.pyplot as plt
def compute_opensmile_features(hp, conf_path='./tools/opensmile-2.3.0/config/gemaps/eGeMAPSv01a.conf', audio_extension='.wav', mode='train'):
    """Extract openSMILE (eGeMAPS by default) features for every utterance.

    Runs the SMILExtract binary once per audio file (skipping files whose CSV
    already exists), then stacks the per-utterance rows into
    ``feat_df_<mode>.csv`` under ``hp.featuredir``.
    """
    conf_name=conf_path.split('/')[-1].split('.')[0]
    dataset=load_data(hp, audio_extension=audio_extension, mode=mode)
    data=dataset['fpaths']
    dfs=[]
    for di, d in tqdm(enumerate(data)):
        print(str(di) + ' out of ' + str(len(data)))
        id_sentence=os.path.basename(d).split('.')[0]
        wave_path=d
        feature_path=os.path.join(hp.featuredir,'opensmile_features',conf_name)
        if (not os.path.exists(feature_path)):
            os.makedirs(feature_path)
        features_file=os.path.join(feature_path,id_sentence+'.csv')
        # opensmile only supports wave files (in 16 bit PCM), so if it is not (e.g. flac), we use librosa to load audio file and write temp.wav
        if wave_path.split('.')[-1]!='wav':
            y, sr = librosa.load(wave_path, sr=None)
            wave_path='temp.wav'
            sf.write(wave_path, y, sr, subtype='PCM_16')
        if not os.path.isfile(features_file): # if the file doesn't exist, compute features with opensmile
            opensmile_binary_path='./tools/opensmile-2.3.0/bin/linux_x64_standalone_static/'
            # NOTE(review): os.system with interpolated paths — a path with
            # spaces or shell metacharacters breaks (or injects into) the
            # command; prefer subprocess.run([...], shell=False).
            command = opensmile_binary_path+"SMILExtract -I {input_file} -C {conf_file} --csvoutput {output_file}".format(
                input_file=wave_path,
                conf_file=conf_path,
                output_file=features_file)
            os.system(command)
        #import pdb;pdb.set_trace()
        dfs.append(pd.read_csv(features_file, sep=';').iloc[0].iloc[2:]) # discard two first useless elements (name and frametime)
    feat_df=pd.concat(dfs, axis=1).transpose()
    feat_df.to_csv(os.path.join(feature_path,'feat_df_'+mode+'.csv'))
def gather_opensmile_features(hp, conf_path='./tools/opensmile-2.3.0/config/gemaps/eGeMAPSv01a.conf', audio_extension='.wav', mode='train'):
    """Collect already-computed per-utterance openSMILE CSVs into one table.

    Reads the cached ``<utterance_id>.csv`` files under
    ``hp.featuredir/opensmile_features/<conf>`` for the requested split and
    writes the concatenated table (one row per utterance) to
    ``feat_df_<mode>.csv`` in the same directory.
    """
    conf_name = conf_path.split('/')[-1].split('.')[0]
    file_paths = load_data(hp, audio_extension=audio_extension, mode=mode)['fpaths']
    feature_dir = os.path.join(hp.featuredir, 'opensmile_features', conf_name)
    rows = []
    for _, audio_path in tqdm(enumerate(file_paths)):
        #print(str(di) + ' out of ' + str(len(data)))
        utt_id = os.path.basename(audio_path).split('.')[0]
        per_utt_csv = os.path.join(feature_dir, utt_id + '.csv')
        # The first two fields (name, frametime) carry no feature information.
        rows.append(pd.read_csv(per_utt_csv, sep=';').iloc[0].iloc[2:])
    table = pd.concat(rows, axis=1).transpose()
    table.to_csv(os.path.join(feature_dir, 'feat_df_' + mode + '.csv'))
def mi_regression_feat_embed(X, feat_df):
    """Mutual information between latent embeddings and acoustic features.

    ``X`` holds the latent embeddings and ``feat_df`` the acoustic features
    (the regression targets).  The MI of every embedding dimension with every
    feature indicates how much expressiveness information the embeddings
    carry.

    Returns a DataFrame of shape ``(n_features, embedding_dim)`` indexed by
    the feature names of ``feat_df``.
    """
    from sklearn.feature_selection import mutual_info_regression
    targets = feat_df.values
    n_features = targets.shape[-1]
    scores = np.zeros((n_features, X.shape[-1]))
    for feat_idx in range(n_features):
        scores[feat_idx, :] = mutual_info_regression(X, targets[:, feat_idx])
    result = pd.DataFrame(scores)
    result.index = feat_df.columns
    return result
def regression_feat_embed(X, feat_df):
    """Fit a linear regression that predicts acoustic features from embeddings.

    Returns the fitted model together with its coefficient matrix as a
    DataFrame indexed by the feature names of ``feat_df``.
    """
    from sklearn.linear_model import LinearRegression
    model = LinearRegression().fit(X, feat_df.values)
    coefficients = pd.DataFrame(model.coef_)
    coefficients.index = feat_df.columns
    return model, coefficients
def test_regression(model, X, feat_df):
    """Evaluate a fitted regressor on held-out data.

    Predicts the acoustic features from ``X`` and returns, for every feature
    column of ``feat_df``, the Pearson correlation between prediction and
    ground truth as a one-column DataFrame indexed by the feature names.
    """
    truth = feat_df.values
    predicted = model.predict(X)
    per_feature = [
        np.corrcoef([predicted[:, col], truth[:, col].astype(float)])[0, 1]
        for col in range(truth.shape[-1])
    ]
    scores = pd.DataFrame(np.array(per_feature))
    scores.index = feat_df.columns
    return scores
def corr_feat_embed(embed_dfs, feat_df, titles=None):
    """Absolute correlations between features and each embedding dimension.

    For every embedding DataFrame in ``embed_dfs``, its columns (renamed to
    ``'0'``, ``'1'``, ...) are appended to a copy of ``feat_df``; the absolute
    Pearson correlation matrix is then computed and only the
    feature-rows-vs-embedding-columns sub-matrix is kept.

    Fixes over the original: the inner loop variable shadowed the outer
    enumeration index ``i``, and the unused ``titles`` parameter had a mutable
    default (``[]``).

    Args:
        embed_dfs: list of DataFrames, one per embedding space.
        feat_df: DataFrame of acoustic features (rows aligned with embeds).
        titles: accepted for interface compatibility; currently unused.

    Returns:
        ``(corr_embeds, mi_embeds)`` — one correlation DataFrame per input
        embedding, and an always-empty list (the mutual-information variant is
        commented out; the second value is kept so existing callers that
        unpack two results keep working).
    """
    corr_embeds = []
    mi_embeds = []  # MI computation is disabled; stays empty.
    for embed_df in embed_dfs:
        embed_size = embed_df.shape[-1]
        # Append each embedding dimension as an extra string-named column.
        feat_embed = feat_df.copy()
        for dim in range(embed_size):
            feat_embed[str(dim)] = embed_df.iloc[:, dim]
        corr = feat_embed.astype(float).corr().abs()
        # Keep only the block: rows = features, columns = embedding dims.
        corr_embed = corr.iloc[:-embed_size, -embed_size:].abs()
        for dim in range(embed_size):
            print('max corr ' + str(dim) + ' : ' + str(np.max(corr_embed.iloc[:, dim])))
        corr_embeds.append(corr_embed)
    return corr_embeds, mi_embeds
def select_features(corrs_embed_df, feat_df, intra_corr_thresh=0.8, corr_thresh=0.3):
    """Greedy feature selection by predictability, discarding redundancy.

    Features are visited in decreasing order of their score in
    ``corrs_embed_df`` and kept unless they correlate (in absolute value)
    above ``intra_corr_thresh`` with any previously visited feature; finally
    only the kept features whose score exceeds ``corr_thresh`` survive.
    """
    intra = feat_df.corr()
    ranked = corrs_embed_df.sort_values(0)[::-1]
    kept_positions = []
    for pos in range(len(ranked)):
        feature = ranked.iloc[pos]
        # Correlations of this feature with every higher-ranked one.
        earlier = intra[ranked.index].T.iloc[:pos, :][feature.name].abs()
        if (earlier > intra_corr_thresh).sum() == 0:
            kept_positions.append(pos)
    kept = ranked.iloc[kept_positions]
    return kept[kept.abs() > corr_thresh].dropna()
def load_features(hp, conf_path='./tools/opensmile-2.3.0/config/gemaps/eGeMAPSv01a.conf'):
    """Load every cached per-utterance openSMILE CSV into one DataFrame.

    Globs all files under ``hp.featuredir/opensmile_features/<conf>`` and
    concatenates one row per utterance.  Exits the whole process via
    ``sys.exit`` when no feature file exists.

    NOTE(review): a *callable* ``tqdm`` is assumed in scope although this
    module only does ``import tqdm``; presumably it comes from one of the
    star imports above — TODO confirm.
    """
    import glob  # re-imports (and shadows) the module-level glob; kept as-is
    conf_name=conf_path.split('/')[-1].split('.')[0]
    feature_path=os.path.join(hp.featuredir,'opensmile_features',conf_name)
    paths=glob.glob(feature_path+'/*')
    if paths==[]:
        sys.exit('There is no feature file')
    dfs=[]
    for path in tqdm(paths):
        dfs.append(pd.read_csv(path, sep=';').iloc[0].iloc[2:]) # discard two first useless elements (name and frametime)
    # One column per utterance so far; transpose to one row per utterance.
    feat_df=pd.concat(dfs, axis=1).transpose()
    #feat_df.index = pd.read_csv(path, sep=';').iloc[0].iloc[2:].index
    return feat_df
def get_emo_cats(hp):
    """Look up the emotion category of every utterance of the dataset.

    Matches each file name (without extension) against the ``id`` column of
    the CSV referenced by ``hp.data_info`` and returns the list of
    ``emotion`` values, or ``None`` when no data-info file is configured.
    """
    if not hp.data_info:
        return None
    info = pd.read_csv(hp.data_info)
    dataset = load_data(hp)
    fpaths, text_lengths, texts = dataset['fpaths'], dataset['text_lengths'], dataset['texts']
    categories = []
    for fpath in fpaths:
        utt_id = os.path.basename(fpath).split('.')[0]
        categories.append(info[info.id == utt_id]['emotion'].values[0])
    return categories
def compute_unsupervised_embeddings(hp, g, model_type, mode='train'):
    """Extract one style/emotion code per utterance of a split.

    Maps every audio file of the split to its coarse-mel ``.npy`` file under
    ``hp.coarse_audio_dir`` and runs ``extract_emo_code`` over them with the
    given graph ``g``.
    """
    dataset = load_data(hp, mode=mode)
    fpaths, text_lengths, texts = dataset['fpaths'], dataset['text_lengths'], dataset['texts']
    label_lengths, audio_lengths = dataset['label_lengths'], dataset['audio_lengths'] ## might be []
    mel_files = [
        "{}/{}".format(hp.coarse_audio_dir, os.path.basename(fpath).replace("wav", "npy"))
        for fpath in fpaths
    ]
    return extract_emo_code(hp, mel_files, g, model_type)
def save_embeddings(codes, logdir, filename='emo_codes', mode='train'):
    """Persist an embedding matrix as ``<logdir>/<filename>_<mode>.npy``."""
    target = os.path.join(logdir, filename + '_' + mode + '.npy')
    np.save(target, codes)
def load_embeddings(logdir, filename='emo_codes', mode='train'):
    """Load an embedding matrix previously saved by ``save_embeddings``."""
    return np.load(os.path.join(logdir, filename + '_' + mode + '.npy'))
def save(var, logdir, filename='code_reduction_model_pca'):
    """Pickle ``var`` to ``<logdir>/<filename>.pkl``.

    Uses a context manager so the file handle is flushed and closed
    deterministically; the original passed ``open(...)`` directly to
    ``pickle.dump`` and left the handle to the garbage collector.
    """
    with open(os.path.join(logdir, filename + '.pkl'), 'wb') as handle:
        pickle.dump(var, handle)
def load(logdir, filename='code_reduction_model_pca'):
    """Unpickle and return the object stored at ``<logdir>/<filename>.pkl``.

    Opens the file in a ``with`` block so the descriptor is released even if
    unpickling raises (the original never closed it).
    """
    with open(os.path.join(logdir, filename + '.pkl'), 'rb') as handle:
        return pickle.load(handle)
def embeddings_reduction(embed, method='pca'):
    """Project embeddings to 2-D with the chosen dimensionality reduction.

    Args:
        embed: array-like of shape (n_samples, dim).
        method: one of ``'pca'``, ``'tsne'``, ``'umap'``.

    Returns:
        ``(model, results)`` where ``results`` is the (n_samples, 2)
        projection and ``model`` the fitted reducer.

    Raises:
        ValueError: for an unknown ``method``.  (The original only printed a
        message and then crashed with ``UnboundLocalError`` on the ``return``
        statement because ``model``/``results`` were never assigned.)
    """
    from sklearn.decomposition import PCA
    from sklearn.manifold import TSNE
    print('Reducing with method '+method)
    if method == 'pca':
        model = PCA(n_components=2)
        results = model.fit_transform(embed)
    elif method == 'tsne':
        model = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
        results = model.fit_transform(embed)
    elif method == 'umap':
        import umap  # imported lazily: optional dependency
        model = umap.UMAP()
        results = model.fit_transform(embed)
    else:
        raise ValueError('Wrong dimension reduction method: ' + repr(method))
    return model, results
def scatter_plot(matrice, c=None, s=20, alpha=1):
    """2-D scatter plot of ``matrice`` (first two columns), colored by ``c``.

    When ``c`` is given, the color scale is clipped to mean +/- 3 std so
    outliers do not wash out the colormap.  The original crashed with
    ``AttributeError`` when ``c`` was left at its default of ``None``
    (``c.mean()`` on None) — e.g. via the ``show_plot`` task; passing
    ``vmin=vmax=None`` keeps matplotlib's default scaling in that case.

    Returns the matplotlib ``PathCollection`` produced by ``plt.scatter``.
    """
    import matplotlib.pyplot as plt
    import matplotlib
    #matplotlib.use('TkAgg')
    plt.cla()
    if c is None:
        vmin = vmax = None  # let matplotlib autoscale the colors
    else:
        vmin = c.mean() - 3 * c.std()
        vmax = c.mean() + 3 * c.std()
    scatter = plt.scatter(matrice[:, 0], matrice[:, 1], c=c, s=s, alpha=alpha, vmin=vmin, vmax=vmax)
    plt.colorbar()
    return scatter
def plot_gradients(coeff, corr, ax=None):
    """Draw regression-gradient arrows for the selected features on ``ax``.

    Args:
        coeff: DataFrame of 2-D gradient vectors indexed by feature name.
        corr: DataFrame of correlation scores indexed by feature name; its
            rows decide which gradients are drawn and the value printed in
            each label.
        ax: target axes; defaults to the *current* axes at call time.  (The
            original used ``ax=plt.gca()`` as the default, which Python
            evaluates once at import time — silently creating a figure on
            import and binding every later call to that stale axes.)
    """
    import matplotlib
    matplotlib.use('Agg')
    if ax is None:
        ax = plt.gca()
    origin = [0, 0]  # all gradient arrows start at the origin
    from adjustText import adjust_text
    texts = []
    for i in range(len(corr)):
        grad = coeff[coeff.index == corr.index[i]].values[0]
        x = [origin[0], grad[0]]
        y = [origin[1], grad[1]]
        ax.plot(x, y, lw=2)
        # Shorten the raw openSMILE feature name for the label.
        feat_name = corr.index[i].replace('_sma3', ' ').replace('nz', '').replace('_', '').replace('amean', 'mean').replace('semitoneFrom27.5Hz', '')
        texts.append(ax.text(grad[0], grad[1], feat_name + ' ' + str(corr.iloc[i].iloc[0].round(2)), fontsize=9))
    # Nudge labels apart so they do not overlap.
    adjust_text(texts, force_text=0.05, autoalign='xy', arrowprops=dict(arrowstyle="->", color='b', lw=1))
    plt.show()
def add_margin(ax, x=0.05, y=0.05):
    """Widen the axis limits of ``ax`` by a fraction of their current span.

    By default 5% of the x range and 5% of the y range are added on each
    side; override with the ``x`` and ``y`` arguments.
    """
    x_lo, x_hi = ax.get_xlim()
    y_lo, y_hi = ax.get_ylim()
    pad_x = (x_hi - x_lo) * x
    pad_y = (y_hi - y_lo) * y
    ax.set_xlim(x_lo - pad_x, x_hi + pad_x)
    ax.set_ylim(y_lo - pad_y, y_hi + pad_y)
def abbridge_column_names(df):
    """Shorten the openSMILE feature names in ``df.columns`` (in place).

    Strips boilerplate such as ``_sma3``, ``nz``, underscores and the pitch
    unit suffix, and abbreviates ``stddev``/``Stddev`` to ``std``.  Returns
    the same DataFrame for chaining.
    """
    # Applied in order; the order matters ('_sma3' before '_', etc.).
    substitutions = (
        ('_sma3', ' '),
        ('nz', ''),
        ('_', ''),
        ('amean', 'mean'),
        ('semitoneFrom27.5Hz', ''),
        ('stddev', 'std'),
        ('Stddev', 'std'),
    )
    short_names = []
    for column in df.columns:
        for old, new in substitutions:
            column = column.replace(old, new)
        short_names.append(column)
    df.columns = short_names
    return df
def main_work():
    """Command-line entry point: dispatch one of the analysis/serving tasks.

    Tasks (``-t``):
      * ``compute_codes`` — build the synthesis graph and extract one style
        code per utterance, saved under the log directory.
      * ``reduce_codes`` — project stored codes to 2-D (pca/tsne/umap).
      * ``compute_opensmile_features`` — run openSMILE over the dataset audio.
      * ``show_plot`` — scatter-plot the reduced codes.
      * ``ICE_TTS`` / ``ICE_TTS_server`` — interactive exploration UIs
        (desktop PyQt5 / web server).
      * ``acoustic_analysis`` — regressions and correlations between codes
        and openSMILE features, plus figures under ``results/<config>``.
    """
    # ============= Process command line ============
    a = ArgumentParser()
    a.add_argument('-c', dest='config', required=True, type=str)
    a.add_argument('-m', dest='model_type', required=True, choices=['t2m', 'unsup'])
    a.add_argument('-t', dest='task', required=True, choices=['acoustic_analysis','compute_codes', 'reduce_codes', 'compute_opensmile_features', 'show_plot','ICE_TTS','ICE_TTS_server'])
    a.add_argument('-r', dest='reduction_method', required=False, choices=['pca', 'tsne', 'umap'])
    a.add_argument('-p', dest='port', required=False, type=int, default=5000)
    a.add_argument('-s', dest='set', required=False, type=str, default='train')
    opts = a.parse_args()
    print('opts')
    print(opts)
    # ===============================================
    model_type = opts.model_type
    method=opts.reduction_method
    hp = load_config(opts.config)
    # Per-model-type log directory; codes and reduction models live here.
    logdir = hp.logdir + "-" + model_type
    port=opts.port
    mode=opts.set
    config_name=opts.config.split('/')[-1].split('.')[0]
    logger_setup.logger_setup(logdir)
    info('Command line: %s'%(" ".join(sys.argv)))
    print(logdir)
    task=opts.task
    if task=='compute_codes':
        # Build the appropriate synthesis graph, then extract and store codes.
        if model_type=='t2m':
            g = Text2MelGraph(hp, mode="synthesize"); print("Graph 1 (t2m) loaded")
        elif model_type=='unsup':
            g = Graph_style_unsupervised(hp, mode="synthesize"); print("Graph 1 (unsup) loaded")
        codes=compute_unsupervised_embeddings(hp, g, model_type, mode=mode)
        save_embeddings(codes, logdir, mode=mode)
        #emo_cats=get_emo_cats(hp)
        #save(emo_cats, logdir, filename='emo_cats')
    elif task=='reduce_codes':
        # Stored codes may be (N, 1, D) or (N, D) depending on their vintage.
        try:
            embed=load_embeddings(logdir, mode=mode)[:,0,:]
        except IndexError: # I may have changed the shape of the matrix ...
            embed=load_embeddings(logdir, mode=mode)
        #import pdb;pdb.set_trace()
        model, results=embeddings_reduction(embed, method=method)
        save_embeddings(results, logdir, filename='emo_codes_'+method, mode=mode)
        save(model, logdir, filename='code_reduction_model_'+method)
    elif task=='compute_opensmile_features':
        compute_opensmile_features(hp, audio_extension='.wav', mode=mode)
    elif task=='show_plot':
        embed=load_embeddings(logdir, filename='emo_codes_'+method)
        # NOTE(review): scatter_plot is called without a color vector here.
        scatter_plot(embed)
    elif task=='ICE_TTS':
        # Desktop (PyQt5) interactive exploration of the code space.
        from interface import ICE_TTS
        embed=load_embeddings(logdir)[:,0,:]
        embed_reduc=load_embeddings(logdir, filename='emo_codes_'+method)
        from PyQt5.QtWidgets import QApplication
        app = QApplication(sys.argv)
        ice=ICE_TTS(hp, embed_reduc, embed)
        ice.show()
        sys.exit(app.exec_())
    elif task=='ICE_TTS_server':
        # Web-server variant: sample a 100x100 grid over the training PCA
        # plane and invert it back to full codes so the UI can synthesize
        # from any clicked point.
        # import pdb;pdb.set_trace()
        from server.ice_tts_server import ICE_TTS_server
        try:
            embed=load_embeddings(logdir, mode=mode)[:,0,:]
        except IndexError: # I may have changed the shape of the matrix ...
            embed=load_embeddings(logdir, mode=mode)
        print('Loading embeddings')
        embed_reduc=load_embeddings(logdir, filename='emo_codes_'+method, mode=mode)
        from itertools import product
        train_codes_pca=np.load(os.path.join(logdir,'emo_codes_pca_train.npy'))
        pca_model=pickle.load(open(os.path.join(logdir,'code_reduction_model_pca.pkl'), 'rb'))
        # Bounding box of the training codes in PCA space.
        min_xy=train_codes_pca.min(axis=0)
        max_xy=train_codes_pca.max(axis=0)
        xs=np.mgrid[min_xy[0]:max_xy[0]:100j]
        ys=np.mgrid[min_xy[1]:max_xy[1]:100j]
        X=np.array(list(product(xs, ys)))
        codes=pca_model.inverse_transform(X)
        # X=np.load('X.npy')
        # codes=np.load('codes.npy')
        print('Loading emo cats')
        emo_cats=get_emo_cats(hp)
        #emo_cats=load(logdir, filename='emo_cats')
        #import pdb;pdb.set_trace()
        ice=ICE_TTS_server(hp, X, codes, emo_cats, model_type=model_type, port=port)
        # ice=ICE_TTS_server(hp, embed_reduc, embed, emo_cats, model_type=model_type, port=port)
        #ice=ICE_TTS_server(hp, embed_reduc, embed, model_type=model_type)
        #ice=ICE_TTS_server(hp, embed_reduc, embed, n_polar_axes=4, model_type=model_type)
    elif task=='acoustic_analysis':
        directory='results/'+config_name
        if not os.path.exists(directory):
            os.makedirs(directory)
        # Heavy analysis-only dependencies are imported lazily for this task.
        import seaborn as sns
        from sklearn.feature_selection import SelectKBest
        from sklearn.feature_selection import f_regression
        from sklearn.linear_model import LinearRegression
        from pandas.plotting import scatter_matrix
        # from pandas.plotting._matplotlib.misc import scatter_matrix
        import matplotlib.pyplot as plt
        from scipy.stats import pearsonr
        import itertools
        print('MODE', mode)
        # Codes for the chosen split plus a held-out validation split.
        try:
            embed=load_embeddings(logdir, mode=mode)[:,0,:]
            embed_valid=load_embeddings(logdir, mode='validation')[:,0,:]
        except IndexError: # I may have changed the shape of the matrix ...
            embed=load_embeddings(logdir, mode=mode)
            embed_valid=load_embeddings(logdir, mode='validation')
        conf_name='eGeMAPSv01a'
        feature_path=os.path.join(hp.featuredir,'opensmile_features',conf_name,'feat_df_'+mode+'.csv')
        feat_df=pd.read_csv(feature_path)
        feat_df=feat_df.drop(columns=['Unnamed: 0'])
        feature_path=os.path.join(hp.featuredir,'opensmile_features',conf_name,'feat_df_'+'validation'+'.csv')
        feat_df_valid=pd.read_csv(feature_path)
        #import pdb;pdb.set_trace()
        feat_df_valid=feat_df_valid.drop(columns=['Unnamed: 0'])
        feat_df=abbridge_column_names(feat_df)
        feat_df_valid=abbridge_column_names(feat_df_valid)
        # Mean normalization (with same mean and variance computed from training data)
        # NOTE(review): feat_df is overwritten by its normalized version
        # *before* its mean/std are applied to the validation split, so the
        # second line divides by ~1 and subtracts ~0 — likely unintended;
        # verify against the comment above.
        feat_df=(feat_df-feat_df.mean())/feat_df.std()
        feat_df_valid=(feat_df_valid-feat_df.mean())/feat_df.std()
        # Fit features-from-codes regression on train, score on validation.
        model, coeff_df = regression_feat_embed(pd.DataFrame(embed), feat_df)
        corrs_embed_df=test_regression(model, pd.DataFrame(embed_valid), feat_df_valid)
        print('Correlations:')
        print(corrs_embed_df.sort_values(0)[::-1][:20])
        corrs_embed_df.sort_values(0)[::-1][:20].to_csv(directory+'/correlations.csv')
        selected=select_features(corrs_embed_df, feat_df_valid, intra_corr_thresh=0.7, corr_thresh=0.3)
        print(selected.to_latex().replace('\_sma3', ' ').replace('nz','').replace('\_','').replace('amean','mean').replace('semitoneFrom27.5Hz',''))
        selected.to_csv(directory+'/selected_correlations.csv')
        # print('Gradients:')
        # print(coeff_df)
        #method='pca'
        # Same analysis on the 2-D reduced codes.
        embed_reduc=load_embeddings(logdir, filename='emo_codes_'+method, mode=mode)
        embed_reduc_valid=load_embeddings(logdir, filename='emo_codes_'+method, mode='validation')
        model_reduc, coeff_reduc_df = regression_feat_embed(pd.DataFrame(embed_reduc), feat_df)
        corrs_embed_reduc_df=test_regression(model_reduc, pd.DataFrame(embed_reduc_valid), feat_df_valid)
        print('Correlations:')
        print(corrs_embed_reduc_df.sort_values(0)[::-1][:20])
        # NOTE(review): this writes corrs_embed_df (the non-reduc scores) to
        # the *_reduc.csv file — probably meant corrs_embed_reduc_df; verify.
        corrs_embed_df.sort_values(0)[::-1][:20].to_csv(directory+'/correlations_reduc.csv')
        selected_reduc=select_features(corrs_embed_reduc_df, feat_df_valid, intra_corr_thresh=0.7, corr_thresh=0.25)
        # NOTE(review): prints `selected`, not `selected_reduc` — verify.
        print(selected.to_latex().replace('\_sma3', ' ').replace('nz','').replace('\_','').replace('amean','mean').replace('semitoneFrom27.5Hz',''))
        selected_reduc.to_csv(directory+'/selected_correlations_reduc.csv')
        feat_predictions_df=pd.DataFrame(model.predict(embed))
        feat_predictions_df.index=feat_df.index
        feat_predictions_df.columns=feat_df.columns
        # NOTE(review): the next two bare expressions compute subsets whose
        # results are discarded — no-ops.
        feat_df[selected.index]
        feat_predictions_df[selected.index]
        # just checking it seems correct
        # print(pearsonr(feat_df[selected.index]['F0semitoneFrom27.5Hz_sma3nz_percentile50.0'],feat_predictions_df[selected.index]['F0semitoneFrom27.5Hz_sma3nz_percentile50.0'] ))
        # selected_feats=selected.index.to_list()
        # fig, axs = plt.subplots(nrows=sc.shape[0], ncols=sc.shape[1], figsize=(100, 100))
        # for pair in itertools.product(range(len(selected)), repeat=2):
        # x=feat_df[selected_feats[pair[0]]]
        # y=feat_predictions_df[selected_feats[pair[1]]]
        # axs[pair[0], pair[1]].scatter(x, y, alpha=0.2)
        # fig.savefig('figures/scatter_matrix.png')
        # One truth-vs-prediction scatter per selected feature.
        h=100
        selected_feats=selected.index.to_list()
        fig, axs = plt.subplots(nrows=len(selected), ncols=1, figsize=(h/len(selected)*3, h))
        for i in range(len(selected)):
            x=feat_df[selected_feats[i]]
            y=feat_predictions_df[selected_feats[i]]
            axs[i].scatter(x, y, alpha=0.2)
        fig.savefig(directory+'/scatter_plots_feats.png')
        #print(corrs_embed_reduc_df)
        print('Gradients:')
        print(coeff_reduc_df)
        coeff_reduc_df.to_csv(directory+'/gradients.csv')
        # Normalize each gradient row to unit length for plotting.
        normalized_gradients=coeff_reduc_df.div(((coeff_reduc_df**2).sum(axis=1))**0.5, axis=0)
        plt.cla()
        plt.clf()
        plt.close()
        # sc=scatter_plot(embed_reduc, c=feat_df['F0semitoneFrom27.5Hz_sma3nz_amean'].values)
        sc=scatter_plot(embed_reduc, c=feat_df['F0 mean'].values)
        plot_gradients(normalized_gradients,selected_reduc, ax=sc.get_figure().gca())
        sc.get_figure().savefig(directory+'/scatter_F0_mean_'+method+'.png')
        plt.cla()
        plt.clf()
        plt.close()
        # sc=scatter_plot(embed_reduc, c=feat_df['F0semitoneFrom27.5Hz_sma3nz_amean'].values)
        sc=scatter_plot(embed_reduc, c=feat_df['F0 percentile50.0'].values)
        plot_gradients(normalized_gradients,selected_reduc, ax=sc.get_figure().gca())
        sc.get_figure().savefig(directory+'/scatter_F0_percentile50.0_'+method+'.png')
        print(feat_df.columns)
        # import pdb;pdb.set_trace()
        plt.cla()
        plt.clf()
        plt.close()
        # sc=scatter_plot(embed_reduc, c=feat_df['F0semitoneFrom27.5Hz_sma3nz_amean'].values)
        sc=scatter_plot(embed_reduc, c=feat_df['F3amplitudeLogRelF0 stdNorm'].values)
        plot_gradients(normalized_gradients,selected_reduc, ax=sc.get_figure().gca())
        sc.get_figure().savefig(directory+'/scatter_F3amplitudeLogRelF0_stdNorm_'+method+'.png')
        plt.cla()
        plt.clf()
        plt.close()
        # sc=scatter_plot(embed_reduc, c=feat_df['F0semitoneFrom27.5Hz_sma3nz_amean'].values)
        sc=scatter_plot(embed_reduc, c=feat_df['stdVoicedSegmentLengthSec'].values)
        plot_gradients(normalized_gradients,selected_reduc, ax=sc.get_figure().gca())
        sc.get_figure().savefig(directory+'/scatter_stdVoicedSegmentLengthSec_'+method+'.png')
        plt.cla()
        plt.clf()
        plt.close()
        hist=sns.distplot(feat_df['F0 mean'])
        hist.get_figure().savefig(directory+'/hist_F0_mean_'+method+'.png')
        # hist=sns.distplot(feat_df['F3amplitudeLogRelF0 stddevNorm'])
        # hist.get_figure().savefig('figures/hist_F3amplitudeLogRelF0_stddevNorm_'+method+'.png')
        #mi=mi_regression_feat_embed(pd.DataFrame(embed_reduc), feat_df)
        #print('mi',mi.sort_values(0)[::-1][:20])
        #print('mi',mi.sort_values(1)[::-1][:20])
        # Plot corrs heatmaps
        plt.close()
        corrs_heatmap_feats=sns.heatmap(feat_df.corr().abs(), xticklabels=False)
        corrs_heatmap_feats.get_figure().savefig(directory+'/corrs_heatmap_feats.pdf', bbox_inches='tight')
        plt.close()
        embed_corr=pd.DataFrame(embed).corr().abs()
        embed_corr_heatmap=sns.heatmap(embed_corr)
        embed_corr_heatmap.get_figure().savefig(directory+'/embed_corr_heatmap.pdf', bbox_inches='tight')
        plt.close()
        # NOTE(review): this local name shadows the module-level function
        # corr_feat_embed for the rest of this branch.
        corr_feat_embed=pd.concat([pd.DataFrame(embed),feat_df], axis=1).corr().abs()
        sns.set(font_scale=0.2)
        corr_feat_embed_heatmap=sns.heatmap(corr_feat_embed, xticklabels=False)
        # add_margin(corr_feat_embed_heatmap,x=0.1,y=0.0)
        corr_feat_embed_heatmap.get_figure().savefig(directory+'/corr_feat_embed_heatmap.pdf', bbox_inches='tight')
    else:
        print('Wrong task, does not exist')
# Script entry point: parse CLI arguments and dispatch the requested task.
if __name__=="__main__":
    main_work()
| 21,139 | 0 | 414 |
e3f1a10563e3ed55f959f158fa486ba1b174905f | 3,160 | py | Python | scripts/network_analysis/generate_all_paths.py | mwinding/connectome_analysis | dbc747290891805863c9481921d8080dc2043d21 | [
"MIT"
] | null | null | null | scripts/network_analysis/generate_all_paths.py | mwinding/connectome_analysis | dbc747290891805863c9481921d8080dc2043d21 | [
"MIT"
] | 2 | 2022-02-10T11:03:49.000Z | 2022-02-10T11:04:08.000Z | scripts/network_analysis/generate_all_paths.py | mwinding/connectome_analysis | dbc747290891805863c9481921d8080dc2043d21 | [
"MIT"
] | null | null | null | # %%
#
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import networkx as nx
from joblib import Parallel, delayed
from tqdm import tqdm
from pymaid_creds import url, name, password, token
import pymaid
import connectome_tools.cluster_analysis as clust
import connectome_tools.celltype as ct
import connectome_tools.process_graph as pg
import connectome_tools.process_matrix as pm
rm = pymaid.CatmaidInstance(url, token, name, password)
# load previously generated paths
all_edges_combined = pd.read_csv('interhemisphere/csv/all_paired_edges.csv', index_col=0)
graph = pg.Analyze_Nx_G(all_edges_combined, graph_type='directed')
pairs = pm.Promat.get_pairs()
# %%
# load neuron types
all_sensories = ct.Celltype_Analyzer.get_skids_from_meta_annotation('mw brain sensories')
all_outputs = ct.Celltype_Analyzer.get_skids_from_meta_annotation('mw brain outputs')
sensory_pair_ids = pm.Promat.load_pairs_from_annotation('sensory', pairs, return_type='all_pair_ids', skids=all_sensories, use_skids=True)
outputs_pair_ids = pm.Promat.load_pairs_from_annotation('output', pairs, return_type='all_pair_ids', skids=all_outputs, use_skids=True)
dVNC_pair_ids = pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids')
dSEZ_pair_ids = pm.Promat.load_pairs_from_annotation('mw dSEZ', pairs, return_type='all_pair_ids')
RGN_pair_ids = pm.Promat.load_pairs_from_annotation('mw RGN', pairs, return_type='all_pair_ids')
# %%
# generate and save paths all sensory to outputs
cutoff = 6
outputs = [dVNC_pair_ids, dSEZ_pair_ids, RGN_pair_ids, outputs_pair_ids]
output_types = ['dVNC', 'dSEZ', 'RGN', 'output']
save_paths = [f'data/paths/all_paths_sens-to-{output_type}_cutoff{cutoff}' for output_type in output_types]
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(G=graph.G, source_list=sensory_pair_ids, targets=outputs[i], cutoff=cutoff, save_path = save_paths[i]) for i in tqdm(range(len(save_paths))))
'''
save_path = f'data/paths/all_paths_sens-to-dVNC_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, dVNC_pair_ids, cutoff=cutoff, save_path=save_path)
save_path = f'data/paths/all_paths_sens-to-dSEZ_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, dSEZ_pair_ids, cutoff=cutoff, save_path=save_path)
save_path = f'data/paths/all_paths_sens-to-RGN_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, RGN_pair_ids, cutoff=cutoff, save_path=save_path)
save_path = f'data/paths/all_paths_sens-to-output_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, outputs_pair_ids, cutoff=cutoff, save_path=save_path)
'''
# %%
#
dVNC_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-dVNC_cutoff{cutoff}.csv.gz')
dSEZ_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-dSEZ_cutoff{cutoff}.csv.gz')
RGN_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-RGN_cutoff{cutoff}.csv.gz')
output_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-output_cutoff{cutoff}.csv.gz')
# %%
| 42.702703 | 209 | 0.811076 | # %%
#
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import networkx as nx
from joblib import Parallel, delayed
from tqdm import tqdm
from pymaid_creds import url, name, password, token
import pymaid
import connectome_tools.cluster_analysis as clust
import connectome_tools.celltype as ct
import connectome_tools.process_graph as pg
import connectome_tools.process_matrix as pm
rm = pymaid.CatmaidInstance(url, token, name, password)
# load previously generated paths
all_edges_combined = pd.read_csv('interhemisphere/csv/all_paired_edges.csv', index_col=0)
graph = pg.Analyze_Nx_G(all_edges_combined, graph_type='directed')
pairs = pm.Promat.get_pairs()
# %%
# load neuron types
all_sensories = ct.Celltype_Analyzer.get_skids_from_meta_annotation('mw brain sensories')
all_outputs = ct.Celltype_Analyzer.get_skids_from_meta_annotation('mw brain outputs')
sensory_pair_ids = pm.Promat.load_pairs_from_annotation('sensory', pairs, return_type='all_pair_ids', skids=all_sensories, use_skids=True)
outputs_pair_ids = pm.Promat.load_pairs_from_annotation('output', pairs, return_type='all_pair_ids', skids=all_outputs, use_skids=True)
dVNC_pair_ids = pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids')
dSEZ_pair_ids = pm.Promat.load_pairs_from_annotation('mw dSEZ', pairs, return_type='all_pair_ids')
RGN_pair_ids = pm.Promat.load_pairs_from_annotation('mw RGN', pairs, return_type='all_pair_ids')
# %%
# generate and save paths all sensory to outputs
cutoff = 6
outputs = [dVNC_pair_ids, dSEZ_pair_ids, RGN_pair_ids, outputs_pair_ids]
output_types = ['dVNC', 'dSEZ', 'RGN', 'output']
save_paths = [f'data/paths/all_paths_sens-to-{output_type}_cutoff{cutoff}' for output_type in output_types]
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(G=graph.G, source_list=sensory_pair_ids, targets=outputs[i], cutoff=cutoff, save_path = save_paths[i]) for i in tqdm(range(len(save_paths))))
'''
save_path = f'data/paths/all_paths_sens-to-dVNC_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, dVNC_pair_ids, cutoff=cutoff, save_path=save_path)
save_path = f'data/paths/all_paths_sens-to-dSEZ_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, dSEZ_pair_ids, cutoff=cutoff, save_path=save_path)
save_path = f'data/paths/all_paths_sens-to-RGN_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, RGN_pair_ids, cutoff=cutoff, save_path=save_path)
save_path = f'data/paths/all_paths_sens-to-output_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, outputs_pair_ids, cutoff=cutoff, save_path=save_path)
'''
# %%
#
dVNC_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-dVNC_cutoff{cutoff}.csv.gz')
dSEZ_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-dSEZ_cutoff{cutoff}.csv.gz')
RGN_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-RGN_cutoff{cutoff}.csv.gz')
output_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-output_cutoff{cutoff}.csv.gz')
# %%
| 0 | 0 | 0 |
ba26548fed3f4b93f8168afaf3637ec24973ff02 | 3,407 | py | Python | mayan/apps/document_states/tests/test_workflow_instance_views.py | sophiawa/Mayan-EDMS | 42f20576d0c690b645a60bf53c5169cda4264231 | [
"Apache-2.0"
] | null | null | null | mayan/apps/document_states/tests/test_workflow_instance_views.py | sophiawa/Mayan-EDMS | 42f20576d0c690b645a60bf53c5169cda4264231 | [
"Apache-2.0"
] | 10 | 2021-03-20T00:01:17.000Z | 2022-03-12T00:48:43.000Z | mayan/apps/document_states/tests/test_workflow_instance_views.py | sophiawa/Mayan-EDMS | 42f20576d0c690b645a60bf53c5169cda4264231 | [
"Apache-2.0"
] | 1 | 2021-04-30T09:44:14.000Z | 2021-04-30T09:44:14.000Z | from mayan.apps.documents.tests.base import GenericDocumentViewTestCase
from ..permissions import permission_workflow_transition
from .mixins import (
WorkflowTestMixin, WorkflowViewTestMixin, WorkflowTransitionViewTestMixin
)
| 35.489583 | 80 | 0.714412 | from mayan.apps.documents.tests.base import GenericDocumentViewTestCase
from ..permissions import permission_workflow_transition
from .mixins import (
WorkflowTestMixin, WorkflowViewTestMixin, WorkflowTransitionViewTestMixin
)
class WorkflowTransitionDocumentViewTestCase(
WorkflowTestMixin, WorkflowViewTestMixin, WorkflowTransitionViewTestMixin,
GenericDocumentViewTestCase
):
auto_upload_test_document = False
def setUp(self):
super(WorkflowTransitionDocumentViewTestCase, self).setUp()
self._create_test_workflow(add_document_type=True)
self._create_test_workflow_states()
self._create_test_workflow_transitions()
self._upload_test_document()
self.test_workflow_instance = self.test_document.workflows.first()
def test_workflow_transition_selection_get_view_with_workflow_access(self):
self.grant_access(
obj=self.test_workflow, permission=permission_workflow_transition
)
response = self._request_test_workflow_transition_selection_get_view()
self.assertEqual(response.status_code, 200)
self.assertEqual(
self.test_workflow_instance.get_current_state(),
self.test_workflow_state_1
)
def test_workflow_transition_selection_post_view_with_workflow_access(self):
self.grant_access(
obj=self.test_workflow, permission=permission_workflow_transition
)
response = self._request_test_workflow_transition_selection_post_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_workflow_instance.get_current_state(),
self.test_workflow_state_1
)
def test_workflow_transition_execute_view_no_access(self):
"""
Test transitioning a workflow without the transition workflow
permission.
"""
response = self._request_test_workflow_transition_execute_view()
self.assertEqual(response.status_code, 404)
# Workflow should remain in the same initial state
self.assertEqual(
self.test_workflow_instance.get_current_state(),
self.test_workflow_state_1
)
def test_workflow_transition_execute_view_with_workflow_access(self):
"""
Test transitioning a workflow by granting the transition workflow
permission to the role.
"""
self.grant_access(
obj=self.test_workflow, permission=permission_workflow_transition
)
response = self._request_test_workflow_transition_execute_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_workflow_instance.get_current_state(),
self.test_workflow_state_2
)
def test_workflow_transition_execute_view_with_transition_access(self):
    """
    Test transitioning a workflow by granting the transition workflow
    permission to the role, this time on the transition object itself.
    """
    self.grant_access(
        permission=permission_workflow_transition,
        obj=self.test_workflow_transition
    )
    resp = self._request_test_workflow_transition_execute_view()
    self.assertEqual(resp.status_code, 302)
    # Access on the transition alone is enough to execute it.
    self.assertEqual(
        self.test_workflow_state_2,
        self.test_workflow_instance.get_current_state()
    )
| 1,203 | 1,947 | 23 |
47980dd92978c73d705c0a44b70d6343faa77b36 | 2,338 | py | Python | src/recording_script_generator_tests/test_app_preparation.py | stefantaubert/recording-script-generator | 01cdcd4b85ed7f245f4bb8535d870c04472746c9 | [
"MIT"
] | null | null | null | src/recording_script_generator_tests/test_app_preparation.py | stefantaubert/recording-script-generator | 01cdcd4b85ed7f245f4bb8535d870c04472746c9 | [
"MIT"
] | null | null | null | src/recording_script_generator_tests/test_app_preparation.py | stefantaubert/recording-script-generator | 01cdcd4b85ed7f245f4bb8535d870c04472746c9 | [
"MIT"
] | null | null | null | # from pathlib import Path
# from text_utils.ipa2symb import IPAExtractionSettings
# from text_utils.language import Language
# from text_utils.text import EngToIpaMode
# def test_add_corpus_from_text_file(tmp_path: Path):
# base_dir = tmp_path / "base_dir"
# text_path = tmp_path / "input.txt"
# text_path.write_text("line1\nline2\n")
# add_corpus_from_text_file(
# base_dir=base_dir,
# corpus_name="corpus1",
# step_name="step1",
# text_path=text_path,
# lang=Language.ENG,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# assert (base_dir / "corpora" / "corpus1" / "step1" / "data.pkl").exists()
# def test_app_normalize(tmp_path: Path):
# base_dir = tmp_path / "base_dir"
# text_path = tmp_path / "input.txt"
# text_path.write_text("line 1\nline 2\n")
# add_corpus_from_text_file(
# base_dir=base_dir,
# corpus_name="corpus1",
# step_name="step1",
# text_path=text_path,
# lang=Language.ENG,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# app_normalize(
# base_dir=base_dir,
# corpus_name="corpus1",
# in_step_name="step1",
# out_step_name="step2",
# target=PreparationTarget.BOTH,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# assert (base_dir / "corpora" / "corpus1" / "step2" / "data.pkl").exists()
# def test_app_convert_to_ipa(tmp_path: Path):
# base_dir = tmp_path / "base_dir"
# text_path = tmp_path / "input.txt"
# text_path.write_text("line 1\nline 2\n")
# add_corpus_from_text_file(
# base_dir=base_dir,
# corpus_name="corpus1",
# step_name="step1",
# text_path=text_path,
# lang=Language.ENG,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# app_convert_to_ipa(
# base_dir=base_dir,
# corpus_name="corpus1",
# in_step_name="step1",
# out_step_name="step2",
# target=PreparationTarget.BOTH,
# ignore_arcs=True,
# ignore_tones=True,
# replace_unknown_ipa_by="_",
# mode=EngToIpaMode.BOTH,
# overwrite=False,
# )
# assert (base_dir / "corpora" / "corpus1" / "step2" / "data.pkl").exists()
| 26.568182 | 77 | 0.662532 | # from pathlib import Path
# from text_utils.ipa2symb import IPAExtractionSettings
# from text_utils.language import Language
# from text_utils.text import EngToIpaMode
# def test_add_corpus_from_text_file(tmp_path: Path):
# base_dir = tmp_path / "base_dir"
# text_path = tmp_path / "input.txt"
# text_path.write_text("line1\nline2\n")
# add_corpus_from_text_file(
# base_dir=base_dir,
# corpus_name="corpus1",
# step_name="step1",
# text_path=text_path,
# lang=Language.ENG,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# assert (base_dir / "corpora" / "corpus1" / "step1" / "data.pkl").exists()
# def test_app_normalize(tmp_path: Path):
# base_dir = tmp_path / "base_dir"
# text_path = tmp_path / "input.txt"
# text_path.write_text("line 1\nline 2\n")
# add_corpus_from_text_file(
# base_dir=base_dir,
# corpus_name="corpus1",
# step_name="step1",
# text_path=text_path,
# lang=Language.ENG,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# app_normalize(
# base_dir=base_dir,
# corpus_name="corpus1",
# in_step_name="step1",
# out_step_name="step2",
# target=PreparationTarget.BOTH,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# assert (base_dir / "corpora" / "corpus1" / "step2" / "data.pkl").exists()
# def test_app_convert_to_ipa(tmp_path: Path):
# base_dir = tmp_path / "base_dir"
# text_path = tmp_path / "input.txt"
# text_path.write_text("line 1\nline 2\n")
# add_corpus_from_text_file(
# base_dir=base_dir,
# corpus_name="corpus1",
# step_name="step1",
# text_path=text_path,
# lang=Language.ENG,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# app_convert_to_ipa(
# base_dir=base_dir,
# corpus_name="corpus1",
# in_step_name="step1",
# out_step_name="step2",
# target=PreparationTarget.BOTH,
# ignore_arcs=True,
# ignore_tones=True,
# replace_unknown_ipa_by="_",
# mode=EngToIpaMode.BOTH,
# overwrite=False,
# )
# assert (base_dir / "corpora" / "corpus1" / "step2" / "data.pkl").exists()
| 0 | 0 | 0 |
a88cb734e8a4d1522640c81ae06996fd2293fe55 | 240 | py | Python | 9-C/ozljeda.py | ugoertz/l19contest | f157944074b493fe20166e3bc68899b64fe25400 | [
"CC0-1.0"
] | null | null | null | 9-C/ozljeda.py | ugoertz/l19contest | f157944074b493fe20166e3bc68899b64fe25400 | [
"CC0-1.0"
] | null | null | null | 9-C/ozljeda.py | ugoertz/l19contest | f157944074b493fe20166e3bc68899b64fe25400 | [
"CC0-1.0"
] | null | null | null | K = int(input())
a = input().split()
# prefix[i] holds the XOR of the first i elements (a_1 ^ ... ^ a_i).
prefix = [0]
for token in a:
    prefix.append(prefix[-1] ^ int(token))
Q = int(input())
for _ in range(Q):
    l, r = (int(t) for t in input().split())
    # The sequence repeats with period K + 1, so reduce both endpoints
    # modulo K + 1 before looking up the prefix XOR values.
    print(prefix[r % (K + 1)] ^ prefix[(l - 1) % (K + 1)])
| 18.461538 | 55 | 0.445833 | K = int(input())
# Answer XOR range queries on a sequence that repeats with period K + 1,
# using prefix XOR values.
a = input().split()
d = [0]
for x in a:
    d.append(d[-1] ^ int(x))  # d[i] == a_1 ^ ... ^ a_i
Q = int(input())
for i in range(Q):
    l, r = [int(x) for x in input().split()]
    # XOR of a_l..a_r == d[r] ^ d[l-1]; indices wrap modulo the period.
    print(d[r % (K+1)] ^ d[(l-1) % (K+1)])
| 0 | 0 | 0 |
58e9299903bc4d2a2b66c9a1b04f64faa9b45616 | 1,756 | py | Python | model/model.py | SohamChattopadhyayEE/RRCNN | 3bcd84bb44b68dea1d8e4dbd25923c8d4e7953ed | [
"MIT"
] | 2 | 2021-12-01T05:46:09.000Z | 2022-02-09T15:24:48.000Z | model/model.py | SohamChattopadhyayEE/RRCNN | 3bcd84bb44b68dea1d8e4dbd25923c8d4e7953ed | [
"MIT"
] | null | null | null | model/model.py | SohamChattopadhyayEE/RRCNN | 3bcd84bb44b68dea1d8e4dbd25923c8d4e7953ed | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
### Model : RRCNN-C ###
| 27.015385 | 94 | 0.588269 | import torch
import torch.nn as nn
### Model : RRCNN-C ###
class ResBlock(nn.Module):
def __init__(self, num_ft = 64, kernel_size = 3, stride = 1, padding = 1):
super(ResBlock, self).__init__()
m = []
for i in range(2):
m.append(nn.Conv1d(num_ft, num_ft, kernel_size, stride, padding))
m.append(nn.BatchNorm1d(num_ft))
m.append(nn.ReLU())
self.body = nn.Sequential(*m)
def forward(self, x):
res = self.body(x)
res += x
return res
class RRCNN_C(nn.Module):
def __init__(self, num_channels, num_classes, num_res_ft = 64, num_res = 2):
super(RRCNN_C, self).__init__()
self.conv = nn.Conv1d(num_channels, num_res_ft, kernel_size = 3, stride = 1, padding = 1)
self.res = ResBlock()
mat = []
for _ in range(num_res):
mat.append(ResBlock(num_ft = num_res_ft))
mat.append(nn.RReLU())
self.res_body_1 = nn.Sequential(*mat)
mat2 = []
for _ in range(num_res):
mat2.append(ResBlock(num_ft = num_res_ft))
mat2.append(nn.RReLU())
self.res_body_2 = nn.Sequential(*mat2)
mat3 = []
for _ in range(num_res):
mat3.append(ResBlock(num_ft = num_res_ft))
mat3.append(nn.RReLU())
self.res_body_3 = nn.Sequential(*mat3)
self.avg = nn.AdaptiveAvgPool1d(1)
self.maxpool = nn.MaxPool1d(1)
self.fc = nn.Linear(num_res_ft, num_classes)
self.clf = nn.Softmax()
def forward(self, x):
x_in = self.conv(x)
x = self.res_body_1(x_in)
x = x + x_in
x1 = x
x = self.res_body_2(x)
x = x+x1
x2 = x
x = self.res_body_3(x)
x = x + x2
x = self.avg(x)
x = torch.flatten(x)
x = self.fc(x)
x = self.clf(x)
return x | 1,529 | 9 | 153 |
e1fcece13431aeff08ed9df71b6ebbbcaefc607f | 3,355 | py | Python | ailib/mdp/environment/environment.py | jeremiedecock/pyai | 5ac032b487670d258ecb3d5fa85a416c76c9871b | [
"MIT"
] | 2 | 2018-05-09T01:54:38.000Z | 2019-08-27T23:18:58.000Z | ailib/mdp/environment/environment.py | jeremiedecock/pyai | 5ac032b487670d258ecb3d5fa85a416c76c9871b | [
"MIT"
] | null | null | null | ailib/mdp/environment/environment.py | jeremiedecock/pyai | 5ac032b487670d258ecb3d5fa85a416c76c9871b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2014,2015,2016,2017 Jeremie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Scipy discrete probability distribution (used in doTransition function)
from scipy.stats import rv_discrete
| 36.868132 | 89 | 0.70462 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2014,2015,2016,2017 Jeremie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Scipy discrete probability distribution (used in doTransition function)
from scipy.stats import rv_discrete
class Environment(object):
def reward(self, current_state, action=None, next_state=None):
raise NotImplementedError
def transition(self, current_state, action, next_state):
raise NotImplementedError
# doTransition is common to all Environment classes
def doTransition(self, current_state, action):
assert current_state in self.stateSet
assert action in self.actionSet
next_state_proba = self.transition(current_state, action)
# randomly generate the next state acording to the next_state_proba distribution
state_proba_list = [item for item in next_state_proba.items()]
state_list = [item[0] for item in state_proba_list]
proba_list = [item[1] for item in state_proba_list]
# A Scipy probability distribution
distrib = rv_discrete(values=(range(len(state_list)), proba_list))
# One sample
next_state_index = distrib.rvs()
next_state = state_list[next_state_index]
reward = self.reward(current_state, action, next_state)
return (next_state, reward)
def display(self, state=None):
raise NotImplementedError
def simulate(self, agent, initial_state=None, max_it=float("inf")):
"""
max_it (maximum number of iterations) can be used to avoid infinites simulations.
"""
if initial_state is None:
initial_state = self.initialState
else:
assert initial_state in self.stateSet
state_list = [initial_state]
action_list = []
reward_list = []
state = initial_state
while(state not in self.finalStateSet and len(action_list) < max_it):
action = agent.getAction(state)
(state, reward) = self.doTransition(state, action)
state_list.append(state)
action_list.append(action)
#reward_list.append(reward) # TODO
reward_list = [self.reward(state) for state in state_list] # TODO
return (state_list, action_list, reward_list)
| 981 | 1,063 | 23 |
85c7ad160e7e636600cc847da169e8f4264d444b | 56 | py | Python | past_archive/boj/2438(makestar).py | DongHyunByun/algorithm_practice | c726c69d35306d23467f4af6e10f2db6fdc68234 | [
"MIT"
] | null | null | null | past_archive/boj/2438(makestar).py | DongHyunByun/algorithm_practice | c726c69d35306d23467f4af6e10f2db6fdc68234 | [
"MIT"
] | null | null | null | past_archive/boj/2438(makestar).py | DongHyunByun/algorithm_practice | c726c69d35306d23467f4af6e10f2db6fdc68234 | [
"MIT"
] | null | null | null |
a = int(input())
# Print a left-aligned triangle: row i consists of i stars.
for size in range(1, a + 1):
    print('*' * size)
a = int(input())  # number of rows
# Row i of the triangle holds i stars.
for i in range(1, a+1):
print(i*'*') | 0 | 0 | 0 |
00eb1c3883b3f91763dada8373cd6e805e05f3f7 | 954 | py | Python | bert2tf/executors/models/roberta/__init__.py | xiongma/bert2tf | 105fd1524edb703bf68aec8fde289de5923e1f78 | [
"Apache-2.0"
] | 7 | 2021-08-05T16:35:08.000Z | 2022-01-04T03:26:10.000Z | bert2tf/executors/models/roberta/__init__.py | xiongma/bert2tf | 105fd1524edb703bf68aec8fde289de5923e1f78 | [
"Apache-2.0"
] | 96 | 2021-08-06T08:32:09.000Z | 2022-01-21T11:07:25.000Z | bert2tf/executors/models/roberta/__init__.py | xiongma/bert2tf | 105fd1524edb703bf68aec8fde289de5923e1f78 | [
"Apache-2.0"
] | null | null | null | from ..bert import Bert, BertMLMHead
from ..configs import RobertaConfig
class Roberta(Bert):
"""
Roberta
"""
config_cls = RobertaConfig
class RobertaPreTraining(Roberta):
"""
Roberta PreTraining Model, it exclude nsp task
"""
| 29.8125 | 79 | 0.602725 | from ..bert import Bert, BertMLMHead
from ..configs import RobertaConfig
class Roberta(Bert):
    """Roberta encoder model.

    Shares the Bert architecture; only the configuration class differs.
    """
    # Configuration class used when instantiating/loading this model.
    config_cls = RobertaConfig
class RobertaPreTraining(Roberta):
    """Roberta pre-training model.

    Only the masked-language-model (MLM) head is attached; the
    next-sentence-prediction task is excluded.
    """

    def __init__(self, **kwargs):
        # The pooler is only needed for NSP, which Roberta drops.
        super(RobertaPreTraining, self).__init__(with_pooler=False, **kwargs)
        config = self.config
        self.mlm = BertMLMHead(
            vocab_size=config.vocab_size,
            hidden_size=config.hidden_size,
            initializer_range=config.initializer_range,
            hidden_act=config.hidden_act,
            embedding_layer=self.embeddings,
            name='cls',
        )

    def call(self, inputs, **kwargs):
        """Encode ``inputs`` and score every position over the vocabulary."""
        sequence_output = super(RobertaPreTraining, self).call(inputs)
        return self.mlm(sequence_output)
| 640 | 0 | 54 |
03e222b7e10a6ab5d6b633abb4f2d30ab2e044cc | 6,320 | py | Python | Server/SpotifakeServer_TrackHandler.py | BrunoLujan/Spotifake-DESER | a811444af0a1326659dd27949c6a1c66c7cd66a1 | [
"Apache-2.0"
] | null | null | null | Server/SpotifakeServer_TrackHandler.py | BrunoLujan/Spotifake-DESER | a811444af0a1326659dd27949c6a1c66c7cd66a1 | [
"Apache-2.0"
] | null | null | null | Server/SpotifakeServer_TrackHandler.py | BrunoLujan/Spotifake-DESER | a811444af0a1326659dd27949c6a1c66c7cd66a1 | [
"Apache-2.0"
] | null | null | null | import sys
import os
import thriftpy
sys.path.append("../")
sys.path.append("gen-py")
from SpotifakeServices import TrackService
from SpotifakeServices.ttypes import *
from SpotifakeManagement.ttypes import *
from SQLConnection.sqlServer_track import SqlServerTrackManagement | 41.578947 | 128 | 0.671203 | import sys
import os
import thriftpy
sys.path.append("../")
sys.path.append("gen-py")
from SpotifakeServices import TrackService
from SpotifakeServices.ttypes import *
from SpotifakeManagement.ttypes import *
from SQLConnection.sqlServer_track import SqlServerTrackManagement
class SpotifakeServerTrackHandler(TrackService.Iface):
    """Thrift service handler for track-related operations.

    Each method delegates persistence to ``SqlServerTrackManagement`` and
    converts SQL result rows into thrift ``Track`` objects.

    NOTE(review): the SQL methods are called unbound with this handler as
    ``self`` (e.g. ``SqlServerTrackManagement.GetTrackByTitle(self, ...)``),
    mirroring the original code -- confirm this is intentional.
    """

    connection: SqlServerTrackManagement = SqlServerTrackManagement()
    # Thrift type definitions loaded once at class-creation time.
    spotifakeManagement_thrift = thriftpy.load('../Thrift/SpotifakeManagement.thrift', module_name='spotifakeManagement_thrift')
    spotifakeServices_thrift = thriftpy.load('../Thrift/SpotifakeServices.thrift', module_name='spotifakeServices_thrift')
    Date = spotifakeManagement_thrift.Date
    Track = spotifakeManagement_thrift.Track

    def __init__(self):
        pass

    @staticmethod
    def _row_to_track(row):
        """Convert one SQL result row into a thrift Track object."""
        track = Track()
        track.idTrack = row.IdTrack
        track.durationSeconds = row.durationSeconds
        track.title = row.title
        track.trackNumber = row.trackNumber
        track.storagePath = row.storagePath
        track.gender = row.IdGenre
        return track

    def GetTrackByTitle(self, title):
        """Return the SQL row of the track matching ``title``.

        NOTE(review): the original also built a thrift Track from the row
        but discarded it and returned the raw row; the dead mapping was
        removed -- the return value is unchanged.
        """
        return SqlServerTrackManagement.GetTrackByTitle(self, title)

    def GetTrackByAlbumId(self, idAlbum):
        """Return all tracks of an album as thrift Track objects."""
        rows = SqlServerTrackManagement.GetTrackByIdAlbum(self, idAlbum)
        return [self._row_to_track(row) for row in rows]

    def GetTrackByPlaylistId(self, idPlaylist):
        """Return all tracks of a playlist as thrift Track objects."""
        rows = SqlServerTrackManagement.GetTrackByPlaylistId(self, idPlaylist)
        return [self._row_to_track(row) for row in rows]

    def GetTrackByLibraryId(self, idLibrary):
        """Return all tracks of a library as thrift Track objects."""
        rows = SqlServerTrackManagement.GetTrackByIdLibrary(self, idLibrary)
        return [self._row_to_track(row) for row in rows]

    def GetTrackByQuery(self, query):
        """Search tracks by free-text query.

        Returns a list of Track objects, or ``False`` when the SQL layer
        reports no result (returns 0) -- kept for API compatibility.
        """
        rows = SqlServerTrackManagement.GetTrackByQuery(self, query)
        if rows != 0:
            trackList = []
            for row in rows:
                track = Track(row.IdTrack, row.trackNumber, row.durationSeconds, row.storagePath, row.title)
                track.title = row.title
                track.stageName = row.ContentCreatorName
                track.gender = row.IdGenre
                trackList.append(track)
            return trackList
        return False

    def AddTrackToAlbum(self, idAlbum, newTrack, idContentCreator):
        """Persist ``newTrack`` on the album and return the new track id."""
        return SqlServerTrackManagement.AddTrackToAlbum(self, idAlbum, newTrack)

    def AddFeaturingTrack(self, idNewTrack, idContentCreator):
        """Register ``idContentCreator`` as featuring on the new track."""
        SqlServerTrackManagement.AddFeaturingTrack(self, idNewTrack, idContentCreator)
        return idNewTrack

    def DeleteAlbumTrack(self, idAlbum, trackNumber):
        """Delete a track from an album and return the deleted track id."""
        trackFound = SqlServerTrackManagement.DeleteAlbumTrack(self, idAlbum, trackNumber)
        # NOTE(review): rows elsewhere expose ``IdTrack``; confirm the SQL
        # layer returns an object with an ``idTrack`` attribute here.
        return trackFound.idTrack

    def UpdateAlbumTrackTitle(self, idAlbum, trackNumber, newAlbumTrackTitle):
        """Rename an album track and return the SQL layer's result."""
        return SqlServerTrackManagement.UpdateAlbumTrackTitle(self, idAlbum, trackNumber, newAlbumTrackTitle)

    def AddTrackToLibrary(self, idLibrary, idTrack):
        """Add a track to a user library."""
        return SqlServerTrackManagement.AddTrackToLibrary(self, idLibrary, idTrack)

    def DeleteLibraryTrack(self, idLibrary, IdTrack):
        """Remove a track from a library and return the deleted track id.

        BUGFIX: the original passed the undefined name ``idTrack`` to the
        SQL layer (the parameter is ``IdTrack``), raising NameError.
        """
        trackFound = SqlServerTrackManagement.DeleteLibraryTrack(self, idLibrary, IdTrack)
        return trackFound.idTrack

    def AddTrackToPlaylist(self, idPlaylist, idTrack):
        """Add a track to a playlist."""
        return SqlServerTrackManagement.AddTrackToPlaylist(self, idPlaylist, idTrack)

    def DeletePlaylistTrack(self, idPlaylist, IdTrack):
        """Remove a track from a playlist and return the deleted track id.

        BUGFIX: same undefined-name fix as ``DeleteLibraryTrack``.
        """
        trackFound = SqlServerTrackManagement.DeletePlaylistTrack(self, idPlaylist, IdTrack)
        return trackFound.idTrack

    def GetLocalTracksByIdConsumer(self, idConsumer):
        """Return the consumer's local (device) tracks.

        A leftover debug ``print`` of the raw SQL result was removed.
        """
        localTrackList = []
        rows = SqlServerTrackManagement.GetLocalTracksByIdConsumer(self, idConsumer)
        for row in rows:
            localTrack = LocalTrack()
            localTrack.idConsumer = row.IdConsumer
            localTrack.fileName = row.fileName
            localTrack.artistName = row.artistName
            localTrack.title = row.title
            localTrackList.append(localTrack)
        return localTrackList

    def AddLocalTrack(self, localTrack):
        """Persist a local track record."""
        return SqlServerTrackManagement.AddLocalTrack(self, localTrack)

    def GenerateRadioStation(self, idGender):
        """Return tracks of the given genre for a radio station.

        Returns an empty list when the SQL layer reports no result (0).
        """
        trackList = []
        rows = SqlServerTrackManagement.GenerateRadioStation(self, idGender)
        if rows != 0:
            for row in rows:
                trackList.append(self._row_to_track(row))
        return trackList
b98d8fa44acab639539a323f20f75b6910755f6d | 29,992 | py | Python | geraldo/generators/base.py | gustavohenrique/wms | ec3632626d63d1662c0aa1a4693dd091ba55eb39 | [
"CC-BY-3.0"
] | 1 | 2015-08-06T20:58:05.000Z | 2015-08-06T20:58:05.000Z | geraldo/generators/base.py | gustavohenrique/wms | ec3632626d63d1662c0aa1a4693dd091ba55eb39 | [
"CC-BY-3.0"
] | null | null | null | geraldo/generators/base.py | gustavohenrique/wms | ec3632626d63d1662c0aa1a4693dd091ba55eb39 | [
"CC-BY-3.0"
] | 1 | 2020-01-26T20:48:06.000Z | 2020-01-26T20:48:06.000Z | from geraldo.utils import get_attr_value, calculate_size
from geraldo.widgets import Widget, Label, SystemField
from geraldo.graphics import Graphic, RoundRect, Rect, Line, Circle, Arc,\
Ellipse, Image
class ReportGenerator(object):
"""A report generator is used to generate a report to a specific format."""
_is_first_page = True
_is_latest_page = True
_current_top_position = 0
_current_left_position = 0
_current_page_number = 0
_current_object = None
_current_queryset = None
_generation_datetime = None
# Groupping
_groups_values = None
_groups_working_values = None
_groups_changed = None
_groups_stack = None
# The rendered report has pages, each page is a ReportPage instance
_rendered_pages = None
_page_rect = None
def __init__(self, report):
"""This method should be overrided to receive others arguments"""
self.report = report
# Initializes some attributes
self._rendered_pages = []
self._groups_values = {}
self._groups_working_values = {}
self._groups_changed = {}
self._groups_stack = []
def execute(self):
"""This method must be overrided to execute the report generation."""
# Initializes pages
self._is_first_page = True
def render_border(self, borders_dict, rect_dict):
"""Renders a border in the coordinates setted in the rect."""
b_all = borders_dict.get('all', None)
if b_all:
graphic = isinstance(b_all, Graphic) and b_all or Rect()
graphic.set_rect(
left=rect_dict['left'],
top=rect_dict['top'] - rect_dict['height'],
width=rect_dict['right'] - rect_dict['left'],
height=rect_dict['height'],
)
self._rendered_pages[-1].elements.append(graphic)
b_left = borders_dict.get('left', None)
if b_left:
graphic = isinstance(b_left, Graphic) and b_left or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['top'],
right=rect_dict['left'], bottom=rect_dict['bottom']
)
self._rendered_pages[-1].elements.append(graphic)
b_top = borders_dict.get('top', None)
if b_top:
graphic = isinstance(b_top, Graphic) and b_top or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['top'],
right=rect_dict['right'], bottom=rect_dict['top']
)
self._rendered_pages[-1].elements.append(graphic)
b_right = borders_dict.get('right', None)
if b_right:
graphic = isinstance(b_right, Graphic) and b_right or Line()
graphic.set_rect(
left=rect_dict['right'], top=rect_dict['top'],
right=rect_dict['right'], bottom=rect_dict['bottom']
)
self._rendered_pages[-1].elements.append(graphic)
b_bottom = borders_dict.get('bottom', None)
if b_bottom:
graphic = isinstance(b_right, Graphic) and b_right or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['bottom'],
right=rect_dict['right'], bottom=rect_dict['bottom']
)
self._rendered_pages[-1].elements.append(graphic)
def make_band_rect(self, band, top_position, left_position):
    """Returns the right band rect on the PDF canvas"""
    band_height = self.calculate_size(band.height)
    return {
        'left': left_position,
        'top': top_position,
        'right': left_position + self.calculate_size(band.width),
        'bottom': top_position - band_height,
        'height': band_height,
    }
def render_band(self, band, top_position=None, left_position=None,
        update_top=True, current_object=None):
    """Render ``band`` and its widgets/graphics onto the current page.

    Starts from the running top/left position unless ``top_position`` /
    ``left_position`` are given. When ``update_top`` is True, the running
    top position is advanced past the band afterwards. ``current_object``
    is the instance widgets are bound to (defaults to the generator's
    current object). Child bands are rendered recursively at the end.
    """
    # Sets the current object
    current_object = current_object or self._current_object
    # Page width. This should be done in a metaclass in Report domain TODO
    self._rendered_pages[-1].width = self.calculate_size(self.report.page_size[0]) -\
        self.calculate_size(self.report.margin_left) - self.calculate_size(self.report.margin_right)
    # Default value for band width
    band.width = self.calculate_size(band.width) or self._rendered_pages[-1].width
    # Coordinates
    left_position = left_position or self.get_left_pos()
    # Increases the top position when being an inline displayed detail band
    if left_position > self.calculate_size(self.report.margin_left) and\
            getattr(band, 'display_inline', False) and\
            band.width < self.get_available_width():
        temp_height = band.height + getattr(band, 'margin_top', 0) + getattr(band, 'margin_bottom', 0)
        self.update_top_pos(decrease=self.calculate_size(temp_height))
    else:
        self.update_left_pos(set=0)
        left_position = self.get_left_pos()
    temp_top = top_position = top_position or self.get_top_pos()
    # Calculates the band dimensions on the canvas
    band_rect = self.make_band_rect(band, top_position, left_position)
    # Band borders
    self.render_border(band.borders, band_rect)
    # Variable that stores the highest height at all elements
    highest_height = 0
    # Loop at band widgets
    for element in band.elements:
        # Doesn't render not visible element
        if not element.visible:
            continue
        # Widget element
        if isinstance(element, Widget):
            widget = element.clone()
            # Set widget colors
            widget.font_color = self.report.default_font_color
            # Set widget basic attributes
            widget.instance = current_object
            widget.generator = self
            widget.report = self.report # This should be done by a metaclass in Band domain TODO
            widget.band = band # This should be done by a metaclass in Band domain TODO
            widget.page = self._rendered_pages[-1]
            if isinstance(widget, SystemField):
                widget.left = band_rect['left'] + self.calculate_size(widget.left)
                widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top))
                temp_height = self.calculate_size(element.top) + self.calculate_size(widget.height)
            elif isinstance(widget, Label):
                widget.para = self.make_paragraph(widget.text, self.make_paragraph_style(band, widget.style))
                if widget.truncate_overflow:
                    # Clip the paragraph to the widget's declared box.
                    self.keep_in_frame(
                        widget,
                        self.calculate_size(widget.width),
                        self.calculate_size(widget.height),
                        [widget.para],
                        mode='truncate',
                    )
                    widget.left = band_rect['left'] + self.calculate_size(widget.left)
                    widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top), self.calculate_size(widget.height))
                else:
                    # Let the paragraph wrap freely, then position it using
                    # the height the wrap produced.
                    self.wrap_paragraph_on(widget.para, self.calculate_size(widget.width), self.calculate_size(widget.height))
                    widget.left = band_rect['left'] + self.calculate_size(widget.left)
                    widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top), self.calculate_size(widget.para.height))
                temp_height = self.calculate_size(element.top) + self.calculate_size(widget.para.height)
            else:
                temp_height = self.calculate_size(element.top) + self.calculate_size(widget.height)
            # Sets element height as the highest
            if temp_height > highest_height:
                highest_height = temp_height
            self._rendered_pages[-1].elements.append(widget)
        # Graphic element
        elif isinstance(element, Graphic):
            graphic = element.clone()
            # Set widget basic attributes
            graphic.instance = current_object
            graphic.generator = self
            graphic.report = self.report # This should be done by a metaclass in Band domain TODO
            graphic.band = band # This should be done by a metaclass in Band domain TODO
            graphic.page = self._rendered_pages[-1]
            # Set graphic colors
            graphic.fill_color = graphic.fill_color or self.report.default_fill_color
            graphic.stroke_color = graphic.stroke_color or self.report.default_stroke_color
            # Convert each shape's band-relative coordinates to absolute
            # page coordinates (anchor point differs per shape type).
            if isinstance(graphic, RoundRect):
                graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
            elif isinstance(graphic, Rect):
                graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
            elif isinstance(graphic, Line):
                graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                graphic.top = top_position - self.calculate_size(graphic.top)
                graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
                graphic.bottom = top_position - self.calculate_size(graphic.bottom)
            elif isinstance(graphic, Circle):
                graphic.left_center = band_rect['left'] + self.calculate_size(graphic.left_center)
                graphic.top_center = top_position - self.calculate_size(graphic.top_center)
            elif isinstance(graphic, Arc):
                graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                graphic.top = top_position - self.calculate_size(graphic.top)
                graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
                graphic.bottom = top_position - self.calculate_size(graphic.bottom)
            elif isinstance(graphic, Ellipse):
                graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                graphic.top = top_position - self.calculate_size(graphic.top)
                graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
                graphic.bottom = top_position - self.calculate_size(graphic.bottom)
            elif isinstance(graphic, Image):
                graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
            # Sets element height as the highest
            temp_height = self.calculate_size(element.top) + self.calculate_size(graphic.height)
            if temp_height > highest_height:
                highest_height = temp_height
            self._rendered_pages[-1].elements.append(graphic)
    # Updates top position
    if update_top:
        if band.auto_expand_height:
            band_height = highest_height
        else:
            band_height = self.calculate_size(band.height)
        band_height += self.calculate_size(getattr(band, 'margin_top', 0))
        band_height += self.calculate_size(getattr(band, 'margin_bottom', 0))
        self.update_top_pos(band_height)
    # Updates left position
    if getattr(band, 'display_inline', False):
        self.update_left_pos(band.width + self.calculate_size(getattr(band, 'margin_right', 0)))
    else:
        self.update_left_pos(set=0)
    # Child bands
    for child_band in band.child_bands or []: # TODO This "or []" here is a quickfix
        # Doesn't generate if it is not visible
        if not child_band.visible:
            continue
        # Make sure the child band fits on the page before rendering it.
        self.force_blank_page_by_height(self.calculate_size(child_band.height))
        self.render_band(child_band)
def force_blank_page_by_height(self, height):
    """Check if the height is in client available report height and
    makes a new page if necessary"""
    if height > self.get_available_height():
        self.force_new_page()
def force_new_page(self, insert_new_page=True):
    """Starts a new blank page"""
    # Close the current page by resetting the running top position.
    self._current_top_position = 0
    if insert_new_page:
        # Open a fresh rendered page...
        self._rendered_pages.append(ReportPage())
    # ...let the subclass initialize it...
    self.start_new_page()
    # ...and draw its footer right away.
    self.render_page_footer()
def render_begin(self):
"""Renders the report begin band if it exists"""
if not self.report.band_begin:
return
# Doesn't generate this band if it is not visible
if not self.report.band_begin.visible:
return
# Call method that print the band area and its widgets
self.render_band(self.report.band_begin)
def render_summary(self):
"""Generate the report summary band if it exists"""
if not self.report.band_summary:
return
# Doesn't generate this band if it is not visible
if not self.report.band_summary.visible:
return
# Clears groups stack
self._groups_stack = []
# Check to force new page if there is no available space
self.force_blank_page_by_height(self.calculate_size(self.report.band_summary.height))
# Call method that print the band area and its widgets
self.render_band(self.report.band_summary)
def render_page_header(self):
"""Generate the report page header band if it exists"""
if not self.report.band_page_header:
return
# Doesn't generate this band if it is not visible
if not self.report.band_page_header.visible:
return
# Call method that print the band area and its widgets
self.render_band(
self.report.band_page_header,
top_position=self.calculate_size(self.report.margin_top),
update_top=False,
)
def render_page_footer(self):
"""Generate the report page footer band if it exists"""
if not self.report.band_page_footer:
return
# Doesn't generate this band if it is not visible
if not self.report.band_page_footer.visible:
return
# Call method that print the band area and its widgets
self.render_band(
self.report.band_page_footer,
top_position=self.calculate_size(self.report.page_size[1]) -\
self.calculate_size(self.report.margin_bottom) -\
self.calculate_size(self.report.band_page_footer.height),
update_top=False,
)
    def render_end_current_page(self):
        """Closes the current page, using page breaker constant. Everything done after
        this will draw into a new page. Before this, using the generate_page_footer
        method to draw the footer.

        On the latest page the summary band is rendered as well; the page
        counter is bumped and the vertical cursor reset to zero."""
        self.render_page_footer()
        if self._is_latest_page:
            self.render_summary()
        self._current_page_number += 1
        self._is_first_page = False
        self.update_top_pos(set=0) # <---- update top position
    def render_bands(self):
        """Loops into the objects list to create the report pages until the end.

        Outer loop: one iteration per page. Inner loop: consume objects onto
        the current page, emitting group headers/footers around value changes
        and subreports after each detail band, until the page runs out of
        vertical (or, for inline bands, horizontal) space."""
        # Preparing local auxiliary variables
        self._current_page_number = 0
        self._current_object_index = 0
        objects = self.report.get_objects_list()
        # just an alias to make it easier
        d_band = self.report.band_detail
        # Empty report
        if self.report.print_if_empty and not objects:
            self.start_new_page()
            self.render_begin()
            self.render_end_current_page()
        # Loop for pages
        while self._current_object_index < len(objects):
            # Starts a new page and generates the page header band
            self.start_new_page()
            first_object_on_page = True
            # Generate the report begin band
            if self._current_page_number == 0:
                self.render_begin()
            # Does generate objects if there is no details band
            if not d_band:
                self._current_object_index = len(objects)
            # Loop for objects to go into grid on current page
            while self._current_object_index < len(objects):
                # Get current object from list
                self._current_object = objects[self._current_object_index]
                # Renders group bands for changed values
                self.calc_changed_groups(first_object_on_page)
                if not first_object_on_page:
                    self.render_groups_footers()
                self.render_groups_headers()
                # Generate this band only if it is visible
                if d_band.visible:
                    self.render_band(d_band)
                # Renders subreports
                self.render_subreports()
                # Next object
                self._current_object_index += 1
                first_object_on_page = False
                # Break this if this page doesn't support anything more...
                # ... if there is no more available height
                if self.get_available_height() < self.calculate_size(d_band.height):
                    # right margin is not considered to calculate the necessary space
                    d_width = self.calculate_size(d_band.width) + self.calculate_size(getattr(d_band, 'margin_left', 0))
                    # ... and this is not an inline displayed detail band or there is no width available
                    if not getattr(d_band, 'display_inline', False) or self.get_available_width() < d_width:
                        break
                # ... or this band forces a new page and this is not the last object in objects list
                elif d_band.force_new_page and self._current_object_index < len(objects):
                    break
            # Sets this is the latest page or not
            self._is_latest_page = self._current_object_index >= len(objects)
            # Renders the finish group footer bands
            if self._is_latest_page:
                self.calc_changed_groups(False)
                self.render_groups_footers(force=True)
            # Ends the current page, printing footer and summary as necessary
            self.render_end_current_page()
            # Breaks if this is the latest item
            if self._is_latest_page:
                break
            # Increment page number
            self._current_page_number += 1
    def start_new_page(self, with_header=True):
        """Do everything necessary to be done to start a new page:
        append a fresh ``ReportPage``, optionally draw the page header,
        and draw the page borders when the report declares any.

        :param with_header: also render the page header band.
        """
        self._rendered_pages.append(ReportPage())
        if with_header:
            self.render_page_header()
        # Page borders
        if self.report.borders:
            if not self._page_rect:
                # Computed once and cached; top/bottom are flipped here from
                # report coordinates into canvas coordinates.
                self._page_rect = self.report.get_page_rect()
                self._page_rect['top'] = self.calculate_size(self.report.page_size[1]) - self._page_rect['top']
                self._page_rect['bottom'] = self.calculate_size(self.report.page_size[1]) - self._page_rect['bottom']
            self.render_border(self.report.borders, self._page_rect)
    def calculate_size(self, size):
        """Uses the module-level function 'calculate_size' to convert a size
        declaration into a concrete numeric value."""
        return calculate_size(size)
def get_left_pos(self):
"""Returns the left position of the drawer. Is useful on inline displayed detail bands"""
return self.calculate_size(self.report.margin_left) + self._current_left_position
def get_top_pos(self):
"""We use this to use this to get the current top position,
considering also the top margin."""
ret = self.calculate_size(self.report.margin_top) + self._current_top_position
if self.report.band_page_header:
ret += self.calculate_size(self.report.band_page_header.height)
return ret
def get_available_height(self):
"""Returns the available client height area from the current top position
until the end of page, considering the bottom margin."""
ret = self.calculate_size(self.report.page_size[1]) - self.calculate_size(self.report.margin_bottom) -\
self.calculate_size(self.report.margin_top) - self._current_top_position
if self.report.band_page_header:
ret -= self.calculate_size(self.report.band_page_header.height)
if self.report.band_page_footer:
ret -= self.calculate_size(self.report.band_page_footer.height)
return ret
def update_top_pos(self, increase=0, decrease=0, set=None):
"""Updates the current top position controller, increasing (by default),
decreasing or setting it with a new value."""
if set is not None:
self._current_top_position = set
else:
self._current_top_position += increase
self._current_top_position -= decrease
return self._current_top_position
def update_left_pos(self, increase=0, decrease=0, set=None):
"""Updates the current left position controller, increasing (by default),
decreasing or setting it with a new value."""
if set is not None:
self._current_left_position = set
else:
self._current_left_position += increase
self._current_left_position -= decrease
return self._current_left_position
    def get_page_count(self):
        """Calculate and returns the page count for this report. The challenge
        here is do this calculate before to generate the pages.

        Note: as written it simply counts the pages rendered so far, so
        before generation finishes it is only a partial count."""
        return len(self._rendered_pages)
def make_paragraph(self, text, style=None):
"""Uses the Paragraph class to return a new paragraph object"""
raise Exception('Not implemented')
def wrap_paragraph_on(self, paragraph, width, height):
"""Wraps the paragraph on the height/width informed"""
raise Exception('Not implemented')
# Stylizing
    def set_fill_color(self, color):
        """Sets the current fill on canvas. Used for fonts and shape fills.

        Base implementation is a no-op; generator subclasses presumably
        override it with backend-specific drawing calls — confirm there."""
        pass
    def set_stroke_color(self, color):
        """Sets the current stroke color on canvas.

        Base implementation is a no-op; generator subclasses presumably
        override it with backend-specific drawing calls — confirm there."""
        pass
    def set_stroke_width(self, width):
        """Sets the stroke/line width for shapes.

        Base implementation is a no-op; generator subclasses presumably
        override it with backend-specific drawing calls — confirm there."""
        pass
# Groups topic
    def calc_changed_groups(self, force_no_changed=False):
        """Defines which groups have had their driver values change, to be
        used to render group bands.

        Groups are walked outermost-first; once one is seen as changed,
        every group under it is treated as changed too. Results land in
        ``self._groups_changed`` / ``self._groups_values`` and changed groups
        are pushed onto ``self._groups_stack``.

        :param force_no_changed: start with ``changed`` already true, so
            every group is treated as changed (used for the first object on
            a page and for the final footer pass).
        """
        changed = force_no_changed
        # Stores the previous group values (snapshot used by group filtering)
        self._groups_working_values = self._groups_values.copy()
        # Loops on groups until find the first changed, then all under it are considered
        # changed also
        for group in self.report.groups:
            # Gets the current value to compare with the old one
            current_value = get_attr_value(self._current_object, group.attribute_name)
            # Set changed as True if it wasn't and there is a change
            changed = changed or current_value != self._groups_values.get(group, None)
            # Stores new values
            self._groups_changed[group] = changed
            self._groups_values[group] = current_value
            # Appends to the stack
            if changed:
                self._groups_stack.append(group)
def render_groups_headers(self):
"""Renders the report headers using 'changed' definition calculated by
'calc_changed_groups'"""
# Update working values for groups
self._groups_working_values = self._groups_values
# Loops on groups to render changed ones
for group in self.report.groups:
if self._groups_changed.get(group, None) and\
group.band_header and\
group.band_header.visible:
self.force_blank_page_by_height(self.calculate_size(group.band_header.height))
self.render_band(group.band_header)
    def render_groups_footers(self, force=False):
        """Renders the group footers using the 'changed' flags previously
        computed by ``calc_changed_groups``.

        Groups are walked innermost-first (reversed); a footer is rendered
        when its group is flagged changed and sits on top of
        ``self._groups_stack``, which is popped (together with the group's
        working value) as each group closes.

        :param force: render every group's footer regardless of the flags
            (used on the last page).
        """
        # Loops on groups to render changed ones
        for group in reversed(self.report.groups):
            if force or ( self._groups_changed.get(group, None) and\
                self._groups_stack and\
                self._groups_stack[-1] == group ):
                if group.band_footer and group.band_footer.visible:
                    self.force_blank_page_by_height(self.calculate_size(group.band_footer.height))
                    self.render_band(group.band_footer)
                if self._groups_stack:
                    # Drop the group's working value along with its stack entry.
                    self._groups_working_values.pop(self._groups_stack[-1])
                    self._groups_stack.pop()
def get_current_queryset(self):
"""Returns the current queryset. This solves a problem with subreports
footers and headers, and solves also flexibility and customization issues."""
# Customized and SubReports
if self._current_queryset is not None:
return self._current_queryset
# Groups
elif self._groups_stack:
return self.get_objects_in_group()
# Defaul detail driver queryset
return self.report.queryset
def get_objects_in_group(self):
"""Returns objects filtered in the current group or all if there is no
group"""
filter_dict = dict([(group.attribute_name, value) for group, value in self._groups_working_values.items()])
return filter(filter_object, self.report.queryset)
# SubReports
def render_subreports(self):
"""Renders subreports bands for the current object in, usings its
own queryset.
For a while just the detail band is rendered. Maybe in future we
change this to accept header and footer."""
for subreport in self.report.subreports:
# Subreports must have detail band
if not subreport.band_detail or not subreport.visible:
continue
# Sets the parent object and automatically clear the queryset
# in memory
subreport.parent_object = self._current_object
# Sets the temporary currenty queryset
self._current_queryset = subreport.get_objects_list()
# Loops objects
for num, obj in enumerate(subreport.get_objects_list()):
# Renders the header band
if num == 0 and subreport.band_header:
# Forces new page if there is no available space
force_new_page(subreport.band_header.height)
# Renders the header band
if subreport.band_header.visible:
self.render_band(subreport.band_header)
# Forces new page if there is no available space
force_new_page(subreport.band_detail.height)
# Renders the detail band
if subreport.band_detail.visible:
self.render_band(subreport.band_detail, current_object=obj)
# Renders the footer band
if subreport.band_footer:
# Forces new page if there is no available space
force_new_page(subreport.band_footer.height)
# Renders the header band
if subreport.band_footer.visible:
self.render_band(subreport.band_footer)
# Sets back the default currenty queryset
self._current_queryset = None
def make_paragraph_style(self, band, style=None):
"""Merge report default_style + band default_style + widget style"""
raise Exception('Not implemented')
from geraldo.utils import get_attr_value, calculate_size
from geraldo.widgets import Widget, Label, SystemField
from geraldo.graphics import Graphic, RoundRect, Rect, Line, Circle, Arc,\
Ellipse, Image
class ReportPage(object):
    """One rendered output page: its geometry plus the drawable elements
    collected while the bands were generated."""
    # Class-level defaults; ``elements`` gets a fresh list per instance.
    rect = None
    elements = None
    width = None

    def __init__(self):
        """Start the page with an empty element list."""
        self.elements = []
class ReportGenerator(object):
"""A report generator is used to generate a report to a specific format."""
_is_first_page = True
_is_latest_page = True
_current_top_position = 0
_current_left_position = 0
_current_page_number = 0
_current_object = None
_current_queryset = None
_generation_datetime = None
# Groupping
_groups_values = None
_groups_working_values = None
_groups_changed = None
_groups_stack = None
# The rendered report has pages, each page is a ReportPage instance
_rendered_pages = None
_page_rect = None
def __init__(self, report):
"""This method should be overrided to receive others arguments"""
self.report = report
# Initializes some attributes
self._rendered_pages = []
self._groups_values = {}
self._groups_working_values = {}
self._groups_changed = {}
self._groups_stack = []
    def execute(self):
        """This method must be overrided to execute the report generation.

        The base implementation only resets the first-page flag."""
        # Initializes pages
        self._is_first_page = True
def render_border(self, borders_dict, rect_dict):
"""Renders a border in the coordinates setted in the rect."""
b_all = borders_dict.get('all', None)
if b_all:
graphic = isinstance(b_all, Graphic) and b_all or Rect()
graphic.set_rect(
left=rect_dict['left'],
top=rect_dict['top'] - rect_dict['height'],
width=rect_dict['right'] - rect_dict['left'],
height=rect_dict['height'],
)
self._rendered_pages[-1].elements.append(graphic)
b_left = borders_dict.get('left', None)
if b_left:
graphic = isinstance(b_left, Graphic) and b_left or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['top'],
right=rect_dict['left'], bottom=rect_dict['bottom']
)
self._rendered_pages[-1].elements.append(graphic)
b_top = borders_dict.get('top', None)
if b_top:
graphic = isinstance(b_top, Graphic) and b_top or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['top'],
right=rect_dict['right'], bottom=rect_dict['top']
)
self._rendered_pages[-1].elements.append(graphic)
b_right = borders_dict.get('right', None)
if b_right:
graphic = isinstance(b_right, Graphic) and b_right or Line()
graphic.set_rect(
left=rect_dict['right'], top=rect_dict['top'],
right=rect_dict['right'], bottom=rect_dict['bottom']
)
self._rendered_pages[-1].elements.append(graphic)
b_bottom = borders_dict.get('bottom', None)
if b_bottom:
graphic = isinstance(b_right, Graphic) and b_right or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['bottom'],
right=rect_dict['right'], bottom=rect_dict['bottom']
)
self._rendered_pages[-1].elements.append(graphic)
def make_band_rect(self, band, top_position, left_position):
"""Returns the right band rect on the PDF canvas"""
band_rect = {
'left': left_position, #self.report.margin_left,
'top': top_position,
'right': left_position + self.calculate_size(band.width), #self.report.page_size[0] - self.report.margin_right,
'bottom': top_position - self.calculate_size(band.height),
'height': self.calculate_size(band.height),
}
return band_rect
    def render_band(self, band, top_position=None, left_position=None,
            update_top=True, current_object=None):
        """Generate a band having the current top position or informed as its
        top coordinate.

        :param band: the band to draw (detail, header, footer, group, ...).
        :param top_position: canvas top coordinate; defaults to the running
            vertical cursor (``get_top_pos``).
        :param left_position: canvas left coordinate; defaults to
            ``get_left_pos``.
        :param update_top: when true, advance the vertical cursor past the
            band (its height, or the tallest element for auto-expanding
            bands, plus vertical margins) after drawing.
        :param current_object: object whose attributes feed the widgets;
            defaults to ``self._current_object``.
        """
        # Sets the current object
        current_object = current_object or self._current_object
        # Page width. This should be done in a metaclass in Report domain TODO
        self._rendered_pages[-1].width = self.calculate_size(self.report.page_size[0]) -\
                self.calculate_size(self.report.margin_left) - self.calculate_size(self.report.margin_right)
        # Default value for band width
        band.width = self.calculate_size(band.width) or self._rendered_pages[-1].width
        # Coordinates
        left_position = left_position or self.get_left_pos()
        # Increases the top position when being an inline displayed detail band
        if left_position > self.calculate_size(self.report.margin_left) and\
           getattr(band, 'display_inline', False) and\
           band.width < self.get_available_width():
            temp_height = band.height + getattr(band, 'margin_top', 0) + getattr(band, 'margin_bottom', 0)
            self.update_top_pos(decrease=self.calculate_size(temp_height))
        else:
            self.update_left_pos(set=0)
            left_position = self.get_left_pos()
        temp_top = top_position = top_position or self.get_top_pos()
        # Calculates the band dimensions on the canvas
        band_rect = self.make_band_rect(band, top_position, left_position)
        # Band borders
        self.render_border(band.borders, band_rect)
        # Variable that stores the highest height at all elements
        highest_height = 0
        # Loop at band widgets
        for element in band.elements:
            # Doesn't render not visible element
            if not element.visible:
                continue
            # Widget element
            if isinstance(element, Widget):
                widget = element.clone()
                # Set widget colors
                widget.font_color = self.report.default_font_color
                # Set widget basic attributes
                widget.instance = current_object
                widget.generator = self
                widget.report = self.report # This should be done by a metaclass in Band domain TODO
                widget.band = band # This should be done by a metaclass in Band domain TODO
                widget.page = self._rendered_pages[-1]
                if isinstance(widget, SystemField):
                    widget.left = band_rect['left'] + self.calculate_size(widget.left)
                    widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top))
                    temp_height = self.calculate_size(element.top) + self.calculate_size(widget.height)
                elif isinstance(widget, Label):
                    widget.para = self.make_paragraph(widget.text, self.make_paragraph_style(band, widget.style))
                    if widget.truncate_overflow:
                        # Clip the paragraph into the widget's declared box.
                        self.keep_in_frame(
                            widget,
                            self.calculate_size(widget.width),
                            self.calculate_size(widget.height),
                            [widget.para],
                            mode='truncate',
                        )
                        widget.left = band_rect['left'] + self.calculate_size(widget.left)
                        widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top), self.calculate_size(widget.height))
                    else:
                        self.wrap_paragraph_on(widget.para, self.calculate_size(widget.width), self.calculate_size(widget.height))
                        widget.left = band_rect['left'] + self.calculate_size(widget.left)
                        widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top), self.calculate_size(widget.para.height))
                    temp_height = self.calculate_size(element.top) + self.calculate_size(widget.para.height)
                else:
                    temp_height = self.calculate_size(element.top) + self.calculate_size(widget.height)
                # Sets element height as the highest
                if temp_height > highest_height:
                    highest_height = temp_height
                self._rendered_pages[-1].elements.append(widget)
            # Graphic element
            elif isinstance(element, Graphic):
                graphic = element.clone()
                # Set widget basic attributes
                graphic.instance = current_object
                graphic.generator = self
                graphic.report = self.report # This should be done by a metaclass in Band domain TODO
                graphic.band = band # This should be done by a metaclass in Band domain TODO
                graphic.page = self._rendered_pages[-1]
                # Set graphic colors
                graphic.fill_color = graphic.fill_color or self.report.default_fill_color
                graphic.stroke_color = graphic.stroke_color or self.report.default_stroke_color
                # Translate the graphic's report coordinates into canvas
                # coordinates (top grows downward in the report declaration).
                if isinstance(graphic, RoundRect):
                    graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                    graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
                elif isinstance(graphic, Rect):
                    graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                    graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
                elif isinstance(graphic, Line):
                    graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                    graphic.top = top_position - self.calculate_size(graphic.top)
                    graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
                    graphic.bottom = top_position - self.calculate_size(graphic.bottom)
                elif isinstance(graphic, Circle):
                    graphic.left_center = band_rect['left'] + self.calculate_size(graphic.left_center)
                    graphic.top_center = top_position - self.calculate_size(graphic.top_center)
                elif isinstance(graphic, Arc):
                    graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                    graphic.top = top_position - self.calculate_size(graphic.top)
                    graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
                    graphic.bottom = top_position - self.calculate_size(graphic.bottom)
                elif isinstance(graphic, Ellipse):
                    graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                    graphic.top = top_position - self.calculate_size(graphic.top)
                    graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
                    graphic.bottom = top_position - self.calculate_size(graphic.bottom)
                elif isinstance(graphic, Image):
                    graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
                    graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
                # Sets element height as the highest
                temp_height = self.calculate_size(element.top) + self.calculate_size(graphic.height)
                if temp_height > highest_height:
                    highest_height = temp_height
                self._rendered_pages[-1].elements.append(graphic)
        # Updates top position
        if update_top:
            if band.auto_expand_height:
                band_height = highest_height
            else:
                band_height = self.calculate_size(band.height)
            band_height += self.calculate_size(getattr(band, 'margin_top', 0))
            band_height += self.calculate_size(getattr(band, 'margin_bottom', 0))
            self.update_top_pos(band_height)
        # Updates left position
        if getattr(band, 'display_inline', False):
            self.update_left_pos(band.width + self.calculate_size(getattr(band, 'margin_right', 0)))
        else:
            self.update_left_pos(set=0)
        # Child bands
        for child_band in band.child_bands or []: # TODO This "or []" here is a quickfix
            # Doesn't generate if it is not visible
            if not child_band.visible:
                continue
            self.force_blank_page_by_height(self.calculate_size(child_band.height))
            self.render_band(child_band)
def force_blank_page_by_height(self, height):
"""Check if the height is in client available report height and
makes a new page if necessary"""
if self.get_available_height() < height:
self.force_new_page()
def force_new_page(self, insert_new_page=True):
"""Starts a new blank page"""
# Ends the current page
self._current_top_position = 0
# Creates the new page
if insert_new_page:
self._rendered_pages.append(ReportPage())
# Starts a new one
self.start_new_page()
# Page footer
self.render_page_footer()
def render_begin(self):
"""Renders the report begin band if it exists"""
if not self.report.band_begin:
return
# Doesn't generate this band if it is not visible
if not self.report.band_begin.visible:
return
# Call method that print the band area and its widgets
self.render_band(self.report.band_begin)
def render_summary(self):
"""Generate the report summary band if it exists"""
if not self.report.band_summary:
return
# Doesn't generate this band if it is not visible
if not self.report.band_summary.visible:
return
# Clears groups stack
self._groups_stack = []
# Check to force new page if there is no available space
self.force_blank_page_by_height(self.calculate_size(self.report.band_summary.height))
# Call method that print the band area and its widgets
self.render_band(self.report.band_summary)
def render_page_header(self):
"""Generate the report page header band if it exists"""
if not self.report.band_page_header:
return
# Doesn't generate this band if it is not visible
if not self.report.band_page_header.visible:
return
# Call method that print the band area and its widgets
self.render_band(
self.report.band_page_header,
top_position=self.calculate_size(self.report.margin_top),
update_top=False,
)
def render_page_footer(self):
"""Generate the report page footer band if it exists"""
if not self.report.band_page_footer:
return
# Doesn't generate this band if it is not visible
if not self.report.band_page_footer.visible:
return
# Call method that print the band area and its widgets
self.render_band(
self.report.band_page_footer,
top_position=self.calculate_size(self.report.page_size[1]) -\
self.calculate_size(self.report.margin_bottom) -\
self.calculate_size(self.report.band_page_footer.height),
update_top=False,
)
def render_end_current_page(self):
"""Closes the current page, using page breaker constant. Everything done after
this will draw into a new page. Before this, using the generate_page_footer
method to draw the footer"""
self.render_page_footer()
if self._is_latest_page:
self.render_summary()
self._current_page_number += 1
self._is_first_page = False
self.update_top_pos(set=0) # <---- update top position
def render_bands(self):
"""Loops into the objects list to create the report pages until the end"""
# Preparing local auxiliar variables
self._current_page_number = 0
self._current_object_index = 0
objects = self.report.get_objects_list()
# just an alias to make it easier
d_band = self.report.band_detail
# Empty report
if self.report.print_if_empty and not objects:
self.start_new_page()
self.render_begin()
self.render_end_current_page()
# Loop for pages
while self._current_object_index < len(objects):
# Starts a new page and generates the page header band
self.start_new_page()
first_object_on_page = True
# Generate the report begin band
if self._current_page_number == 0:
self.render_begin()
# Does generate objects if there is no details band
if not d_band:
self._current_object_index = len(objects)
# Loop for objects to go into grid on current page
while self._current_object_index < len(objects):
# Get current object from list
self._current_object = objects[self._current_object_index]
# Renders group bands for changed values
self.calc_changed_groups(first_object_on_page)
if not first_object_on_page:
self.render_groups_footers()
self.render_groups_headers()
# Generate this band only if it is visible
if d_band.visible:
self.render_band(d_band)
# Renders subreports
self.render_subreports()
# Next object
self._current_object_index += 1
first_object_on_page = False
# Break this if this page doesn't suppport nothing more...
# ... if there is no more available height
if self.get_available_height() < self.calculate_size(d_band.height):
# right margin is not considered to calculate the necessary space
d_width = self.calculate_size(d_band.width) + self.calculate_size(getattr(d_band, 'margin_left', 0))
# ... and this is not an inline displayed detail band or there is no width available
if not getattr(d_band, 'display_inline', False) or self.get_available_width() < d_width:
break
# ... or this band forces a new page and this is not the last object in objects list
elif d_band.force_new_page and self._current_object_index < len(objects):
break
# Sets this is the latest page or not
self._is_latest_page = self._current_object_index >= len(objects)
# Renders the finish group footer bands
if self._is_latest_page:
self.calc_changed_groups(False)
self.render_groups_footers(force=True)
# Ends the current page, printing footer and summary and necessary
self.render_end_current_page()
# Breaks if this is the latest item
if self._is_latest_page:
break
# Increment page number
self._current_page_number += 1
def start_new_page(self, with_header=True):
"""Do everything necessary to be done to start a new page"""
self._rendered_pages.append(ReportPage())
if with_header:
self.render_page_header()
# Page borders
if self.report.borders:
if not self._page_rect:
self._page_rect = self.report.get_page_rect()
self._page_rect['top'] = self.calculate_size(self.report.page_size[1]) - self._page_rect['top']
self._page_rect['bottom'] = self.calculate_size(self.report.page_size[1]) - self._page_rect['bottom']
self.render_border(self.report.borders, self._page_rect)
def calculate_size(self, size):
"""Uses the function 'calculate_size' to calculate a size"""
return calculate_size(size)
def get_left_pos(self):
"""Returns the left position of the drawer. Is useful on inline displayed detail bands"""
return self.calculate_size(self.report.margin_left) + self._current_left_position
    def get_available_width(self):
        """Returns the remaining horizontal client space on the current page:
        page width minus both margins and the running left offset."""
        return self.calculate_size(self.report.page_size[0]) - self.calculate_size(self.report.margin_left) -\
                self.calculate_size(self.report.margin_right) - self._current_left_position
    def calculate_top(self, *args):
        """Combine partial vertical offsets into one coordinate; the base
        implementation simply sums them."""
        return sum(args)
def get_top_pos(self):
"""We use this to use this to get the current top position,
considering also the top margin."""
ret = self.calculate_size(self.report.margin_top) + self._current_top_position
if self.report.band_page_header:
ret += self.calculate_size(self.report.band_page_header.height)
return ret
def get_available_height(self):
"""Returns the available client height area from the current top position
until the end of page, considering the bottom margin."""
ret = self.calculate_size(self.report.page_size[1]) - self.calculate_size(self.report.margin_bottom) -\
self.calculate_size(self.report.margin_top) - self._current_top_position
if self.report.band_page_header:
ret -= self.calculate_size(self.report.band_page_header.height)
if self.report.band_page_footer:
ret -= self.calculate_size(self.report.band_page_footer.height)
return ret
def update_top_pos(self, increase=0, decrease=0, set=None):
"""Updates the current top position controller, increasing (by default),
decreasing or setting it with a new value."""
if set is not None:
self._current_top_position = set
else:
self._current_top_position += increase
self._current_top_position -= decrease
return self._current_top_position
def update_left_pos(self, increase=0, decrease=0, set=None):
"""Updates the current left position controller, increasing (by default),
decreasing or setting it with a new value."""
if set is not None:
self._current_left_position = set
else:
self._current_left_position += increase
self._current_left_position -= decrease
return self._current_left_position
def get_page_count(self):
"""Calculate and returns the page count for this report. The challenge
here is do this calculate before to generate the pages."""
return len(self._rendered_pages)
def make_paragraph(self, text, style=None):
"""Uses the Paragraph class to return a new paragraph object"""
raise Exception('Not implemented')
def wrap_paragraph_on(self, paragraph, width, height):
"""Wraps the paragraph on the height/width informed"""
raise Exception('Not implemented')
# Stylizing
def set_fill_color(self, color):
"""Sets the current fill on canvas. Used for fonts and shape fills"""
pass
def set_stroke_color(self, color):
"""Sets the current stroke on canvas"""
pass
def set_stroke_width(self, width):
"""Sets the stroke/line width for shapes"""
pass
# Groups topic
def calc_changed_groups(self, force_no_changed=False):
"""Defines which groups has been changed their driver values to be
used to render group bands"""
changed = force_no_changed
# Stores the previous group values
self._groups_working_values = self._groups_values.copy()
# Loops on groups until find the first changed, then all under it are considered
# changed also
for group in self.report.groups:
# Gets the current value to compare with the old one
current_value = get_attr_value(self._current_object, group.attribute_name)
# Set changed as True if if wasn't and there is a change
changed = changed or current_value != self._groups_values.get(group, None)
# Stores new values
self._groups_changed[group] = changed
self._groups_values[group] = current_value
# Appends to the stack
if changed:
self._groups_stack.append(group)
def render_groups_headers(self):
"""Renders the report headers using 'changed' definition calculated by
'calc_changed_groups'"""
# Update working values for groups
self._groups_working_values = self._groups_values
# Loops on groups to render changed ones
for group in self.report.groups:
if self._groups_changed.get(group, None) and\
group.band_header and\
group.band_header.visible:
self.force_blank_page_by_height(self.calculate_size(group.band_header.height))
self.render_band(group.band_header)
def render_groups_footers(self, force=False):
    """Renders the report footers using previous 'changed' definition calculated by
    'calc_changed_groups'.

    Groups are walked innermost-first (reversed) so footers close in
    reverse nesting order. With force=True every open group footer is
    rendered (used at end of report).
    """
    # Loops on groups to render changed ones
    for group in reversed(self.report.groups):
        if force or ( self._groups_changed.get(group, None) and\
                      self._groups_stack and\
                      self._groups_stack[-1] == group ):
            if group.band_footer and group.band_footer.visible:
                # Break to a new page first if the footer band would not fit.
                self.force_blank_page_by_height(self.calculate_size(group.band_footer.height))
                self.render_band(group.band_footer)
            # Leaving this group: drop its working value and pop the stack.
            if self._groups_stack:
                self._groups_working_values.pop(self._groups_stack[-1])
                self._groups_stack.pop()
def get_current_queryset(self):
    """Returns the current queryset. This solves a problem with subreports
    footers and headers, and solves also flexibility and customization issues."""
    # An explicit queryset (set by subreports or customizations) wins.
    explicit = self._current_queryset
    if explicit is not None:
        return explicit
    # While inside a group, restrict objects to the current group values.
    if self._groups_stack:
        return self.get_objects_in_group()
    # Otherwise fall back to the report's own detail-driver queryset.
    return self.report.queryset
def get_objects_in_group(self):
    """Returns objects filtered in the current group or all if there is no
    group"""
    # Attribute/value pairs identifying the group currently being rendered.
    filter_dict = dict([(group.attribute_name, value) for group, value in self._groups_working_values.items()])
    def filter_object(obj):
        # Keep only objects whose attributes all match the group values.
        for k, v in filter_dict.items():
            if get_attr_value(obj, k) != v:
                return False
        # Bug fix: return True instead of the object itself — returning the
        # object made filter() wrongly drop matching objects that happen to
        # evaluate as falsy (e.g. empty containers, 0).
        return True
    return filter(filter_object, self.report.queryset)
# SubReports
def render_subreports(self):
    """Renders subreports bands for the current object in, using its
    own queryset.
    For a while just the detail band is rendered. Maybe in future we
    change this to accept header and footer."""
    def force_new_page(height):
        # Forces new page if there is no available space for `height`
        if self.get_available_height() < self.calculate_size(height):
            self.render_page_footer()
            self.force_new_page(insert_new_page=False)
    for subreport in self.report.subreports:
        # Subreports must have detail band
        if not subreport.band_detail or not subreport.visible:
            continue
        # Sets the parent object and automatically clear the queryset
        # in memory
        subreport.parent_object = self._current_object
        # Sets the temporary currenty queryset so nested rendering
        # (via get_current_queryset) sees the subreport's objects
        self._current_queryset = subreport.get_objects_list()
        # Loops objects
        for num, obj in enumerate(subreport.get_objects_list()):
            # Renders the header band (only before the first object)
            if num == 0 and subreport.band_header:
                # Forces new page if there is no available space
                force_new_page(subreport.band_header.height)
                # Renders the header band
                if subreport.band_header.visible:
                    self.render_band(subreport.band_header)
            # Forces new page if there is no available space
            force_new_page(subreport.band_detail.height)
            # Renders the detail band
            if subreport.band_detail.visible:
                self.render_band(subreport.band_detail, current_object=obj)
        # Renders the footer band (after the last object)
        if subreport.band_footer:
            # Forces new page if there is no available space
            force_new_page(subreport.band_footer.height)
            # Renders the footer band
            if subreport.band_footer.visible:
                self.render_band(subreport.band_footer)
        # Sets back the default currenty queryset
        self._current_queryset = None
def make_paragraph_style(self, band, style=None):
    """Merge report default_style + band default_style + widget style.

    Abstract hook: concrete generator backends must override this.
    Raises NotImplementedError (a subclass of Exception, so existing
    callers catching Exception are unaffected).
    """
    raise NotImplementedError('Not implemented')
def keep_in_frame(self, widget, width, height, paragraphs, mode):
    """Constrain the given paragraphs to a width/height frame for `widget`.

    Abstract hook: concrete generator backends must override this
    (the original raised a bare Exception; NotImplementedError is the
    idiomatic, backward-compatible choice).
    """
    raise NotImplementedError('Not implemented')
| 752 | 84 | 166 |
f61f403e0167f05e3a18dc9f92e6651a3bbdaa15 | 2,551 | py | Python | front-parent/python/zip-test.py | jufeng98/b2c-master | 5db75cea023c0f775a56b2a872e75e8f2fe4639f | [
"Unlicense"
] | null | null | null | front-parent/python/zip-test.py | jufeng98/b2c-master | 5db75cea023c0f775a56b2a872e75e8f2fe4639f | [
"Unlicense"
] | null | null | null | front-parent/python/zip-test.py | jufeng98/b2c-master | 5db75cea023c0f775a56b2a872e75e8f2fe4639f | [
"Unlicense"
] | null | null | null | import zipfile
try:
with zipfile.ZipFile("data/hfpy_ch5_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
fileContent = sanitize(fileContent)
players.append(sorted(fileContent.strip().split(",")))
print(players)
except IOError as e:
print("error" + str(e))
try:
with zipfile.ZipFile("data/hfpy_ch5_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
# 列表推导
player = [sanitize(content) for content in fileContent.strip().split(",")]
players.append(sorted(set(player))[0:3])
print(players)
except IOError as e:
print("error" + str(e))
try:
with zipfile.ZipFile("data/hfpy_ch6_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
infos = fileContent.strip().split(",")
player = {
"name": infos[0],
"birthday": infos[1]
}
scores = infos[2:len(infos) - 1]
# 列表推导
scores = [sanitize(content) for content in scores]
player["scores"] = sorted(set(scores))[0:3]
players.append(player)
print(players)
except IOError as e:
print("error" + str(e))
try:
with zipfile.ZipFile("data/hfpy_ch6_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
infos = fileContent.strip().split(",")
scores = infos[2:len(infos) - 1]
player = Player(infos[0], infos[1], scores)
players.append(player)
print(players)
except IOError as e:
print("error" + str(e))
jonny = NamedList("Jonny")
print(type(jonny))
print(dir(jonny))
| 28.988636 | 86 | 0.573893 | import zipfile
def sanitize(time):
    """Normalize a race-time string: '-' and ':' separators become '.'."""
    normalized = time.replace("-", ".")
    return normalized.replace(":", ".")
try:
with zipfile.ZipFile("data/hfpy_ch5_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
fileContent = sanitize(fileContent)
players.append(sorted(fileContent.strip().split(",")))
print(players)
except IOError as e:
print("error" + str(e))
try:
with zipfile.ZipFile("data/hfpy_ch5_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
# 列表推导
player = [sanitize(content) for content in fileContent.strip().split(",")]
players.append(sorted(set(player))[0:3])
print(players)
except IOError as e:
print("error" + str(e))
try:
with zipfile.ZipFile("data/hfpy_ch6_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
infos = fileContent.strip().split(",")
player = {
"name": infos[0],
"birthday": infos[1]
}
scores = infos[2:len(infos) - 1]
# 列表推导
scores = [sanitize(content) for content in scores]
player["scores"] = sorted(set(scores))[0:3]
players.append(player)
print(players)
except IOError as e:
print("error" + str(e))
class Player:
    """A runner described by a name, a birthday and a list of raw times."""

    def __init__(self, fullname, birthday, times):
        self.name = fullname
        self.birthday = birthday
        self.scores = times

    def top3(self):
        """Return the three fastest distinct times, normalized via sanitize()."""
        unique_times = {sanitize(raw) for raw in self.scores}
        return sorted(unique_times)[0:3]
try:
with zipfile.ZipFile("data/hfpy_ch6_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
infos = fileContent.strip().split(",")
scores = infos[2:len(infos) - 1]
player = Player(infos[0], infos[1], scores)
players.append(player)
print(players)
except IOError as e:
print("error" + str(e))
class NamedList(list):
    """A list subclass that also carries a human-readable name."""

    def __init__(self, a_name):
        # Bug fix: the original called list.__init__([]), which initialized
        # a throwaway literal instead of this instance; initialize self.
        super().__init__()
        self.name = a_name
jonny = NamedList("Jonny")
print(type(jonny))
print(dir(jonny))
| 298 | -7 | 148 |
652211eac50ca7220594f6ac3e7cd5e4c7b797b6 | 671 | py | Python | nrf-rust-bootloaders/nrf52840-blinky-app/signatures/pyopenssl_keys/raw_signature_extract.py | nihalpasham/nrf-secureboot-test | f020df3e0240556571876e535d44ee3616cfe2ab | [
"Apache-2.0"
] | null | null | null | nrf-rust-bootloaders/nrf52840-blinky-app/signatures/pyopenssl_keys/raw_signature_extract.py | nihalpasham/nrf-secureboot-test | f020df3e0240556571876e535d44ee3616cfe2ab | [
"Apache-2.0"
] | null | null | null | nrf-rust-bootloaders/nrf52840-blinky-app/signatures/pyopenssl_keys/raw_signature_extract.py | nihalpasham/nrf-secureboot-test | f020df3e0240556571876e535d44ee3616cfe2ab | [
"Apache-2.0"
] | null | null | null | # Extract the raw 32 byte values of 'r and s' from OpenSSL's DER formatted signature. bytelen('r + s') == 64
from asn1crypto.core import Sequence
import binascii
raw64byte_sig = ''
with open("REAL_DERformat_openssl_gen_sig.bin", "rb") as f:
signature = f.read()
# parse the ASN.1 sequence from this signature
seq = Sequence.load(signature)
# print the native (Pythonic) representation of this ASN.1 object
dict = seq.native
for k,v in dict.items():
hexed = hex(v).strip('0x')
# print(hexed)
raw64byte_sig += hexed
# print(raw64byte_sig)
with open("REAL_raw64byte_sig_gen_from_openssl.bin", "wb") as f:
f.write(binascii.unhexlify(raw64byte_sig)) | 35.315789 | 109 | 0.724292 | # Extract the raw 32 byte values of 'r and s' from OpenSSL's DER formatted signature. bytelen('r + s') == 64
from asn1crypto.core import Sequence
import binascii
raw64byte_sig = ''
with open("REAL_DERformat_openssl_gen_sig.bin", "rb") as f:
signature = f.read()
# parse the ASN.1 sequence from this signature
seq = Sequence.load(signature)
# print the native (Pythonic) representation of this ASN.1 object
dict = seq.native
for k,v in dict.items():
hexed = hex(v).strip('0x')
# print(hexed)
raw64byte_sig += hexed
# print(raw64byte_sig)
with open("REAL_raw64byte_sig_gen_from_openssl.bin", "wb") as f:
f.write(binascii.unhexlify(raw64byte_sig)) | 0 | 0 | 0 |
a8eb81d1e70e2a1e281eb22566f4d417e0f4494c | 800 | py | Python | hierarchy_astar_uavs/hierarchy_astar_uavs/grid_search_astar.py | jn89b/hierarchy_astar_uavs | 4714260fc1b1d8c431a5ef2f259503918b90b2ed | [
"MIT"
] | null | null | null | hierarchy_astar_uavs/hierarchy_astar_uavs/grid_search_astar.py | jn89b/hierarchy_astar_uavs | 4714260fc1b1d8c431a5ef2f259503918b90b2ed | [
"MIT"
] | null | null | null | hierarchy_astar_uavs/hierarchy_astar_uavs/grid_search_astar.py | jn89b/hierarchy_astar_uavs | 4714260fc1b1d8c431a5ef2f259503918b90b2ed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Do grid search only for uavs
should save the configurations of the uavs from hiearchy search
send this configuration to the Astar
"""
from __future__ import print_function
import numpy as np
import math as m
import random
from Astar import Astar, AstarGraph, AstarLowLevel
import itertools
from itertools import combinations, permutations, product
import time
import gc
import heapq
import pickle
from operator import add, sub
def load_pkl_map(pkl_file_name='map_test.pkl'):
    """Returns the annotated map unpickled from pkl_file_name.

    Bug fix: the original ignored its parameter and read the global
    map_pkl_name instead; the parameter now has a default value so the
    existing no-argument call keeps working.
    """
    with open(pkl_file_name, 'rb') as f:
        annotated_map = pickle.load(f)
    return annotated_map
if __name__=='__main__':
    ##let's just load the map to save us time
    map_pkl_name = 'map_test.pkl'
    # Pass the path explicitly: load_pkl_map declares a file-name parameter
    # (the original no-argument call raised TypeError).
    annoted_map = load_pkl_map(map_pkl_name)
| 22.857143 | 63 | 0.73625 | # -*- coding: utf-8 -*-
"""
Do grid search only for uavs
should save the configurations of the uavs from hiearchy search
send this configuration to the Astar
"""
from __future__ import print_function
import numpy as np
import math as m
import random
from Astar import Astar, AstarGraph, AstarLowLevel
import itertools
from itertools import combinations, permutations, product
import time
import gc
import heapq
import pickle
from operator import add, sub
def load_pkl_map(pkl_file_name='map_test.pkl'):
    """Returns the annotated map unpickled from pkl_file_name.

    Bug fix: the original ignored its parameter and read the global
    map_pkl_name instead; the parameter now has a default value so the
    existing no-argument call keeps working.
    """
    with open(pkl_file_name, 'rb') as f:
        annotated_map = pickle.load(f)
    return annotated_map
if __name__=='__main__':
    ##let's just load the map to save us time
    map_pkl_name = 'map_test.pkl'
    # Pass the path explicitly: load_pkl_map declares a file-name parameter
    # (the original no-argument call raised TypeError).
    annoted_map = load_pkl_map(map_pkl_name)
| 0 | 0 | 0 |
afc60914fa9f7d74df0ed7e2213416b1dd04c15e | 1,328 | py | Python | tikup/argparser.py | gabefair/TikUp | ff6fa9df19c04a91a416e74d9f71be49cf3f1142 | [
"MIT"
] | 148 | 2020-06-05T23:27:42.000Z | 2022-03-24T10:25:41.000Z | tikup/argparser.py | gabefair/TikUp | ff6fa9df19c04a91a416e74d9f71be49cf3f1142 | [
"MIT"
] | 40 | 2020-06-05T21:17:20.000Z | 2022-02-17T23:22:48.000Z | tikup/argparser.py | gabefair/TikUp | ff6fa9df19c04a91a416e74d9f71be49cf3f1142 | [
"MIT"
] | 22 | 2020-07-08T05:44:02.000Z | 2022-02-18T03:33:45.000Z | from argparse import ArgumentParser
| 34.947368 | 108 | 0.636295 | from argparse import ArgumentParser
def parse_args():
    """Parse the TikUp command line and return the populated namespace."""
    parser = ArgumentParser(description="An auto downloader and uploader for TikTok videos.")
    # (argument name/flag, add_argument keyword options), in declaration order.
    argument_specs = [
        ("user", {}),
        ("--no-delete", {
            "action": "store_false",
            "help": "don't delete files once uploaded to the Internet Archive",
        }),
        ("--hashtag", {"action": "store_true", "help": "download this hashtag"}),
        ("--limit", {"help": "set limit on amount of TikToks to download"}),
        ("--use-download-archive", {
            "action": "store_true",
            "help": (
                "record the video url to the download archive. "
                "This will download only videos not listed in the archive file. "
                "Record the IDs of all downloaded videos in it."
            ),
        }),
        ("--id", {"action": "store_true", "help": "download this video ID"}),
        ("--liked", {"action": "store_true", "help": "download this user's liked posts"}),
        ("--folder", {"help": "set download destination (default: ~/.tikup)"}),
        ("--no-upload", {
            "action": "store_false",
            "help": "turn off uploading to the Internet Archive",
        }),
    ]
    for flag, options in argument_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
| 1,269 | 0 | 23 |
243aa2e3cfe9a344e489882a124e7c3032fb2945 | 466 | py | Python | Python/math/Squares_in_2n_Chessboard.py | yashsahay2014/NeoAlgo | 4f1e5bdd6d9d899fa354de94740e0aecf5ecd2be | [
"MIT"
] | 897 | 2020-06-25T00:12:52.000Z | 2022-03-24T00:49:31.000Z | Python/math/Squares_in_2n_Chessboard.py | adarshnjena/NeoAlgo | 77a92858d2bf970054ef31c2f55a6d79917a786a | [
"MIT"
] | 5,707 | 2020-06-24T17:53:28.000Z | 2022-01-22T05:03:15.000Z | Python/math/Squares_in_2n_Chessboard.py | adarshnjena/NeoAlgo | 77a92858d2bf970054ef31c2f55a6d79917a786a | [
"MIT"
] | 1,817 | 2020-06-25T03:51:05.000Z | 2022-03-29T05:14:07.000Z | #Find total number of Squares in a N*N cheesboard.
# using mathematical logic
#taking input
num = int(input("Enter the number :"))
obj = Solution()
print("The square : ")
print(obj.squares(num))
'''
Time complexity : O(N)
Space complexity : O(1)
Input :
Enter the number : 1
Output :
The square is : 1
''' | 17.259259 | 50 | 0.585837 | #Find total number of Squares in a N*N cheesboard.
class Solution:
    """Counts the number of 1x1 squares summed as 1^2 + 2^2 + ... + N^2."""

    def squares(self, N):
        """Return 1^2 + 2^2 + ... + N^2 for an N*N chessboard.

        Uses the closed-form sum-of-squares formula N(N+1)(2N+1)/6,
        which is O(1) instead of the original O(N) loop and returns
        the same value (0 for N <= 0, matching the empty loop).
        """
        if N <= 0:
            return 0
        return N * (N + 1) * (2 * N + 1) // 6
#taking input
num = int(input("Enter the number :"))
obj = Solution()
print("The square : ")
print(obj.squares(num))
'''
Time complexity : O(N)
Space complexity : O(1)
Input :
Enter the number : 1
Output :
The square is : 1
''' | 110 | -6 | 49 |
c9579a0aadf1385931d1d9db24aac5d3531e8a9d | 3,660 | py | Python | nlplr/utils/context_preview/marker.py | jwiltfang/nlp-labeling-repair | 5ba490cb5931d36e30807ff81ce56287876cd67c | [
"MIT"
] | 1 | 2021-11-17T11:25:36.000Z | 2021-11-17T11:25:36.000Z | nlplr/utils/context_preview/marker.py | jwiltfang/nlp-labeling-repair | 5ba490cb5931d36e30807ff81ce56287876cd67c | [
"MIT"
] | null | null | null | nlplr/utils/context_preview/marker.py | jwiltfang/nlp-labeling-repair | 5ba490cb5931d36e30807ff81ce56287876cd67c | [
"MIT"
] | null | null | null | import fitz
from typing import Dict, List
import logging
logger = logging.getLogger(__name__)
| 40.666667 | 119 | 0.605191 | import fitz
from typing import Dict, List
import logging
logger = logging.getLogger(__name__)
class Marker:
    """Annotates words in a PDF (via PyMuPDF/fitz), highlighting them as
    correct (green) or incorrect (red) with an explanatory note."""

    def __init__(self):
        self.doc = None  # set by load_pdf()
        # Geometry tweaks: grow the highlight rect / offset the note icon.
        self.rect_size_add = (-20, -10, 20, 10)
        self.point_move = (20, 10)
        # Colours are RGB triples in the 0..1 range used by fitz.
        self.stroke_color = (0, 0, 1)
        self.fill_color_correct = (0, 1, 0)
        self.fill_color_incorrect = (1, 0, 0)
        self.text_correct = 'correct value (stays unchanged)'
        self.text_incorrect = 'incorrect value (is changed)'
        self.icon = 'Help'
        # Bug fix: 'text_correct'/'text_incorrect' values were swapped here
        # and disagreed with the attributes above.
        self.setting = {'rect_size_add': (-20, -10, 20, 10),
                        'point_move': (20, 10),
                        'stroke_color': (0, 0, 1),
                        'fill_color_correct': (0, 1, 0),
                        'fill_color_incorrect': (1, 0, 0),
                        'text_correct': 'correct value (stays unchanged)',
                        'text_incorrect': 'incorrect value (is changed)',
                        'icon': 'Help'}

    def load_pdf(self, filepath: str):
        """open file to work with pdf in fitz"""
        self.doc = fitz.open(filepath)

    def save_pdf(self, save_location: str):
        """Save the annotated document, close it, and return its path."""
        self.doc.save(save_location)
        self.doc.close()
        return save_location

    def mark_words(self, selected_words: Dict[str, List[str]]):
        """Mark every occurrence of the 'correct'/'incorrect' word lists
        on every page of the loaded document."""
        logger.info(f'preview for {selected_words}')
        for page in self.doc:
            # correct word
            for word in selected_words['correct']:
                rect, point = self.get_word_geometry(page, word)
                if rect:
                    self.mark_correct_word(page, rect, point)
            # incorrect word
            for word in selected_words['incorrect']:
                rect, point = self.get_word_geometry(page, word)
                if rect:
                    self.mark_incorrect_word(page, rect, point)

    def get_word_geometry(self, page, word):
        """Return (rect, point) for the first hit of `word` on the page:
        the enlarged highlight rect and the note anchor point next to it.
        Returns (None, None) if the word is not found on the page."""
        try:
            quads = page.search_for(word, quads=True)[0]  # corner points of the first hit
            rect = quads.rect + self.rect_size_add  # increase size
            point = quads[3] + self.point_move  # moved to the side of the element
            return rect, point
        except IndexError as ie:
            # search_for returned no hits on this page
            logger.error(f'Index Error: {ie}')
            return None, None

    def mark_correct_word(self, page, rect, point):
        """Annotate one word occurrence as correct (green fill + note)."""
        self._add_text_annot(page, point, self.text_correct)
        self._add_rect_annot(page, rect, stroke_color=self.stroke_color, fill_color=self.fill_color_correct)

    def mark_incorrect_word(self, page, rect, point):
        """Annotate one word occurrence as incorrect (red fill + note)."""
        self._add_text_annot(page, point, self.text_incorrect)
        self._add_rect_annot(page, rect, stroke_color=self.stroke_color, fill_color=self.fill_color_incorrect)

    @staticmethod
    def _add_text_annot(page, point, text_value, icon='Help'):
        """Add a sticky-note annotation at `point`."""
        annot = page.addTextAnnot(point, text_value, icon=icon)
        return annot

    @staticmethod
    def _add_rect_annot(page, rect, width=1, dashes=(1, 2), stroke_color=(0, 0, 1), fill_color=(0, 0, 1), opacity=0.5):
        """Add a semi-transparent dashed rectangle annotation over `rect`."""
        annot = page.addRectAnnot(rect)
        annot.set_border(width=width, dashes=dashes)
        annot.set_colors(stroke=stroke_color, fill=fill_color)
        annot.update(opacity=opacity)
        return annot

    @staticmethod
    def _add_freetext_annot(page, rect, text_value, alignment=(100, 0, 100, 0)):
        """align freetext to the right"""
        freetext_rect = rect + alignment
        annot = page.addFreetextAnnot(freetext_rect, text_value)
        return annot
dd6b3de66e11cb0ef4026b7ac64027ff9e5b4605 | 2,010 | py | Python | bind/libevt/python_api/test_abi.py | ZizhouJia/evt_python_api | 4c0a62a80a1d6383e0bd7610ddec261acf437f21 | [
"MIT"
] | null | null | null | bind/libevt/python_api/test_abi.py | ZizhouJia/evt_python_api | 4c0a62a80a1d6383e0bd7610ddec261acf437f21 | [
"MIT"
] | null | null | null | bind/libevt/python_api/test_abi.py | ZizhouJia/evt_python_api | 4c0a62a80a1d6383e0bd7610ddec261acf437f21 | [
"MIT"
] | null | null | null | #encoding=utf-8
import evt_abi
abi=evt_abi.evt_abi()
j=r'''
{
"name": "test",
"issuer": "EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"issue": {
"name": "issue",
"threshold": 1,
"authorizers": [{
"ref": "[A] EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"weight": 1
}]
},
"transfer": {
"name": "transfer",
"threshold": 1,
"authorizers": [{
"ref": "[G] OWNER",
"weight": 1
}]
},
"manage": {
"name": "manage",
"threshold": 1,
"authorizers": [{
"ref": "[A] EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"weight": 1
}]
}
}
"transfer": {
"name": "transfer",
"threshold": 1,
"authorizers": [{
"ref": "[G] OWNER",
"weight": 1
}]
},
"manage": {
"name": "manage",
"threshold": 1,
"authorizers": [{
"ref": "[A] EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"weight": 1
}]
}
}
'''
j2=r'''
{
"expiration": "2018-05-20T12:25:51",
"ref_block_num": 8643,
"ref_block_prefix": 842752750,
"delay_sec": 0,
"actions": [
{
"name": "newdomain",
"domain": "domain",
"key": "test2",
"data": "000000000000000000000000109f077d0003c7e3ff0060d848bd31bf53daf1d5fed7d82c9b1121394ee15dcafb07e913a9700000000000a5317601000000010100000003c7e3ff0060d848bd31bf53daf1d5fed7d82c9b1121394ee15dcafb07e913a9706d4859000000000100000000572d3ccdcd010000000102000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000002866a69101000000010100000003c7e3ff0060d848bd31bf53daf1d5fed7d82c9b1121394ee15dcafb07e913a9706d4859000000000100"
}
],
"transaction_extensions": []
}
'''
bin=abi.evt_abi_json_to_bin("newdomain",j)
json=abi.evt_abi_bin_to_json("newdomain",bin)
print(bin.data)
print(json)
chain_id=abi.evt_chain_id_from_string("bb248d6319e51ad38502cc8ef8fe607eb5ad2cd0be2bdc0e6e30a506761b8636")
digest=abi.evt_trx_json_to_digest(j2, chain_id)
print(chain_id)
print(digest)
| 25.125 | 472 | 0.688557 | #encoding=utf-8
import evt_abi
abi=evt_abi.evt_abi()
j=r'''
{
"name": "test",
"issuer": "EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"issue": {
"name": "issue",
"threshold": 1,
"authorizers": [{
"ref": "[A] EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"weight": 1
}]
},
"transfer": {
"name": "transfer",
"threshold": 1,
"authorizers": [{
"ref": "[G] OWNER",
"weight": 1
}]
},
"manage": {
"name": "manage",
"threshold": 1,
"authorizers": [{
"ref": "[A] EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"weight": 1
}]
}
}
"transfer": {
"name": "transfer",
"threshold": 1,
"authorizers": [{
"ref": "[G] OWNER",
"weight": 1
}]
},
"manage": {
"name": "manage",
"threshold": 1,
"authorizers": [{
"ref": "[A] EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"weight": 1
}]
}
}
'''
j2=r'''
{
"expiration": "2018-05-20T12:25:51",
"ref_block_num": 8643,
"ref_block_prefix": 842752750,
"delay_sec": 0,
"actions": [
{
"name": "newdomain",
"domain": "domain",
"key": "test2",
"data": "000000000000000000000000109f077d0003c7e3ff0060d848bd31bf53daf1d5fed7d82c9b1121394ee15dcafb07e913a9700000000000a5317601000000010100000003c7e3ff0060d848bd31bf53daf1d5fed7d82c9b1121394ee15dcafb07e913a9706d4859000000000100000000572d3ccdcd010000000102000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000002866a69101000000010100000003c7e3ff0060d848bd31bf53daf1d5fed7d82c9b1121394ee15dcafb07e913a9706d4859000000000100"
}
],
"transaction_extensions": []
}
'''
bin=abi.evt_abi_json_to_bin("newdomain",j)
json=abi.evt_abi_bin_to_json("newdomain",bin)
print(bin.data)
print(json)
chain_id=abi.evt_chain_id_from_string("bb248d6319e51ad38502cc8ef8fe607eb5ad2cd0be2bdc0e6e30a506761b8636")
digest=abi.evt_trx_json_to_digest(j2, chain_id)
print(chain_id)
print(digest)
| 0 | 0 | 0 |
949cb3326f5275a2def522326a1a985a254afcbf | 425 | py | Python | csv/demo2.py | silianpan/seal-spider-demo | 23bf013d08f9edaf23823bc3787f579bccd0ec3a | [
"Apache-2.0"
] | null | null | null | csv/demo2.py | silianpan/seal-spider-demo | 23bf013d08f9edaf23823bc3787f579bccd0ec3a | [
"Apache-2.0"
] | 3 | 2021-09-08T01:11:16.000Z | 2022-03-02T15:14:03.000Z | csv/demo2.py | silianpan/seal-spider-demo | 23bf013d08f9edaf23823bc3787f579bccd0ec3a | [
"Apache-2.0"
] | 1 | 2019-08-04T09:57:29.000Z | 2019-08-04T09:57:29.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-13 14:52
# @Author : liupan
# @Site :
# @File : demo2.py
# @Software: PyCharm
import csv
with open('data.csv', 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=' ')
writer.writerow(['id', 'name', 'age'])
writer.writerow(['10001', 'Mike', 20])
writer.writerow(['10002', 'Bob', 22])
writer.writerow(['10003', 'Jordan', 21]) | 26.5625 | 47 | 0.581176 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-13 14:52
# @Author : liupan
# @Site :
# @File : demo2.py
# @Software: PyCharm
import csv
with open('data.csv', 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=' ')
writer.writerow(['id', 'name', 'age'])
writer.writerow(['10001', 'Mike', 20])
writer.writerow(['10002', 'Bob', 22])
writer.writerow(['10003', 'Jordan', 21]) | 0 | 0 | 0 |
985f2d5897f8575d465cd87d1c72d449ce7781df | 15,150 | py | Python | plugins/dbnd-spark/src/dbnd_spark/spark_targets/spark_histograms.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | plugins/dbnd-spark/src/dbnd_spark/spark_targets/spark_histograms.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | plugins/dbnd-spark/src/dbnd_spark/spark_targets/spark_histograms.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import logging
import os
import time
import typing
from collections import defaultdict
import pyspark.sql as spark
from pyspark import Row
from pyspark.sql.functions import (
approx_count_distinct,
col,
count,
countDistinct,
desc,
floor,
isnull,
lit,
when,
)
from pyspark.sql.types import (
ArrayType,
BooleanType,
MapType,
NumericType,
StringType,
StructType,
)
from dbnd._core.settings.histogram import HistogramConfig
from dbnd._core.utils import seven
if typing.TYPE_CHECKING:
from typing import Tuple, Dict, List
from targets.value_meta import ValueMetaConf
from pyspark.sql.dataframe import DataFrame
logger = logging.getLogger(__name__)
class SparkHistograms(object):
"""
calculates histograms and stats on spark dataframe.
they're calculated together since we do it per column and we use cache.
"""
def _cache_df_with_parquet_store(self, df, spark_parquet_cache_dir):
""" save dataframe as column-based parquet file to allow fast column queries which histograms depend on """
from dbnd_spark.spark_targets import SparkDataFrameValueType
signature = SparkDataFrameValueType().to_signature(df)
file_name = "dbnd_spark_dataframe_{}.parquet".format(signature)
path = os.path.join(spark_parquet_cache_dir, file_name)
self._temp_parquet_path = path
logger.info("Caching spark dataframe into '%s'.", path)
df.write.parquet(path)
logger.info("Reading spark dataframe from '%s'.", path)
df = df.sql_ctx.sparkSession.read.parquet(path)
return df
def _is_count_in_summary(self, dataframe, column_name):
""" dataframe.summary() returns count only for numeric and string types, otherwise we need to calculate it our own """
column_field = [f for f in dataframe.schema.fields if f.name == column_name][0]
return isinstance(column_field.dataType, (NumericType, StringType))
def _convert_numeric_histogram_collect_to_tuple(
self, value_counts, min_value, max_value
):
# type: (List[Row], float, float) -> Tuple
"""
value_counts is list of rows with each row representing a bucket.
each bucket has: bucket index and number of values.
we convert it to histogram represented as a tuple of 2 lists:
number of values in bucket and bucket boundaries.
"""
bucket_count = 20
counts = [0] * bucket_count
bucket_size = (max_value - min_value) / bucket_count
values = [min_value + i * bucket_size for i in range(bucket_count + 1)]
for row in value_counts:
bucket, count = row
if bucket is None:
continue
if bucket == bucket_count:
# handle edge of last bucket (values equal to max_value will be in bucket n+1 instead of n)
bucket = bucket - 1
counts[bucket] += count
return counts, values
    def _calc_spark_categorical_hist_and_stats(self, column_df_list):
        """
        all columns in column_df_list should have the same type (e.g. all should be booleans or all strings).
        it might not be relevant anymore since we do collect() per column.
        keeping it that way for now so we could change it back to collect() once for all columns.

        Returns the collected rows of (value, count, column_name), at most
        max_buckets - 1 per column (one slot is reserved for the '_others'
        bucket added later).
        """
        max_buckets = 50
        value_counts = []
        for column_df in column_df_list:
            if self.config.spark_cache_dataframe_column:
                column_df_cached = True
                column_df.cache()
            else:
                column_df_cached = False
            try:
                column_name = column_df.schema.names[0]
                # stats are always computed, even for columns that only
                # requested stats and no histogram
                self._calc_categorical_column_stats(column_df, column_name)
                if column_name not in self._histogram_column_names:
                    continue
                # most frequent values first; limit leaves room for '_others'
                column_value_counts = (
                    column_df.groupby(column_name)
                    .count()
                    .orderBy(desc("count"))
                    .withColumn("column_name", lit(column_name))
                    .limit(max_buckets - 1)
                )
                column_value_counts = column_value_counts.collect()
                value_counts.extend(column_value_counts)
            finally:
                # always release the per-column cache, even on continue/raise
                if column_df_cached:
                    column_df.unpersist()
        return value_counts
def _add_others(self, histograms):
""" sum all least significant values (who left out of histogram) to one bucket """
for column_name, histogram in histograms.items():
histogram_sum_count = sum(histogram[0])
others_count = self.stats[column_name]["count"] - histogram_sum_count
if others_count > 0:
histogram[0].append(others_count)
histogram[1].append("_others")
@seven.contextlib.contextmanager
| 38.35443 | 126 | 0.624224 | from __future__ import absolute_import
import logging
import os
import time
import typing
from collections import defaultdict
import pyspark.sql as spark
from pyspark import Row
from pyspark.sql.functions import (
approx_count_distinct,
col,
count,
countDistinct,
desc,
floor,
isnull,
lit,
when,
)
from pyspark.sql.types import (
ArrayType,
BooleanType,
MapType,
NumericType,
StringType,
StructType,
)
from dbnd._core.settings.histogram import HistogramConfig
from dbnd._core.utils import seven
if typing.TYPE_CHECKING:
from typing import Tuple, Dict, List
from targets.value_meta import ValueMetaConf
from pyspark.sql.dataframe import DataFrame
logger = logging.getLogger(__name__)
class SparkHistograms(object):
"""
calculates histograms and stats on spark dataframe.
they're calculated together since we do it per column and we use cache.
"""
    def __init__(self, df, meta_conf):
        """Bind the dataframe and logging configuration; computed results
        are cached on the instance after the first calculation."""
        self.df = df  # type: DataFrame
        self.meta_conf = meta_conf  # type: ValueMetaConf
        self.config = HistogramConfig()  # type: HistogramConfig
        self.system_metrics = dict()  # timing metrics from _measure_time
        self.stats = dict()  # per-column stats, filled lazily
        self.histograms = dict()  # per-column histograms, filled lazily
        self._temp_parquet_path = None  # set when a parquet cache is written
        self._histogram_column_names = None  # columns to calc histograms on
        self._stats_column_names = None  # columns to calc stats on
    def get_histograms_and_stats(self):
        # type: () -> Tuple[Dict[str, Dict], Dict[str, Tuple]]
        """Compute (or return cached) per-column stats and histograms.

        Never raises: any failure is logged and ({}, {}) is returned, so
        metric collection cannot break the surrounding task.
        """
        if self.stats or self.histograms:
            # already computed once — results are cached on the instance
            return self.stats, self.histograms
        df_cached = False
        df = None
        try:
            df = self._filter_columns(self.df)
            if self.config.spark_parquet_cache_dir:
                df = self._cache_df_with_parquet_store(
                    df, spark_parquet_cache_dir=self.config.spark_parquet_cache_dir
                )
            if self.config.spark_cache_dataframe:
                df_cached = True
                df.cache()
            with self._measure_time("histograms_and_stats_calc_time"):
                self.histograms = self._calc_histograms_and_stats(df)
            return self.stats, self.histograms
        except Exception:
            logger.exception("Error occured during histograms calculation")
            return {}, {}
        finally:
            # best-effort cleanup of both caching strategies
            if self._temp_parquet_path and df:
                self._remove_parquet(df.sql_ctx.sparkSession)
            if df_cached:
                df.unpersist()
    def _cache_df_with_parquet_store(self, df, spark_parquet_cache_dir):
        """ save dataframe as column-based parquet file to allow fast column queries which histograms depend on """
        from dbnd_spark.spark_targets import SparkDataFrameValueType

        # content-based signature keeps the cache file name stable per dataframe
        signature = SparkDataFrameValueType().to_signature(df)
        file_name = "dbnd_spark_dataframe_{}.parquet".format(signature)
        path = os.path.join(spark_parquet_cache_dir, file_name)
        # remembered so get_histograms_and_stats() can clean it up in finally
        self._temp_parquet_path = path

        logger.info("Caching spark dataframe into '%s'.", path)
        df.write.parquet(path)

        logger.info("Reading spark dataframe from '%s'.", path)
        df = df.sql_ctx.sparkSession.read.parquet(path)
        return df
    def _remove_parquet(self, spark_session):
        """Shrink the temporary parquet cache to near-zero size.

        Spark has no generic file-delete API, so the cache is overwritten
        with an almost-empty dataframe instead of being removed.
        """
        # we are not able to delete generically files so we overwrite them with almost no data
        empty_df = spark_session.createDataFrame([("",)], [""])
        empty_df.write.parquet(self._temp_parquet_path, mode="overwrite")
    def _filter_columns(self, df):
        """Select only the requested columns and drop complex-typed ones.

        Side effect: remembers the histogram and stats column-name lists
        on self for later per-column decisions.
        """
        self._histogram_column_names = self._get_column_names_from_request(
            df, self.meta_conf.log_histograms
        )
        self._stats_column_names = self._get_column_names_from_request(
            df, self.meta_conf.log_stats
        )
        # union of both requests — stats and histograms share one pass
        column_names = list(
            set(self._histogram_column_names + self._stats_column_names)
        )
        df = df.select(column_names)
        df = self._filter_complex_columns(df)
        return df
def _get_column_names_from_request(self, df, data_request):
column_names = list(data_request.include_columns)
for column_def in df.schema:
if data_request.include_all_string and isinstance(
column_def.dataType, StringType
):
column_names.append(column_def.name)
elif data_request.include_all_boolean and isinstance(
column_def.dataType, BooleanType
):
column_names.append(column_def.name)
elif data_request.include_all_numeric and isinstance(
column_def.dataType, NumericType
):
column_names.append(column_def.name)
column_names = [
column
for column in column_names
if column not in data_request.exclude_columns
]
return column_names
def _filter_complex_columns(self, df):
simple_columns = []
for column_def in df.schema:
if isinstance(column_def.dataType, (ArrayType, MapType, StructType)):
logger.warning(
"Column %s was ignored in histogram calculation as it contains complex type (%s)",
column_def.name,
column_def.dataType,
)
continue
simple_columns.append(column_def.name)
return df.select(simple_columns)
def _is_count_in_summary(self, dataframe, column_name):
""" dataframe.summary() returns count only for numeric and string types, otherwise we need to calculate it our own """
column_field = [f for f in dataframe.schema.fields if f.name == column_name][0]
return isinstance(column_field.dataType, (NumericType, StringType))
    def _calc_histograms_and_stats(self, df):
        # type: (spark.DataFrame) -> Dict
        """Populate self.stats for every column and return histograms keyed
        by column name, computed per column-type family (boolean, string,
        numeric); each family's duration is recorded in system_metrics."""
        histograms = dict()
        df_count = df.count()
        # seed every column's stats with the total row count and its type
        for column_schema in df.schema:
            self.stats[column_schema.name] = dict(
                count=df_count, type=column_schema.dataType.jsonValue()
            )
        with self._measure_time("boolean_histograms_and_stats_calc_time"):
            boolean_histograms = self._calc_categorical_hist_and_stats_by_type(
                df, BooleanType
            )
        histograms.update(boolean_histograms)
        with self._measure_time("string_histograms_and_stats_calc_time"):
            str_histograms = self._calc_categorical_hist_and_stats_by_type(
                df, StringType
            )
        histograms.update(str_histograms)
        with self._measure_time("numeric_histograms_and_stats_calc_time"):
            numeric_histograms = self._calc_numeric_hist_and_stats(df)
        histograms.update(numeric_histograms)
        return histograms
def _get_columns_by_type(self, dataframe, column_type):
return [
dataframe.select(f.name)
for f in dataframe.schema
if isinstance(f.dataType, column_type)
]
    def _calc_numeric_hist_and_stats(self, df):
        """Compute stats and (where requested) histograms for every numeric
        column, processing each column as its own single-column dataframe.

        Histograms need the min/max produced by the stats step, so columns
        whose summary failed to yield them are skipped with a warning.
        """
        column_df_list = self._get_columns_by_type(df, NumericType)
        if not column_df_list:
            return dict()
        histograms = dict()
        for column_df in column_df_list:
            column_df_cached = False
            if self.config.spark_cache_dataframe_column:
                column_df_cached = True
                column_df.cache()
            try:
                column_name = column_df.schema.names[0]
                self._calc_numeric_column_stats(column_df, column_name)
                if column_name not in self._histogram_column_names:
                    # stats-only column: no histogram requested
                    continue
                if (
                    "min" not in self.stats[column_name]
                    or "max" not in self.stats[column_name]
                ):
                    logger.warning(
                        "Failed to calculate min/max for column '%s', skipping histogram calculation",
                        column_name,
                    )
                    continue
                column_histograms = self._calc_numeric_column_histogram(
                    column_df, column_name
                )
                histograms[column_name] = column_histograms
            finally:
                # uncache even when the column was skipped via continue
                if column_df_cached:
                    column_df.unpersist()
        return histograms
    def _calc_numeric_column_histogram(self, column_df, column_name):
        """Build a 20-bucket histogram for one numeric column, using the
        min/max already stored in self.stats by the stats step."""
        min_value = self.stats[column_name]["min"]
        max_value = self.stats[column_name]["max"]
        value_counts = self._get_column_numeric_buckets_df(
            column_df, min_value, max_value, 20,
        )
        # one collect() per column; the (bucket, count) result is tiny
        value_counts = value_counts.collect()
        column_histograms = self._convert_numeric_histogram_collect_to_tuple(
            value_counts, min_value, max_value
        )
        return column_histograms
    def _calc_numeric_column_stats(self, column_df, column_name):
        """Fill self.stats[column_name] from Spark's summary().

        Columns with stats requested get the full summary; all other
        numeric columns get only min/max (needed later for histogram
        bucketing).
        """
        if column_name in self._stats_column_names:
            column_summary = column_df.summary().collect()
        else:
            # min & max are required for histogram calculation
            column_summary = column_df.summary("min", "max").collect()
        stats = self.stats[column_name]
        for summary_row in column_summary:
            if summary_row[column_name] is None:
                # summary can yield null cells (e.g. an all-null column)
                continue
            if summary_row.summary == "count":
                stats["non-null"] = float(summary_row[column_name])
            else:
                stats[summary_row.summary] = float(summary_row[column_name])
        if "stddev" in stats:
            # normalize Spark's "stddev" key to the pandas-style "std"
            stats["std"] = stats.pop("stddev")
        if column_name not in self._stats_column_names:
            return
        # count in summary doesn't include nulls, while count() function does
        stats["null-count"] = stats["count"] - stats["non-null"]
        stats["distinct"] = column_df.distinct().count()
def _convert_numeric_histogram_collect_to_tuple(
self, value_counts, min_value, max_value
):
# type: (List[Row], float, float) -> Tuple
"""
value_counts is list of rows with each row representing a bucket.
each bucket has: bucket index and number of values.
we convert it to histogram represented as a tuple of 2 lists:
number of values in bucket and bucket boundaries.
"""
bucket_count = 20
counts = [0] * bucket_count
bucket_size = (max_value - min_value) / bucket_count
values = [min_value + i * bucket_size for i in range(bucket_count + 1)]
for row in value_counts:
bucket, count = row
if bucket is None:
continue
if bucket == bucket_count:
# handle edge of last bucket (values equal to max_value will be in bucket n+1 instead of n)
bucket = bucket - 1
counts[bucket] += count
return counts, values
    def _get_column_numeric_buckets_df(
        self, column_df, min_value, max_value, bucket_count
    ):
        """Return a (bucket, count) dataframe assigning each value to one
        of bucket_count equal-width buckets over [min_value, max_value].

        NOTE(review): when min_value == max_value the bucket_size is 0,
        so the Spark division presumably yields null buckets for a
        constant column -- confirm this case is acceptable upstream.
        """
        min_value, max_value = float(min_value), float(max_value)
        first_bucket = min_value
        bucket_size = (max_value - min_value) / bucket_count
        df_with_bucket = column_df.withColumn(
            "bucket", floor((column_df[0] - first_bucket) / bucket_size)
        )
        counts_df = df_with_bucket.groupby("bucket").count()
        return counts_df
def _calc_categorical_hist_and_stats_by_type(self, dataframe, column_type):
column_df_list = self._get_columns_by_type(dataframe, column_type)
if not column_df_list:
return dict()
value_counts = self._calc_spark_categorical_hist_and_stats(column_df_list)
histograms = self._convert_categorical_histogram_collect_to_dict(value_counts)
if column_type == StringType:
self._add_others(histograms)
return histograms
    def _calc_spark_categorical_hist_and_stats(self, column_df_list):
        """
        all columns in column_df_list should have the same type (e.g. all should be booleans or all strings).
        it might not be relevant anymore since we do collect() per column.
        keeping it that way for now so we could change it back to collect() once for all columns.
        """
        # histograms keep only the 49 most frequent values per column
        max_buckets = 50
        value_counts = []
        for column_df in column_df_list:
            if self.config.spark_cache_dataframe_column:
                column_df_cached = True
                column_df.cache()
            else:
                column_df_cached = False
            try:
                column_name = column_df.schema.names[0]
                self._calc_categorical_column_stats(column_df, column_name)
                if column_name not in self._histogram_column_names:
                    # stats-only column: no histogram requested
                    continue
                # rows of (value, count, column_name), most frequent first
                column_value_counts = (
                    column_df.groupby(column_name)
                    .count()
                    .orderBy(desc("count"))
                    .withColumn("column_name", lit(column_name))
                    .limit(max_buckets - 1)
                )
                column_value_counts = column_value_counts.collect()
                value_counts.extend(column_value_counts)
            finally:
                # uncache even when the column was skipped via continue
                if column_df_cached:
                    column_df.unpersist()
        return value_counts
    def _calc_categorical_column_stats(self, column_df, column_name):
        """Add null/distinct counts for one categorical column.

        Boolean columns get no extra stats.
        NOTE(review): the guard checks _histogram_column_names while the
        numeric variant checks _stats_column_names -- confirm this
        asymmetry is intentional.
        """
        if column_name not in self._histogram_column_names:
            return
        if isinstance(column_df.schema[0].dataType, BooleanType):
            return
        # count in summary doesn't include nulls, while count() function does
        column_summary = column_df.summary("count").collect()
        stats = self.stats[column_name]
        stats["non-null"] = int(column_summary[0][column_name])
        stats["null-count"] = stats["count"] - stats["non-null"]
        stats["distinct"] = column_df.distinct().count()
def _add_others(self, histograms):
""" sum all least significant values (who left out of histogram) to one bucket """
for column_name, histogram in histograms.items():
histogram_sum_count = sum(histogram[0])
others_count = self.stats[column_name]["count"] - histogram_sum_count
if others_count > 0:
histogram[0].append(others_count)
histogram[1].append("_others")
def _convert_categorical_histogram_collect_to_dict(self, value_counts):
# type: (List[Row]) -> Dict
histogram_dict = defaultdict(lambda: ([], []))
for row in value_counts:
value, count, column_name = row
histogram_dict[column_name][0].append(count)
histogram_dict[column_name][1].append(value)
return histogram_dict
    @seven.contextlib.contextmanager
    def _measure_time(self, metric_key):
        """Context manager recording the wall-clock duration of its body
        into self.system_metrics under metric_key, even on exceptions."""
        start_time = time.time()
        try:
            yield
        finally:
            end_time = time.time()
            self.system_metrics[metric_key] = end_time - start_time
| 9,727 | 0 | 431 |
89e251a803d3710ca5b759f9fb1825b2f3546560 | 2,133 | py | Python | setup.py | cinserra/S3 | eefc12265bd7824204dc5cbbd648e3ff8b291273 | [
"MIT"
] | 3 | 2017-07-24T17:35:20.000Z | 2019-11-05T15:40:45.000Z | setup.py | cinserra/S3 | eefc12265bd7824204dc5cbbd648e3ff8b291273 | [
"MIT"
] | 1 | 2018-10-24T10:46:26.000Z | 2018-10-24T10:46:26.000Z | setup.py | cinserra/S3 | eefc12265bd7824204dc5cbbd648e3ff8b291273 | [
"MIT"
] | 1 | 2021-04-12T13:03:05.000Z | 2021-04-12T13:03:05.000Z | from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
from os import sys, path
import os,shutil,re
from glob import glob

# Install package data files into the same location as pure-python modules
# so the bundled metadata tables ship next to the code.
for scheme in INSTALL_SCHEMES.values():
    scheme['data'] = scheme['purelib']

# Fail early with a clear message when a required runtime dependency is
# missing.  Catch ImportError specifically (find_module raises it) rather
# than swallowing everything with a bare except.
from imp import find_module
try: find_module('numpy')
except ImportError: sys.exit('### Error: python module numpy not found')
try: find_module('pyfits')
except ImportError: sys.exit('### Error: python module pyfits not found')
try: find_module('pyraf')
except ImportError: sys.exit('### Error: python module pyraf not found')
try: find_module('matplotlib')
except ImportError: sys.exit('### Error: python module matplotlib not found')
try: find_module('scipy')
# bug fix: this message previously said "matplotlib not found"
except ImportError: sys.exit('### Error: python module scipy not found')

setup(
    name='s3',
    version='1.1.0',
    author='C.Inserra',
    author_email='c.inserra@qub.ac.uk',
    classifiers=[
        # How mature is this project?
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Stable',
        'Intended Audience :: General users',
        'Topic :: Astronomy :: photometric corrections',
        'Programming Language :: Python :: 2.7',
    ],
    scripts=['bin/STHREE','bin/SNAKE','bin/SNAKELOOP','bin/SMS','bin/SNAP'],
    url='https://github.com/cinserra',
    license=open('LICENSE.rst').read(),
    description='S3 is a package for K and P-correction and synthetic mags',
    long_description=open('README.rst').read(),
    keywords='K-correction P-correction magnitudes',
    install_requires = ['numpy','pyfits','pyraf','matplotlib','scipy'],
    packages=['s3'],
    package_dir={'':'src'},
    package_data = {'s3' : ["metadata/*.txt","metadata/NTT/*.txt","metadata/NOT/*.txt",
                            "metadata/PS1/*.txt","metadata/ASIAGO/*.txt","metadata/LCOGT/*.txt",
                            "metadata/SKYMAPPER/*.txt","metadata/LT/*.txt","metadata/LSQ/*.txt",
                            "metadata/WHT/*.txt","metadata/OGLE/*.txt","metadata/VLT/*.txt"]}
)
| 34.403226 | 105 | 0.612283 | from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
from os import sys, path
import os,shutil,re
from glob import glob

# Install package data files into the same location as pure-python modules
# so the bundled metadata tables ship next to the code.
for scheme in INSTALL_SCHEMES.values():
    scheme['data'] = scheme['purelib']

# Fail early with a clear message when a required runtime dependency is
# missing.  Catch ImportError specifically (find_module raises it) rather
# than swallowing everything with a bare except.
from imp import find_module
try: find_module('numpy')
except ImportError: sys.exit('### Error: python module numpy not found')
try: find_module('pyfits')
except ImportError: sys.exit('### Error: python module pyfits not found')
try: find_module('pyraf')
except ImportError: sys.exit('### Error: python module pyraf not found')
try: find_module('matplotlib')
except ImportError: sys.exit('### Error: python module matplotlib not found')
try: find_module('scipy')
# bug fix: this message previously said "matplotlib not found"
except ImportError: sys.exit('### Error: python module scipy not found')

setup(
    name='s3',
    version='1.1.0',
    author='C.Inserra',
    author_email='c.inserra@qub.ac.uk',
    classifiers=[
        # How mature is this project?
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Stable',
        'Intended Audience :: General users',
        'Topic :: Astronomy :: photometric corrections',
        'Programming Language :: Python :: 2.7',
    ],
    scripts=['bin/STHREE','bin/SNAKE','bin/SNAKELOOP','bin/SMS','bin/SNAP'],
    url='https://github.com/cinserra',
    license=open('LICENSE.rst').read(),
    description='S3 is a package for K and P-correction and synthetic mags',
    long_description=open('README.rst').read(),
    keywords='K-correction P-correction magnitudes',
    install_requires = ['numpy','pyfits','pyraf','matplotlib','scipy'],
    packages=['s3'],
    package_dir={'':'src'},
    package_data = {'s3' : ["metadata/*.txt","metadata/NTT/*.txt","metadata/NOT/*.txt",
                            "metadata/PS1/*.txt","metadata/ASIAGO/*.txt","metadata/LCOGT/*.txt",
                            "metadata/SKYMAPPER/*.txt","metadata/LT/*.txt","metadata/LSQ/*.txt",
                            "metadata/WHT/*.txt","metadata/OGLE/*.txt","metadata/VLT/*.txt"]}
)
| 0 | 0 | 0 |
0a323a22d051482bd90ad5a9925048faf4178e69 | 6,075 | py | Python | bullet/panda_gripper.py | tjdalsckd/OMG-Planner | 6e40ccddb55e751de6127ea869e0265a76c1c93a | [
"MIT"
] | 52 | 2020-10-07T01:53:31.000Z | 2022-03-04T20:43:37.000Z | bullet/panda_gripper.py | tjdalsckd/OMG-Planner | 6e40ccddb55e751de6127ea869e0265a76c1c93a | [
"MIT"
] | 14 | 2020-11-08T09:28:17.000Z | 2021-11-12T01:17:27.000Z | bullet/panda_gripper.py | tjdalsckd/OMG-Planner | 6e40ccddb55e751de6127ea869e0265a76c1c93a | [
"MIT"
] | 12 | 2020-10-07T02:10:04.000Z | 2022-03-10T15:38:04.000Z | # --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import pybullet as p
import os
import IPython
# adapted from https://github.com/bryandlee/franka_pybullet/tree/ac86319a0b2f6c863ba3c7ee3d52f4f51b2be3bd
if __name__ == "__main__":
    # manual smoke test: load the gripper into a (real-time) simulation
    robot = Panda(realtime=1)
| 30.375 | 105 | 0.533992 | # --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os

import IPython
import numpy as np
import pybullet as p
# adapted from https://github.com/bryandlee/franka_pybullet/tree/ac86319a0b2f6c863ba3c7ee3d52f4f51b2be3bd
class Panda:
    """PyBullet wrapper for the Franka Panda gripper URDF.

    Loads the model into the already-connected PyBullet simulation, gears
    the two finger joints together so they move symmetrically, and exposes
    position/torque control plus inverse kinematics/dynamics helpers.
    """
    def __init__(
        self, stepsize=1e-3, realtime=0, init_joints=None, base_shift=[0, 0, 0]
    ):
        """Load the gripper and move it to init_joints (or a default pose).

        stepsize -- simulation tick length in seconds (used by step()).
        realtime -- forwarded to p.setRealTimeSimulation.
        init_joints -- optional joint configuration (9 values; the fixed
            joint at index 7 is inserted automatically by reset()).
        base_shift -- offset subtracted from the default base position.
            NOTE(review): mutable default argument; safe here only because
            it is read, never mutated.
        """
        self.t = 0.0
        self.stepsize = stepsize
        self.realtime = realtime
        self.control_mode = "torque"
        # per-joint PD gains used by position control (10 joints)
        self.position_control_gain_p = [
            0.01,
            0.01,
            0.01,
            0.01,
            0.01,
            0.01,
            0.01,
            0.01,
            0.01,
            0.01,
        ]
        self.position_control_gain_d = [
            1.0,
            1.0,
            1.0,
            1.0,
            1.0,
            1.0,
            1.0,
            1.0,
            1.0,
            1.0,
        ]
        # uniform torque limit applied to every joint
        f_max = 250
        self.max_torque = [
            f_max,
            f_max,
            f_max,
            f_max,
            f_max,
            f_max,
            f_max,
            f_max,
            f_max,
            f_max,
        ]
        # connect pybullet
        p.setRealTimeSimulation(self.realtime)
        # load models
        current_dir = os.path.dirname(os.path.abspath(__file__))
        p.setAdditionalSearchPath(current_dir + "/models")
        # NOTE(review): 'flags' is computed but not passed to loadURDF below
        flags = p.URDF_ENABLE_CACHED_GRAPHICS_SHAPES
        self.robot = p.loadURDF(
            "panda/panda_gripper.urdf",
            useFixedBase=True,
            flags=p.URDF_USE_SELF_COLLISION,
        )
        self._base_position = [
            -0.05 - base_shift[0],
            0.0 - base_shift[1],
            -0.65 - base_shift[2],
        ]
        self.pandaUid = self.robot
        # robot parameters
        self.dof = p.getNumJoints(self.robot)
        # gear the two finger joints (8 and 9) so they mirror each other
        c = p.createConstraint(
            self.robot,
            8,
            self.robot,
            9,
            jointType=p.JOINT_GEAR,
            jointAxis=[1, 0, 0],
            parentFramePosition=[0, 0, 0],
            childFramePosition=[0, 0, 0],
        )
        p.changeConstraint(c, gearRatio=-1, erp=0.1, maxForce=50)
        self.joints = []
        self.q_min = []
        self.q_max = []
        self.target_pos = []
        self.target_torque = []
        self.pandaEndEffectorIndex = 7
        # collect joint limits and seed every target at mid-range
        for j in range(self.dof):
            p.changeDynamics(self.robot, j, linearDamping=0, angularDamping=0)
            joint_info = p.getJointInfo(self.robot, j)
            self.joints.append(j)
            self.q_min.append(joint_info[8])
            self.q_max.append(joint_info[9])
            self.target_pos.append((self.q_min[j] + self.q_max[j]) / 2.0)
            self.target_torque.append(0.0)
        self.reset(init_joints)
    def reset(self, joints=None):
        """Re-place the base and joints; the default pose opens the
        gripper and readies torque control."""
        self.t = 0.0
        self.control_mode = "torque"
        p.resetBasePositionAndOrientation(
            self.pandaUid, self._base_position, [0.000000, 0.000000, 0.000000, 1.000000]
        )
        if joints is None:
            self.target_pos = [0.0, -1.285, 0, -2.356, 0.0, 1.571, 0.785, 0, 0.04, 0.04]
            for j in range(self.dof):
                self.target_torque[j] = 0.0
                p.resetJointState(self.robot, j, targetValue=self.target_pos[j])
        else:
            joints = list(joints)
            # insert the fixed joint (index 7) missing from 9-dof inputs
            joints.insert(7, 0)
            for j in range(self.dof):
                self.target_pos[j] = joints[j]
                self.target_torque[j] = 0.0
                p.resetJointState(self.robot, j, targetValue=self.target_pos[j])
        self.resetController()
        self.setTargetPositions(self.target_pos)
    def step(self):
        """Advance the simulation by one stepsize tick."""
        self.t += self.stepsize
        p.stepSimulation()
    def resetController(self):
        """Zero out the default velocity motors so torque control can act."""
        p.setJointMotorControlArray(
            bodyUniqueId=self.robot,
            jointIndices=self.joints,
            controlMode=p.VELOCITY_CONTROL,
            forces=[0.0 for i in range(self.dof)],
        )
    def setControlMode(self, mode):
        """Switch between "position" and "torque" control modes."""
        if mode == "position":
            self.control_mode = "position"
        elif mode == "torque":
            if self.control_mode != "torque":
                self.resetController()
            self.control_mode = "torque"
        else:
            raise Exception("wrong control mode")
    def append(self, target_pos):
        """Pad a 9-element target with the fixed joint (0 at index 7)."""
        if len(target_pos) == 9:
            if type(target_pos) == list:
                target_pos.insert(7, 0)
                return target_pos
            else:
                # NOTE(review): requires numpy imported as np at module
                # scope for non-list (array) inputs
                target_pos = np.insert(target_pos, 7, 0)
                return target_pos
        return target_pos
    def setTargetPositions(self, target_pos):
        """Drive all joints toward target_pos (9 or 10 elements) with
        position control, using the configured gains and torque limits."""
        self.target_pos = self.append(target_pos)
        p.setJointMotorControlArray(
            bodyUniqueId=self.robot,
            jointIndices=self.joints,
            controlMode=p.POSITION_CONTROL,
            targetPositions=self.target_pos,
            forces=self.max_torque,
            positionGains=self.position_control_gain_p,
            velocityGains=self.position_control_gain_d,
        )
    def setTargetTorques(self, target_torque):
        """Apply raw joint torques (call resetController() first so the
        default velocity motors do not fight the torques)."""
        self.target_torque = target_torque
        p.setJointMotorControlArray(
            bodyUniqueId=self.robot,
            jointIndices=self.joints,
            controlMode=p.TORQUE_CONTROL,
            forces=self.target_torque,
        )
    def getJointStates(self):
        """Return (positions, velocities) lists for all joints."""
        joint_states = p.getJointStates(self.robot, self.joints)
        joint_pos = [x[0] for x in joint_states]
        joint_vel = [x[1] for x in joint_states]
        return joint_pos, joint_vel
    def solveInverseDynamics(self, pos, vel, acc):
        """Torques realizing the given joint accelerations at (pos, vel)."""
        return list(p.calculateInverseDynamics(self.robot, pos, vel, acc))
    def solveInverseKinematics(self, pos, ori):
        """Joint configuration placing link 7 at world pose (pos, ori)."""
        return list(p.calculateInverseKinematics(self.robot, 7, pos, ori))
if __name__ == "__main__":
    # manual smoke test: load the gripper into a (real-time) simulation
    robot = Panda(realtime=1)
| 5,376 | -9 | 318 |
e754165a29918520b0eae8907e662670afcba8d2 | 4,536 | py | Python | showtime.py | bduge/Showtime | fea78128d84d9631a46bf2ac00592edee5b57810 | [
"MIT"
] | null | null | null | showtime.py | bduge/Showtime | fea78128d84d9631a46bf2ac00592edee5b57810 | [
"MIT"
] | null | null | null | showtime.py | bduge/Showtime | fea78128d84d9631a46bf2ac00592edee5b57810 | [
"MIT"
] | null | null | null | from pandas import DataFrame
import urllib.error
import urllib.request
from bs4 import BeautifulSoup
# initializes variables used for beautifulsoup
valid = False
postcode = input("Please enter your postal code so we may show you relevant showtime information (no spaces): ")
imdb = "https://www.imdb.com/showtimes/location/CA/" + postcode + "?ref_=sh_lc"
# keep prompting until the IMDb showtimes page loads for the given code
while not valid:
    try:
        page = urllib.request.urlopen(imdb)
        valid = True
    except urllib.error.HTTPError:
        postcode = input("Please enter a valid postal code: ")
        imdb = "https://www.imdb.com/showtimes/location/CA/" + postcode + "?ref_=sh_lc"
soup = BeautifulSoup(page, 'html.parser')
# initializes arrays used for storing scrapped information
movies = []
review = []
runtime = []
release = []
rating = []
genre = []
showtimes = []
# NOTE(review): get_year, get_selection and get_time are referenced below
# but are not defined in this copy of the script -- confirm the helper
# definitions are present in the deployed version.
movies_list = soup.find_all('div', attrs={'class': 'lister-item mode-grid'})
for movie in movies_list: # uses a loop to extract the information of a movie for all movies playing
    a = movie.find('span', attrs={'name': 'alpha'}).attrs['data-value']
    b = movie.find('span', attrs={'name': 'user_rating'}).attrs['data-value']
    c = movie.find('span', attrs={'name': 'runtime'}).attrs['data-value']
    d = movie.find('span', attrs={'name':'release_date'}).attrs['data-value']
    try:
        e = movie.find('span', attrs={'class': 'certificate'}).string
    except AttributeError:
        # movies without an age certificate have no such span
        e = ""
    f = movie.find('span', attrs={'class': 'genre'}).string.strip()
    g = movie.find('div', attrs={'class': 'title'}).a.get('href')
    year = get_year(d)
    movies.append(a)
    review.append(b)
    runtime.append(c)
    release.append(year)
    rating.append(e)
    genre.append(f)
    showtimes.append("https://www.imdb.com"+g) # formats the string to produce a url based on the link within html file
# provides user a list of movies playing and a method of inputting their selection
print("Here are the movies playing near you: \n")
for x in range(0, len(movies)):
    print(str(x+1) + ". {} {}/10 Runtime: {} \n Release Year: {} Genre: {} {}\n"
          .format(movies[x], review[x], runtime[x], release[x], genre[x], rating[x]))
print("Please select a movie: ")
selection = get_selection()
while not(isinstance(selection, int)) or selection < 1 or selection > len(movies):
    print("Your selection was not valid, please try again: ")
    selection = get_selection()
# Creates new soup to access more detailed information about movie after user has made their selection
detailedMovie = showtimes[selection-1]
page2 = urllib.request.urlopen(detailedMovie[:47] + "CA/" + postcode)
soup2 = BeautifulSoup(page2, 'html.parser')
theaters_list = soup2.findAll('div', attrs={'class': 'list detail'})
times = []
print("Showtimes for " + movies[selection-1] + " : \n")
# Finds all the theaters in local area of user
for theater in theaters_list:
    odd = theater.findAll('div', attrs={'class', 'list_item odd'})
    even = theater.findAll('div', attrs={'class', 'list_item even'})
    for x in odd:
        print(x.find('span', attrs={'itemprop': 'name'})
              .find(text=True, recursive=False))
        print(x.find('span', attrs={'itemprop': 'streetAddress'}).string)
        times = x.findAll('meta', attrs={'itemprop': 'startDate'})
        for y in times:
            print(get_time(y.attrs['content']) + ' ', end="", flush=True)
        print('\n')
    for x in even:
        print(x.find('span', attrs={'itemprop': 'name'})
              .find(text=True, recursive=False))
        print(x.find('span', attrs={'itemprop': 'streetAddress'}).string)
        times = x.findAll('meta', attrs={'itemprop': 'startDate'})
        for y in times:
            print(get_time(y.attrs['content']) + ' ', end="", flush=True)
        print('\n')
df = DataFrame({'Movies Playing': movies, 'Rating': review,
                'Runtime (min)': runtime, 'Release Year': release})
try:
    df.to_excel('showtimes.xlsx', sheet_name='sheet1', index=False)
except PermissionError:
    print("There was an error creating the spreadsheet, please make sure"
          " the file is not currently open.")
| 32.633094 | 120 | 0.6444 | from pandas import DataFrame
import urllib.error
import urllib.request
from bs4 import BeautifulSoup
# initializes variables used for beautifulsoup
valid = False
postcode = input("Please enter your postal code so we may show you relevant showtime information (no spaces): ")
imdb = "https://www.imdb.com/showtimes/location/CA/" + postcode + "?ref_=sh_lc"
# keep prompting until the IMDb showtimes page loads for the given code
while not valid:
    try:
        page = urllib.request.urlopen(imdb)
        valid = True
    except urllib.error.HTTPError:
        postcode = input("Please enter a valid postal code: ")
        imdb = "https://www.imdb.com/showtimes/location/CA/" + postcode + "?ref_=sh_lc"
soup = BeautifulSoup(page, 'html.parser')
# initializes arrays used for storing scrapped information
movies = []
review = []
runtime = []
release = []
rating = []
genre = []
showtimes = []
def get_year(date_string):
    """Return the leading four characters (the year) of a date string."""
    return date_string[0:4]
def get_time(timestamp):
    """Return the trailing five characters of an ISO timestamp -- the
    "HH:MM" start time of a showing."""
    return timestamp[-5:]
def get_selection():
    """Read the user's menu choice from stdin.

    Returns the choice as an int; on non-integer input prints a blank
    line and implicitly returns None, which the caller treats as an
    invalid selection and re-prompts.
    """
    raw = input()
    try:
        return int(raw)
    except ValueError:
        print("")
movies_list = soup.find_all('div', attrs={'class': 'lister-item mode-grid'})
for movie in movies_list: # uses a loop to extract the information of a movie for all movies playing
    a = movie.find('span', attrs={'name': 'alpha'}).attrs['data-value']
    b = movie.find('span', attrs={'name': 'user_rating'}).attrs['data-value']
    c = movie.find('span', attrs={'name': 'runtime'}).attrs['data-value']
    d = movie.find('span', attrs={'name':'release_date'}).attrs['data-value']
    try:
        e = movie.find('span', attrs={'class': 'certificate'}).string
    except AttributeError:
        # movies without an age certificate have no such span
        e = ""
    f = movie.find('span', attrs={'class': 'genre'}).string.strip()
    g = movie.find('div', attrs={'class': 'title'}).a.get('href')
    year = get_year(d)
    movies.append(a)
    review.append(b)
    runtime.append(c)
    release.append(year)
    rating.append(e)
    genre.append(f)
    showtimes.append("https://www.imdb.com"+g) # formats the string to produce a url based on the link within html file
# provides user a list of movies playing and a method of inputting their selection
print("Here are the movies playing near you: \n")
for x in range(0, len(movies)):
    print(str(x+1) + ". {} {}/10 Runtime: {} \n Release Year: {} Genre: {} {}\n"
          .format(movies[x], review[x], runtime[x], release[x], genre[x], rating[x]))
print("Please select a movie: ")
selection = get_selection()
while not(isinstance(selection, int)) or selection < 1 or selection > len(movies):
    print("Your selection was not valid, please try again: ")
    selection = get_selection()
# Creates new soup to access more detailed information about movie after user has made their selection
detailedMovie = showtimes[selection-1]
page2 = urllib.request.urlopen(detailedMovie[:47] + "CA/" + postcode)
soup2 = BeautifulSoup(page2, 'html.parser')
theaters_list = soup2.findAll('div', attrs={'class': 'list detail'})
times = []
print("Showtimes for " + movies[selection-1] + " : \n")
# Finds all the theaters in local area of user
for theater in theaters_list:
    odd = theater.findAll('div', attrs={'class', 'list_item odd'})
    even = theater.findAll('div', attrs={'class', 'list_item even'})
    for x in odd:
        print(x.find('span', attrs={'itemprop': 'name'})
              .find(text=True, recursive=False))
        print(x.find('span', attrs={'itemprop': 'streetAddress'}).string)
        times = x.findAll('meta', attrs={'itemprop': 'startDate'})
        for y in times:
            print(get_time(y.attrs['content']) + ' ', end="", flush=True)
        print('\n')
    for x in even:
        print(x.find('span', attrs={'itemprop': 'name'})
              .find(text=True, recursive=False))
        print(x.find('span', attrs={'itemprop': 'streetAddress'}).string)
        times = x.findAll('meta', attrs={'itemprop': 'startDate'})
        for y in times:
            print(get_time(y.attrs['content']) + ' ', end="", flush=True)
        print('\n')
df = DataFrame({'Movies Playing': movies, 'Rating': review,
                'Runtime (min)': runtime, 'Release Year': release})
try:
    df.to_excel('showtimes.xlsx', sheet_name='sheet1', index=False)
except PermissionError:
    print("There was an error creating the spreadsheet, please make sure"
          " the file is not currently open.")
| 303 | 0 | 69 |
9389b24f98a0417d2bf736ba7ef8fac80af73aff | 1,001 | py | Python | test/test_crom.py | smlt/tars | 540d37cfa566ef6c656b234b8c3ce7430f2ebb59 | [
"MIT"
] | null | null | null | test/test_crom.py | smlt/tars | 540d37cfa566ef6c656b234b8c3ce7430f2ebb59 | [
"MIT"
] | null | null | null | test/test_crom.py | smlt/tars | 540d37cfa566ef6c656b234b8c3ce7430f2ebb59 | [
"MIT"
] | null | null | null | from tars.helpers.api import CromAPI
def test_crom_api():
    """Simple test cases for the Crom API wrapper."""
    # NOTE(review): this test hits the live Crom API over the network and
    # asserts on current wiki data, so expect occasional drift.
    api = CromAPI("en")
    # Honestly, the tag list seems like the only thing that I'm reasonably guaranteeing
    # won't be screwed with in the future.
    scp_5000 = api.get_one_page_meta("scp-5000")
    assert 'scp' in scp_5000['tags']
    oldest_pages_iterator = api.get_all_pages()
    oldest_pages_1 = next(oldest_pages_iterator)
    # pages come back in 100-item batches, oldest first
    assert len(oldest_pages_1) == 100
    assert oldest_pages_1[0]['title'] == 'Manage Site'
    oldest_pages_2 = next(oldest_pages_iterator)
    assert len(oldest_pages_2) == 100
    assert oldest_pages_2[0]['title'] == 'SCP-145'
    oldest_tales = next(api.get_all_pages(tags=['tale']))
    assert oldest_tales[0]['title'] == 'Archived Incident 076-2_682'
    nav_pages = next(api.get_all_pages(categories=['nav']))
    assert len(nav_pages) == 2
    assert nav_pages[0]['title'] == 'Top Bar Menu'
    assert nav_pages[1]['title'] == 'Side'
| 33.366667 | 87 | 0.686314 | from tars.helpers.api import CromAPI
def test_crom_api():
    """Simple test cases for the Crom API wrapper."""
    # NOTE(review): this test hits the live Crom API over the network and
    # asserts on current wiki data, so expect occasional drift.
    api = CromAPI("en")
    # Honestly, the tag list seems like the only thing that I'm reasonably guaranteeing
    # won't be screwed with in the future.
    scp_5000 = api.get_one_page_meta("scp-5000")
    assert 'scp' in scp_5000['tags']
    oldest_pages_iterator = api.get_all_pages()
    oldest_pages_1 = next(oldest_pages_iterator)
    # pages come back in 100-item batches, oldest first
    assert len(oldest_pages_1) == 100
    assert oldest_pages_1[0]['title'] == 'Manage Site'
    oldest_pages_2 = next(oldest_pages_iterator)
    assert len(oldest_pages_2) == 100
    assert oldest_pages_2[0]['title'] == 'SCP-145'
    oldest_tales = next(api.get_all_pages(tags=['tale']))
    assert oldest_tales[0]['title'] == 'Archived Incident 076-2_682'
    nav_pages = next(api.get_all_pages(categories=['nav']))
    assert len(nav_pages) == 2
    assert nav_pages[0]['title'] == 'Top Bar Menu'
    assert nav_pages[1]['title'] == 'Side'
| 0 | 0 | 0 |
ec4c7e3d3e632ae0322eb3ffed63378d84e975cd | 22 | py | Python | Util/crop.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | Util/crop.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | Util/crop.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | # 沿每个维度按crop_width裁剪数组 | 22 | 22 | 0.909091 | # 沿每个维度按crop_width裁剪数组 | 0 | 0 | 0 |
4479ebdd6f429bb24bd17c7e4ebb363ba32dfdc0 | 7,406 | py | Python | depp/utils.py | yueyujiang/DEPP | d3ee8a446523efe26cb79afd6567a423f70299a9 | [
"MIT"
] | 4 | 2021-01-30T20:57:50.000Z | 2021-06-30T07:12:23.000Z | depp/utils.py | yueyujiang/DEPP | d3ee8a446523efe26cb79afd6567a423f70299a9 | [
"MIT"
] | null | null | null | depp/utils.py | yueyujiang/DEPP | d3ee8a446523efe26cb79afd6567a423f70299a9 | [
"MIT"
] | 1 | 2021-05-22T15:41:26.000Z | 2021-05-22T15:41:26.000Z | import torch
import os
import pandas as pd
import math
import numpy as np
import dendropy
from Bio import SeqIO
| 41.144444 | 104 | 0.644748 | import torch
import os
import pandas as pd
import math
import numpy as np
import dendropy
from Bio import SeqIO
def get_seq_length(args):
    """Read the backbone FASTA and tree and set args.sequence_length and
    (when it is -1, i.e. unset) args.embedding_size in place.

    embedding_size defaults to the largest power of two not exceeding
    10 * sqrt(number of backbone tree leaves).
    """
    backbone_seq_file = args.backbone_seq_file
    backbone_tree_file = args.backbone_tree_file
    seq = SeqIO.to_dict(SeqIO.parse(backbone_seq_file, "fasta"))
    # assumes all backbone sequences are aligned to the same length --
    # only the first record is inspected
    args.sequence_length = len(list(seq.values())[0])
    tree = dendropy.Tree.get(path=backbone_tree_file, schema='newick')
    num_nodes = len(tree.leaf_nodes())
    if args.embedding_size == -1:
        args.embedding_size = 2 ** math.floor(math.log2(10 * num_nodes ** (1 / 2)))
def distance_portion(nodes1, nodes2, mode):
    """Pairwise distances between every row of nodes1 and every row of nodes2.

    Args:
        nodes1: tensor of shape (n1, d), or (d,) promoted to (1, d).
        nodes2: tensor of shape (n2, d), or (d,) promoted to (1, d).
        mode: one of
            'ms'     -- squared euclidean distance,
            'L2'     -- euclidean distance (1e-6 added inside the sqrt
                        for numerical stability),
            'L1'     -- manhattan distance,
            'cosine' -- 1 - cosine similarity,
            'tan'    -- (1 - cos^2) / (cos + 1e-9).

    Returns:
        Tensor of shape (n1, n2).

    Raises:
        ValueError: for an unknown mode (previously the function fell
            through and silently returned None).
    """
    if len(nodes1.shape) == 1:
        nodes1 = nodes1.unsqueeze(0)
    if len(nodes2.shape) == 1:
        nodes2 = nodes2.unsqueeze(0)
    n1 = len(nodes1)
    n2 = len(nodes2)
    # reshape so the pair (i, j) broadcasts to shape (n1, n2, d)
    nodes1 = nodes1.view(n1, 1, -1)
    nodes2 = nodes2.view(1, n2, -1)
    if mode == 'ms':
        return torch.sum((nodes1 - nodes2) ** 2, dim=-1)
    elif mode == 'L2':
        return (torch.sum((nodes1 - nodes2) ** 2, dim=-1) + 1e-6).sqrt()
    elif mode == 'L1':
        return torch.sum(abs(nodes1 - nodes2), dim=-1)
    elif mode == 'cosine':
        return 1 - torch.nn.functional.cosine_similarity(nodes1, nodes2, dim=-1)
    elif mode == 'tan':
        cosine = torch.nn.functional.cosine_similarity(nodes1, nodes2, dim=-1)
        return (1 - cosine ** 2) / (cosine + 1e-9)
    raise ValueError("unknown distance mode: {}".format(mode))
def distance(nodes1, nodes2, mode):
    # node1: query
    # node2: backbone
    """Pairwise distances, computed over slices of 1000 query rows to
    bound peak memory, then concatenated along the query dimension."""
    chunk = 1000
    pieces = [
        distance_portion(nodes1[start: start + chunk], nodes2, mode)
        for start in range(0, len(nodes1), chunk)
    ]
    return torch.cat(pieces, dim=0)
def mse_loss(model_dist, true_dist, weighted_method):
    """Weighted mean-squared error between model and true distances.

    Supported weighting schemes:
      'ols' -- unweighted;
      'fm'  -- 1/(d + 1e-4)^2 weights;
      'be'  -- 1/(d + 1e-4) weights.
    Each has a 'square_root_*' variant that first replaces the true
    distances with their square roots; 'square_root_sqrt' and
    'square_root_four' additionally weight by 1/(sqrt(d) + 1e-4) and
    1/(d + 1e-4)^4 on the sqrt-transformed distances.
    An unrecognized method returns None (matching the original code).
    """
    assert model_dist.shape == true_dist.shape
    eps = 1e-4
    if weighted_method.startswith('square_root'):
        # the square_root_* family fits sqrt-transformed distances
        true_dist = torch.sqrt(true_dist)
    squared_error = (model_dist - true_dist) ** 2
    if weighted_method in ('ols', 'square_root_ols'):
        return squared_error.mean()
    if weighted_method in ('fm', 'square_root_fm'):
        weight = 1 / (true_dist + eps) ** 2
        return (squared_error * weight).mean()
    if weighted_method in ('be', 'square_root_be'):
        weight = 1 / (true_dist + eps)
        return (squared_error * weight).mean()
    if weighted_method == 'square_root_sqrt':
        weight = 1 / (torch.sqrt(true_dist) + eps)
        return (squared_error * weight).mean()
    if weighted_method == 'square_root_four':
        weight = 1 / (true_dist + eps) ** 4
        return (squared_error * weight).mean()
def process_seq(self_seq, args, isbackbone):
    """Encode aligned sequences into one-hot (4, L) tensors.

    self_seq: mapping of sequence name -> aligned sequence (an iterable of
        characters over {A, C, G, T, -}); all entries must share length L.
    args: namespace providing replicate_seq, query_dist and gap_encode.
    isbackbone: True when encoding the backbone set.

    When args.replicate_seq is set (and this is the backbone, or query
    self-distances are requested), sequences sharing the same name prefix
    before '_' are pooled into one column-normalized profile.

    Returns (names, tensor) where tensor stacks one (4, L) encoding per name.
    Removed from the original: the `raw_seqs`/`ks` accumulators, which were
    built but never used or returned.
    """
    L = len(list(self_seq.values())[0])
    seq_tmp = {}
    # Hoisted: the same pooling condition was evaluated three times before.
    pool = args.replicate_seq and (isbackbone or args.query_dist)
    if pool:
        for k in self_seq:
            seq_tmp[k.split('_')[0]] = torch.zeros(4, L)
    for k in self_seq:
        seq = np.zeros([4, L])
        raw_seq = np.array(self_seq[k])
        # Rows are A, C, G, T; gap columns get a uniform gap_encode value.
        seq[0][raw_seq == 'A'] = 1
        seq[1][raw_seq == 'C'] = 1
        seq[2][raw_seq == 'G'] = 1
        seq[3][raw_seq == 'T'] = 1
        seq[:, raw_seq == '-'] = args.gap_encode
        if pool:
            seq_tmp[k.split('_')[0]] += torch.from_numpy(seq)
        else:
            seq_tmp[k] = torch.from_numpy(seq)
    if pool:
        # Normalize each column so pooled replicates form a base-frequency profile.
        for k in seq_tmp:
            seq_tmp[k] = seq_tmp[k].float() / (seq_tmp[k].sum(dim=0, keepdim=True) + 1e-8)
    names = list(seq_tmp)
    seqs = [seq_tmp[k].unsqueeze(0) for k in names]
    return names, torch.cat(seqs, dim=0)
def save_depp_dist(model, args):
    """Compute query-to-backbone embedding distances and write them to disk.

    Reads the FASTA files named by args.backbone_seq_file / args.query_seq_file,
    embeds both sets with `model` (in chunks of 2000 sequences), scales the
    pairwise distances by the model's distance_ratio, and writes the matrix as
    a tab-separated CSV plus a helper file listing the query names.

    NOTE(review): the jc_correct branch unpacks four values from process_seq,
    which (as defined in this file) returns only two -- confirm before running
    with jc_correct enabled.
    """
    print('processing data...')
    backbone_seq_file = args.backbone_seq_file
    query_seq_file = args.query_seq_file
    dis_file_root = os.path.join(args.outdir)
    # args.distance_ratio = float(1.0 / float(args.embedding_size) / 10 * float(args.distance_alpha))
    # Hyperparameters come from the trained model checkpoint, not the CLI args.
    args.distance_ratio = model.hparams.distance_ratio
    args.gap_encode = model.hparams.gap_encode
    args.jc_correct = model.hparams.jc_correct
    print('jc_correct', args.jc_correct)
    if args.jc_correct:
        args.jc_ratio = model.hparams.jc_ratio
    if not os.path.exists(dis_file_root):
        os.makedirs(dis_file_root)
    backbone_seq = SeqIO.to_dict(SeqIO.parse(backbone_seq_file, "fasta"))
    query_seq = SeqIO.to_dict(SeqIO.parse(query_seq_file, "fasta"))
    if args.jc_correct:
        backbone_seq_names, backbone_seq_names_raw, backbone_seq_tensor, backbone_raw_array = \
            process_seq(backbone_seq, args, isbackbone=True)
        query_seq_names, query_seq_names_raw, query_seq_tensor, query_raw_array = \
            process_seq(query_seq, args, isbackbone=False)
    else:
        # breakpoint()
        backbone_seq_names, backbone_seq_tensor = process_seq(backbone_seq, args, isbackbone=True)
        query_seq_names, query_seq_tensor = process_seq(query_seq, args, isbackbone=False)
    # Inference only: freeze all parameters so no gradients are tracked.
    for param in model.parameters():
        param.requires_grad = False
    print('finish data processing!')
    print(f'{len(backbone_seq_names)} backbone sequences')
    print(f'{len(query_seq_names)} query sequence(s)')
    print(f'calculating embeddings...')
    # Embed in batches of 2000 sequences to bound memory usage.
    backbone_encodings = []
    for i in range(math.ceil(len(backbone_seq_tensor) / 2000.0)):
        encodings_tmp = model(backbone_seq_tensor[i * 2000: (i + 1) * 2000].float()).detach()
        backbone_encodings.append(encodings_tmp)
    backbone_encodings = torch.cat(backbone_encodings, dim=0)
    query_encodings = []
    for i in range(math.ceil(len(query_seq_tensor) / 2000.0)):
        encodings_tmp = model(query_seq_tensor[i * 2000: (i + 1) * 2000].float()).detach()
        query_encodings.append(encodings_tmp)
    query_encodings = torch.cat(query_encodings, dim=0)
    print(f'finish embedding calculation!')
    query_dist = distance(query_encodings, backbone_encodings, args.distance_mode) * args.distance_ratio
    query_dist = np.array(query_dist)
    # Clamp near-zero distances to exactly zero.
    query_dist[query_dist < 1e-3] = 0
    if args.weighted_method == 'square_root_fm':
        # Model was trained on sqrt-distances; square to undo the transform.
        data_origin = dict(zip(query_seq_names, list(query_dist ** 2)))
    else:
        data_origin = dict(zip(query_seq_names, list(query_dist)))
    data_origin = pd.DataFrame.from_dict(data_origin, orient='index', columns=backbone_seq_names)
    if args.query_dist:
        # NOTE(review): selects columns by the query index -- presumably query
        # and backbone are the same set when query_dist is on; confirm.
        idx = data_origin.index
        data_origin = data_origin[idx]
    data_origin.to_csv(os.path.join(dis_file_root, f'depp.csv'), sep='\t')
    if not os.path.isdir(f'{args.outdir}/depp_tmp'):
        os.makedirs(f'{args.outdir}/depp_tmp')
    with open(f'{args.outdir}/depp_tmp/seq_name.txt', 'w') as f:
        f.write("\n".join(query_seq_names) + '\n')
    print('original distanace matrix saved!')
| 7,155 | 0 | 138 |
3838e0e6ff36b16d6849505d4a65b649d6d1d919 | 4,642 | py | Python | tests/registries/test_requires_finalizer.py | tavaresrodrigo/kopf | 97e1c7a926705a79dabce2931e96a924252b61df | [
"MIT"
] | 855 | 2020-08-19T09:40:38.000Z | 2022-03-31T19:13:29.000Z | tests/registries/test_requires_finalizer.py | tavaresrodrigo/kopf | 97e1c7a926705a79dabce2931e96a924252b61df | [
"MIT"
] | 715 | 2019-12-23T14:17:35.000Z | 2022-03-30T20:54:45.000Z | tests/registries/test_requires_finalizer.py | tavaresrodrigo/kopf | 97e1c7a926705a79dabce2931e96a924252b61df | [
"MIT"
] | 97 | 2019-04-25T09:32:54.000Z | 2022-03-30T10:15:30.000Z | import pytest
import kopf
from kopf._core.intents.filters import PRESENT
OBJECT_BODY = {
'apiVersion': 'group/version',
'kind': 'singular',
'metadata': {
'name': 'test',
'labels': {
'key': 'value',
},
'annotations': {
'key': 'value',
}
}
}
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, True, id='mandatory'),
])
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, True, id='mandatory'),
])
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, True, id='mandatory'),
])
@pytest.mark.parametrize('labels', [
pytest.param({'key': 'value'}, id='value-matches'),
pytest.param({'key': PRESENT}, id='key-exists'),
])
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, False, id='mandatory'),
])
@pytest.mark.parametrize('labels', [
pytest.param({'key': 'othervalue'}, id='value-mismatch'),
pytest.param({'otherkey': PRESENT}, id='key-doesnt-exist'),
])
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, True, id='mandatory'),
])
@pytest.mark.parametrize('annotations', [
pytest.param({'key': 'value'}, id='value-matches'),
pytest.param({'key': PRESENT}, id='key-exists'),
])
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, False, id='mandatory'),
])
@pytest.mark.parametrize('annotations', [
pytest.param({'key': 'othervalue'}, id='value-mismatch'),
pytest.param({'otherkey': PRESENT}, id='key-doesnt-exist'),
])
| 31.578231 | 76 | 0.699914 | import pytest
import kopf
from kopf._core.intents.filters import PRESENT
OBJECT_BODY = {
'apiVersion': 'group/version',
'kind': 'singular',
'metadata': {
'name': 'test',
'labels': {
'key': 'value',
},
'annotations': {
'key': 'value',
}
}
}
@pytest.mark.parametrize('optional, expected', [
    pytest.param(True, False, id='optional'),
    pytest.param(False, True, id='mandatory'),
])
def test_requires_finalizer_deletion_handler(
        optional, expected, cause_factory, resource, registry):
    """A sole deletion handler requires a finalizer only when it is mandatory."""
    cause = cause_factory(resource=resource, body=OBJECT_BODY)
    # Registering the handler is the setup: the decorator records it in the registry.
    @kopf.on.delete(*resource, optional=optional)
    def fn(**_):
        pass
    requires_finalizer = registry._changing.requires_finalizer(cause)
    assert requires_finalizer == expected
@pytest.mark.parametrize('optional, expected', [
    pytest.param(True, False, id='optional'),
    pytest.param(False, True, id='mandatory'),
])
def test_requires_finalizer_multiple_handlers(
        optional, expected, cause_factory, resource, registry):
    """A mandatory deletion handler forces a finalizer even alongside other handlers."""
    cause = cause_factory(resource=resource, body=OBJECT_BODY)
    # A creation handler alone never requires a finalizer; only fn2 matters here.
    @kopf.on.create(*resource)
    def fn1(**_):
        pass
    @kopf.on.delete(*resource, optional=optional)
    def fn2(**_):
        pass
    requires_finalizer = registry._changing.requires_finalizer(cause)
    assert requires_finalizer == expected
def test_requires_finalizer_no_deletion_handler(
        cause_factory, resource, registry):
    """With no deletion handler registered, no finalizer is required."""
    cause = cause_factory(resource=resource, body=OBJECT_BODY)
    @kopf.on.create(*resource)
    def fn1(**_):
        pass
    requires_finalizer = registry._changing.requires_finalizer(cause)
    assert requires_finalizer is False
@pytest.mark.parametrize('optional, expected', [
    pytest.param(True, False, id='optional'),
    pytest.param(False, True, id='mandatory'),
])
@pytest.mark.parametrize('labels', [
    pytest.param({'key': 'value'}, id='value-matches'),
    pytest.param({'key': PRESENT}, id='key-exists'),
])
def test_requires_finalizer_deletion_handler_matches_labels(
        labels, optional, expected, cause_factory, resource, registry):
    """A label filter that matches OBJECT_BODY keeps the mandatory finalizer requirement."""
    cause = cause_factory(resource=resource, body=OBJECT_BODY)
    @kopf.on.delete(*resource, labels=labels, optional=optional)
    def fn(**_):
        pass
    requires_finalizer = registry._changing.requires_finalizer(cause)
    assert requires_finalizer == expected
@pytest.mark.parametrize('optional, expected', [
    pytest.param(True, False, id='optional'),
    pytest.param(False, False, id='mandatory'),
])
@pytest.mark.parametrize('labels', [
    pytest.param({'key': 'othervalue'}, id='value-mismatch'),
    pytest.param({'otherkey': PRESENT}, id='key-doesnt-exist'),
])
def test_requires_finalizer_deletion_handler_mismatches_labels(
        labels, optional, expected, cause_factory, resource, registry):
    """A deletion handler whose label filter cannot match never requires a finalizer."""
    cause = cause_factory(resource=resource, body=OBJECT_BODY)
    @kopf.on.delete(*resource, labels=labels, optional=optional)
    def fn(**_):
        pass
    requires_finalizer = registry._changing.requires_finalizer(cause)
    assert requires_finalizer == expected
@pytest.mark.parametrize('optional, expected', [
    pytest.param(True, False, id='optional'),
    pytest.param(False, True, id='mandatory'),
])
@pytest.mark.parametrize('annotations', [
    pytest.param({'key': 'value'}, id='value-matches'),
    pytest.param({'key': PRESENT}, id='key-exists'),
])
def test_requires_finalizer_deletion_handler_matches_annotations(
        annotations, optional, expected, cause_factory, resource, registry):
    """An annotation filter that matches OBJECT_BODY keeps the mandatory finalizer requirement."""
    cause = cause_factory(resource=resource, body=OBJECT_BODY)
    @kopf.on.delete(*resource, annotations=annotations, optional=optional)
    def fn(**_):
        pass
    requires_finalizer = registry._changing.requires_finalizer(cause)
    assert requires_finalizer == expected
@pytest.mark.parametrize('optional, expected', [
    pytest.param(True, False, id='optional'),
    pytest.param(False, False, id='mandatory'),
])
@pytest.mark.parametrize('annotations', [
    pytest.param({'key': 'othervalue'}, id='value-mismatch'),
    pytest.param({'otherkey': PRESENT}, id='key-doesnt-exist'),
])
def test_requires_finalizer_deletion_handler_mismatches_annotations(
        annotations, optional, expected, cause_factory, resource, registry):
    """A deletion handler whose annotation filter cannot match never requires a finalizer."""
    cause = cause_factory(resource=resource, body=OBJECT_BODY)
    @kopf.on.delete(*resource, annotations=annotations, optional=optional)
    def fn(**_):
        pass
    requires_finalizer = registry._changing.requires_finalizer(cause)
    assert requires_finalizer == expected
| 2,640 | 0 | 155 |
439d923f4977a7ed626f2ab2ed070915a4c9d9e6 | 2,370 | py | Python | openstack/tests/functional/vpc/v1/public_ip.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 43 | 2018-12-19T08:39:15.000Z | 2021-07-21T02:45:43.000Z | openstack/tests/functional/vpc/v1/public_ip.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 11 | 2019-03-17T13:28:56.000Z | 2020-09-23T23:57:50.000Z | openstack/tests/functional/vpc/v1/public_ip.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 47 | 2018-12-19T05:14:25.000Z | 2022-03-19T15:28:30.000Z | # -*- coding:utf-8 -*-
# Copyright 2018 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import sys
import warnings
import os
from openstack import utils
from openstack import connection
utils.enable_logging(debug=True, stream=sys.stdout)
warnings.filterwarnings('ignore')
auth_url = '******'
userDomainId = '******'
projectId = '******'
username = '******'
password = os.getenv('get_secret_code')
conn = connection.Connection(
auth_url=auth_url,
user_domain_id=userDomainId,
project_id=projectId,
username=username,
password=password,
verify=False
)
if __name__ == '__main__':
# test_public_ips(conn)
# test_get_public_ip(conn)
# test_create_public_ip(conn)
# test_update_public_ip(conn)
# test_delete_public_ip(conn)
# test_find_public_ip(conn)
pass
| 25.76087 | 87 | 0.683966 | # -*- coding:utf-8 -*-
# Copyright 2018 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import sys
import warnings
import os
from openstack import utils
from openstack import connection
utils.enable_logging(debug=True, stream=sys.stdout)
warnings.filterwarnings('ignore')
auth_url = '******'
userDomainId = '******'
projectId = '******'
username = '******'
password = os.getenv('get_secret_code')
conn = connection.Connection(
auth_url=auth_url,
user_domain_id=userDomainId,
project_id=projectId,
username=username,
password=password,
verify=False
)
def test_public_ips(_conn):
    """Smoke-test: list public IPs through the live VPC v1 API and print them."""
    query = {
        # "": ""
    }
    objs = _conn.vpcv1.public_ips(**query)
    for obj in objs:
        print(obj)
def test_get_public_ip(_conn):
    """Smoke-test: fetch one public IP by a hard-coded ID from the live API."""
    print(_conn.vpcv1.get_public_ip('6ffdbd50-1425-4901-9383-09993304db61'))
def test_create_public_ip(_conn):
    """Smoke-test: create a 5_bgp public IP on an existing shared (WHOLE) bandwidth."""
    data = {
        "publicip": {
            "type": "5_bgp",
        },
        "bandwidth": {
            "share_type": "WHOLE",
            "id": "7a7781c0-6205-486b-a6d0-d321c4a7076a"
        }
    }
    print(_conn.vpcv1.create_public_ip(**data))
def test_update_public_ip(_conn):
    """Smoke-test: switch a hard-coded public IP to IPv6 (port-binding lines kept for reference)."""
    data = {
        "ip_version": 6,
        # "port_id": "2f8254a3-c7ec-4600-bc10-cdfdf9a4384b",
        # "port_id": None
    }
    print(_conn.vpcv1.update_public_ip('6ffdbd50-1425-4901-9383-09993304db61', **data))
def test_delete_public_ip(_conn):
    """Smoke-test: delete a hard-coded public IP via the live API."""
    print(_conn.vpcv1.delete_public_ip('1b725806-ace8-4f02-a2ad-08870f48b4ca'))
def test_find_public_ip(_conn):
    """Smoke-test: look up a public IP by ID (or name) via the live API."""
    print(_conn.vpcv1.find_public_ip('6ffdbd50-1425-4901-9383-09993304db61'))
if __name__ == '__main__':
# test_public_ips(conn)
# test_get_public_ip(conn)
# test_create_public_ip(conn)
# test_update_public_ip(conn)
# test_delete_public_ip(conn)
# test_find_public_ip(conn)
pass
| 889 | 0 | 138 |
013c01b4e51a8f310d553c5020ab688bf2779022 | 22 | py | Python | func/__init__.py | igroykt/letsencrypt-nic | 67fb823f1435be1f109e3bab1f09579452814cb0 | [
"BSD-3-Clause"
] | 4 | 2021-11-13T15:22:48.000Z | 2022-02-25T04:01:38.000Z | func/__init__.py | igroykt/letsencrypt-nic | 67fb823f1435be1f109e3bab1f09579452814cb0 | [
"BSD-3-Clause"
] | 1 | 2022-02-08T09:02:03.000Z | 2022-02-15T07:06:43.000Z | func/__init__.py | igroykt/letsencrypt-nic | 67fb823f1435be1f109e3bab1f09579452814cb0 | [
"BSD-3-Clause"
] | 2 | 2021-01-11T16:58:35.000Z | 2022-01-14T12:26:51.000Z | from .func import Func | 22 | 22 | 0.818182 | from .func import Func | 0 | 0 | 0 |
09b56129d950870a6bb2195c853f7aec6d571ead | 12,356 | py | Python | notebooks/scripts/transform_xml_to_json.py | cclauss/discovery-starter-kit | 9a7672d50bc5570ed80de04d14065994cd8aa906 | [
"MIT"
] | 27 | 2017-05-23T17:35:14.000Z | 2020-05-01T09:05:10.000Z | notebooks/scripts/transform_xml_to_json.py | cclauss/discovery-starter-kit | 9a7672d50bc5570ed80de04d14065994cd8aa906 | [
"MIT"
] | 14 | 2017-06-07T03:03:00.000Z | 2019-07-19T19:14:04.000Z | notebooks/scripts/transform_xml_to_json.py | cclauss/discovery-starter-kit | 9a7672d50bc5570ed80de04d14065994cd8aa906 | [
"MIT"
] | 39 | 2017-06-22T11:23:03.000Z | 2021-02-18T04:29:07.000Z | import xml.etree.ElementTree
import json
import sys
import os
import hashlib
from collections import defaultdict
from bs4 import BeautifulSoup
if '__file__' in globals():
sys.path.insert(0, os.path.join(os.path.abspath(__file__), 'scripts'))
else:
sys.path.insert(0, os.path.join(os.path.abspath(os.getcwd()), 'scripts'))
from discovery_setup_utils import curdir, makeSurePathExists # noqa
# 'DATA_TYPE' should be the same as the data set downloaded
DATA_TYPE = 'travel'
# INPUT_DIR should correspond to the location of the extracted stackexchange
# by default, evaluates to <current_project_dir>/data/<DATA_TYPE>
INPUT_DIR = os.path.abspath(
os.path.join(os.path.abspath(curdir), '..', 'data', DATA_TYPE)
)
# OUTPUT_DIR should correspond where you want your documents written to disk
# by default, evaluates to <INPUT_DIR>/json
OUTPUT_DIR = os.path.abspath(os.path.join(INPUT_DIR, 'json'))
makeSurePathExists(OUTPUT_DIR)
def genId(filename):
"""
Generates an identifier suitable for ingestion
Based off of the Watson Discovery Tooling method of generating IDs
"""
return hashlib.md5(filename).hexdigest()
def getUsers(usersXML, OUTPUT_DIR):
"""
Returns a dictionary of user ID to dictionary of user properties:
{
"<userid_int>": {
"reputation": <reputation_int>,
"displayName": <displayname_str>
}
}
"""
print('Starting getUsers...')
USERS_FILE_NAME = 'users.json'
USERS_FILE_PATH = os.path.abspath(
os.path.join(OUTPUT_DIR, '..', USERS_FILE_NAME)
)
if os.path.isfile(USERS_FILE_PATH):
print('Loading users from file cache...')
with open(USERS_FILE_PATH, 'r') as usersFile:
return json.loads(usersFile.read())
users_to_metadata = {}
for user in usersXML.findall('row'):
reputation = int(user.get('Reputation'))
name = user.get('DisplayName')
users_to_metadata[user.get('Id')] = {'reputation': reputation,
'displayName': name}
# write the file for later runs
user_to_metadata_str = json.dumps(users_to_metadata).replace('\n', '')
with open(USERS_FILE_PATH, 'w') as usersFile:
usersFile.write(user_to_metadata_str + '\n')
return users_to_metadata
def getVotes(votesXML, OUTPUT_DIR):
"""
Returns a dictionary of posts to vote types with counts of each type:
{
"<post_id_str>": {
"<vote_type_id_str>": <vote_count_int>,
"<vote_type_id_str>": <vote_count_int>,
...
}
}
"""
print('Starting getVotes...')
VOTES_FILE_NAME = 'votes.json'
VOTES_FILE_PATH = os.path.abspath(
os.path.join(OUTPUT_DIR, '..', VOTES_FILE_NAME)
)
if os.path.isfile(VOTES_FILE_PATH):
print('Loading votes from file cache...')
with open(VOTES_FILE_PATH, 'r') as votesFile:
return json.loads(votesFile.read())
# Types of votes
# Id | Name
# -- | ----------------------
# 1 | AcceptedByOriginator
# 2 | UpMod
# 3 | DownMod
# 4 | Offensive
# 5 | Favorite
# 6 | Close
# 7 | Reopen
# 8 | BountyStart
# 9 | BountyClose
# 10 | Deletion
# 11 | Undeletion
# 12 | Spam
# 15 | ModeratorReview
# 16 | ApproveEditSuggestion
initial_vote_types = {'1': 0,
'2': 0,
'3': 0,
'4': 0,
'5': 0,
'6': 0,
'7': 0,
'8': 0,
'9': 0,
'10': 0,
'11': 0,
'12': 0,
'15': 0,
'16': 0}
posts_to_votes = defaultdict(dict)
for vote in votesXML.findall('row'):
voteTypeId = vote.get('VoteTypeId')
if voteTypeId in initial_vote_types:
postId = vote.get('PostId')
if postId in posts_to_votes:
newCount = posts_to_votes[postId][voteTypeId] + 1
posts_to_votes[postId][voteTypeId] = newCount
else:
posts_to_votes[postId] = initial_vote_types.copy()
posts_to_votes[postId][voteTypeId] = 1
# write the file for later runs
posts_to_votes_str = json.dumps(posts_to_votes).replace('\n', '')
with open(VOTES_FILE_PATH, 'w') as votesFile:
votesFile.write(posts_to_votes_str + '\n')
return posts_to_votes
def validAnswer(item):
"""
determine whether or not the item has the required keys to write to file
"""
keys = {'id', 'text', 'question', 'question_metadata', 'answer_metadata',
'author_metadata', 'user_metadata'}
return keys <= set(item)
def writeAnswerFile(file_name, item, OUTPUT_DIR):
"""
writes the item as a document to be used for ingestion
"""
if validAnswer(item):
with open(os.path.join(OUTPUT_DIR, file_name), 'w') as answer_file:
answer_file.write(json.dumps(item).replace('\n', '') + '\n')
else:
print('Item missing required keys!')
print(json.dumps(item, indent=4))
def writeDocuments(postsXML, votesDict, usersDict, OUTPUT_DIR):
"""
splits the posts XML file into individual answer units by pairing 1 answer
to its corresponding question to prepare for document ingestion
(thus the question will be duplicated for multiple answers)
"""
documents = {}
for post in postsXML.findall('row'):
# Types of posts
# Id | Name
# -- | ---------
# 1 | Question
# 2 | Answer
postTypeId = int(post.get('PostTypeId'))
if postTypeId == 1:
handleQuestion(documents, post, OUTPUT_DIR)
elif postTypeId == 2:
handleAnswer(documents, post, votesDict, usersDict, OUTPUT_DIR)
print('Getting Posts...')
postsXML = xml.etree.ElementTree.parse(
os.path.join(INPUT_DIR, 'Posts.xml')
).getroot()
print('Posts loaded')
print('Getting Votes...')
votesXML = xml.etree.ElementTree.parse(
os.path.join(INPUT_DIR, 'Votes.xml')
).getroot()
votesDict = getVotes(votesXML, OUTPUT_DIR)
print('Votes loaded')
print('Getting Users...')
usersXML = xml.etree.ElementTree.parse(
os.path.join(INPUT_DIR, 'Users.xml')
).getroot()
usersDict = getUsers(usersXML, OUTPUT_DIR)
print('Users loaded')
print('Begin writing documents...')
writeDocuments(postsXML, votesDict, usersDict, OUTPUT_DIR)
print("Documents written to %s" % OUTPUT_DIR)
| 34.805634 | 79 | 0.598576 | import xml.etree.ElementTree
import json
import sys
import os
import hashlib
from collections import defaultdict
from bs4 import BeautifulSoup
if '__file__' in globals():
sys.path.insert(0, os.path.join(os.path.abspath(__file__), 'scripts'))
else:
sys.path.insert(0, os.path.join(os.path.abspath(os.getcwd()), 'scripts'))
from discovery_setup_utils import curdir, makeSurePathExists # noqa
# 'DATA_TYPE' should be the same as the data set downloaded
DATA_TYPE = 'travel'
# INPUT_DIR should correspond to the location of the extracted stackexchange
# by default, evaluates to <current_project_dir>/data/<DATA_TYPE>
INPUT_DIR = os.path.abspath(
os.path.join(os.path.abspath(curdir), '..', 'data', DATA_TYPE)
)
# OUTPUT_DIR should correspond where you want your documents written to disk
# by default, evaluates to <INPUT_DIR>/json
OUTPUT_DIR = os.path.abspath(os.path.join(INPUT_DIR, 'json'))
makeSurePathExists(OUTPUT_DIR)
def stripSpecial(html_doc):
    """Return the plain text of *html_doc* with all HTML markup removed."""
    return BeautifulSoup(html_doc, 'html.parser').get_text()
def genId(filename):
    """
    Generates an identifier suitable for ingestion
    Based off of the Watson Discovery Tooling method of generating IDs
    """
    if isinstance(filename, str):
        # hashlib.md5 requires bytes on Python 3; passing str raised TypeError.
        filename = filename.encode('utf-8')
    return hashlib.md5(filename).hexdigest()
def genFilename(id):
    """Build the on-disk document name for answer *id*: '<DATA_TYPE>_<id>.json'."""
    return "{}_{}.json".format(DATA_TYPE, id)
def genTrainingFilename(id):
    """Build the training-document name for *id*: 'train_<id>.json'."""
    return "train_" + str(id) + ".json"
def getUsers(usersXML, OUTPUT_DIR):
    """
    Returns a dictionary of user ID to dictionary of user properties:
    {
        "<userid_int>": {
            "reputation": <reputation_int>,
            "displayName": <displayname_str>
        }
    }
    """
    print('Starting getUsers...')
    USERS_FILE_NAME = 'users.json'
    # The cache file lives one directory above OUTPUT_DIR, next to the JSON docs.
    USERS_FILE_PATH = os.path.abspath(
        os.path.join(OUTPUT_DIR, '..', USERS_FILE_NAME)
    )
    # Reuse the cached mapping from a previous run when available.
    if os.path.isfile(USERS_FILE_PATH):
        print('Loading users from file cache...')
        with open(USERS_FILE_PATH, 'r') as usersFile:
            return json.loads(usersFile.read())
    users_to_metadata = {}
    for user in usersXML.findall('row'):
        reputation = int(user.get('Reputation'))
        name = user.get('DisplayName')
        users_to_metadata[user.get('Id')] = {'reputation': reputation,
                                             'displayName': name}
    # write the file for later runs
    user_to_metadata_str = json.dumps(users_to_metadata).replace('\n', '')
    with open(USERS_FILE_PATH, 'w') as usersFile:
        usersFile.write(user_to_metadata_str + '\n')
    return users_to_metadata
def getVotes(votesXML, OUTPUT_DIR):
    """
    Returns a dictionary of posts to vote types with counts of each type:
    {
        "<post_id_str>": {
            "<vote_type_id_str>": <vote_count_int>,
            "<vote_type_id_str>": <vote_count_int>,
            ...
        }
    }
    """
    print('Starting getVotes...')
    VOTES_FILE_NAME = 'votes.json'
    # The cache file lives one directory above OUTPUT_DIR.
    VOTES_FILE_PATH = os.path.abspath(
        os.path.join(OUTPUT_DIR, '..', VOTES_FILE_NAME)
    )
    # Reuse the cached counts from a previous run when available.
    if os.path.isfile(VOTES_FILE_PATH):
        print('Loading votes from file cache...')
        with open(VOTES_FILE_PATH, 'r') as votesFile:
            return json.loads(votesFile.read())
    # Types of votes
    # Id | Name
    # -- | ----------------------
    # 1  | AcceptedByOriginator
    # 2  | UpMod
    # 3  | DownMod
    # 4  | Offensive
    # 5  | Favorite
    # 6  | Close
    # 7  | Reopen
    # 8  | BountyStart
    # 9  | BountyClose
    # 10 | Deletion
    # 11 | Undeletion
    # 12 | Spam
    # 15 | ModeratorReview
    # 16 | ApproveEditSuggestion
    initial_vote_types = {'1': 0,
                          '2': 0,
                          '3': 0,
                          '4': 0,
                          '5': 0,
                          '6': 0,
                          '7': 0,
                          '8': 0,
                          '9': 0,
                          '10': 0,
                          '11': 0,
                          '12': 0,
                          '15': 0,
                          '16': 0}
    # NOTE(review): defaultdict(dict) is redundant here -- entries are always
    # created explicitly below before being indexed.
    posts_to_votes = defaultdict(dict)
    for vote in votesXML.findall('row'):
        voteTypeId = vote.get('VoteTypeId')
        if voteTypeId in initial_vote_types:
            postId = vote.get('PostId')
            if postId in posts_to_votes:
                newCount = posts_to_votes[postId][voteTypeId] + 1
                posts_to_votes[postId][voteTypeId] = newCount
            else:
                # First vote seen for this post: start from the zeroed template.
                posts_to_votes[postId] = initial_vote_types.copy()
                posts_to_votes[postId][voteTypeId] = 1
    # write the file for later runs
    posts_to_votes_str = json.dumps(posts_to_votes).replace('\n', '')
    with open(VOTES_FILE_PATH, 'w') as votesFile:
        votesFile.write(posts_to_votes_str + '\n')
    return posts_to_votes
def extractQuestion(document):
    """Pick the question's display fields (title, subtitle) out of *document*."""
    return {key: document.get(key) for key in ('title', 'subtitle')}
def extractQuestionMetadata(document):
    """Pick the question's metadata fields (id, score, views, tags) out of *document*."""
    return {key: document.get(key) for key in ('id', 'score', 'views', 'tags')}
def extractAuthorMetadata(document):
    """Author info for the question in *document*, resolved against the module-level usersDict."""
    author_id = document.get('authorUserId')
    user = usersDict.get(author_id, {})
    return {'id': author_id,
            'username': user.get('displayName', 0)}
def handleQuestion(documents, question, OUTPUT_DIR):
    """Record a question row; if answers for it arrived first, flush them to disk.

    documents: shared post-id -> question-dict buffer, mutated in place.
    question: an XML <row> element from Posts.xml with PostTypeId == 1.
    Answers seen before their question are parked under 'skipped_answers'
    (by handleAnswer) and written out here once the question text is known.
    """
    postId = question.get('Id')
    title = stripSpecial(question.get('Title'))
    subtitle = stripSpecial(question.get('Body'))
    answerCount = int(question.get('AnswerCount'))
    views = int(question.get('ViewCount'))
    tags = str(question.get('Tags'))
    score = int(question.get('Score'))
    acceptedAnswerId = question.get('AcceptedAnswerId', 0)
    authorUserId = question.get('OwnerUserId')
    if postId in documents:
        # need to write out the answers in this question that got skipped
        current_document = documents.get(postId)
        current_document['title'] = title
        current_document['subtitle'] = subtitle
        current_document['authorUserId'] = authorUserId
        current_document['acceptedAnswerId'] = acceptedAnswerId
        current_document['views'] = views
        current_document['tags'] = tags
        current_document['score'] = score
        current_document['answerCount'] = answerCount
        documents[postId] = current_document
        for skipped_answer in current_document.get('skipped_answers'):
            answer_metadata = skipped_answer.get('answer_metadata')
            answer_id = answer_metadata.get('id')
            # Mark the accepted answer now that the question row told us its ID.
            if answer_id == acceptedAnswerId:
                answer_metadata['accepted'] = 1
            file_name = genFilename(answer_id)
            generatedId = genId(file_name)
            item = {'id': generatedId,
                    'text': skipped_answer.get('text'),
                    'question': extractQuestion(current_document),
                    'question_metadata': extractQuestionMetadata(
                        current_document
                    ),
                    'answer_metadata': answer_metadata,
                    'author_metadata': extractAuthorMetadata(current_document),
                    'user_metadata': skipped_answer.get('user_metadata')}
            print('writing a skipped answer ID: ' + answer_id)
            writeAnswerFile(file_name, item, OUTPUT_DIR)
    else:
        # First time we see this post: just record the question for later answers.
        documents[postId] = {'id': postId, 'title': title,
                             'subtitle': subtitle, 'answerCount': answerCount,
                             'authorUserId': authorUserId,
                             'acceptedAnswerId': acceptedAnswerId,
                             'views': views, 'tags': tags, 'score': score}
def handleAnswer(documents, answer, votesDict, usersDict, OUTPUT_DIR):
    """Write one answer (paired with its question) as a JSON document.

    If the parent question has not been seen yet, the answer is parked in
    documents[parentId]['skipped_answers'] and flushed later by handleQuestion.
    votesDict / usersDict come from getVotes / getUsers.
    """
    postId = answer.get('Id')
    parentId = answer.get('ParentId')
    # answer information
    userId = answer.get('OwnerUserId')
    name = usersDict.get(userId, {}).get('displayName', '')
    reputation = usersDict.get(userId, {}).get('reputation', 0)
    user_metadata = {'id': userId, 'username': name, 'reputation': reputation}
    answerText = stripSpecial(answer.get('Body'))
    vote_types_to_count = votesDict.get(postId, {})
    upVotes = 0
    downVotes = 0
    # Vote type IDs per the StackExchange schema: '2' = UpMod, '3' = DownMod.
    UP_VOTE = '2'
    DOWN_VOTE = '3'
    if UP_VOTE in vote_types_to_count:
        upVotes = int(vote_types_to_count[UP_VOTE])
    if DOWN_VOTE in vote_types_to_count:
        downVotes = int(vote_types_to_count[DOWN_VOTE])
    answer_metadata = {'id': postId,
                       'score': int(answer.get('Score')),
                       'upModVotes': upVotes,
                       'downModVotes': downVotes,
                       'accepted': 0,
                       'length': len(answerText)}
    current_document = documents.get(parentId)
    if current_document and current_document.get('title'):
        # write out the answer to file
        if postId == current_document.get('acceptedAnswerId'):
            answer_metadata['accepted'] = 1
        file_name = genFilename(postId)
        generatedId = genId(file_name)
        item = {'id': generatedId,
                'text': answerText,
                'question': extractQuestion(current_document),
                'question_metadata': extractQuestionMetadata(current_document),
                'answer_metadata': answer_metadata,
                'author_metadata': extractAuthorMetadata(current_document),
                'user_metadata': user_metadata}
        writeAnswerFile(file_name, item, OUTPUT_DIR)
    else:
        # save it until later when we have the question text
        skipped_answers = []
        if documents.get(parentId):
            # answers already exist
            skipped_answers = documents.get(parentId).get('skipped_answers')
        else:
            documents[parentId] = {}
        skipped_answer = {'text': answerText,
                          'answer_metadata': answer_metadata,
                          'user_metadata': user_metadata}
        skipped_answers.append(skipped_answer)
        documents[parentId]['skipped_answers'] = skipped_answers
def validAnswer(item):
    """
    determine whether or not the item has the required keys to write to file
    """
    required = ('id', 'text', 'question', 'question_metadata', 'answer_metadata',
                'author_metadata', 'user_metadata')
    return all(key in item for key in required)
def writeAnswerFile(file_name, item, OUTPUT_DIR):
    """
    writes the item as a document to be used for ingestion
    """
    if validAnswer(item):
        # One JSON document per file; embedded newlines are stripped so the
        # document stays on a single line.
        with open(os.path.join(OUTPUT_DIR, file_name), 'w') as answer_file:
            answer_file.write(json.dumps(item).replace('\n', '') + '\n')
    else:
        # Malformed item: report it instead of writing a broken document.
        print('Item missing required keys!')
        print(json.dumps(item, indent=4))
"""
splits the posts XML file into individual answer units by pairing 1 answer
to its corresponding question to prepare for document ingestion
(thus the question will be duplicated for multiple answers)
"""
documents = {}
for post in postsXML.findall('row'):
# Types of posts
# Id | Name
# -- | ---------
# 1 | Question
# 2 | Answer
postTypeId = int(post.get('PostTypeId'))
if postTypeId == 1:
handleQuestion(documents, post, OUTPUT_DIR)
elif postTypeId == 2:
handleAnswer(documents, post, votesDict, usersDict, OUTPUT_DIR)
# --- Script body: parse the three StackExchange XML dumps (Posts, Votes,
# --- Users) and emit one JSON document per (question, answer) pair.
print('Getting Posts...')
postsXML = xml.etree.ElementTree.parse(
    os.path.join(INPUT_DIR, 'Posts.xml')
).getroot()
print('Posts loaded')
print('Getting Votes...')
votesXML = xml.etree.ElementTree.parse(
    os.path.join(INPUT_DIR, 'Votes.xml')
).getroot()
votesDict = getVotes(votesXML, OUTPUT_DIR)
print('Votes loaded')
print('Getting Users...')
usersXML = xml.etree.ElementTree.parse(
    os.path.join(INPUT_DIR, 'Users.xml')
).getroot()
usersDict = getUsers(usersXML, OUTPUT_DIR)
print('Users loaded')
print('Begin writing documents...')
writeDocuments(postsXML, votesDict, usersDict, OUTPUT_DIR)
print("Documents written to %s" % OUTPUT_DIR)
| 5,426 | 0 | 184 |
dab60e82da1671208d29430a2a2f98886cbb5204 | 4,380 | py | Python | PeopleCounter.py | SiddiqMohammed/Rain-Room-OpenCV | a77062c1a1450ada65d1fc126125b2f7d5bf97a6 | [
"MIT"
] | null | null | null | PeopleCounter.py | SiddiqMohammed/Rain-Room-OpenCV | a77062c1a1450ada65d1fc126125b2f7d5bf97a6 | [
"MIT"
] | null | null | null | PeopleCounter.py | SiddiqMohammed/Rain-Room-OpenCV | a77062c1a1450ada65d1fc126125b2f7d5bf97a6 | [
"MIT"
] | null | null | null | import numpy as np
import cv2 as cv
# Open the log file for writing.
# NOTE(review): the bare except hides real errors and the failure message is a
# user-facing runtime string (kept verbatim, in Spanish).
try:
    log = open('log.txt',"w")
except:
    print( "No se puede abrir el archivo log")
# Entry and exit counters
cnt_up = 0
cnt_down = 0
# Video source (index 0 would be the webcam; a test clip is used instead)
#cap = cv.VideoCapture(0)
cap = cv.VideoCapture('Test Files/videos/TestVideo.avi')
# Print the capture properties to the console
for i in range(19):
    print( i, cap.get(i))
if cap.isOpened():
h = cap.get(cv.CAP_PROP_FRAME_HEIGHT) # float
w = cap.get(cv.CAP_PROP_FRAME_WIDTH) # float
#Calculate Gx and Gy for grid lines
gX = int(w/3)
gY = int(h/3)
gx1 = gX
gy1 = gY
gx2 = gX*2
gy2 = gY*2
gx3 = int(w)
gy3 = int(h)
frameArea = h*w
areaTH = frameArea/250
print( 'Area Threshold', areaTH)
#Lineas de entrada/salida
line_up = int(2*(h/5))
line_down = int(3*(h/5))
up_limit = int(1*(h/5))
down_limit = int(4*(h/5))
#Substractor de fondo
fgbg = cv.createBackgroundSubtractorMOG2(detectShadows = True)
#Elementos estructurantes para filtros morfoogicos
kernelOp = np.ones((3,3),np.uint8)
kernelOp2 = np.ones((5,5),np.uint8)
kernelCl = np.ones((11,11),np.uint8)
#Variables
font = cv.FONT_HERSHEY_SIMPLEX
persons = []
max_p_age = 5
pid = 1
color1 = (255, 255, 255)
color2 = (0, 0, 255)
cg1 = color1
cg2 = color1
cg3 = color1
cg4 = color1
cg5 = color1
cg6 = color1
cg7 = color1
cg8 = color1
cg9 = color1
while(cap.isOpened()):
#Lee una imagen de la fuente de video
ret, frame = cap.read()
#Drawing the grid
# cv.line(frame, (0, gy1), (gx3, gy1), (150, 0, 200), 2)
# cv.line(frame, (0, gy2), (gx3, gy2), (150, 0, 200), 2)
# cv.line(frame, (gx1, 0), (gx1, gy3), (150, 0, 200), 2)
# cv.line(frame, (gx2, 0), (gx2, gy3), (150, 0, 200), 2)
# Row 1
cv.rectangle(frame, (0, 0), (gx1, gy1), cg1, 2)
cv.rectangle(frame, (gx1, 0), (gx2, gy1), cg2, 2)
cv.rectangle(frame, (gx2, 0), (gx3, gy1), cg3, 2)
# Row 2
cv.rectangle(frame, (0, gy1), (gx1, gy2), cg4, 2)
cv.rectangle(frame, (gx1, gy1), (gx2, gy2), cg5, 2)
cv.rectangle(frame, (gx2, gy1), (gx3, gy2), cg6, 2)
# Row 3
cv.rectangle(frame, (0, gy2), (gx1, gy3), cg7, 2)
cv.rectangle(frame, (gx1, gy2), (gx2, gy3), cg8, 2)
cv.rectangle(frame, (gx2, gy2), (gx3, gy3), cg9, 2)
for i in persons:
i.age_one() #age every person one frame
#Aplica substraccion de fondo
fgmask = fgbg.apply(frame)
fgmask2 = fgbg.apply(frame)
#Binariazcion para eliminar sombras (color gris)
try:
ret,imBin= cv.threshold(fgmask,200,255,cv.THRESH_BINARY)
ret,imBin2 = cv.threshold(fgmask2,200,255,cv.THRESH_BINARY)
#Opening (erode->dilate) para quitar ruido.
mask = cv.morphologyEx(imBin, cv.MORPH_OPEN, kernelOp)
mask2 = cv.morphologyEx(imBin2, cv.MORPH_OPEN, kernelOp)
#Closing (dilate -> erode) para juntar regiones blancas.
mask = cv.morphologyEx(mask , cv.MORPH_CLOSE, kernelCl)
mask2 = cv.morphologyEx(mask2, cv.MORPH_CLOSE, kernelCl)
except:
print('EOF')
print( 'UP:',cnt_up)
print ('DOWN:',cnt_down)
break
# RETR_EXTERNAL returns only extreme outer flags. All child contours are left behind.
contours0, hierarchy = cv.findContours(mask2,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)
for cnt in contours0:
area = cv.contourArea(cnt)
if area > areaTH:
#Falta agregar condiciones para multipersonas, salidas y entradas de pantalla.
M = cv.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
cv.circle(frame,(cx,cy), 5, (0,0,255), -1)
rectSize = 50
cv.rectangle(frame,(cx+rectSize,cy+rectSize), (cx-rectSize,cy-rectSize), (0,0,255), 2)
text = cx, cy
cv.putText(frame, str(text), (cx,cy), font, 0.5, (255,0,0), 1, cv.LINE_AA)
# for ccx in range(1, 4):
# for ccy in range(1, 4):
if cx > 0 and cx < gx1 and cy > 0 and cy < gy1:
cg1 = color2
else:
cg1 = color1
str_up = 'UP: '+ str(cnt_up)
str_down = 'DOWN: '+ str(cnt_down)
cv.imshow('Frame',frame)
cv.imshow('Mask',mask)
k = cv.waitKey(30) & 0xff
if k == 27:
break
#END while(cap.isOpened())
log.flush()
log.close()
cap.release()
cv.destroyAllWindows()
| 24.886364 | 98 | 0.598402 | import numpy as np
import cv2 as cv

# Open a log file for the whole run; the error message is kept in Spanish
# because it is user-facing output of the original program.
try:
    log = open('log.txt',"w")
except:
    print( "No se puede abrir el archivo log")

# Entry/exit counters for people crossing the counting lines.
cnt_up = 0
cnt_down = 0

# Video source (uncomment the first line to use a webcam instead).
#cap = cv.VideoCapture(0)
cap = cv.VideoCapture('Test Files/videos/TestVideo.avi')

# Print the capture properties to the console.
for i in range(19):
    print( i, cap.get(i))

if cap.isOpened():
    h = cap.get(cv.CAP_PROP_FRAME_HEIGHT) # float
    w = cap.get(cv.CAP_PROP_FRAME_WIDTH) # float

# Calculate Gx and Gy for grid lines (frame divided into a 3x3 grid).
gX = int(w/3)
gY = int(h/3)
gx1 = gX
gy1 = gY
gx2 = gX*2
gy2 = gY*2
gx3 = int(w)
gy3 = int(h)

# Minimum contour area (pixels) for a blob to be treated as a person.
frameArea = h*w
areaTH = frameArea/250
print( 'Area Threshold', areaTH)

# Entry/exit lines (horizontal bands of the frame used for counting).
line_up = int(2*(h/5))
line_down = int(3*(h/5))
up_limit = int(1*(h/5))
down_limit = int(4*(h/5))

# Background subtractor.
fgbg = cv.createBackgroundSubtractorMOG2(detectShadows = True)

# Structuring elements for the morphological filters.
kernelOp = np.ones((3,3),np.uint8)
kernelOp2 = np.ones((5,5),np.uint8)
kernelCl = np.ones((11,11),np.uint8)

# Variables
font = cv.FONT_HERSHEY_SIMPLEX
persons = []
max_p_age = 5
pid = 1

# Grid-cell colours: white when empty, red when a centroid falls inside.
color1 = (255, 255, 255)
color2 = (0, 0, 255)
cg1 = color1
cg2 = color1
cg3 = color1
cg4 = color1
cg5 = color1
cg6 = color1
cg7 = color1
cg8 = color1
cg9 = color1

while(cap.isOpened()):
    # Read one frame from the video source.
    ret, frame = cap.read()

    # Drawing the grid
    # cv.line(frame, (0, gy1), (gx3, gy1), (150, 0, 200), 2)
    # cv.line(frame, (0, gy2), (gx3, gy2), (150, 0, 200), 2)
    # cv.line(frame, (gx1, 0), (gx1, gy3), (150, 0, 200), 2)
    # cv.line(frame, (gx2, 0), (gx2, gy3), (150, 0, 200), 2)
    # Row 1
    cv.rectangle(frame, (0, 0), (gx1, gy1), cg1, 2)
    cv.rectangle(frame, (gx1, 0), (gx2, gy1), cg2, 2)
    cv.rectangle(frame, (gx2, 0), (gx3, gy1), cg3, 2)
    # Row 2
    cv.rectangle(frame, (0, gy1), (gx1, gy2), cg4, 2)
    cv.rectangle(frame, (gx1, gy1), (gx2, gy2), cg5, 2)
    cv.rectangle(frame, (gx2, gy1), (gx3, gy2), cg6, 2)
    # Row 3
    cv.rectangle(frame, (0, gy2), (gx1, gy3), cg7, 2)
    cv.rectangle(frame, (gx1, gy2), (gx2, gy3), cg8, 2)
    cv.rectangle(frame, (gx2, gy2), (gx3, gy3), cg9, 2)

    for i in persons:
        i.age_one() #age every person one frame

    # Apply background subtraction.
    fgmask = fgbg.apply(frame)
    fgmask2 = fgbg.apply(frame)

    # Binarization to remove shadows (gray pixels in the MOG2 mask).
    try:
        ret,imBin= cv.threshold(fgmask,200,255,cv.THRESH_BINARY)
        ret,imBin2 = cv.threshold(fgmask2,200,255,cv.THRESH_BINARY)
        # Opening (erode->dilate) to remove noise.
        mask = cv.morphologyEx(imBin, cv.MORPH_OPEN, kernelOp)
        mask2 = cv.morphologyEx(imBin2, cv.MORPH_OPEN, kernelOp)
        # Closing (dilate -> erode) to join white regions.
        mask = cv.morphologyEx(mask , cv.MORPH_CLOSE, kernelCl)
        mask2 = cv.morphologyEx(mask2, cv.MORPH_CLOSE, kernelCl)
    except:
        # cap.read() returned no frame: end of the video file.
        print('EOF')
        print( 'UP:',cnt_up)
        print ('DOWN:',cnt_down)
        break

    # RETR_EXTERNAL returns only extreme outer flags. All child contours are left behind.
    contours0, hierarchy = cv.findContours(mask2,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)
    for cnt in contours0:
        area = cv.contourArea(cnt)
        if area > areaTH:
            # TODO: add handling for multiple people and for people
            # entering/leaving the frame.
            M = cv.moments(cnt)
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            cv.circle(frame,(cx,cy), 5, (0,0,255), -1)
            rectSize = 50
            cv.rectangle(frame,(cx+rectSize,cy+rectSize), (cx-rectSize,cy-rectSize), (0,0,255), 2)
            text = cx, cy
            cv.putText(frame, str(text), (cx,cy), font, 0.5, (255,0,0), 1, cv.LINE_AA)
            # for ccx in range(1, 4):
            #     for ccy in range(1, 4):
            # Highlight the top-left grid cell when the centroid is inside it.
            if cx > 0 and cx < gx1 and cy > 0 and cy < gy1:
                cg1 = color2
            else:
                cg1 = color1

    str_up = 'UP: '+ str(cnt_up)
    str_down = 'DOWN: '+ str(cnt_down)
    cv.imshow('Frame',frame)
    cv.imshow('Mask',mask)

    # ESC key aborts the loop.
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break
#END while(cap.isOpened())

log.flush()
log.close()
cap.release()
cv.destroyAllWindows()
| 0 | 0 | 0 |
e96277acf15ea7d0cc031a3c4a9c4b0b9fc64235 | 1,344 | py | Python | tests/test_settings.py | stevearc/pyramid_duh | af14b185533d00b69dfdb8ab1cab6f1d1d8d4647 | [
"MIT"
] | 5 | 2015-12-15T09:27:16.000Z | 2017-12-12T12:56:04.000Z | tests/test_settings.py | stevearc/pyramid_duh | af14b185533d00b69dfdb8ab1cab6f1d1d8d4647 | [
"MIT"
] | null | null | null | tests/test_settings.py | stevearc/pyramid_duh | af14b185533d00b69dfdb8ab1cab6f1d1d8d4647 | [
"MIT"
] | null | null | null | """ Tests for settings utils """
from pyramid_duh.settings import asdict
try:
import unittest2 as unittest # pylint: disable=F0401
except ImportError:
import unittest
class TestAsDict(unittest.TestCase):
    """ Tests for asdict """

    def test_default(self):
        """ If provided value is a dict, return that """
        self.assertEqual(asdict({}), {})

    def test_default_none(self):
        """ If provided value is None, return {} """
        self.assertEqual(asdict(None), {})

    def test_convert(self):
        """ Convert a string to a dict """
        raw = """
        a = b
        c=d
        """
        self.assertEqual(asdict(raw), {'a': 'b', 'c': 'd'})

    def test_convert_with_equals(self):
        """ Properly converts strings that have multiple equals signs """
        raw = """
        a = KpxYAw==
        b = 1+2=3
        """
        self.assertEqual(asdict(raw), {'a': 'KpxYAw==', 'b': '1+2=3'})

    def test_convert_value(self):
        """ Run a function on dict values """
        raw = """
        foo = 2
        bar = 5
        """
        self.assertEqual(asdict(raw, int), {'foo': 2, 'bar': 5})
| 23.172414 | 73 | 0.501488 | """ Tests for settings utils """
from pyramid_duh.settings import asdict
try:
import unittest2 as unittest # pylint: disable=F0401
except ImportError:
import unittest
class TestAsDict(unittest.TestCase):
    """ Tests for asdict """

    def test_default(self):
        """ If provided value is a dict, return that """
        self.assertEqual(asdict({}), {})

    def test_default_none(self):
        """ If provided value is None, return {} """
        self.assertEqual(asdict(None), {})

    def test_convert(self):
        """ Convert a string to a dict """
        raw = """
        a = b
        c=d
        """
        self.assertEqual(asdict(raw), {'a': 'b', 'c': 'd'})

    def test_convert_with_equals(self):
        """ Properly converts strings that have multiple equals signs """
        raw = """
        a = KpxYAw==
        b = 1+2=3
        """
        self.assertEqual(asdict(raw), {'a': 'KpxYAw==', 'b': '1+2=3'})

    def test_convert_value(self):
        """ Run a function on dict values """
        raw = """
        foo = 2
        bar = 5
        """
        self.assertEqual(asdict(raw, int), {'foo': 2, 'bar': 5})
| 0 | 0 | 0 |
5744ea12b66d18f2b1460524e3e136d0f6476c11 | 4,769 | py | Python | DocumentParser.py | dadosabertosrn/cota_parlamentar | ef3aacb6d9f332cbe3b992258dc452241291b3b1 | [
"MIT"
] | 1 | 2020-06-02T23:14:27.000Z | 2020-06-02T23:14:27.000Z | DocumentParser.py | dadosabertosrn/cota_parlamentar | ef3aacb6d9f332cbe3b992258dc452241291b3b1 | [
"MIT"
] | 1 | 2020-07-17T21:03:39.000Z | 2020-07-17T21:03:39.000Z | DocumentParser.py | dadosabertosrn/cota_parlamentar | ef3aacb6d9f332cbe3b992258dc452241291b3b1 | [
"MIT"
] | null | null | null | from tika import parser
from dateutil.parser import parser as date_parser
from PortugueseParserInfo import PortugueseParserInfo
| 30.375796 | 153 | 0.549172 | from tika import parser
from dateutil.parser import parser as date_parser
from PortugueseParserInfo import PortugueseParserInfo
class DocumentParser():
    """Parse a council-member expense-report PDF into a nested info dict.

    The PDF text (extracted with tika) is scanned line by line; each line is
    classified as a document title, an expense declaration ("issue"), an
    expense detail, a total, or noise, and the extracted fields are
    accumulated into a single dict.
    """

    # Month names accepted by isMonthYear (Brazilian Portuguese).
    MONTH_NAMES = ("Janeiro", "Fevereiro", "Março", "Abril", "Maio", "Junho",
                   "Julho", "Agosto", "Setembro", "Outubro", "Novembro",
                   "Dezembro")

    def __init__(self):
        # dateutil parser configured with Portuguese month/weekday names.
        self.portDateParser = date_parser(info=PortugueseParserInfo())

    def isDate(self, token):
        """Return True if the token looks like a dd/mm/yyyy date."""
        try:
            day, month, year = token.split('/')
            int(day)
            int(month)  # bug fix: the month field was previously not validated
            int(year)
            return True
        except (ValueError, AttributeError):
            # ValueError: wrong number of '/'-separated fields or a
            # non-numeric part; AttributeError: token is not a string.
            return False

    def isMonthYear(self, token):
        """Return True if the token is in "Month/Year" format (e.g. "Janeiro/2020")."""
        try:
            month, year = token.split('/')
            int(year)
            return month in self.MONTH_NAMES
        except (ValueError, AttributeError):
            return False

    def getLineClassification(self, line):
        """Classify a tokenized line of the PDF.

        Classes:
          - "title":  document header (Month/Year plus council-member name)
          - "issue":  an expense declaration
          - "detail": a detail row of the current expense
          - "total":  the total amount spent
          - "none":   irrelevant line
        """
        if len(line) == 0:
            return "none"
        hasVereador = "Vereador(a):" in line        # council-member marker present
        hasMonthYear = self.isMonthYear(line[0])    # first token is Month/Year
        hasRSOnPos = len(line) > 1 and line[-2] == "R$"  # currency marker before amount
        firstIsDate = self.isDate(line[0])          # first token is a date
        # Assign the class according to the extracted features.
        if hasVereador and hasRSOnPos:
            return "total"
        if hasVereador and not hasRSOnPos and hasMonthYear:
            return "title"
        if hasRSOnPos and firstIsDate:
            return "detail"
        if hasRSOnPos and not firstIsDate and not hasVereador:
            return "issue"
        return "none"

    def getInfoFromTitle(self, line):
        """Extract fields from a "title" line.

        The council member's name is every token after the 'Vereador(a):'
        marker; the first token carries "Month/Year".
        """
        try:
            marker = line.index('Vereador(a):')
        except ValueError:
            marker = len(line)  # no marker: name is empty (matches old behavior)
        # partition never raises on a missing '/', unlike split()[1]
        month, _, year = line[0].partition('/')
        return {
            "nameVereador": " ".join(line[marker + 1:]),
            "yearDocument": year,
            "monthDocument": month,
        }

    def getInfoFromIssue(self, line):
        """Extract fields from an "issue" line (last two tokens are "R$" <amount>)."""
        return {
            "issueDesc": " ".join(line[:-2]),
            "issueCost": float(line[-1]),
        }

    def getInfoFromDetail(self, line):
        """Extract fields from a "detail" line.

        Expected token layout: date, receipt id, CPF/CNPJ, legal fundament,
        description..., "R$", amount.
        """
        info = dict()
        try:
            info["dateDatetime"] = self.portDateParser.parse(line[0])
        except Exception:
            # Keep the raw token when it cannot be parsed as a date.
            info["dateStr"] = line[0]
        info["reciptId"] = line[1]  # key spelling kept for backward compatibility
        info["cpfCnpj"] = line[2]
        info["fundament"] = line[3]
        info["desc"] = " ".join(line[4:-2])
        info["detailCost"] = float(line[-1])
        return info

    def parse(self, filePath):
        """Parse the PDF at filePath and return the accumulated info dict."""
        # Use tika to extract the text of the given PDF file.
        raw = parser.from_file(filePath)
        lines = raw['content'].split('\n')
        # Dict that accumulates the extracted data.
        info = {
            "issues": []
        }
        for rawLine in lines:  # for each line of the PDF
            # Tokenize on the space separator and drop empty tokens.
            tokens = [x for x in rawLine.split(' ') if len(x) > 0]
            group = self.getLineClassification(tokens)
            if group == "title":
                info.update(self.getInfoFromTitle(tokens))
            elif group == "issue":
                info["issues"].append(self.getInfoFromIssue(tokens))
            elif group == "detail":
                if not info["issues"]:
                    # Robustness fix: a stray detail line before any issue
                    # used to raise IndexError; skip it instead.
                    continue
                # Append to the current issue's detail list.
                info["issues"][-1].setdefault("details", []).append(
                    self.getInfoFromDetail(tokens)
                )
        return info
| 1,271 | 3,345 | 23 |
010b3ad09f83a6f90ac2fe5b45458643fe4959c1 | 916 | py | Python | sqds/admin.py | abey79/sqds | acab1d9c6d4a010fff9d8e89a5fdd9d94def7c89 | [
"MIT"
] | null | null | null | sqds/admin.py | abey79/sqds | acab1d9c6d4a010fff9d8e89a5fdd9d94def7c89 | [
"MIT"
] | null | null | null | sqds/admin.py | abey79/sqds | acab1d9c6d4a010fff9d8e89a5fdd9d94def7c89 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Unit, Skill, Gear, Guild, Category
# noinspection PyMethodMayBeStatic,PyUnusedLocal
@admin.register(Unit)
@admin.register(Gear)
@admin.register(Skill)
@admin.register(Category)
@admin.register(Guild)
| 19.913043 | 55 | 0.747817 | from django.contrib import admin
from .models import Unit, Skill, Gear, Guild, Category
# noinspection PyMethodMayBeStatic,PyUnusedLocal
class ReadOnlyMixin:
    """Admin mixin that makes a model admin read-only by denying the
    add/change/delete permissions checked before rendering edit controls."""

    def has_add_permission(self, request):
        # Never allow creating objects through the admin.
        return False

    def has_change_permission(self, request, obj=None):
        # Never allow editing objects through the admin.
        return False

    def has_delete_permission(self, request, obj=None):
        # Never allow deleting objects through the admin.
        return False
class SkillInline(ReadOnlyMixin, admin.TabularInline):
    """Read-only tabular inline of Skill rows (used by UnitAdmin)."""
    model = Skill
    extra = 0  # do not display empty extra forms
@admin.register(Unit)
class UnitAdmin(ReadOnlyMixin, admin.ModelAdmin):
    """Read-only admin for Unit, with its skills shown inline."""
    inlines = [SkillInline]
@admin.register(Gear)
class GearAdmin(ReadOnlyMixin, admin.ModelAdmin):
    """Read-only admin for Gear."""
    pass
@admin.register(Skill)
class SkillAdmin(ReadOnlyMixin, admin.ModelAdmin):
    """Read-only admin for Skill."""
    pass
@admin.register(Category)
class CategoryAdmin(ReadOnlyMixin, admin.ModelAdmin):
    """Read-only admin for Category."""
    pass
@admin.register(Guild)
class GuildAdmin(ReadOnlyMixin, admin.ModelAdmin):
    """Read-only admin for Guild."""
    pass
| 140 | 274 | 235 |
48d538986158de426dee3cd4aacc1139055d2ff5 | 738 | py | Python | aiovkdonate/http/aiohttp.py | vladislavkovalskyi/aiovkdonate | 453c61265393b90a4d9d466ee0028ac415c430fe | [
"MIT"
] | 1 | 2020-09-26T18:26:54.000Z | 2020-09-26T18:26:54.000Z | aiovkdonate/http/aiohttp.py | vladislavkovalskyi/aiovkdonate | 453c61265393b90a4d9d466ee0028ac415c430fe | [
"MIT"
] | null | null | null | aiovkdonate/http/aiohttp.py | vladislavkovalskyi/aiovkdonate | 453c61265393b90a4d9d466ee0028ac415c430fe | [
"MIT"
] | null | null | null | import json
from asyncio import AbstractEventLoop, get_event_loop
from typing import Optional, NoReturn
from aiohttp import TCPConnector, ClientSession
| 28.384615 | 93 | 0.745257 | import json
from asyncio import AbstractEventLoop, get_event_loop
from typing import Optional, NoReturn
from aiohttp import TCPConnector, ClientSession
class AioHTTPClient:
    """Thin async HTTP client wrapping an aiohttp ClientSession for JSON calls."""

    def __init__(
        self,
        loop: Optional[AbstractEventLoop] = None,
        session: Optional[ClientSession] = None
    ):
        # Reuse the caller's loop/session when provided; otherwise create our own.
        self.loop = loop or get_event_loop()
        self.session = session or ClientSession(
            connector=TCPConnector(ssl=False),
            # Bug fix: pass the serializer itself, not the result of calling
            # it -- json.dumps() with no argument raises TypeError.
            json_serialize=json.dumps
        )

    async def request_json(self, method: str, url: str, data: Optional[dict], **kwargs) -> dict:
        """Perform an HTTP request and decode the response body as JSON."""
        async with self.session.request(method, url, data=data, **kwargs) as response:
            return await response.json(loads=json.loads)

    async def exit(self) -> None:
        # Annotation fixed: this coroutine returns normally, so its return
        # type is None, not NoReturn.
        """Close the underlying session (call once when finished)."""
        await self.session.close()
| 491 | -1 | 94 |
1a582f50553344cd344c92b87265938fe47aec68 | 873 | py | Python | doc/examples/writing_benchmarks/pure_numpy.py | fluiddyn/transonic | a460e9f6d1139f79b668cb3306d1e8a7e190b72d | [
"BSD-3-Clause"
] | 88 | 2019-01-08T16:39:08.000Z | 2022-02-06T14:19:23.000Z | doc/examples/writing_benchmarks/pure_numpy.py | fluiddyn/transonic | a460e9f6d1139f79b668cb3306d1e8a7e190b72d | [
"BSD-3-Clause"
] | 13 | 2019-06-20T15:53:10.000Z | 2021-02-09T11:03:29.000Z | doc/examples/writing_benchmarks/pure_numpy.py | fluiddyn/transonic | a460e9f6d1139f79b668cb3306d1e8a7e190b72d | [
"BSD-3-Clause"
] | 1 | 2019-11-05T03:03:14.000Z | 2019-11-05T03:03:14.000Z | import numpy as np
def laplace_numpy(image):
    """Vectorized 2D Laplace stencil, thresholded at |value| > 0.05."""
    center = image[1:-1, 1:-1]
    neighbour_sum = (
        image[:-2, 1:-1]
        + image[2:, 1:-1]
        + image[1:-1, :-2]
        + image[1:-1, 2:]
    )
    return np.abs(neighbour_sum - 4 * center) > 0.05
def laplace_loops(image):
    """Explicit-loop 2D Laplace stencil, thresholded at |value| > 0.05."""
    rows = image.shape[0]
    cols = image.shape[1]
    result = np.empty((rows - 2, cols - 2), np.uint8)
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            stencil = (
                image[r - 1, c]
                + image[r + 1, c]
                + image[r, c - 1]
                + image[r, c + 1]
                - 4 * image[r, c]
            )
            result[r - 1, c - 1] = np.abs(stencil) > 0.05
    return result
| 24.942857 | 50 | 0.408935 | import numpy as np
def laplace_numpy(image):
    """Vectorized 2D Laplace stencil, thresholded at |value| > 0.05."""
    center = image[1:-1, 1:-1]
    neighbour_sum = (
        image[:-2, 1:-1]
        + image[2:, 1:-1]
        + image[1:-1, :-2]
        + image[1:-1, 2:]
    )
    return np.abs(neighbour_sum - 4 * center) > 0.05
def laplace_loops(image):
    """Explicit-loop 2D Laplace stencil, thresholded at |value| > 0.05."""
    rows = image.shape[0]
    cols = image.shape[1]
    result = np.empty((rows - 2, cols - 2), np.uint8)
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            stencil = (
                image[r - 1, c]
                + image[r + 1, c]
                + image[r, c - 1]
                + image[r, c + 1]
                - 4 * image[r, c]
            )
            result[r - 1, c - 1] = np.abs(stencil) > 0.05
    return result
| 0 | 0 | 0 |
abfe504ef814e12dd43ea1555a6151e24ceed233 | 960 | py | Python | ev3dev/fonts/__init__.py | gorozcoh/ev3dev-lang-python | 3b2597398bc0bb0b9fe0ec88322837dbbc749e04 | [
"MIT"
] | null | null | null | ev3dev/fonts/__init__.py | gorozcoh/ev3dev-lang-python | 3b2597398bc0bb0b9fe0ec88322837dbbc749e04 | [
"MIT"
] | null | null | null | ev3dev/fonts/__init__.py | gorozcoh/ev3dev-lang-python | 3b2597398bc0bb0b9fe0ec88322837dbbc749e04 | [
"MIT"
] | null | null | null | import pkg_resources
import os.path
from PIL import ImageFont
def available():
    """
    Returns list of available font names.
    """
    entries = pkg_resources.resource_listdir('ev3dev.fonts', '')
    stems = (os.path.splitext(os.path.basename(entry)) for entry in entries)
    return sorted(stem for stem, ext in stems if ext == '.pil')
def load(name):
    """
    Loads the font specified by name and returns it as an instance of
    `PIL.ImageFont <http://pillow.readthedocs.io/en/latest/reference/ImageFont.html>`_
    class.
    """
    try:
        pil_file = pkg_resources.resource_filename('ev3dev.fonts', '{}.pil'.format(name))
        # NOTE(review): pbm_file looks unused, but resolving it presumably
        # forces the companion .pbm bitmap to be extracted alongside the .pil
        # metrics file so PIL can find it -- confirm before removing.
        pbm_file = pkg_resources.resource_filename('ev3dev.fonts', '{}.pbm'.format(name))
        return ImageFont.load(pil_file)
    except FileNotFoundError:
        raise Exception('Failed to load font "{}". '.format(name) +
                        'Check ev3dev.fonts.available() for the list of available fonts')
| 33.103448 | 89 | 0.652083 | import pkg_resources
import os.path
from PIL import ImageFont
def available():
    """
    Returns list of available font names.
    """
    entries = pkg_resources.resource_listdir('ev3dev.fonts', '')
    stems = (os.path.splitext(os.path.basename(entry)) for entry in entries)
    return sorted(stem for stem, ext in stems if ext == '.pil')
def load(name):
    """
    Loads the font specified by name and returns it as an instance of
    `PIL.ImageFont <http://pillow.readthedocs.io/en/latest/reference/ImageFont.html>`_
    class.
    """
    try:
        pil_file = pkg_resources.resource_filename('ev3dev.fonts', '{}.pil'.format(name))
        # NOTE(review): pbm_file looks unused, but resolving it presumably
        # forces the companion .pbm bitmap to be extracted alongside the .pil
        # metrics file so PIL can find it -- confirm before removing.
        pbm_file = pkg_resources.resource_filename('ev3dev.fonts', '{}.pbm'.format(name))
        return ImageFont.load(pil_file)
    except FileNotFoundError:
        raise Exception('Failed to load font "{}". '.format(name) +
                        'Check ev3dev.fonts.available() for the list of available fonts')
| 0 | 0 | 0 |
833d0647d15a1de5ddeb8293e544d9462e453c19 | 38,785 | py | Python | justpy/tailwind.py | bnaard/justpy | 19d49257d9be45f713ac23a1d1eabc0ef5c727b7 | [
"Apache-2.0"
] | 855 | 2020-02-08T16:33:34.000Z | 2022-03-30T18:03:19.000Z | justpy/tailwind.py | bnaard/justpy | 19d49257d9be45f713ac23a1d1eabc0ef5c727b7 | [
"Apache-2.0"
] | 289 | 2020-02-10T21:23:57.000Z | 2022-03-25T18:29:06.000Z | justpy/tailwind.py | bnaard/justpy | 19d49257d9be45f713ac23a1d1eabc0ef5c727b7 | [
"Apache-2.0"
] | 88 | 2020-02-10T18:52:57.000Z | 2022-03-30T05:16:34.000Z | # https://tailwindcss.com
| 89.366359 | 169 | 0.454454 | # https://tailwindcss.com
class Tailwind:
# TODO: Customization - https://tailwindcss.com/docs/configuration
pseudo_classes = ['hover', 'focus', 'active', 'group-hover', 'focus-within',
'first', 'last', 'odd', 'even', 'disabled', 'visited',
'sm', 'md', 'lg', 'xl', 'group-focus']
tw_dict = {'container': ['container'],
'screen_reader': ['sr-only', 'not-sr-only'],
'display': ['block', 'inline-block', 'inline', 'flex', 'inline-flex', 'table', 'table-row', 'table-cell',
'table-caption', 'table-column', 'table-column-group', 'table-header-group',
'table-row-group', 'table-footer-group',
'hidden', 'grid', 'inline-grid', 'flow-root'],
'float': ['float-right', 'float-left', 'float-none', 'clearfix'],
'clear': ['clear-right', 'clear-left', 'clear-none', 'clear-both'],
'object_fit': ['object-contain', 'object-cover', 'object-fill', 'object-none', 'object-scale-down'],
'object_position': ['object-bottom', 'object-center', 'object-left', 'object-left-bottom',
'object-left-top',
'object-right', 'object-right-bottom', 'object-right-top', 'object-top'],
'overflow': ['overflow-auto', 'overflow-hidden', 'overflow-visible', 'overflow-scroll',
'overflow-x-auto',
'overflow-y-auto', 'overflow-x-hidden', 'overflow-y-hidden', 'overflow-x-visible',
'overflow-y-visible', 'overflow-x-scroll', 'overflow-y-scroll', 'scrolling-touch',
'scrolling-auto'], 'position': ['static', 'fixed', 'absolute', 'relative', 'sticky'],
'top_right_bottom_left': ['inset-0', 'inset-y-0', 'inset-x-0', 'top-0', 'right-0', 'bottom-0', 'left-0',
'inset-auto', 'inset-y-auto', 'inset-x-auto', 'top-auto', 'bottom-auto',
'left-auto', 'right-auto'], 'visibility': ['visible', 'invisible'],
'z_index': ['z-0', 'z-10', 'z-20', 'z-30', 'z-40', 'z-50', 'z-auto'],
'font_family': ['font-sans', 'font-serif', 'font-mono'],
'font_size': ['text-xs', 'text-sm', 'text-base', 'text-lg', 'text-xl', 'text-2xl', 'text-3xl',
'text-4xl',
'text-5xl', 'text-6xl'], 'font_smoothing': ['antialiased', 'subpixel-antialiased'],
'font_style': ['italic', 'not-italic'],
'font_weight': ['font-hairline', 'font-thin', 'font-light', 'font-normal', 'font-medium',
'font-semibold',
'font-bold', 'font-extrabold', 'font-black'],
'letter_spacing': ['tracking-tighter', 'tracking-tight', 'tracking-normal', 'tracking-wide',
'tracking-wider', 'tracking-widest'],
'line_height': ['leading-none', 'leading-tight', 'leading-snug', 'leading-normal', 'leading-relaxed',
'leading-loose', 'leading-3', 'leading-4', 'leading-5', 'leading-6', 'leading-7',
'leading-8', 'leading-9', 'leading-10'],
'list_style_type': ['list-none', 'list-disc', 'list-decimal'],
'list_style_position': ['list-inside', 'list-outside'],
'text_align': ['text-left', 'text-center', 'text-right', 'text-justify'],
'text_color': ['text-transparent', 'text-current', 'text-black', 'text-white', 'text-gray-100',
'text-gray-200',
'text-gray-300', 'text-gray-400', 'text-gray-500', 'text-gray-600', 'text-gray-700',
'text-gray-800', 'text-gray-900', 'text-red-100', 'text-red-200', 'text-red-300',
'text-red-400', 'text-red-500', 'text-red-600', 'text-red-700', 'text-red-800',
'text-red-900', 'text-orange-100', 'text-orange-200', 'text-orange-300',
'text-orange-400',
'text-orange-500', 'text-orange-600', 'text-orange-700', 'text-orange-800',
'text-orange-900',
'text-yellow-100', 'text-yellow-200', 'text-yellow-300', 'text-yellow-400',
'text-yellow-500',
'text-yellow-600', 'text-yellow-700', 'text-yellow-800', 'text-yellow-900',
'text-green-100',
'text-green-200', 'text-green-300', 'text-green-400', 'text-green-500', 'text-green-600',
'text-green-700', 'text-green-800', 'text-green-900', 'text-teal-100', 'text-teal-200',
'text-teal-300', 'text-teal-400', 'text-teal-500', 'text-teal-600', 'text-teal-700',
'text-teal-800', 'text-teal-900', 'text-blue-100', 'text-blue-200', 'text-blue-300',
'text-blue-400', 'text-blue-500', 'text-blue-600', 'text-blue-700', 'text-blue-800',
'text-blue-900', 'text-indigo-100', 'text-indigo-200', 'text-indigo-300',
'text-indigo-400',
'text-indigo-500', 'text-indigo-600', 'text-indigo-700', 'text-indigo-800',
'text-indigo-900',
'text-purple-100', 'text-purple-200', 'text-purple-300', 'text-purple-400',
'text-purple-500',
'text-purple-600', 'text-purple-700', 'text-purple-800', 'text-purple-900',
'text-pink-100',
'text-pink-200', 'text-pink-300', 'text-pink-400', 'text-pink-500', 'text-pink-600',
'text-pink-700', 'text-pink-800', 'text-pink-900'],
'text_decoration': ['underline', 'line-through', 'no-underline'],
'text_transform': ['uppercase', 'lowercase', 'capitalize', 'normal-case'],
'vertical_align': ['align-baseline', 'align-top', 'align-middle', 'align-bottom', 'align-text-top',
'align-text-bottom'],
'whitespace': ['whitespace-normal', 'whitespace-no-wrap', 'whitespace-pre', 'whitespace-pre-line',
'whitespace-pre-wrap'],
'word_break': ['break-normal', 'break-words', 'break-all', 'truncate'],
'background_attachment': ['bg-fixed', 'bg-local', 'bg-scroll'],
'background_color': ['bg-transparent', 'bg-current', 'bg-black', 'bg-white', 'bg-gray-50','bg-gray-100',
'bg-gray-200',
'bg-gray-300', 'bg-gray-400', 'bg-gray-500', 'bg-gray-600', 'bg-gray-700',
'bg-gray-800',
'bg-gray-900', 'bg-red-100', 'bg-red-200', 'bg-red-300', 'bg-red-400', 'bg-red-500',
'bg-red-600', 'bg-red-700', 'bg-red-800', 'bg-red-900', 'bg-orange-100',
'bg-orange-200', 'bg-orange-300', 'bg-orange-400', 'bg-orange-500', 'bg-orange-600',
'bg-orange-700', 'bg-orange-800', 'bg-orange-900', 'bg-yellow-100', 'bg-yellow-200',
'bg-yellow-300', 'bg-yellow-400', 'bg-yellow-500', 'bg-yellow-600', 'bg-yellow-700',
'bg-yellow-800', 'bg-yellow-900', 'bg-green-100', 'bg-green-200', 'bg-green-300',
'bg-green-400', 'bg-green-500', 'bg-green-600', 'bg-green-700', 'bg-green-800',
'bg-green-900', 'bg-teal-100', 'bg-teal-200', 'bg-teal-300', 'bg-teal-400',
'bg-teal-500', 'bg-teal-600', 'bg-teal-700', 'bg-teal-800', 'bg-teal-900',
'bg-blue-100', 'bg-blue-200', 'bg-blue-300', 'bg-blue-400', 'bg-blue-500',
'bg-blue-600', 'bg-blue-700', 'bg-blue-800', 'bg-blue-900', 'bg-indigo-100',
'bg-indigo-200', 'bg-indigo-300', 'bg-indigo-400', 'bg-indigo-500', 'bg-indigo-600',
'bg-indigo-700', 'bg-indigo-800', 'bg-indigo-900', 'bg-purple-100', 'bg-purple-200',
'bg-purple-300', 'bg-purple-400', 'bg-purple-500', 'bg-purple-600', 'bg-purple-700',
'bg-purple-800', 'bg-purple-900', 'bg-pink-100', 'bg-pink-200', 'bg-pink-300',
'bg-pink-400', 'bg-pink-500', 'bg-pink-600', 'bg-pink-700', 'bg-pink-800',
'bg-pink-900'],
'background_position': ['bg-bottom', 'bg-center', 'bg-left', 'bg-left-bottom', 'bg-left-top', 'bg-right',
'bg-right-bottom', 'bg-right-top', 'bg-top'],
'background_repeat': ['bg-repeat', 'bg-no-repeat', 'bg-repeat-x', 'bg-repeat-y', 'bg-repeat-round',
'bg-repeat-space'], 'background_size': ['bg-auto', 'bg-cover', 'bg-contain'],
'background_opacity': ['bg-opacity-0', 'bg-opacity-25', 'bg-opacity-50', 'bg-opacity-75', 'bg-opacity-100'],
'text_opacity': ['text-opacity-0', 'text-opacity-25', 'text-opacity-50', 'text-opacity-75', 'text-opacity-100'],
'placeholder_opacity': ['placeholder-opacity-0', 'placeholder-opacity-25', 'placeholder-opacity-50', 'placeholder-opacity-75', 'placeholder-opacity-100'],
'border_opacity': ['border-opacity-0', 'border-opacity-25', 'border-opacity-50', 'border-opacity-75', 'border-opacity-100'],
'border_color': ['border-transparent', 'border-current', 'border-black', 'border-white',
'border-gray-100',
'border-gray-200',
'border-gray-300', 'border-gray-400', 'border-gray-500', 'border-gray-600',
'border-gray-700', 'border-gray-800', 'border-gray-900', 'border-red-100',
'border-red-200',
'border-red-300', 'border-red-400', 'border-red-500', 'border-red-600',
'border-red-700',
'border-red-800', 'border-red-900', 'border-orange-100', 'border-orange-200',
'border-orange-300', 'border-orange-400', 'border-orange-500', 'border-orange-600',
'border-orange-700', 'border-orange-800', 'border-orange-900', 'border-yellow-100',
'border-yellow-200', 'border-yellow-300', 'border-yellow-400', 'border-yellow-500',
'border-yellow-600', 'border-yellow-700', 'border-yellow-800', 'border-yellow-900',
'border-green-100', 'border-green-200', 'border-green-300', 'border-green-400',
'border-green-500', 'border-green-600', 'border-green-700', 'border-green-800',
'border-green-900', 'border-teal-100', 'border-teal-200', 'border-teal-300',
'border-teal-400', 'border-teal-500', 'border-teal-600', 'border-teal-700',
'border-teal-800', 'border-teal-900', 'border-blue-100', 'border-blue-200',
'border-blue-300', 'border-blue-400', 'border-blue-500', 'border-blue-600',
'border-blue-700', 'border-blue-800', 'border-blue-900', 'border-indigo-100',
'border-indigo-200', 'border-indigo-300', 'border-indigo-400', 'border-indigo-500',
'border-indigo-600', 'border-indigo-700', 'border-indigo-800', 'border-indigo-900',
'border-purple-100', 'border-purple-200', 'border-purple-300', 'border-purple-400',
'border-purple-500', 'border-purple-600', 'border-purple-700', 'border-purple-800',
'border-purple-900', 'border-pink-100', 'border-pink-200', 'border-pink-300',
'border-pink-400', 'border-pink-500', 'border-pink-600', 'border-pink-700',
'border-pink-800', 'border-pink-900'],
'border_style': ['border-solid', 'border-dashed', 'border-dotted', 'border-none', 'border-double'],
'border_width': ['border', 'border-0', 'border-2', 'border-4', 'border-8'],
'border_width-t': ['border-t', 'border-t-0', 'border-t-2', 'border-t-4', 'border-t-8'],
'border_width-r': ['border-r', 'border-r-0', 'border-r-2', 'border-r-4', 'border-r-8'],
'border_width-b': ['border-b', 'border-b-0', 'border-b-2', 'border-b-4', 'border-b-8'],
'border_width-l': ['border-l', 'border-l-0', 'border-l-2', 'border-l-4', 'border-l-8'],
'border-radius': ['rounded-none', 'rounded-sm', 'rounded', 'rounded-lg', 'rounded-full'],
'border-radius-t': ['rounded-t-none', 'rounded-t-sm', 'rounded-t', 'rounded-t-lg', 'rounded-t-full'],
'border-radius-r': ['rounded-r-none', 'rounded-r-sm', 'rounded-r', 'rounded-r-lg', 'rounded-r-full'],
'border-radius-b': ['rounded-b-none', 'rounded-b-sm', 'rounded-b', 'rounded-b-lg', 'rounded-b-full'],
'border-radius-l': ['rounded-l-none', 'rounded-l-sm', 'rounded-l', 'rounded-l-lg', 'rounded-l-full'],
'border-radius-tl': ['rounded-tl-none', 'rounded-tl-lg', 'rounded-tl', 'rounded-tl-full',
'rounded-tl-sm'],
'border-radius-tr': ['rounded-tr-none', 'rounded-tr-lg', 'rounded-tr-sm', 'rounded-tr',
'rounded-tr-full'],
'border-radius-br': ['rounded-br-none', 'rounded-br-sm', 'rounded-br', 'rounded-br-lg',
'rounded-br-full'],
'border-radius-bl': ['rounded-bl-none', 'rounded-bl-sm', 'rounded-bl', 'rounded-bl-lg',
'rounded-bl-full'],
'placeholder_color': ['placeholder-transparent', 'placeholder-current', 'placeholder-black',
'placeholder-white',
'placeholder-gray-100', 'placeholder-gray-200', 'placeholder-gray-300',
'placeholder-gray-400', 'placeholder-gray-500',
'placeholder-gray-600', 'placeholder-gray-700', 'placeholder-gray-800',
'placeholder-gray-900', 'placeholder-red-100',
'placeholder-red-200', 'placeholder-red-300', 'placeholder-red-400',
'placeholder-red-500', 'placeholder-red-600',
'placeholder-red-700', 'placeholder-red-800', 'placeholder-red-900',
'placeholder-orange-100', 'placeholder-orange-200',
'placeholder-orange-300', 'placeholder-orange-400', 'placeholder-orange-500',
'placeholder-orange-600', 'placeholder-orange-700',
'placeholder-orange-800', 'placeholder-orange-900', 'placeholder-yellow-100',
'placeholder-yellow-200', 'placeholder-yellow-300',
'placeholder-yellow-400', 'placeholder-yellow-500', 'placeholder-yellow-600',
'placeholder-yellow-700', 'placeholder-yellow-800',
'placeholder-yellow-900', 'placeholder-green-100', 'placeholder-green-200',
'placeholder-green-300', 'placeholder-green-400',
'placeholder-green-500', 'placeholder-green-600', 'placeholder-green-700',
'placeholder-green-800', 'placeholder-green-900',
'placeholder-teal-100', 'placeholder-teal-200', 'placeholder-teal-300',
'placeholder-teal-400', 'placeholder-teal-500',
'placeholder-teal-600', 'placeholder-teal-700', 'placeholder-teal-800',
'placeholder-teal-900', 'placeholder-blue-100',
'placeholder-blue-200', 'placeholder-blue-300', 'placeholder-blue-400',
'placeholder-blue-500', 'placeholder-blue-600',
'placeholder-blue-700', 'placeholder-blue-800', 'placeholder-blue-900',
'placeholder-indigo-100', 'placeholder-indigo-200',
'placeholder-indigo-300', 'placeholder-indigo-400', 'placeholder-indigo-500',
'placeholder-indigo-600', 'placeholder-indigo-700',
'placeholder-indigo-800', 'placeholder-indigo-900', 'placeholder-purple-100',
'placeholder-purple-200', 'placeholder-purple-300',
'placeholder-purple-400', 'placeholder-purple-500', 'placeholder-purple-600',
'placeholder-purple-700', 'placeholder-purple-800',
'placeholder-purple-900', 'placeholder-pink-100', 'placeholder-pink-200',
'placeholder-pink-300', 'placeholder-pink-400',
'placeholder-pink-500', 'placeholder-pink-600', 'placeholder-pink-700',
'placeholder-pink-800', 'placeholder-pink-900'],
'flex_direction': ['flex-row', 'flex-row-reverse', 'flex-col', 'flex-col-reverse'],
'flex_wrap': ['flex-no-wrap', 'flex-wrap', 'flex-wrap-reverse'],
'align_items': ['items-stretch', 'items-start', 'items-center', 'items-end', 'items-baseline'],
'align_content': ['content-start', 'content-center', 'content-end', 'content-between', 'content-around'],
'align_self': ['self-auto', 'self-start', 'self-center', 'self-end', 'self-stretch'],
'justify_content': ['justify-start', 'justify-center', 'justify-end', 'justify-between',
'justify-around'],
'flex': ['flex-initial', 'flex-1', 'flex-auto', 'flex-none'], 'flex_grow': ['flex-grow', 'flex-grow-0'],
'flex_shrink': ['flex-shrink', 'flex-shrink-0'],
'order': ['order-first', 'order-last', 'order-none', 'order-1', 'order-2', 'order-3', 'order-4',
'order-5',
'order-6', 'order-7', 'order-8', 'order-9', 'order-10', 'order-11', 'order-12'],
'padding': ['p-0', 'p-1', 'p-2', 'p-3', 'p-4', 'p-5', 'p-6', 'p-8', 'p-10', 'p-12', 'p-16', 'p-20',
'p-24', 'p-32', 'p-40', 'p-48', 'p-56', 'p-64', 'p-px'],
'padding-y':
['py-0', 'py-1', 'py-2', 'py-3', 'py-4', 'py-5', 'py-6', 'py-8', 'py-10', 'py-12', 'py-16', 'py-20',
'py-24', 'py-32', 'py-40', 'py-48', 'py-56', 'py-64', 'py-px'],
'padding-x':
['px-0', 'px-1', 'px-2', 'px-3', 'px-4', 'px-5', 'px-6', 'px-8', 'px-10',
'px-12', 'px-16', 'px-20', 'px-24', 'px-32', 'px-40', 'px-48', 'px-56', 'px-64', 'px-px'],
'padding-t':
['pt-0', 'pt-1', 'pt-2', 'pt-3', 'pt-4', 'pt-5', 'pt-6', 'pt-8', 'pt-10', 'pt-12', 'pt-16', 'pt-20',
'pt-24', 'pt-32', 'pt-40', 'pt-48', 'pt-56', 'pt-64', 'pt-px'],
'padding-r':
['pr-0', 'pr-1', 'pr-2', 'pr-3', 'pr-4', 'pr-5', 'pr-6', 'pr-8', 'pr-10', 'pr-12', 'pr-16', 'pr-20',
'pr-24', 'pr-32', 'pr-40', 'pr-48', 'pr-56', 'pr-64', 'pr-px'],
'padding-b':
['pb-0', 'pb-1', 'pb-2', 'pb-3', 'pb-4', 'pb-5', 'pb-6', 'pb-8', 'pb-10', 'pb-12', 'pb-16', 'pb-20',
'pb-24', 'pb-32', 'pb-40', 'pb-48', 'pb-56', 'pb-64', 'pb-px'],
'padding-l':
['pl-0', 'pl-1', 'pl-2', 'pl-3', 'pl-4', 'pl-5', 'pl-6', 'pl-8', 'pl-10', 'pl-12',
'pl-16', 'pl-20', 'pl-24', 'pl-32', 'pl-40', 'pl-48', 'pl-56', 'pl-64', 'pl-px'],
'margin': ['m-0', 'm-1', 'm-2', 'm-3', 'm-4', 'm-5', 'm-6', 'm-8', 'm-10', 'm-12', 'm-16', 'm-20',
'm-24', 'm-32', 'm-40', 'm-48', 'm-56', 'm-64', 'm-auto', 'm-px', '-m-1', '-m-2', '-m-3',
'-m-4', '-m-5', '-m-6', '-m-8', '-m-10', '-m-12', '-m-16', '-m-20', '-m-24', '-m-32', '-m-40',
'-m-48', '-m-56', '-m-64', '-m-px'],
'margin-y':
['my-0', 'my-1', 'my-2', 'my-3', 'my-4', 'my-5', 'my-6', 'my-8', 'my-10',
'my-12', 'my-16', 'my-20', 'my-24', 'my-32', 'my-40', 'my-48', 'my-56', 'my-64', 'my-auto',
'my-px', '-my-1', '-my-2', '-my-3', '-my-4', '-my-5', '-my-6', '-my-8', '-my-10', '-my-12',
'-my-16', '-my-20', '-my-24', '-my-32', '-my-40', '-my-48', '-my-56', '-my-64', '-my-px'],
'margin-x':
['mx-0', 'mx-1', 'mx-2', 'mx-3', 'mx-4', 'mx-5', 'mx-6', 'mx-8', 'mx-10', 'mx-12', 'mx-16', 'mx-20',
'mx-24', 'mx-32', 'mx-40', 'mx-48', 'mx-56', 'mx-64', 'mx-auto', 'mx-px', '-mx-1', '-mx-2',
'-mx-3', '-mx-4', '-mx-5', '-mx-6', '-mx-8', '-mx-10', '-mx-12', '-mx-16', '-mx-20', '-mx-24',
'-mx-32', '-mx-40', '-mx-48', '-mx-56', '-mx-64', '-mx-px'],
'margin-t':
['mt-0', 'mt-1', 'mt-2', 'mt-3', 'mt-4', 'mt-5', 'mt-6', 'mt-8', 'mt-10', 'mt-12', 'mt-16', 'mt-20',
'mt-24', 'mt-32', 'mt-40', 'mt-48', 'mt-56', 'mt-64', 'mt-auto', 'mt-px', '-mt-1', '-mt-2', '-mt-3',
'-mt-4', '-mt-5', '-mt-6', '-mt-8', '-mt-10', '-mt-12', '-mt-16', '-mt-20', '-mt-24', '-mt-32',
'-mt-40', '-mt-48', '-mt-56', '-mt-64', '-mt-px'],
'margin-r':
['mr-0', 'mr-1', 'mr-2', 'mr-3', 'mr-4', 'mr-5', 'mr-6', 'mr-8', 'mr-10', 'mr-12', 'mr-16', 'mr-20',
'mr-24', 'mr-32', 'mr-40', 'mr-48', 'mr-56', 'mr-64', 'mr-auto', 'mr-px', '-mr-1', '-mr-2', '-mr-3',
'-mr-4', '-mr-5', '-mr-6', '-mr-8', '-mr-10', '-mr-12', '-mr-16', '-mr-20', '-mr-24', '-mr-32',
'-mr-40', '-mr-48', '-mr-56', '-mr-64', '-mr-px'],
'margin-b':
['mb-0', 'mb-1', 'mb-2', 'mb-3', 'mb-4', 'mb-5', 'mb-6', 'mb-8', 'mb-10', 'mb-12',
'mb-16', 'mb-20', 'mb-24', 'mb-32', 'mb-40', 'mb-48', 'mb-56', 'mb-64', 'mb-auto', 'mb-px',
'-mb-1', '-mb-2', '-mb-3', '-mb-4', '-mb-5', '-mb-6', '-mb-8', '-mb-10', '-mb-12', '-mb-16',
'-mb-20', '-mb-24', '-mb-32', '-mb-40', '-mb-48', '-mb-56', '-mb-64', '-mb-px'],
'margin-l':
['ml-0', 'ml-1',
'ml-2', 'ml-3', 'ml-4', 'ml-5', 'ml-6', 'ml-8', 'ml-10', 'ml-12', 'ml-16', 'ml-20', 'ml-24',
'ml-32', 'ml-40', 'ml-48', 'ml-56', 'ml-64', 'ml-auto', 'ml-px', '-ml-1', '-ml-2', '-ml-3',
'-ml-4', '-ml-5', '-ml-6', '-ml-8', '-ml-10', '-ml-12', '-ml-16', '-ml-20', '-ml-24', '-ml-32',
'-ml-40', '-ml-48', '-ml-56', '-ml-64', '-ml-px'],
'width': ['w-0', 'w-1', 'w-2', 'w-3', 'w-4', 'w-5', 'w-6', 'w-8', 'w-10', 'w-12', 'w-16', 'w-20', 'w-24',
'w-32', 'w-40', 'w-48', 'w-56', 'w-64', 'w-auto', 'w-px', 'w-1/2', 'w-1/3', 'w-2/3', 'w-1/4',
'w-2/4', 'w-3/4', 'w-1/5', 'w-2/5', 'w-3/5', 'w-4/5', 'w-1/6', 'w-2/6', 'w-3/6', 'w-4/6',
'w-5/6',
'w-1/12', 'w-2/12', 'w-3/12', 'w-4/12', 'w-5/12', 'w-6/12', 'w-7/12', 'w-8/12', 'w-9/12',
'w-10/12', 'w-11/12', 'w-full', 'w-screen'],
'min_width': ['min-w-0', 'min-w-full'],
'max_width': ['max-w-xs', 'max-w-sm', 'max-w-md', 'max-w-lg', 'max-w-xl', 'max-w-2xl', 'max-w-3xl',
'max-w-4xl', 'max-w-5xl', 'max-w-6xl', 'max-w-full', 'max-w-screen-sm', 'max-w-screen-md',
'max-w-screen-lg', 'max-w-screen-xl', 'max-w-none'],
'height': ['h-0', 'h-1', 'h-2', 'h-3', 'h-4', 'h-5', 'h-6', 'h-8', 'h-10', 'h-12', 'h-16', 'h-20',
'h-24',
'h-32', 'h-40', 'h-48', 'h-56', 'h-64', 'h-auto', 'h-px', 'h-full', 'h-screen'],
'min_height': ['min-h-0', 'min-h-full', 'min-h-screen'], 'max_height': ['max-h-full', 'max-h-screen'],
'border_collapse': ['border-collapse', 'border-separate'], 'table_layout': ['table-auto', 'table-fixed'],
'box_shadow': ['shadow-xs', 'shadow-sm', 'shadow', 'shadow-md', 'shadow-lg', 'shadow-xl', 'shadow-2xl',
'shadow-inner', 'shadow-outline', 'shadow-none'],
'opacity': ['opacity-100', 'opacity-75', 'opacity-50', 'opacity-25', 'opacity-0'],
'appearance': ['appearance-none'],
'cursor': ['cursor-auto', 'cursor-default', 'cursor-pointer', 'cursor-wait', 'cursor-text',
'cursor-move',
'cursor-not-allowed'], 'outline': ['outline-none'],
'pointer_events': ['pointer-events-none', 'pointer-events-auto'],
'resize': ['resize-none', 'resize', 'resize-y', 'resize-x'],
'user_select': ['select-none', 'select-text', 'select-all', 'select-auto'], 'fill': ['fill-current'],
'stroke': ['stroke-current'],
'transition': ['transition-none', 'transition-all', 'transition', 'transition-colors',
'transition-opacity', 'transition-shadow', 'transition-transform'],
'duration': ['duration-75', 'duration-100', 'duration-150', 'duration-200', 'duration-300',
'duration-500', 'duration-700', 'duration-1000'],
'transition_timing': ['ease-linear', 'ease-in', 'ease-out', 'ease-in-out'],
'transition_delay': ['delay-75', 'delay-100', 'delay-150', 'delay-200', 'delay-300', 'delay-500',
'delay-700', 'delay-1000'],
'scale': ['scale-0', 'scale-50', 'scale-75', 'scale-90', 'scale-95', 'scale-100', 'scale-105',
'scale-110', 'scale-125', 'scale-150',
'scale-y-0', 'scale-y-50', 'scale-y-75', 'scale-y-90', 'scale-y-95', 'scale-y-100',
'scale-y-105', 'scale-y-110', 'scale-y-125', 'scale-y-150'],
'rotate': ['rotate-0', 'rotate-45', 'rotate-90', 'rotate-180', '-rotate-180', '-rotate-90',
'-rotate-45'],
'skew': ['skew-x-0', 'skew-x-3', 'skew-x-6', 'skew-x-12', '-skew-x-12', '-skew-x-6', '-skew-x-3',
'skew-y-0', 'skew-y-3', 'skew-y-6', 'skew-y-12', '-skew-y-12', '-skew-y-6', '-skew-y-3'],
'transform_origin': ['origin-center', 'origin-top', 'origin-top-right', 'origin-right',
'origin-bottom-right', 'origin-bottom', 'origin-bottom-left', 'origin-left',
'origin-top-left'],
'translate': ['translate-x-0', 'translate-x-1', 'translate-x-2', 'translate-x-3', 'translate-x-4',
'translate-x-5', 'translate-x-6', 'translate-x-8', 'translate-x-10', 'translate-x-12',
'translate-x-16', 'translate-x-20', 'translate-x-24', 'translate-x-32', 'translate-x-40',
'translate-x-48', 'translate-x-56', 'translate-x-64', 'translate-x-px', '-translate-x-1',
'-translate-x-2', '-translate-x-3', '-translate-x-4', '-translate-x-5', '-translate-x-6',
'-translate-x-8', '-translate-x-10', '-translate-x-12', '-translate-x-16',
'-translate-x-20', '-translate-x-24', '-translate-x-32', '-translate-x-40',
'-translate-x-48', '-translate-x-56', '-translate-x-64', '-translate-x-px',
'-translate-x-full', '-translate-x-1/2', 'translate-x-1/2', 'translate-x-full',
'translate-y-0', 'translate-y-1', 'translate-y-2', 'translate-y-3', 'translate-y-4',
'translate-y-5', 'translate-y-6', 'translate-y-8', 'translate-y-10', 'translate-y-12',
'translate-y-16', 'translate-y-20', 'translate-y-24', 'translate-y-32', 'translate-y-40',
'translate-y-48', 'translate-y-56', 'translate-y-64', 'translate-y-px', '-translate-y-1',
'-translate-y-2', '-translate-y-3', '-translate-y-4', '-translate-y-5', '-translate-y-6',
'-translate-y-8', '-translate-y-10', '-translate-y-12', '-translate-y-16',
'-translate-y-20', '-translate-y-24', '-translate-y-32', '-translate-y-40',
'-translate-y-48', '-translate-y-56', '-translate-y-64', '-translate-y-px',
'-translate-y-full', '-translate-y-1/2', 'translate-y-1/2', 'translate-y-full'],
'divide_width': ['divide-x-0', 'divide-x-2', 'divide-x-4', 'divide-x-8', 'divide-x', 'divide-y-0',
'divide-y-2', 'divide-y-4', 'divide-y-8', 'divide-y', 'divide-x-reverse',
'divide-y-reverse'],
'divide_color': ['divide-transparent', 'divide-current', 'divide-black', 'divide-white',
'divide-gray-100', 'divide-gray-200', 'divide-gray-300', 'divide-gray-400',
'divide-gray-500', 'divide-gray-600', 'divide-gray-700', 'divide-gray-800',
'divide-gray-900', 'divide-red-100', 'divide-red-200', 'divide-red-300',
'divide-red-400', 'divide-red-500', 'divide-red-600', 'divide-red-700',
'divide-red-800', 'divide-red-900', 'divide-orange-100', 'divide-orange-200',
'divide-orange-300', 'divide-orange-400', 'divide-orange-500', 'divide-orange-600',
'divide-orange-700', 'divide-orange-800', 'divide-orange-900', 'divide-yellow-100',
'divide-yellow-200', 'divide-yellow-300', 'divide-yellow-400', 'divide-yellow-500',
'divide-yellow-600', 'divide-yellow-700', 'divide-yellow-800', 'divide-yellow-900',
'divide-green-100', 'divide-green-200', 'divide-green-300', 'divide-green-400',
'divide-green-500', 'divide-green-600', 'divide-green-700', 'divide-green-800',
'divide-green-900', 'divide-teal-100', 'divide-teal-200', 'divide-teal-300',
'divide-teal-400', 'divide-teal-500', 'divide-teal-600', 'divide-teal-700',
'divide-teal-800', 'divide-teal-900', 'divide-blue-100', 'divide-blue-200',
'divide-blue-300', 'divide-blue-400', 'divide-blue-500', 'divide-blue-600',
'divide-blue-700', 'divide-blue-800', 'divide-blue-900', 'divide-indigo-100',
'divide-indigo-200', 'divide-indigo-300', 'divide-indigo-400', 'divide-indigo-500',
'divide-indigo-600', 'divide-indigo-700', 'divide-indigo-800', 'divide-indigo-900',
'divide-purple-100', 'divide-purple-200', 'divide-purple-300', 'divide-purple-400',
'divide-purple-500', 'divide-purple-600', 'divide-purple-700', 'divide-purple-800',
'divide-purple-900', 'divide-pink-100', 'divide-pink-200', 'divide-pink-300',
'divide-pink-400', 'divide-pink-500', 'divide-pink-600', 'divide-pink-700',
'divide-pink-800', 'divide-pink-900'],
'divide_opacity': ['divide-opacity-0', 'divide-opacity-25', 'divide-opacity-50', 'divide-opacity-75',
'divide-opacity-100'],
'grid_template_cols': ['grid-cols-1', 'grid-cols-2', 'grid-cols-3', 'grid-cols-4', 'grid-cols-5',
'grid-cols-6', 'grid-cols-7', 'grid-cols-8', 'grid-cols-9', 'grid-cols-10',
'grid-cols-11', 'grid-cols-12'],
'grid_col_start_end': ['col-auto', 'col-span-1', 'col-span-2', 'col-span-3', 'col-span-4', 'col-span-5',
'col-span-6', 'col-span-7', 'col-span-8', 'col-span-9', 'col-span-10',
'col-span-11'],
'grid_template_rows': ['grid-rows-1', 'grid-rows-2', 'grid-rows-3', 'grid-rows-4', 'grid-rows-5',
'grid-rows-6', 'grid-rows-none'],
'grid_row_start_end': ['row-auto', 'row-span-1', 'row-span-2', 'row-span-3', 'row-span-4', 'row-span-5',
'row-span-6', 'row-start-1', 'row-start-2', 'row-start-3', 'row-start-4',
'row-start-5', 'row-start-6', 'row-start-7', 'row-start-auto', 'row-end-1',
'row-end-2', 'row-end-3', 'row-end-4', 'row-end-5', 'row-end-6', 'row-end-7',
'row-end-auto'],
'grid_gap': ['gap-0', 'gap-1', 'gap-2', 'gap-3', 'gap-4', 'gap-5', 'gap-6', 'gap-8', 'gap-10', 'gap-12',
'gap-16', 'gap-20', 'gap-24', 'gap-32', 'gap-40', 'gap-48', 'gap-56', 'gap-64', 'gap-px',
'row-gap-0', 'row-gap-1', 'row-gap-2', 'row-gap-3', 'row-gap-4', 'row-gap-5', 'row-gap-6',
'row-gap-8', 'row-gap-10', 'row-gap-12', 'row-gap-16', 'row-gap-20', 'row-gap-24',
'row-gap-32', 'row-gap-40', 'row-gap-48', 'row-gap-56', 'row-gap-64', 'row-gap-px',
'col-gap-0', 'col-gap-1', 'col-gap-2', 'col-gap-3', 'col-gap-4', 'col-gap-5', 'col-gap-6',
'col-gap-8', 'col-gap-10', 'col-gap-12', 'col-gap-16', 'col-gap-20', 'col-gap-24',
'col-gap-32', 'col-gap-40', 'col-gap-48', 'col-gap-56', 'col-gap-64', 'col-gap-px'],
'grid_auto_flow': ['grid-flow-row', 'grid-flow-col', 'grid-flow-row-dense', 'grid-flow-col-dense'],
'space_between': ['space-x-0', 'space-x-1', 'space-x-2', 'space-x-3', 'space-x-4', 'space-x-5',
'space-x-6', 'space-x-8', 'space-x-10', 'space-x-12', 'space-x-16', 'space-x-20',
'space-x-24', 'space-x-32', 'space-x-40', 'space-x-48', 'space-x-56', 'space-x-64',
'space-x-px', '-space-x-1', '-space-x-2', '-space-x-3', '-space-x-4', '-space-x-5',
'-space-x-6', '-space-x-8', '-space-x-10', '-space-x-12', '-space-x-16', '-space-x-20',
'-space-x-24', '-space-x-32', '-space-x-40', '-space-x-48', '-space-x-56',
'-space-x-64', '-space-x-px', 'space-y-0', 'space-y-1', 'space-y-2', 'space-y-3',
'space-y-4', 'space-y-5', 'space-y-6', 'space-y-8', 'space-y-10', 'space-y-12',
'space-y-16', 'space-y-20', 'space-y-24', 'space-y-32', 'space-y-40', 'space-y-48',
'space-y-56', 'space-y-64', 'space-y-px', '-space-y-1', '-space-y-2', '-space-y-3',
'-space-y-4', '-space-y-5', '-space-y-6', '-space-y-8', '-space-y-10', '-space-y-12',
'-space-y-16', '-space-y-20', '-space-y-24', '-space-y-32', '-space-y-40',
'-space-y-48', '-space-y-56', '-space-y-64', '-space-y-px', 'space-x-reverse',
'space-y-reverse'],
'animation': ['animate-none', 'animate-spin', 'animate-ping', 'animate-pulse', 'animate-bounce']
}
@staticmethod
def create_reverse_dict(tw):
d = {}
for k, v in tw.items():
for j in v:
d[j] = k
return d
tw_reverse_dict = create_reverse_dict.__func__(tw_dict)
def set_class(self, tw_class, modifier=''):
if modifier and modifier not in Tailwind.pseudo_classes:
raise Exception(f'No Tailwind pseudo-class (modifier) named {modifier}')
if tw_class not in Tailwind.tw_reverse_dict:
raise Exception(f'No Tailwind class named {tw_class}')
class_list = self.classes.split()
if not modifier:
for i in class_list:
if i in Tailwind.tw_dict[Tailwind.tw_reverse_dict[tw_class]]:
class_list.remove(i)
class_list.append(tw_class)
else:
tw_dict_modified = [f'{modifier}:' + i for i in Tailwind.tw_dict[Tailwind.tw_reverse_dict[tw_class]]]
tw_class_modified = f'{modifier}:{tw_class}'
for i in class_list:
if i in tw_dict_modified:
class_list.remove(i)
class_list.append(tw_class_modified)
self.classes = ' '.join(class_list)
return self.classes
def set_classes(self, class_list):
# Takes a string of tailwind classes and sets them all
for c in class_list.split():
c = c.split(':')
if len(c) > 1:
self.set_class(c[1], c[0])
else:
self.set_class(c[0])
| 1,360 | 37,373 | 23 |
afb045246c03689676286648d41b4087ca091d9b | 776 | py | Python | BettingRestAPI/csgo_api/migrations/0004_auto_20200616_1606.py | PatrickKoss/BettingPrediction | 8082bb89d00d28ade774a445a1645dc07ac86127 | [
"MIT"
] | null | null | null | BettingRestAPI/csgo_api/migrations/0004_auto_20200616_1606.py | PatrickKoss/BettingPrediction | 8082bb89d00d28ade774a445a1645dc07ac86127 | [
"MIT"
] | null | null | null | BettingRestAPI/csgo_api/migrations/0004_auto_20200616_1606.py | PatrickKoss/BettingPrediction | 8082bb89d00d28ade774a445a1645dc07ac86127 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-06-16 14:06
from django.db import migrations, models
| 26.758621 | 82 | 0.585052 | # Generated by Django 3.0.2 on 2020-06-16 14:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``mode`` and per-team confidence columns to ``csgo_api.Match``."""
    dependencies = [
        ('csgo_api', '0003_matchresult'),
    ]
    operations = [
        migrations.AddField(
            model_name='match',
            name='mode',
            # Presumably the match format (default 'bo1' = best of one) —
            # confirm against the Match model definition.
            field=models.CharField(default='bo1', max_length=20),
        ),
        migrations.AddField(
            model_name='match',
            name='team_1_confidence',
            # Existing rows are backfilled with confidence 1.
            field=models.DecimalField(decimal_places=2, default=1, max_digits=10),
        ),
        migrations.AddField(
            model_name='match',
            name='team_2_confidence',
            field=models.DecimalField(decimal_places=2, default=1, max_digits=10),
        ),
    ]
| 0 | 662 | 23 |
3976a2b86f0da08d18bae2e23750f9dbfd07df4f | 1,095 | py | Python | test/fixtures/after/nodes_with_global_param.launch.py | aws-robotics/ros2-launch-file-migrator | 36cfc5ed3bdc6ba4ed64cabb1ceef23c3dc33502 | [
"Apache-2.0"
] | 24 | 2019-12-19T16:00:48.000Z | 2021-12-26T12:06:48.000Z | test/fixtures/after/nodes_with_global_param.launch.py | aws-robotics/ros2-launch-file-migrator | 36cfc5ed3bdc6ba4ed64cabb1ceef23c3dc33502 | [
"Apache-2.0"
] | 2 | 2020-07-07T09:11:26.000Z | 2022-01-21T15:35:45.000Z | test/fixtures/after/nodes_with_global_param.launch.py | aws-robotics/ros2-launch-file-migrator | 36cfc5ed3bdc6ba4ed64cabb1ceef23c3dc33502 | [
"Apache-2.0"
] | 11 | 2020-07-07T09:24:58.000Z | 2021-10-30T02:54:57.000Z | import os
import sys
import launch
import launch_ros.actions
if __name__ == '__main__':
generate_launch_description()
| 26.071429 | 92 | 0.550685 | import os
import sys
import launch
import launch_ros.actions
def generate_launch_description():
    """
    Assemble the launch description: a ``use_sim_time`` launch argument plus
    the memory-consumer and CPU-check nodes, both of which receive the
    ``use_sim_time`` setting as a parameter.
    """
    sim_time_arg = launch.actions.DeclareLaunchArgument(
        name='use_sim_time',
        default_value='true'
    )
    memory_node = launch_ros.actions.Node(
        package='python_launcher',
        node_executable='run_consume_memory.sh',
        node_name='consume_memory',
        output='screen',
        # Shut the whole launch down once this node exits.
        on_exit=launch.actions.Shutdown(),
        parameters=[
            {'use_sim_time': launch.substitutions.LaunchConfiguration('use_sim_time')}
        ]
    )
    cpu_node = launch_ros.actions.Node(
        package='python_launcher',
        node_executable='run_cpu_check.sh',
        node_name='cpu_check',
        parameters=[
            {'use_sim_time': launch.substitutions.LaunchConfiguration('use_sim_time')}
        ]
    )
    return launch.LaunchDescription([sim_time_arg, memory_node, cpu_node])
if __name__ == '__main__':
generate_launch_description()
| 946 | 0 | 23 |
9103fff44b143b91e4d817b027ea2bcadaecc18c | 12,232 | py | Python | functions/slack/blocks.py | harvard-dce/zoom-ingester | ef5c8910e35e84d33f1c612cfb4643309c52040d | [
"Apache-2.0"
] | null | null | null | functions/slack/blocks.py | harvard-dce/zoom-ingester | ef5c8910e35e84d33f1c612cfb4643309c52040d | [
"Apache-2.0"
] | null | null | null | functions/slack/blocks.py | harvard-dce/zoom-ingester | ef5c8910e35e84d33f1c612cfb4643309c52040d | [
"Apache-2.0"
] | null | null | null | from os import getenv as env
from urllib.parse import quote
from datetime import datetime
from pytz import timezone
import json
from utils import (
TIMESTAMP_FORMAT,
retrieve_schedule,
schedule_days,
schedule_match,
PipelineStatus,
ZoomStatus,
)
import logging
logger = logging.getLogger()
STACK_NAME = env("STACK_NAME")
PRETTY_TIMESTAMP_FORMAT = "%A, %B %d, %Y at %-I:%M%p"
SHORT_TIMESTAMP_FORMAT = "%m/%d/%y %-I:%M%p"
LOCAL_TIME_ZONE = env("LOCAL_TIME_ZONE")
OC_CLUSTER_NAME = env("OC_CLUSTER_NAME")
# Slack places an upper limit of 50 UI blocks per message
# so we must limit the number of records per message
# Should be a multiple of RESULTS_PER_REQUEST
MAX_RECORDS_PER_MSG = 6
RESULTS_PER_REQUEST = 2
"""
Slack results blocks
"""
"""
Helpers
"""
"""
Status descriptions
"""
| 28.579439 | 124 | 0.557472 | from os import getenv as env
from urllib.parse import quote
from datetime import datetime
from pytz import timezone
import json
from utils import (
TIMESTAMP_FORMAT,
retrieve_schedule,
schedule_days,
schedule_match,
PipelineStatus,
ZoomStatus,
)
import logging
logger = logging.getLogger()
STACK_NAME = env("STACK_NAME")
PRETTY_TIMESTAMP_FORMAT = "%A, %B %d, %Y at %-I:%M%p"
SHORT_TIMESTAMP_FORMAT = "%m/%d/%y %-I:%M%p"
LOCAL_TIME_ZONE = env("LOCAL_TIME_ZONE")
OC_CLUSTER_NAME = env("OC_CLUSTER_NAME")
# Slack places an upper limit of 50 UI blocks per message
# so we must limit the number of records per message
# Should be a multiple of RESULTS_PER_REQUEST
MAX_RECORDS_PER_MSG = 6
RESULTS_PER_REQUEST = 2
def slack_help_menu_blocks(cmd):
    """Build the Slack Block Kit blocks for the ZIP help menu.

    Parameters
    ----------
    cmd : str
        The slash command name (e.g. "/zip") interpolated into the usage line.
    """
    intro_block = {
        "type": "section",
        "text": {
            "type": "plain_text",
            "text": "These are the available ZIP commands:",
        },
    }
    usage_block = {
        "type": "section",
        "text": {
            "type": "mrkdwn",
            "text": (
                f">`{cmd} [Zoom meeting ID]` See the status of the latest"
                " ZIP ingests with the specified Zoom MID."
            ),
        },
    }
    return [intro_block, usage_block]
def slack_results_blocks(
    mid,
    meeting_status_data,
    newest_start_time=None,
    start_index=0,
    max_results=RESULTS_PER_REQUEST,
    interaction=False,
):
    """Assemble the full Slack message (Block Kit blocks) for a ZIP status query.

    Parameters
    ----------
    mid :
        Zoom meeting ID being queried.
    meeting_status_data : dict or None
        Status record with "topic" and "recordings"; falsy means no recent
        recordings were found for this meeting.
    newest_start_time : str, optional
        Upper bound on recording start times, carried across "next results"
        button presses so paging stays consistent as new recordings arrive.
    start_index : int, optional
        Offset into the sorted recordings list; 0 means a fresh search and
        triggers the header/metadata blocks.
    max_results : int, optional
        Number of recordings to render in this message.
    interaction : bool, optional
        True when responding to a button interaction; shows "End of results."
        when there is nothing more to page through.
    """
    header_blocks = []
    metadata_blocks = []
    ingest_detail_blocks = []
    footer_blocks = []
    schedule = retrieve_schedule(mid)
    # NOTE(review): logged at warning level — looks like leftover debugging; confirm.
    logger.warning(schedule)
    events, opencast_mapping = format_schedule_details(schedule, mid)
    # Beginning of a search, include meeting metadata header
    if start_index == 0:
        if meeting_status_data:
            topic = meeting_status_data["topic"]
        elif schedule:
            topic = f"{schedule['course_code']} Zoom Meeting"
        else:
            topic = f"Zoom Meeting {format_mid(mid)}"
        header_blocks = slack_results_header(topic)
        metadata_blocks = slack_results_metadata(
            meeting_status_data,
            mid,
            schedule,
            opencast_mapping,
            events,
        )
    # No status data at all: short-circuit with a "nothing found" message.
    if not meeting_status_data:
        ingest_detail_blocks = [
            {"type": "divider"},
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "No recent recordings found.",
                },
            },
        ]
        return header_blocks + metadata_blocks + ingest_detail_blocks
    recordings = sorted_filtered_recordings(
        meeting_status_data["recordings"],
        newest_start_time,
    )
    # limit amount and range of results
    more_results = len(recordings) > start_index + max_results
    recordings = recordings[start_index : start_index + max_results]
    for rec in recordings:
        ingest_detail_blocks += ingest_details(rec, schedule)
    footer_blocks = []
    if more_results:
        # Encode the paging state into the "next results" button.
        start_time = recordings[0]["start_time"]
        footer_blocks = more_results_button_blocks(
            recordings,
            mid,
            start_time,
            start_index,
        )
    elif interaction:
        footer_blocks = [
            {"type": "divider"},
            {
                "type": "section",
                "text": {"type": "plain_text", "text": "End of results."},
            },
        ]
    blocks = (
        header_blocks + metadata_blocks + ingest_detail_blocks + footer_blocks
    )
    return blocks
"""
Slack results blocks
"""
def slack_results_header(topic):
    """Return the header blocks (title plus source-stack context line) for a
    results message."""
    title_block = {
        "type": "header",
        "text": {"type": "plain_text", "text": f"{topic}"},
    }
    source_block = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Source: {STACK_NAME}",
            }
        ],
    }
    return [title_block, source_block]
def slack_results_metadata(
    meeting_status,
    mid,
    schedule,
    opencast_mapping,
    events,
):
    """Build the metadata section: the formatted meeting id (only when we know
    something about the meeting) followed by the schedule/events text."""
    events_block = {"type": "section", "text": {"type": "mrkdwn", "text": events}}
    if not (meeting_status or schedule):
        return [events_block]
    mid_text = f"*Zoom Meeting ID:* {format_mid(mid)} {opencast_mapping}"
    return [
        {
            "type": "section",
            "text": {"type": "mrkdwn", "text": mid_text},
        },
        events_block,
    ]
def ingest_details(rec, schedule):
    """Render one recording's ingest history as Slack blocks.

    If the recording is still being made/processed in Zoom (single ingest with
    an in-progress status), only that status line is shown; otherwise every
    ingest attempt is listed, newest first, with a link back to Zoom.

    Parameters
    ----------
    rec : dict
        Recording record; reads "start_time", "zip_ingests" and
        "recording_id".
    schedule : dict or None
        The meeting's ZIP schedule, used to decide whether the recording
        matched a scheduled event.
    """
    pretty_start_time = formatted_local_time(rec["start_time"])
    match = schedule_match(schedule, local_time(rec["start_time"]))
    recordings_ready = True
    # A single ingest may still be in a Zoom-side (pre-pipeline) state.
    if len(rec["zip_ingests"]) == 1:
        recording_status_txt = recording_status_description(
            rec["zip_ingests"][0]
        )
        if recording_status_txt:
            recordings_ready = False
    if recordings_ready:
        # NOTE: local variable shadows this function's name from here on.
        ingest_details = ""
        # Sort ingests from most to least recent
        ingests = sorted(
            rec["zip_ingests"],
            key=lambda r: r["last_updated"],
            reverse=True,
        )
        for ingest in ingests:
            update_time = formatted_local_time(
                ingest["last_updated"],
                format=SHORT_TIMESTAMP_FORMAT,
            )
            on_demand = ingest["origin"] == "on_demand"
            if "ingest_request_time" in ingest:
                request_time = formatted_local_time(
                    ingest["ingest_request_time"]
                )
            else:
                logger.warning(
                    f"Ingest missing ingest_request_time field: {ingest}"
                )
                request_time = "[unknown]"
            if on_demand:
                ingest_details += f"*+Zoom Ingest on {request_time}*\n"
            else:
                ingest_details += f"*Automated Ingest on {request_time}*\n"
            ingest_details += f"> Status: {pipeline_status_description(ingest, on_demand, match)} (updated {update_time})\n"
            if "oc_series_id" in ingest and on_demand:
                ingest_details += f"> :arrow_right: Opencast Series: {ingest['oc_series_id']}\n"
    else:
        ingest_details = f"*Status* : {recording_status_txt}"
    blocks = [
        {"type": "divider"},
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f":movie_camera: *Recording on {pretty_start_time}*\n",
            },
        },
        {
            "type": "section",
            "text": {"type": "mrkdwn", "text": ingest_details},
        },
    ]
    if recordings_ready:
        # Add the link to the processed recordings
        mgmt_url = (
            "https://zoom.us/recording/management/detail?meeting_id="
            f"{quote(rec['recording_id'])}"
        )
        blocks.append(
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"<{mgmt_url}|*View in Zoom*>",
                },
            }
        )
    return blocks
def more_results_button_blocks(recordings, mid, start_time, start_index):
    """Build a divider plus a "next page" button; the button's value JSON
    carries the paging state back to the interaction handler."""
    paging_state = json.dumps(
        {
            "version": 1,
            "mid": mid,
            "newest_start_time": start_time,
            "count": len(recordings),
            "start_index": start_index,
        }
    )
    next_button = {
        "type": "button",
        "text": {
            "type": "plain_text",
            "emoji": True,
            "text": f"Next {RESULTS_PER_REQUEST} Results",
        },
        "value": paging_state,
    }
    return [
        {"type": "divider"},
        {"type": "actions", "elements": [next_button]},
    ]
"""
Helpers
"""
def format_schedule_details(schedule, mid):
    """Return (events_text, opencast_mapping_text) for a meeting's schedule.

    With no schedule, returns a "not configured" message and an empty mapping.
    """
    if not schedule:
        logger.info(f"No matching schedule for mid {mid}")
        return "This Zoom meeting is not configured for ZIP ingests.", ""
    logger.info({"schedule": schedule})
    # One line per scheduled event, e.g. ":clock3: CS50 Lecture on Monday at 9:00AM".
    event_lines = []
    for event in schedule["events"]:
        event_time = datetime.strptime(event["time"], "%H:%M").strftime(
            "%-I:%M%p"
        )
        event_lines.append(
            f":clock3: {schedule['course_code']} {event['title']} "
            f"on {schedule_days[event['day']]} at {event_time}"
        )
    events = "\n".join(event_lines)
    opencast_mapping = f":arrow_right: *Opencast Series:* {schedule['opencast_series_id']}"
    return events, opencast_mapping
def sorted_filtered_recordings(recordings, newest_start_time=None):
    """Sort recordings newest-first, optionally dropping any that started
    after *newest_start_time* (keeps paging consistent across requests)."""
    if newest_start_time:
        recordings = [
            rec for rec in recordings
            if rec["start_time"] <= newest_start_time
        ]
    return sorted(recordings, key=lambda rec: rec["start_time"], reverse=True)
def local_time(ts):
    """Parse a UTC timestamp string (TIMESTAMP_FORMAT) and convert it to the
    configured local time zone."""
    utc_dt = datetime.strptime(ts, TIMESTAMP_FORMAT).replace(
        tzinfo=timezone("UTC")
    )
    return utc_dt.astimezone(timezone(LOCAL_TIME_ZONE))
def formatted_local_time(ts, format=PRETTY_TIMESTAMP_FORMAT):
    """Format a UTC timestamp string in local time using *format*."""
    local_dt = local_time(ts)
    return local_dt.strftime(format)
def format_mid(mid):
    """Render a Zoom meeting id with spaces: 3-3-rest for ids under 11 digits,
    3-4-rest otherwise."""
    digits = str(mid)
    split_at = 6 if len(digits) < 11 else 7
    return f"{digits[:3]} {digits[3:split_at]} {digits[split_at:]}"
"""
Status descriptions
"""
def recording_status_description(ingest_details):
    """Map an in-progress Zoom-side status to a human readable message.

    Returns None when the recording has already finished processing in Zoom
    (i.e. the status is a pipeline status rather than a Zoom status).
    """
    messages = {
        ZoomStatus.RECORDING_IN_PROGRESS.name:
            "Meeting in progress. Currently recording.",
        ZoomStatus.RECORDING_PAUSED.name:
            "Meeting in progress. Recording paused.",
        ZoomStatus.RECORDING_STOPPED.name:
            "Meeting in progress. Recording stopped.",
        ZoomStatus.RECORDING_PROCESSING.name:
            "Meeting finished. Recording files processing in Zoom.",
    }
    return messages.get(ingest_details["status"])
def pipeline_status_description(ingest_details, on_demand=False, match=False):
    """Translate a ZIP pipeline status name into a human readable Slack message.

    Parameters
    ----------
    ingest_details : dict
        Ingest record; reads "status" (a PipelineStatus member name) and, if
        present, appends the free-text "reason" to the message.
    on_demand : bool
        True when the ingest originated from a "+Zoom" on-demand request.
    match : bool
        True when the recording matched the meeting's configured schedule.
    """
    status = ingest_details["status"]
    # Processing
    if status == PipelineStatus.ON_DEMAND_RECEIVED.name:
        status_msg = "Received +Zoom request."
    elif (
        status == PipelineStatus.WEBHOOK_RECEIVED.name
        or status == PipelineStatus.SENT_TO_DOWNLOADER.name
    ):
        if on_demand:
            status_msg = "ZIP received +Zoom request."
        elif match:
            status_msg = "ZIP received scheduled ingest."
        else:
            status_msg = "Ignored by ZIP."
    # Schedule match
    elif status == PipelineStatus.OC_SERIES_FOUND.name:
        status_msg = "Downloading files."
    # Ingesting
    elif status == PipelineStatus.SENT_TO_UPLOADER.name:
        status_msg = "Ready to ingest to Opencast"
    elif status == PipelineStatus.UPLOADER_RECEIVED.name:
        status_msg = "Ingesting to Opencast."
    # Success
    elif status == PipelineStatus.SENT_TO_OPENCAST.name:
        status_msg = ":white_check_mark: Complete. Ingested to Opencast."
    # Ignored
    elif status == PipelineStatus.IGNORED.name:
        status_msg = "Ignored by ZIP."
    # Failures
    elif status == PipelineStatus.WEBHOOK_FAILED.name:
        status_msg = ":exclamation: Error while receiving ZIP request."
    elif status == PipelineStatus.DOWNLOADER_FAILED.name:
        status_msg = ":exclamation: Failed to download files."
    elif status == PipelineStatus.UPLOADER_FAILED.name:
        status_msg = f":exclamation: Failed to ingest to Opencast cluster {OC_CLUSTER_NAME}."
    else:
        # Unknown status: fall back to showing the raw status name.
        status_msg = status
    if "reason" in ingest_details:
        status_msg += f" {ingest_details['reason']}"
    return status_msg
| 11,106 | 0 | 299 |
80cd76881a2897a7c1ed288e554e384c605b5f3f | 513 | py | Python | data/waseem_split.py | mbevila/contextualizing-hate-speech-models-with-explanations | 1ebf0f412da85789378a5980989616f0cf28e4c7 | [
"MIT"
] | null | null | null | data/waseem_split.py | mbevila/contextualizing-hate-speech-models-with-explanations | 1ebf0f412da85789378a5980989616f0cf28e4c7 | [
"MIT"
] | null | null | null | data/waseem_split.py | mbevila/contextualizing-hate-speech-models-with-explanations | 1ebf0f412da85789378a5980989616f0cf28e4c7 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
from argparse import ArgumentParser
import fileinput
parser = ArgumentParser()
parser.add_argument('--kind', choices=['sexism', 'racism'])
args = parser.parse_args()
print('doc_id\ttext\tis_hate')
for i, line in enumerate(fileinput.input('-')):
line = line.strip()
if not line:
continue
_, tweet, cls = line.split('\t')
cls = '1' if cls in (args.kind, 'both') else '0'
print(i, tweet, cls, sep='\t')
| 24.428571 | 63 | 0.580897 |
if __name__ == '__main__':
    from argparse import ArgumentParser
    import fileinput
    # Convert a TSV stream of tweets (id, text, class) on stdin into a binary
    # hate-speech table for the requested class ('both' counts as a match).
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--kind', choices=['sexism', 'racism'])
    opts = arg_parser.parse_args()
    print('doc_id\ttext\tis_hate')
    for doc_id, raw_line in enumerate(fileinput.input('-')):
        raw_line = raw_line.strip()
        if not raw_line:
            continue
        _, tweet_text, label = raw_line.split('\t')
        is_hate = '1' if label in (opts.kind, 'both') else '0'
        print(doc_id, tweet_text, is_hate, sep='\t')
| 0 | 0 | 0 |
d36c9647903a766ff6e16663eba33a653ac8c6e3 | 462 | py | Python | sfaira/versions/topologies/mouse/embedding/__init__.py | theislab/sfaira | 77a7b49936047a0cdddc5ace4482186a868c3a7a | [
"BSD-3-Clause"
] | 110 | 2020-09-08T07:47:15.000Z | 2022-03-29T03:33:56.000Z | sfaira/versions/topologies/mouse/embedding/__init__.py | theislab/sfaira | 77a7b49936047a0cdddc5ace4482186a868c3a7a | [
"BSD-3-Clause"
] | 405 | 2020-09-15T15:05:46.000Z | 2022-03-16T14:44:23.000Z | sfaira/versions/topologies/mouse/embedding/__init__.py | theislab/sfaira | 77a7b49936047a0cdddc5ace4482186a868c3a7a | [
"BSD-3-Clause"
] | 20 | 2021-03-30T15:30:14.000Z | 2022-03-07T12:52:58.000Z | from sfaira.versions.topologies.mouse.embedding.ae import AE_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.linear import LINEAR_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.nmf import NMF_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vae import VAE_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vaeiaf import VAEIAF_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vaevamp import VAEVAMP_TOPOLOGIES
| 66 | 81 | 0.883117 | from sfaira.versions.topologies.mouse.embedding.ae import AE_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.linear import LINEAR_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.nmf import NMF_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vae import VAE_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vaeiaf import VAEIAF_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vaevamp import VAEVAMP_TOPOLOGIES
| 0 | 0 | 0 |
802ef265660e2c5b22d357edf57453d1060efda3 | 2,810 | py | Python | app/api_dap2/dap2.py | NCEI-NOAAGov/zarrdap | 5cff730a62d53ba9e6fb59b2925c1efeb883a6bf | [
"NetCDF"
] | 17 | 2022-02-24T22:06:02.000Z | 2022-03-20T13:25:05.000Z | app/api_dap2/dap2.py | NCEI-NOAAGov/zarrdap | 5cff730a62d53ba9e6fb59b2925c1efeb883a6bf | [
"NetCDF"
] | null | null | null | app/api_dap2/dap2.py | NCEI-NOAAGov/zarrdap | 5cff730a62d53ba9e6fb59b2925c1efeb883a6bf | [
"NetCDF"
] | 3 | 2022-02-24T22:06:09.000Z | 2022-03-20T13:26:04.000Z | from fastapi import APIRouter, Query, Response, Request
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
import json
from pydantic import BaseModel
import s3fs
from typing import Optional, TypedDict
from .backend import *
router = APIRouter()
s3 = s3fs.S3FileSystem(anon=False, client_kwargs={"region_name": "us-east-1"})
HEADERS = {
"XDODS-Server": "opendap/3.7",
"Accept-Ranges": "bytes",
"Connection": "close"
}
@router.get("/parameters", tags=["dap"], description="Query a dataset's properties",
summary="parameters", response_model=DapParameterResponse,
responses={200: {"content": {"application/json": {}}, "description": "Successful Response"}})
@router.get("/{path:path}.das", tags=["dap"], description="Request a DAS response", summary="DAS")
@router.get("/{path:path}.dds", tags=["dap"], description="Request a DDS response", summary="DDS")
@router.get("/{path:path}.dods", tags=["dap"], description="Request a binary response", summary="DODS")
| 37.972973 | 106 | 0.702491 | from fastapi import APIRouter, Query, Response, Request
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
import json
from pydantic import BaseModel
import s3fs
from typing import Optional, TypedDict
from .backend import *
router = APIRouter()
s3 = s3fs.S3FileSystem(anon=False, client_kwargs={"region_name": "us-east-1"})
HEADERS = {
"XDODS-Server": "opendap/3.7",
"Accept-Ranges": "bytes",
"Connection": "close"
}
class DapParameterResponseTypedDict(TypedDict):
    """Shape of the "parameters" payload returned by the /parameters endpoint."""
    # Dataset path exactly as supplied in the "p" query parameter.
    path: str
    # Mapping of variable name -> list of its coordinate names.
    variables: dict
    # Global (dataset-level) attributes.
    attributes: dict
class DapParameterResponse(BaseModel):
    """Response model for GET /parameters: wraps the dataset description."""
    parameters: DapParameterResponseTypedDict
@router.get("/parameters", tags=["dap"], description="Query a dataset's properties",
            summary="parameters", response_model=DapParameterResponse,
            responses={200: {"content": {"application/json": {}}, "description": "Successful Response"}})
async def parameters(p: str) -> JSONResponse:
    """Describe the dataset at path *p*: its variables (with their coordinate
    names) and its global attributes."""
    ds = load_dataset(p)
    description = {
        "path": p,
        "variables": {name: [coord for coord in ds[name].coords] for name in ds.variables},
        "attributes": dict(ds.attrs.items()),
    }
    return JSONResponse(content=jsonable_encoder({"parameters": description}))
@router.get("/{path:path}.das", tags=["dap"], description="Request a DAS response", summary="DAS")
async def opendap_das(path: str, request: Request) -> Response:
    """Serve the Dataset Attribute Structure (DAS) for the dataset at *path*."""
    dataset = load_dataset(path)
    constraints = parse_args(str(request.query_params))
    body = create_das(dataset, constraints)
    headers = {**HEADERS, "Content-Description": "dods-das"}
    return Response(content=body, media_type="text/plain", status_code=200, headers=headers)
@router.get("/{path:path}.dds", tags=["dap"], description="Request a DDS response", summary="DDS")
async def opendap_dds(path: str, request: Request) -> Response:
    """Serve the Dataset Descriptor Structure (DDS) for the dataset at *path*."""
    dataset = load_dataset(path)
    # Dataset name in the DDS is the last component of the requested path.
    dataset_name = request.path_params["path"].split("/")[-1]
    constraints = parse_args(str(request.query_params))
    body = create_dds(dataset, constraints, dataset_name)
    headers = {**HEADERS, "Content-Description": "dods-dds"}
    return Response(content=body, media_type="text/plain", status_code=200, headers=headers)
@router.get("/{path:path}.dods", tags=["dap"], description="Request a binary response", summary="DODS")
async def opendap_dods(path: str, request: Request) -> Response:
    """Serve the binary DAP2 data (DODS) response for the dataset at *path*."""
    dataset = load_dataset(path)
    # Dataset name in the DODS header is the last component of the requested path.
    dataset_name = request.path_params["path"].split("/")[-1]
    constraints = parse_args(str(request.query_params))
    body = create_dods(dataset, constraints, dataset_name)
    headers = {**HEADERS, "Content-Description": "dods-data"}
    return Response(content=body, media_type="application/octet-stream", status_code=200, headers=headers)
| 1,489 | 144 | 134 |
656fc8d586311c4463d81e3bc91f7c2f2c3dc609 | 5,942 | py | Python | src/PlateRecognition.py | rohanabhishek/License-Plate-Recognition | 8a03e46f3026209a8588483dc978fb67508e5232 | [
"BSD-3-Clause"
] | null | null | null | src/PlateRecognition.py | rohanabhishek/License-Plate-Recognition | 8a03e46f3026209a8588483dc978fb67508e5232 | [
"BSD-3-Clause"
] | null | null | null | src/PlateRecognition.py | rohanabhishek/License-Plate-Recognition | 8a03e46f3026209a8588483dc978fb67508e5232 | [
"BSD-3-Clause"
] | 1 | 2020-07-21T10:25:26.000Z | 2020-07-21T10:25:26.000Z | import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import measure
from skimage.measure import regionprops
import matplotlib.patches as patches
from skimage.color import rgb2gray
from skimage.io import imread
from skimage.filters import threshold_otsu
import pytesseract
from PIL import Image
import imutils
from tasks import *
plate_like_objects = []
filename = sys.argv[1]
# Image converted to binary
car_image = imread(filename, as_gray=True)
print(car_image.shape)
gray_car_image = car_image*255
# thershold value obtained using Otsu's method
threshold_value = threshold_otsu(gray_car_image)
binary_car_image = gray_car_image > threshold_value
# get all the connected regions and group them together
label_image = measure.label(binary_car_image)
# constraints on maximum and minimum values on width, height
plate_dimensions = (0.04*label_image.shape[0], 0.5*label_image.shape[0], 0.2*label_image.shape[1], 0.6*label_image.shape[1])
min_height, max_height, min_width, max_width = plate_dimensions
plate_objects_cordinates = []
fig, (ax1) = plt.subplots(1)
ax1.imshow(gray_car_image, cmap="gray")
# regionprops creates a list of properties of all the labelled regions
for region in regionprops(label_image):
if region.area < 50:
#if the region is very small
continue
# the bounding box coordinates
min_row, min_col, max_row, max_col = region.bbox
region_height = max_row - min_row
region_width = max_col - min_col
# checking the conditions of a typical license plate
if region_height >= min_height and region_height <= max_height and region_width >= min_width and region_width <= max_width and region_width > region_height:
plate_like_objects.append(gray_car_image[min_row:max_row,
min_col:max_col])
plate_objects_cordinates.append((min_row, min_col,
max_row, max_col))
rectBorder = patches.Rectangle((min_col, min_row), max_col - min_col, max_row - min_row, edgecolor="red",
linewidth=2, fill=False)
# red rectangular border added
ax1.add_patch(rectBorder)
Cropped = gray_car_image[min_row:max_row, min_col:max_col]
# text = pytesseract.image_to_string(Cropped, config='--psm 11')
# print("Predicted Number by pytessaract : ",text)
plt.show()
modelName = 'my_model.npy'
nn1 = nn.NeuralNetwork(36, 0.001, 200, 10)
nn1.addLayer(FullyConnectedLayer(400, 50, "relu"))
nn1.addLayer(FullyConnectedLayer(50, 36, "softmax"))
model = np.load(modelName,allow_pickle=True)
k,i = 0,0
for l in nn1.layers:
if type(l).__name__ != "AvgPoolingLayer" and type(l).__name__ != "FlattenLayer":
nn1.layers[i].weights = model[k]
nn1.layers[i].biases = model[k+1]
k+=2
i+=1
print("Model Loaded... ")
list_of_plates = [] # list of characters in all paltes
list_of_columns = [] # to re-order characters as they are in LP
for lp in plate_like_objects:
# invert image
license_plate = (255-lp)
# reaply threshold on the extracted region
threshold_value = threshold_otsu(license_plate)
license_plate = license_plate > threshold_value
labelled_plate = measure.label(license_plate)
fig, ax1 = plt.subplots(1)
license_plate = rgb2gray(license_plate)
ax1.imshow(license_plate, cmap="gray")
# character dimension constraints
character_dimensions = (0.3*license_plate.shape[0], 1.0*license_plate.shape[0], 0.01*license_plate.shape[1], 0.6*license_plate.shape[1])
min_height, max_height, min_width, max_width = character_dimensions
characters = []
column_list = []
for regions in regionprops(labelled_plate):
y0, x0, y1, x1 = regions.bbox
region_height = y1 - y0
region_width = x1 - x0
if region_height > min_height and region_height < max_height and region_width > min_width and region_width < max_width:
roi = license_plate[y0:y1, x0:x1]
# draw a red bordered rectangle over the character.
rect_border = patches.Rectangle((x0, y0), x1 - x0, y1 - y0, edgecolor="red",
linewidth=2, fill=False)
ax1.add_patch(rect_border)
# resize the characters to 20X20 and then append each character into the characters list
resized_char = Image.fromarray(roi).resize((20, 20))
characters.append(resized_char)
# to keep track of the arrangement of the characters(based on x-coordinate)
column_list.append(x0)
list_of_plates.append(characters)
list_of_columns.append(column_list)
plt.show()
list_of_numbers = []
for i in range(len(list_of_plates)):
characters = list_of_plates[i]
plate_num = []
for resized_char in characters:
roi = np.array(resized_char)
# reshape to an array as one input
roi = roi.reshape((1,400))
# predict result using neural network
valActivations = nn1.feedforward(roi)
# get the class with highest prediction
pred = np.argmax(valActivations[-1], axis=1)
# check with threshold to remove non-characters
if(valActivations[-1][0][pred]<0.5):
plate_num.append('')
continue
if(pred<10):
plate_num.append(str(pred[0]))
else:
plate_num.append(str(chr(65+pred[0]-10)))
column = np.array(list_of_columns[i])
# sort characters as they are in LP
sort_idx = np.argsort(column)
plate_num = np.array(plate_num)[sort_idx]
# output licence plate number
plate_num = "".join(plate_num)
list_of_numbers.append(plate_num)
print('Predictions - ',end=' ')
print(list_of_numbers)
final_num = sorted(list_of_numbers, key=len)
print('Final Licence plate - ' + final_num[-1]) | 34.546512 | 160 | 0.683608 | import sys
# --- License plate recognition pipeline (flat script) ---
# 1) locate plate-like connected regions in the input image,
# 2) segment character-sized regions from each candidate plate,
# 3) classify each 20x20 character crop with a small neural network loaded
#    from 'my_model.npy', and 4) print the best (longest) plate string.
# Usage: python PlateRecognition.py <image-file>
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import measure
from skimage.measure import regionprops
import matplotlib.patches as patches
from skimage.color import rgb2gray
from skimage.io import imread
from skimage.filters import threshold_otsu
import pytesseract
from PIL import Image
import imutils
# NOTE(review): nn and FullyConnectedLayer presumably come from this star
# import of the local `tasks` module — confirm.
from tasks import *
plate_like_objects = []
filename = sys.argv[1]
# Image converted to binary
car_image = imread(filename, as_gray=True)
print(car_image.shape)
gray_car_image = car_image*255
# thershold value obtained using Otsu's method
threshold_value = threshold_otsu(gray_car_image)
binary_car_image = gray_car_image > threshold_value
# get all the connected regions and group them together
label_image = measure.label(binary_car_image)
# constraints on maximum and minimum values on width, height
plate_dimensions = (0.04*label_image.shape[0], 0.5*label_image.shape[0], 0.2*label_image.shape[1], 0.6*label_image.shape[1])
min_height, max_height, min_width, max_width = plate_dimensions
plate_objects_cordinates = []
fig, (ax1) = plt.subplots(1)
ax1.imshow(gray_car_image, cmap="gray")
# regionprops creates a list of properties of all the labelled regions
for region in regionprops(label_image):
    if region.area < 50:
        #if the region is very small
        continue
    # the bounding box coordinates
    min_row, min_col, max_row, max_col = region.bbox
    region_height = max_row - min_row
    region_width = max_col - min_col
    # checking the conditions of a typical license plate
    if region_height >= min_height and region_height <= max_height and region_width >= min_width and region_width <= max_width and region_width > region_height:
        plate_like_objects.append(gray_car_image[min_row:max_row,
                                  min_col:max_col])
        plate_objects_cordinates.append((min_row, min_col,
                                        max_row, max_col))
        rectBorder = patches.Rectangle((min_col, min_row), max_col - min_col, max_row - min_row, edgecolor="red",
                                       linewidth=2, fill=False)
        # red rectangular border added
        ax1.add_patch(rectBorder)
        Cropped = gray_car_image[min_row:max_row, min_col:max_col]
        # text = pytesseract.image_to_string(Cropped, config='--psm 11')
        # print("Predicted Number by pytessaract : ",text)
plt.show()
# Build the classifier (36 classes: digits 0-9 + letters A-Z) and load the
# pretrained weight/bias pairs for each fully connected layer.
modelName = 'my_model.npy'
nn1 = nn.NeuralNetwork(36, 0.001, 200, 10)
nn1.addLayer(FullyConnectedLayer(400, 50, "relu"))
nn1.addLayer(FullyConnectedLayer(50, 36, "softmax"))
model = np.load(modelName,allow_pickle=True)
k,i = 0,0
for l in nn1.layers:
    if type(l).__name__ != "AvgPoolingLayer" and type(l).__name__ != "FlattenLayer":
        nn1.layers[i].weights = model[k]
        nn1.layers[i].biases = model[k+1]
        k+=2
    i+=1
print("Model Loaded... ")
list_of_plates = [] # list of characters in all paltes
list_of_columns = [] # to re-order characters as they are in LP
for lp in plate_like_objects:
    # invert image
    license_plate = (255-lp)
    # reaply threshold on the extracted region
    threshold_value = threshold_otsu(license_plate)
    license_plate = license_plate > threshold_value
    labelled_plate = measure.label(license_plate)
    fig, ax1 = plt.subplots(1)
    license_plate = rgb2gray(license_plate)
    ax1.imshow(license_plate, cmap="gray")
    # character dimension constraints
    character_dimensions = (0.3*license_plate.shape[0], 1.0*license_plate.shape[0], 0.01*license_plate.shape[1], 0.6*license_plate.shape[1])
    min_height, max_height, min_width, max_width = character_dimensions
    characters = []
    column_list = []
    for regions in regionprops(labelled_plate):
        y0, x0, y1, x1 = regions.bbox
        region_height = y1 - y0
        region_width = x1 - x0
        if region_height > min_height and region_height < max_height and region_width > min_width and region_width < max_width:
            roi = license_plate[y0:y1, x0:x1]
            # draw a red bordered rectangle over the character.
            rect_border = patches.Rectangle((x0, y0), x1 - x0, y1 - y0, edgecolor="red",
                                            linewidth=2, fill=False)
            ax1.add_patch(rect_border)
            # resize the characters to 20X20 and then append each character into the characters list
            resized_char = Image.fromarray(roi).resize((20, 20))
            characters.append(resized_char)
            # to keep track of the arrangement of the characters(based on x-coordinate)
            column_list.append(x0)
    list_of_plates.append(characters)
    list_of_columns.append(column_list)
    plt.show()
# Classify the segmented characters of every candidate plate.
list_of_numbers = []
for i in range(len(list_of_plates)):
    characters = list_of_plates[i]
    plate_num = []
    for resized_char in characters:
        roi = np.array(resized_char)
        # reshape to an array as one input
        roi = roi.reshape((1,400))
        # predict result using neural network
        valActivations = nn1.feedforward(roi)
        # get the class with highest prediction
        pred = np.argmax(valActivations[-1], axis=1)
        # check with threshold to remove non-characters
        if(valActivations[-1][0][pred]<0.5):
            plate_num.append('')
            continue
        # classes 0-9 are digits, 10-35 map to letters 'A'-'Z'
        if(pred<10):
            plate_num.append(str(pred[0]))
        else:
            plate_num.append(str(chr(65+pred[0]-10)))
    column = np.array(list_of_columns[i])
    # sort characters as they are in LP
    sort_idx = np.argsort(column)
    plate_num = np.array(plate_num)[sort_idx]
    # output licence plate number
    plate_num = "".join(plate_num)
    list_of_numbers.append(plate_num)
print('Predictions - ',end=' ')
print(list_of_numbers)
final_num = sorted(list_of_numbers, key=len)
print('Final Licence plate - ' + final_num[-1])
87cd4f9c50ca4bd5548353f5ba0c0cf7fe38746a | 29,795 | py | Python | mripy/paraproc.py | herrlich10/mripy | df9a8e57a21163579af49c59a9dcd2da279cb9fa | [
"MIT"
] | 3 | 2020-08-05T10:18:59.000Z | 2022-01-19T08:28:16.000Z | mripy/paraproc.py | herrlich10/mripy | df9a8e57a21163579af49c59a9dcd2da279cb9fa | [
"MIT"
] | null | null | null | mripy/paraproc.py | herrlich10/mripy | df9a8e57a21163579af49c59a9dcd2da279cb9fa | [
"MIT"
] | 4 | 2020-08-19T05:06:16.000Z | 2021-02-03T09:53:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 herrlich10@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys, os, shlex, time, textwrap, re
import subprocess, multiprocessing, queue, threading, ctypes, uuid
import numpy as np
__author__ = 'herrlich10 <herrlich10@gmail.com>'
__version__ = '0.1.7'
# The following are copied from six
# =================================
if sys.version_info[0] == 3:
string_types = (str,)
from io import StringIO
else:
string_types = (basestring,)
from StringIO import StringIO
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    This is the canonical ``six.add_metaclass`` implementation (the inner
    ``wrapper`` function was missing here, making ``return wrapper`` a
    NameError): it rebuilds the decorated class with *metaclass*, working
    under both Python 2 and 3 syntax.
    """
    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        slots = orig_vars.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            # Slot descriptors are recreated by the metaclass call; keeping
            # them in orig_vars would raise a ValueError.
            for slots_var in slots:
                orig_vars.pop(slots_var)
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper
# =================================
def format_duration(duration, format='standard'):
    '''Format duration (in seconds) in a more human friendly way.

    *format* may be 'standard' (default), 'short' (e.g. "1h 5.000s") or
    'long' (e.g. "1 hours 5.000 seconds"). Leading zero-valued units are
    omitted and seconds are always shown with millisecond precision.
    '''
    if format == 'short':
        suffixes = ['d', 'h', 'm', 's']
    elif format == 'long':
        suffixes = [' days', ' hours', ' minutes', ' seconds']
    else:  # Assume 'standard'
        suffixes = [' day', ' hr', ' min', ' sec']
    days, remainder = divmod(duration, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    values = [int(days), int(hours), int(minutes), seconds]
    # Index of the most significant non-zero unit; render everything from there.
    first = next((k for k, v in enumerate(values) if v > 0), len(values) - 1)
    parts = []
    for k in range(first, len(values)):
        spec = '%.3f' if k == len(values) - 1 else '%d'
        parts.append((spec % values[k]) + suffixes[k])
    return ' '.join(parts)
def cmd_for_exec(cmd, shell=False):
    ''' Format cmd appropriately for execution according to whether shell=True.
    Split a cmd string into a list, if shell=False.
    Join a cmd list into a string, if shell=True.
    Do nothing to callable.

    Parameters
    ----------
    cmd : str, list, or callable
    shell : bool or dict
        A dict (e.g. a kwargs mapping) is treated as truthy iff it carries a
        truthy 'shell' entry.
    '''
    if isinstance(shell, dict):
        shell = bool(shell.get('shell'))
    if callable(cmd):
        return cmd
    if shell:
        # A command string is required for shell execution.
        return cmd if isinstance(cmd, string_types) else ' '.join(cmd)
    # A command list is required; split by space, preserving quoted substrings.
    return shlex.split(cmd) if isinstance(cmd, string_types) else cmd
def cmd_for_disp(cmd):
    '''Format cmd for printing.

    Parameters
    ----------
    cmd : str, list, or callable
    '''
    if callable(cmd):
        return cmd
    # Normalize to a token list (dropping insignificant whitespace), then
    # re-quote each token for safe display.
    tokens = shlex.split(cmd) if isinstance(cmd, string_types) else cmd
    return ' '.join(shlex.quote(token) for token in tokens)
ERROR_PATTERN = r'error|^\*{2}\s'
def check_output_for_errors(output, error_pattern=None, error_whitelist=None, verbose=1, label=''):
'''
User can skip error checking by setting error_pattern=''
'''
if error_pattern is None:
error_pattern = ERROR_PATTERN
n_errors = 0
if error_pattern != '': # User can skip error checking by setting error_pattern=''
if isinstance(error_pattern, string_types): # User can provide compiled regex if case sensitivity is desired
error_pattern = re.compile(error_pattern, re.IGNORECASE)
if isinstance(error_whitelist, string_types):
error_whitelist = re.compile(error_whitelist, re.IGNORECASE)
for line in output:
if error_pattern.search(line) and (error_whitelist is None or not error_whitelist.search(line)):
if verbose > 0:
print(label, line, end='')
n_errors += 1
return n_errors
def run(cmd, check=True, error_pattern=None, error_whitelist=None, goal_pattern=None, shell=False, verbose=2):
    '''Run an external command line and capture its output.

    This function is similar to subprocess.run introduced in Python 3.5, but
    provides a slightly simpler and perhaps more convenient API.

    Parameters
    ----------
    cmd : str or list
    check : bool
        Raise RuntimeError on non-zero return code, detected error lines,
        or a missing goal pattern.
    verbose : int
        0 = silent, 1 = echo command and timing, 2 = also echo child output.

    Returns
    -------
    dict
        With keys: cmd, pid, output (decoded lines), start_time, stop_time,
        returncode.
    '''
    cmd = cmd_for_exec(cmd, shell=shell)
    cmd_str = cmd_for_disp(cmd)
    if verbose > 0:
        print('>>', cmd_str)
    # Merge stderr into stdout so one stream captures everything
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell)
    res = {'cmd': cmd_str, 'pid': p.pid, 'output': [], 'start_time': time.time()}
    # b'' is the sentinel: readline() yields it only at EOF
    for raw_line in iter(p.stdout.readline, b''):
        decoded = raw_line.decode('utf-8')
        res['output'].append(decoded)
        if verbose > 1:
            print(decoded, end='')
    p.stdout.close()  # Notify the child process that the PIPE has been broken
    res['returncode'] = p.wait()
    res['stop_time'] = time.time()
    if verbose > 0:
        print('>> Command finished in {0}.'.format(format_duration(res['stop_time'] - res['start_time'])))
    if check:
        # Short-circuit: the error scan is skipped when the return code already failed
        if res['returncode'] or check_output_for_errors(res['output'], error_pattern=error_pattern,
            error_whitelist=error_whitelist, verbose=verbose):
            print('>> Please pay attention to the above errors.')
            raise RuntimeError(f'Error occurs when executing the following command (returncode={p.returncode}):\n{cmd_str}')
        if not check_output_for_goal(res['output'], goal_pattern=goal_pattern):
            raise RuntimeError(f'Expected goal pattern "{goal_pattern}" does not found! Something must be wrong!')
    return res
# References to the standard streams as they are at import time.
# NOTE(review): presumably kept so output can still reach the console while
# sys.stdout / sys.stderr are temporarily substituted -- confirm at use sites.
STDOUT = sys.stdout
STDERR = sys.stderr
class PooledCaller(object):
    '''
    Execute multiple command line programs, as well as python callables,
    asynchronously and parallelly across a pool of processes.
    '''
    def run(self, cmd, *args, _depends=None, _retry=None, _dispatch=False, _error_pattern=None, _error_whitelist=None, _suppress_warning=False, _block=False, **kwargs):
        '''Asynchronously run command or callable (queued execution, return immediately).
        See subprocess.Popen() for more information about the arguments.
        Multiple commands can be separated with ";" and executed sequentially
        within a single subprocess in linux/mac, only if shell=True.
        Python callable can also be executed in parallel via multiprocessing.
        Note that although return values of the callable are retrieved via PIPE,
        sometimes it could be advantageous to directly save the computation
        results into a shared file (e.g., an HDF5 file), esp. when they're large.
        In the later case, a proper lock mechanism via multiprocessing.Lock()
        is required.

        Parameters
        ----------
        cmd : list, str, or callable
            Computation in command line programs is handled with subprocess.
            Computation in python callable is handled with multiprocessing.
        shell : bool
            If provided, must be a keyword argument.
            If shell is True, the command will be executed through the shell.
        *args :
            If cmd is a callable, `*args` are passed to the callable as its arguments.
        **kwargs :
            If cmd is a callable, `**kwargs` are passed to the callable as its keyword arguments.
            If cmd is a list or str, `**kwargs` are passed to subprocess.Popen().
        _depends : list
            A list of jobs (identified by their uuid) that have to be done
            before this job can be scheduled.
        _retry: int
            Number of retry before accepting failure (if detecting non-zero return code).
        _dispatch : bool
            Dispatch the job immediately, which will run in the background without blocking.
        _error_pattern : str
        _error_whitelist : str
            Lines matching this pattern are not counted as errors.
        _suppress_warning : bool
        _block : bool
            if True, call wait() internally and block.

        Returns
        -------
        _uuid : str
            The uuid of current job (which can be used as future jobs' dependency)
        '''
        cmd = cmd_for_exec(cmd, shell=kwargs)
        _uuid = uuid.uuid4().hex[:8]
        if _retry is None:
            _retry = 0
        # Queue a 10-element job tuple; this layout must stay in sync with the
        # retry re-queue in wait() and the unpacking in dispatch().
        self.cmd_queue.append((self._n_cmds, cmd, args, kwargs, _uuid, _depends, _retry,
            _error_pattern, _error_whitelist, _suppress_warning))
        self._n_cmds += 1 # Accumulate by each call to run(), and reset after wait()
        if _dispatch:
            self.dispatch()
        if _block:
            self.wait()
        return _uuid
    def wait(self, pool_size=None, return_codes=False, return_jobs=False):
        '''
        Wait for all jobs in the queue to finish.

        Parameters
        ----------
        pool_size : int or 'balanced'
            Temporarily override self.pool_size for this batch of jobs;
            'balanced' sizes the pool so each volley has roughly equal job count.
        return_codes : bool
        return_jobs : bool

        Returns
        -------
        return_values : list
            Return values of executed python callable. Always `None` for command.
        codes : list (only when return_codes=True)
            The return code of the child process for each job.
        jobs : list (only when return_jobs=True)
            Detailed information about each child process, including captured stdout and stderr.
        '''
        if isinstance(pool_size, string_types) and pool_size == 'balanced':
            # Make sure each volley has roughly equal number of jobs
            n = len(self.cmd_queue)
            pool_size = int(np.ceil(n/np.ceil(n/self.pool_size)))
        if pool_size is not None:
            # Allow temporally adjust pool_size for current batch of jobs
            old_size = self.pool_size
            self.pool_size = pool_size
        start_time = time.time()
        ress = []
        while len(self.ps) > 0 or len(self.cmd_queue) > 0:
            # Dispatch jobs if possible
            self.dispatch()
            # Poll workers' state
            # NOTE(review): self.ps is mutated (remove) while being iterated;
            # skipped workers are picked up on the next while-iteration, but
            # consider iterating a copy if this ever matters.
            for p in self.ps:
                job = self._pid2job[p.pid]
                if isinstance(p, subprocess.Popen):
                    if p.poll() is not None: # If the process is terminated
                        job['stop_time'] = time.time()
                        job['returncode'] = p.returncode
                        job['speed_up'].set()
                        job['watcher'].join() # Retrieve all remaining output before closing PIPE
                        p.stdout.close() # Notify the child process that the PIPE has been broken
                        self.ps.remove(p)
                        if self.verbose > 0:
                            print('>> job#{0} finished (return {1}) in {2}.'.format(job['idx'], job['returncode'], format_duration(job['stop_time']-job['start_time'])))
                        if job['returncode'] != 0: # Failed
                            if job['retry'] > 0: # Need retry
                                # Insert a new cmd (as if we automatically run it again)
                                # BUGFIX: include job['error_whitelist'] so the re-queued
                                # tuple has the same 10-element layout produced by run();
                                # the previous 9-element tuple did not match it.
                                self.cmd_queue.append((self._n_cmds, job['cmd'], job['args'], job['kwargs'], job['uuid'],
                                    job['depends'], job['retry']-1, job['error_pattern'], job['error_whitelist'], job['suppress_warning']))
                                job['successor'] = self._n_cmds
                                self._n_cmds += 1
                            else: # No more retry, accept failure...
                                raise RuntimeError(f">> job#{job['idx']} failed!\n Full output:\n {''.join(job['output'])}")
                        else: # Successful
                            self.res_queue.put([job['idx'], None]) # Return None to mimic callable behavior
                        self._fulfilled[job['uuid']] = job['log_idx'] # Marked as fulfilled, even with error (TODO: or shall I break all??)
                        # These helper objects may not be useful for the end users
                        for key in ['watcher', 'speed_up', 'args', 'kwargs']:
                            job.pop(key)
                    else:
                        pass
                # elif isinstance(p, multiprocessing.Process):
                elif isinstance(p, self.ctx.Process):
                    if not p.is_alive(): # If the process is terminated
                        job['stop_time'] = time.time()
                        job['returncode'] = p.exitcode # subprocess.Popen and multiprocessing.Process use different names for this
                        self.ps.remove(p)
                        if self.verbose > 0:
                            print('>> job#{0} finished (return {1}) in {2}.'.format(job['idx'], job['returncode'], format_duration(job['stop_time']-job['start_time'])))
                        # TODO: retry mechanism for callable
                        self._fulfilled[job['uuid']] = job['log_idx'] # Marked as fulfilled
                        # Remove potentially very large data
                        for key in ['args', 'kwargs']:
                            job.pop(key)
                    else:
                        pass
            time.sleep(0.1)
            # Dequeuing, see https://stackoverflow.com/questions/10028809/maximum-size-for-multiprocessing-queue-item
            self._async_get_res(ress)
        # Handle return values by callable cmd
        while not self.res_queue.empty():
            self._async_get_res(ress)
        ress = [res[1] for res in sorted(ress, key=lambda res: res[0])]
        # Handle return codes by children processes
        jobs = sorted([job for job in self._pid2job.values() if job['successor'] is None], key=lambda job: job['idx'])
        codes = [job['returncode'] for job in jobs]
        if self.verbose > 0:
            duration = time.time() - start_time
            print('>> All {0} jobs done in {1}.'.format(self._n_cmds, format_duration(duration)))
            if np.any(codes):
                print('returncodes: {0}'.format(codes))
                first_error = np.nonzero(codes)[0][0]
                print(f">> Output for job#{first_error} was as follows:\n------------------------------")
                print(jobs[first_error]['output'])
            else:
                print('all returncodes are 0.')
            if self.all_successful(jobs=jobs):
                print('>> All {0} jobs finished successfully.'.format(len(jobs)))
            else:
                print('>> Please pay attention to the above errors.')
        # Reset object states
        self._n_cmds = 0
        self._idx2pid = {}
        self._pid2job = {}
        if pool_size is not None:
            self.pool_size = old_size
        res = (ress,) + ((codes,) if return_codes else ()) + ((jobs,) if return_jobs else ())
        if len(res) == 1:
            return res[0]
        else:
            return res
class ArrayWrapper(type):
    '''
    This is the metaclass for classes that wrap an np.ndarray and delegate
    non-reimplemented operators (among other magic functions) to the wrapped array.
    '''
    # NOTE(review): as visible here the metaclass adds no behavior beyond
    # `type`; the dunder-delegation described in the docstring is presumably
    # implemented elsewhere in this file -- confirm before relying on it.
# TODO: 1. Use ctx instead of multiprocessing. 2. Use multiprocessing.shared_memory
@add_metaclass(ArrayWrapper)  # six-style decorator: apply ArrayWrapper as the metaclass
class SharedMemoryArray(object):
    '''
    An np.ndarray look-alike whose data buffer lives in shared memory
    (it shows up under Cached Files in a memory monitor).

    The buffer is allocated with multiprocessing.Array, then viewed through a
    numpy.ndarray without copying the data, so instances can be passed across
    processes with no duplication; write access can be lock-synchronized.
    It is therefore both convenient and efficient with multiprocessing.
    The ArrayWrapper metaclass delegates non-reimplemented magic methods to
    the wrapped array, demonstrating composition + metaclass as an
    alternative to the canonical multiple inheritance.
    '''
    @classmethod
    def zeros(cls, shape, dtype=float, lock=True):
        '''
        Return a new array of given shape and dtype, filled with zeros.

        Preferred constructor: it never holds two copies of the (potentially
        very large) data in memory at the same time.
        '''
        return cls(dtype, shape, lock=lock)
    @classmethod
    def from_array(cls, arr, lock=True):
        '''
        Initialize a new shared-memory array as a copy of an existing array.
        '''
        # Allocate zeros first, then fill in place: more efficient than
        # handing the raveled data to the constructor.
        shared = cls.zeros(arr.shape, dtype=arr.dtype, lock=lock)
        shared[:] = arr
        return shared
# Names of the lock-related methods exposed by multiprocessing.Array.
# NOTE(review): presumably used to forward these attributes from the wrapper
# to the underlying shared Array -- confirm at the (unseen) use site.
_SHARED_ARR_ATTRIBUTES = ['acquire', 'release', 'get_lock']
# At present, only numerical dtypes are supported.
# Maps python scalar types and numpy dtypes to the corresponding ctypes type
# (presumably used when allocating the multiprocessing.Array buffer backing
# SharedMemoryArray -- confirm at the allocation site).
dtype2ctypes = {
    bool: ctypes.c_bool,
    int: ctypes.c_long,
    float: ctypes.c_double,
    np.dtype('bool'): ctypes.c_bool,
    np.dtype('int64'): ctypes.c_long,
    np.dtype('int32'): ctypes.c_int,
    np.dtype('int16'): ctypes.c_short,
    np.dtype('int8'): ctypes.c_byte,
    np.dtype('uint64'): ctypes.c_ulong,
    np.dtype('uint32'): ctypes.c_uint,
    np.dtype('uint16'): ctypes.c_ushort,
    np.dtype('uint8'): ctypes.c_ubyte,
    np.dtype('float64'): ctypes.c_double,
    np.dtype('float32'): ctypes.c_float,
}
| 48.211974 | 185 | 0.606209 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 herrlich10@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys, os, shlex, time, textwrap, re
import subprocess, multiprocessing, queue, threading, ctypes, uuid
import numpy as np
__author__ = 'herrlich10 <herrlich10@gmail.com>'
__version__ = '0.1.7'
# The following are copied from six
# =================================
if sys.version_info[0] == 3:
string_types = (str,)
from io import StringIO
else:
string_types = (basestring,)
from StringIO import StringIO
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
# =================================
def format_duration(duration, format='standard'):
'''Format duration (in seconds) in a more human friendly way.
'''
if format == 'short':
units = ['d', 'h', 'm', 's']
elif format == 'long':
units = [' days', ' hours', ' minutes', ' seconds']
else: # Assume 'standard'
units = [' day', ' hr', ' min', ' sec']
values = [int(duration//86400), int(duration%86400//3600), int(duration%3600//60), duration%60]
for K in range(len(values)): # values[K] would be the first non-zero value
if values[K] > 0:
break
formatted = ((('%d' if k<len(values)-1 else '%.3f') % values[k]) + units[k] for k in range(len(values)) if k >= K)
return ' '.join(formatted)
def cmd_for_exec(cmd, shell=False):
''' Format cmd appropriately for execution according to whether shell=True.
Split a cmd string into a list, if shell=False.
Join a cmd list into a string, if shell=True.
Do nothing to callable.
Parameters
----------
cmd : str, list, or callable
shell : bool
'''
# If shell=kwargs, its true value is inferred.
if isinstance(shell, dict):
shell = ('shell' in shell and shell['shell'])
if not callable(cmd):
if shell: # cmd string is required
if not isinstance(cmd, string_types):
cmd = ' '.join(cmd)
else: # cmd list is required
if isinstance(cmd, string_types):
cmd = shlex.split(cmd) # Split by space, preserving quoted substrings
return cmd
def cmd_for_disp(cmd):
'''Format cmd for printing.
Parameters
----------
cmd : str, list, or callable
'''
if not callable(cmd):
if isinstance(cmd, string_types):
cmd = shlex.split(cmd) # Remove insignificant whitespaces
cmd = ' '.join(shlex.quote(s) for s in cmd)
return cmd
ERROR_PATTERN = r'error|^\*{2}\s'
def check_output_for_errors(output, error_pattern=None, error_whitelist=None, verbose=1, label=''):
'''
User can skip error checking by setting error_pattern=''
'''
if error_pattern is None:
error_pattern = ERROR_PATTERN
n_errors = 0
if error_pattern != '': # User can skip error checking by setting error_pattern=''
if isinstance(error_pattern, string_types): # User can provide compiled regex if case sensitivity is desired
error_pattern = re.compile(error_pattern, re.IGNORECASE)
if isinstance(error_whitelist, string_types):
error_whitelist = re.compile(error_whitelist, re.IGNORECASE)
for line in output:
if error_pattern.search(line) and (error_whitelist is None or not error_whitelist.search(line)):
if verbose > 0:
print(label, line, end='')
n_errors += 1
return n_errors
def check_output_for_goal(output, goal_pattern=None):
if goal_pattern is None:
return True
if isinstance(goal_pattern, string_types): # User can provide compiled regex if case sensitivity is desired
goal_pattern = re.compile(goal_pattern, re.IGNORECASE)
for line in output:
if goal_pattern.search(line):
return True
return False
def run(cmd, check=True, error_pattern=None, error_whitelist=None, goal_pattern=None, shell=False, verbose=2):
'''Run an external command line.
This function is similar to subprocess.run introduced in Python 3.5, but
provides a slightly simpler and perhaps more convenient API.
Parameters
----------
cmd : str or list
'''
cmd = cmd_for_exec(cmd, shell=shell)
cmd_str = cmd_for_disp(cmd)
if verbose > 0:
print('>>', cmd_str)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell)
res = {'cmd': cmd_str, 'pid': p.pid, 'output': [], 'start_time': time.time()}
for line in iter(p.stdout.readline, b''): # The 2nd argument is sentinel character (there will be no ending empty line)
res['output'].append(line.decode('utf-8'))
if verbose > 1:
print(res['output'][-1], end='')
p.stdout.close() # Notify the child process that the PIPE has been broken
res['returncode'] = p.wait()
res['stop_time'] = time.time()
if verbose > 0:
print('>> Command finished in {0}.'.format(format_duration(res['stop_time'] - res['start_time'])))
if check and (res['returncode'] or check_output_for_errors(res['output'], error_pattern=error_pattern,
error_whitelist=error_whitelist, verbose=verbose)):
print('>> Please pay attention to the above errors.')
raise RuntimeError(f'Error occurs when executing the following command (returncode={p.returncode}):\n{cmd_str}')
if check and not check_output_for_goal(res['output'], goal_pattern=goal_pattern):
raise RuntimeError(f'Expected goal pattern "{goal_pattern}" does not found! Something must be wrong!')
return res
STDOUT = sys.stdout
STDERR = sys.stderr
class TeeOut(StringIO):
def __init__(self, err=False, tee=True):
super().__init__()
self.err = err
self.tee = tee
def write(self, s):
super().write(s)
if self.err: # Always output error message
STDERR.write(s)
elif self.tee:
STDOUT.write(s)
class PooledCaller(object):
'''
Execute multiple command line programs, as well as python callables,
asynchronously and parallelly across a pool of processes.
'''
def __init__(self, pool_size=None, verbose=1):
self.ctx = multiprocessing.get_context('fork')
if pool_size is None:
# self.pool_size = multiprocessing.cpu_count() * 3 // 4
self.pool_size = self.ctx.cpu_count() * 3 // 4
else:
self.pool_size = pool_size
self.verbose = verbose
self.ps = []
self.cmd_queue = [] # Queue for commands and callables, as well as any additional args
self._n_cmds = 0 # Auto increased counter for generating cmd idx
self._idx2pid = {}
self._pid2job = {} # Hold all jobs for each wait()
self._log = [] # Hold all jobs across waits (entire execution history for this PooledCaller instance)
self._fulfilled = {} # Fulfilled dependencies across waits (a faster API compared with self._log)
# self.res_queue = multiprocessing.Queue() # Queue for return values of executed python callables
self.res_queue = self.ctx.Queue() # Queue for return values of executed python callables
def run(self, cmd, *args, _depends=None, _retry=None, _dispatch=False, _error_pattern=None, _error_whitelist=None, _suppress_warning=False, _block=False, **kwargs):
'''Asynchronously run command or callable (queued execution, return immediately).
See subprocess.Popen() for more information about the arguments.
Multiple commands can be separated with ";" and executed sequentially
within a single subprocess in linux/mac, only if shell=True.
Python callable can also be executed in parallel via multiprocessing.
Note that although return values of the callable are retrieved via PIPE,
sometimes it could be advantageous to directly save the computation
results into a shared file (e.g., an HDF5 file), esp. when they're large.
In the later case, a proper lock mechanism via multiprocessing.Lock()
is required.
Parameters
----------
cmd : list, str, or callable
Computation in command line programs is handled with subprocess.
Computation in python callable is handled with multiprocessing.
shell : bool
If provided, must be a keyword argument.
If shell is True, the command will be executed through the shell.
*args :
If cmd is a callable, `*args` are passed to the callable as its arguments.
**kwargs :
If cmd is a callable, `**kwargs` are passed to the callable as its keyword arguments.
If cmd is a list or str, `**kwargs` are passed to subprocess.Popen().
_depends : list
A list of jobs (identified by their uuid) that have to be done
before this job can be scheduled.
_retry: int
Number of retry before accepting failure (if detecting non-zero return code).
_dispatch : bool
Dispatch the job immediately, which will run in the background without blocking.
_error_pattern : str
_suppress_warning : bool
_block : bool
if True, call wait() internally and block.
Returns
-------
_uuid : str
The uuid of current job (which can be used as future jobs' dependency)
'''
cmd = cmd_for_exec(cmd, shell=kwargs)
_uuid = uuid.uuid4().hex[:8]
if _retry is None:
_retry = 0
self.cmd_queue.append((self._n_cmds, cmd, args, kwargs, _uuid, _depends, _retry,
_error_pattern, _error_whitelist, _suppress_warning))
self._n_cmds += 1 # Accumulate by each call to run(), and reset after wait()
if _dispatch:
self.dispatch()
if _block:
self.wait()
return _uuid
def run1(self, cmd, *args, _error_pattern=None, _error_whitelist=None, _suppress_warning=False, **kwargs):
self.run(cmd, *args, _error_pattern=_error_pattern, _error_whitelist=_error_whitelist,
_suppress_warning=_suppress_warning, **kwargs)
return self.wait()
def _callable_wrapper(self, idx, cmd, *args, **kwargs):
out = TeeOut(tee=(self.verbose > 1))
err = TeeOut(err=True)
sys.stdout = out # This substitution only affect spawned process
sys.stderr = err
res = None # Initialized in case of exception
try:
res = cmd(*args, **kwargs)
except Exception as e:
print('>> Error occurs in job#{0}'.format(idx), file=err)
print('** ERROR:', e, file=err) # AFNI style error message
raise e # Re-raise and let parent process to handle it
finally:
# Grab all output at the very end of the process (assume that there aren't too much of them)
# TODO: This could be a potential bug...
# https://ryanjoneil.github.io/posts/2014-02-14-capturing-stdout-in-a-python-child-process.html
output = out.getvalue().splitlines(True) + err.getvalue().splitlines(True)
self.res_queue.put([idx, res, output]) # Communicate return value and output (Caution: The underlying pipe has limited size. Have to get() soon in wait().)
def _async_reader(self, idx, f, output_list, speed_up, suppress_warning=False):
while True: # We can use event to tell the thread to stop prematurely, as demonstrated in https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread
line = f.readline()
line = line.decode('utf-8')
if line: # This is not lock protected, because only one thread (i.e., this thread) is going to write
output_list.append(line)
if (line.startswith('*') or line.startswith('\x1b[7m')) and not suppress_warning: # Always print AFNI style WARNING and ERROR through stderr unless explicitly suppressed
# '\x1b[7m' and '\x1b[0m' are 'reverse' and 'reset' respectively (https://gist.github.com/abritinthebay/d80eb99b2726c83feb0d97eab95206c4)
print('>> Something happens in job#{0}'.format(idx), file=sys.stderr)
print(line, end='', file=sys.stderr)
elif self.verbose > 1:
print(line, end='')
else: # Empty line signifies the end of the spawned process
break
if not speed_up.is_set():
time.sleep(0.1) # Don't need to poll for output too aggressively during run time
def dispatch(self):
# If there are free slot and more jobs
# while len(self.ps) < self.pool_size and len(self.cmd_queue) > 0:
if len(self.ps) < self.pool_size and len(self.cmd_queue) > 0:
idx, cmd, args, kwargs, _uuid, _depends, _retry, _error_pattern, _error_whitelist, _suppress_warning = self.cmd_queue.pop(0)
if _depends is None or all([dep in self._fulfilled for dep in _depends]): # No dependency or all fulfilled
# Create a job process only after it is popped from the queue
job = {'idx': idx, 'cmd': cmd, 'args': args, 'kwargs': kwargs, 'uuid': _uuid,
'depends': _depends, 'retry': _retry, 'error_pattern': _error_pattern, 'error_whitelist': _error_whitelist,
'suppress_warning': _suppress_warning, 'output': []}
if self.verbose > 0:
print('>> job#{0}: {1}'.format(idx, cmd_for_disp(job['cmd'])))
if callable(cmd):
# TODO: Add an if-else branch here if shared memory doesn't work for wrapper
# p = multiprocessing.Process(target=self._callable_wrapper, args=(idx, cmd) + args, kwargs=kwargs)
p = self.ctx.Process(target=self._callable_wrapper, args=(idx, cmd) + args, kwargs=kwargs)
p.start()
else:
# Use PIPE to capture output and error message
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs)
# Capture output without blocking (the main thread) by using a separate thread to do the blocking readline()
job['speed_up'] = threading.Event()
job['watcher'] = threading.Thread(target=self._async_reader, args=(idx, p.stdout,
job['output'], job['speed_up'], job['suppress_warning']), daemon=True)
job['watcher'].start()
self.ps.append(p)
job['start_time'] = time.time()
job['pid'] = p.pid
job['successor'] = None
job['log_idx'] = len(self._log)
self._idx2pid[idx] = p.pid
self._pid2job[p.pid] = job
self._log.append(job)
else: # Re-queue the job whose dependencies are not fully fulfilled to the END of the queue
self.cmd_queue.append((idx, cmd, args, kwargs, _uuid, _depends, _retry, _error_pattern, _error_whitelist, _suppress_warning))
def _async_get_res(self, res_list):
try:
res = self.res_queue.get(block=False) # idx, return_value, output
except queue.Empty:
pass
else:
res_list.append(res[:2])
if len(res) > 2: # For callable only
job = self._pid2job[self._idx2pid[res[0]]]
job['output'] = res[2]
def wait(self, pool_size=None, return_codes=False, return_jobs=False):
'''
Wait for all jobs in the queue to finish.
Returns
-------
return_values : list
Return values of executed python callable. Always `None` for command.
codes : list (only when return_codes=True)
The return code of the child process for each job.
jobs : list (only when return_jobs=True)
Detailed information about each child process, including captured stdout and stderr.
'''
if isinstance(pool_size, string_types) and pool_size == 'balanced':
# Make sure each volley has roughly equal number of jobs
n = len(self.cmd_queue)
pool_size = int(np.ceil(n/np.ceil(n/self.pool_size)))
if pool_size is not None:
# Allow temporally adjust pool_size for current batch of jobs
old_size = self.pool_size
self.pool_size = pool_size
start_time = time.time()
ress = []
while len(self.ps) > 0 or len(self.cmd_queue) > 0:
# Dispatch jobs if possible
self.dispatch()
# Poll workers' state
for p in self.ps:
job = self._pid2job[p.pid]
if isinstance(p, subprocess.Popen):
if p.poll() is not None: # If the process is terminated
job['stop_time'] = time.time()
job['returncode'] = p.returncode
job['speed_up'].set()
job['watcher'].join() # Retrieve all remaining output before closing PIPE
p.stdout.close() # Notify the child process that the PIPE has been broken
self.ps.remove(p)
if self.verbose > 0:
print('>> job#{0} finished (return {1}) in {2}.'.format(job['idx'], job['returncode'], format_duration(job['stop_time']-job['start_time'])))
if job['returncode'] != 0: # Failed
if job['retry'] > 0: # Need retry
# Insert a new cmd (as if we automatically run it again)
self.cmd_queue.append((self._n_cmds, job['cmd'], job['args'], job['kwargs'], job['uuid'],
job['depends'], job['retry']-1, job['error_pattern'], job['suppress_warning']))
job['successor'] = self._n_cmds
self._n_cmds += 1
else: # No more retry, accept failure...
raise RuntimeError(f">> job#{job['idx']} failed!\n Full output:\n {''.join(job['output'])}")
else: # Successful
self.res_queue.put([job['idx'], None]) # Return None to mimic callable behavior
self._fulfilled[job['uuid']] = job['log_idx'] # Marked as fulfilled, even with error (TODO: or shall I break all??)
# These helper objects may not be useful for the end users
for key in ['watcher', 'speed_up', 'args', 'kwargs']:
job.pop(key)
else:
pass
# elif isinstance(p, multiprocessing.Process):
elif isinstance(p, self.ctx.Process):
if not p.is_alive(): # If the process is terminated
job['stop_time'] = time.time()
job['returncode'] = p.exitcode # subprocess.Popen and multiprocessing.Process use different names for this
self.ps.remove(p)
if self.verbose > 0:
print('>> job#{0} finished (return {1}) in {2}.'.format(job['idx'], job['returncode'], format_duration(job['stop_time']-job['start_time'])))
# TODO: retry mechanism for callable
self._fulfilled[job['uuid']] = job['log_idx'] # Marked as fulfilled
# Remove potentially very large data
for key in ['args', 'kwargs']:
job.pop(key)
else:
pass
time.sleep(0.1)
# Dequeuing, see https://stackoverflow.com/questions/10028809/maximum-size-for-multiprocessing-queue-item
self._async_get_res(ress)
# Handle return values by callable cmd
while not self.res_queue.empty():
self._async_get_res(ress)
ress = [res[1] for res in sorted(ress, key=lambda res: res[0])]
# Handle return codes by children processes
jobs = sorted([job for job in self._pid2job.values() if job['successor'] is None], key=lambda job: job['idx'])
codes = [job['returncode'] for job in jobs]
if self.verbose > 0:
duration = time.time() - start_time
print('>> All {0} jobs done in {1}.'.format(self._n_cmds, format_duration(duration)))
if np.any(codes):
print('returncodes: {0}'.format(codes))
first_error = np.nonzero(codes)[0][0]
print(f">> Output for job#{first_error} was as follows:\n------------------------------")
print(jobs[first_error]['output'])
else:
print('all returncodes are 0.')
if self.all_successful(jobs=jobs):
print('>> All {0} jobs finished successfully.'.format(len(jobs)))
else:
print('>> Please pay attention to the above errors.')
# Reset object states
self._n_cmds = 0
self._idx2pid = {}
self._pid2job = {}
if pool_size is not None:
self.pool_size = old_size
res = (ress,) + ((codes,) if return_codes else ()) + ((jobs,) if return_jobs else ())
if len(res) == 1:
return res[0]
else:
return res
def all_successful(self, jobs=None, verbose=None):
if jobs is None:
jobs = self._log
if verbose is None:
verbose = self.verbose
# Check return codes
all_zero = not np.any([job['returncode'] for job in jobs])
# Check output
n_errors = sum([check_output_for_errors(job['output'], error_pattern=job['error_pattern'],
error_whitelist=job['error_whitelist'], verbose=verbose, label='[job#{0}]'.format(job['idx']))
for job in jobs])
return all_zero and n_errors == 0
def idss(self, total, batch_size=None):
if batch_size is None:
batch_size = int(np.ceil(total / self.pool_size / 10))
return (range(k, min(k+batch_size, total)) for k in range(0, total, batch_size))
def __call__(self, job_generator, **kwargs):
# This is similar to the joblib.Parallel signature, which is the only way to
# pass both args and kwargs for inner execution.
# >>> pc(pc.run(f"3dvolreg -prefix ... {func}{run}.nii") for run in runs)
#
# It also allows each call to deal with a batch of jobs for better performance,
# if the callable is purposely designed to do so, which is especially useful
# when there are a huge amount of small jobs.
# >>> pc(pc.run(compute_depth, ids, *args) for ids in pc.idss(len(depths)))
n_jobs = 0
for _ in job_generator: # Queue all jobs from the generator
n_jobs += 1
if self.verbose > 0:
print('>> Start with a total of {0} jobs...'.format(n_jobs))
return self.wait(**kwargs) # Wait all jobs to finish
class ArrayWrapper(type):
    '''
    Metaclass for classes that wrap an np.ndarray (stored as ``self.arr``) and
    forward every non-reimplemented magic method to the wrapped array.

    Implementation notes
    --------------------
    1. Magic methods are looked up on the *type*, not the instance, so each
       forwarded name must be installed on the class itself.
    2. Each forwarded name is a method (non-data) descriptor -- a property with
       a getter only -- whose getter returns the corresponding attribute of
       ``self.arr``. A plain lambda stored on the class would not behave like
       a bound magic method. See https://docs.python.org/3.6/howto/descriptor.html
       and https://stackoverflow.com/questions/9057669/
    3. The wrapped array must be named ``self.arr``.
    '''
    def __init__(cls, name, bases, dct):
        type.__init__(cls, name, bases, dct)
        # Machinery dunders that must never be delegated to the wrapped array.
        skipped = set('__{0}__'.format(n) for n in
                      'class mro new init setattr getattr getattribute'.split())
        for attr in dir(np.ndarray):
            if not attr.startswith('__'):
                continue
            if attr in skipped or attr in dct:
                continue
            # Bind `attr` through a default argument to dodge the classic
            # late-binding closure pitfall inside this loop.
            setattr(cls, attr,
                    property(lambda self, _name=attr: getattr(self.arr, _name)))
# TODO: 1. Use ctx instead of multiprocessing. 2. Use multiprocessing.shared_memory
@add_metaclass(ArrayWrapper) # Compatibility code from six
class SharedMemoryArray(object):
    '''
    This class can be used as a usual np.ndarray, but its data buffer
    is allocated in shared memory (under Cached Files in memory monitor),
    and can be passed across processes without any data copy/duplication,
    even when write access happens (which is lock-synchronized).
    The idea is to allocate memory using multiprocessing.Array, and
    access it from current or another process via a numpy.ndarray view,
    without actually copying the data.
    So it is both convenient and efficient when used with multiprocessing.
    This implementation also demonstrates the power of composition + metaclass,
    as opposed to the canonical multiple inheritance.
    '''
    def __init__(self, dtype, shape, initializer=None, lock=True):
        '''
        Parameters
        ----------
        dtype : anything np.dtype() accepts; must also be a key of dtype2ctypes
        shape : tuple of ints (the ndarray view is reshaped to this)
        initializer : optional flat sequence used to fill the buffer;
            None preallocates a zero-filled buffer of prod(shape) elements
        lock : if True, the buffer is wrapped in a process-safe lock and the
            acquire/release/get_lock API (see __getattr__) becomes available
        '''
        self.dtype = np.dtype(dtype)
        self.shape = shape
        if initializer is None:
            # Preallocate memory using multiprocessing is the preferred usage
            self.shared_arr = multiprocessing.Array(self.dtype2ctypes[self.dtype], int(np.prod(self.shape)), lock=lock)
        else:
            self.shared_arr = multiprocessing.Array(self.dtype2ctypes[self.dtype], initializer, lock=lock)
        # self.arr is the zero-copy ndarray view over the shared buffer; the
        # ArrayWrapper metaclass delegates magic methods to this attribute,
        # so its name must stay exactly `arr`.
        if not lock:
            # Without a lock, multiprocessing.Array returns the raw ctypes array.
            self.arr = np.frombuffer(self.shared_arr, dtype=self.dtype).reshape(self.shape)
        else:
            # With a lock, the ctypes array sits behind a synchronized wrapper,
            # so the underlying buffer is fetched with get_obj().
            self.arr = np.frombuffer(self.shared_arr.get_obj(), dtype=self.dtype).reshape(self.shape)
    @classmethod
    def zeros(cls, shape, dtype=float, lock=True):
        '''
        Return a new array of given shape and dtype, filled with zeros.
        This is the preferred usage, which avoids holding two copies of the
        potentially very large data simultaneously in the memory.
        '''
        return cls(dtype, shape, lock=lock)
    @classmethod
    def from_array(cls, arr, lock=True):
        '''
        Initialize a new shared-memory array with an existing array.
        '''
        # return cls(arr.dtype, arr.shape, arr.ravel(), lock=lock) # Slow and memory inefficient, why?
        a = cls.zeros(arr.shape, dtype=arr.dtype, lock=lock)
        a[:] = arr # This is a more efficient way of initialization
        return a
    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails: route the lock API
        # to the shared Array wrapper, everything else (e.g. sum, mean, ravel)
        # to the wrapped ndarray view.
        if attr in self._SHARED_ARR_ATTRIBUTES:
            return getattr(self.shared_arr, attr)
        else:
            return getattr(self.arr, attr)
    def __dir__(self):
        # Advertise instance attributes, the lock API, and the full ndarray API.
        return list(self.__dict__.keys()) + self._SHARED_ARR_ATTRIBUTES + dir(self.arr)
    # Lock-related names delegated to self.shared_arr; only meaningful when
    # the object was constructed with lock=True.
    _SHARED_ARR_ATTRIBUTES = ['acquire', 'release', 'get_lock']
    # At present, only numerical dtypes are supported.
    # NOTE(review): ctypes.c_long is 32-bit on Windows (LLP64), so the
    # int64/uint64 mappings are platform-dependent -- confirm if Windows
    # support matters.
    dtype2ctypes = {
        bool: ctypes.c_bool,
        int: ctypes.c_long,
        float: ctypes.c_double,
        np.dtype('bool'): ctypes.c_bool,
        np.dtype('int64'): ctypes.c_long,
        np.dtype('int32'): ctypes.c_int,
        np.dtype('int16'): ctypes.c_short,
        np.dtype('int8'): ctypes.c_byte,
        np.dtype('uint64'): ctypes.c_ulong,
        np.dtype('uint32'): ctypes.c_uint,
        np.dtype('uint16'): ctypes.c_ushort,
        np.dtype('uint8'): ctypes.c_ubyte,
        np.dtype('float64'): ctypes.c_double,
        np.dtype('float32'): ctypes.c_float,
    }
| 11,301 | 2 | 473 |
c4705393f4cea9ea9aeb47aec1c1aacf321481b1 | 336 | py | Python | assetman/tornadoutils/RequestHandler.py | ASSETIO/Assetman | af6bd4cfef68b913801523cadd70fc37fd006b7b | [
"Apache-2.0"
] | 8 | 2015-08-04T20:47:46.000Z | 2021-07-30T03:32:48.000Z | assetman/tornadoutils/RequestHandler.py | ASSETIO/Assetman | af6bd4cfef68b913801523cadd70fc37fd006b7b | [
"Apache-2.0"
] | 3 | 2015-05-20T14:58:22.000Z | 2021-12-21T20:20:57.000Z | assetman/tornadoutils/RequestHandler.py | ASSETIO/Assetman | af6bd4cfef68b913801523cadd70fc37fd006b7b | [
"Apache-2.0"
] | 4 | 2015-05-20T14:52:33.000Z | 2020-11-23T17:31:39.000Z | from assetman import AssetManager
| 37.333333 | 99 | 0.741071 | from assetman import AssetManager
class AssetmanMixin(object):
    '''Mixin exposing an assetman-backed static_url() on a request handler.

    Expects the host object to provide ``self.settings['assetman_settings']``
    (presumably a tornado RequestHandler, judging by the module path -- TODO
    confirm).
    NOTE(review): __init__ never calls super().__init__ and ignores *args --
    confirm the intended MRO when this is mixed into a handler class.
    '''
    def __init__(self, *args, **kwargs):
        assetman_settings = self.settings['assetman_settings']
        self.asset_manager = AssetManager("", settings=assetman_settings, **kwargs)

    def static_url(self, path, include_host=None):
        '''Resolve `path` to its asset URL via the AssetManager.'''
        manager = self.asset_manager
        return manager.static_url(path, include_host=include_host)
| 218 | 7 | 76 |
1030908208c568a2e99fa01559e8eec5cf51ef22 | 308 | py | Python | home/urls.py | nicolasmontenegro/emprenred | 906ea24e8b6357b109e24c140fd92fc24bb33e79 | [
"Apache-2.0"
] | null | null | null | home/urls.py | nicolasmontenegro/emprenred | 906ea24e8b6357b109e24c140fd92fc24bb33e79 | [
"Apache-2.0"
] | null | null | null | home/urls.py | nicolasmontenegro/emprenred | 906ea24e8b6357b109e24c140fd92fc24bb33e79 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
url(r'^login/$', auth_views.login, {'template_name': 'auth/login.html'}, name='login'),
url(r'^logout/$', auth_views.logout, name='logout'),
url(r'^$', views.home, name='Home'),
] | 30.8 | 88 | 0.691558 | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
# URL routes: named login/logout (Django contrib.auth function-based views)
# and the project home page.
# NOTE(review): function-based `auth_views.login`/`logout` were deprecated in
# Django 1.11 and removed in 2.1 -- confirm the pinned Django version.
urlpatterns = [
    url(r'^login/$', auth_views.login, {'template_name': 'auth/login.html'}, name='login'),
    url(r'^logout/$', auth_views.logout, name='logout'),
    url(r'^$', views.home, name='Home'),
] | 0 | 0 | 0 |
bbf49e60c7e2598d524afa3c9e6b498a5951e4b3 | 1,551 | py | Python | karma.py | bayerj/karma | 5a8415caa6c3d86f46ae098ee8430de6fd6a49a8 | [
"BSD-3-Clause"
] | null | null | null | karma.py | bayerj/karma | 5a8415caa6c3d86f46ae098ee8430de6fd6a49a8 | [
"BSD-3-Clause"
] | null | null | null | karma.py | bayerj/karma | 5a8415caa6c3d86f46ae098ee8430de6fd6a49a8 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import random
import numpy as np
| 22.478261 | 63 | 0.589942 | # -*- coding: utf-8 -*-
import random
import numpy as np
class Agent(object):
    '''Abstract decision maker: maps observed states (and rewards) to actions.'''
    def __init__(self, n_state, n_action):
        # Sizes of the state and action spaces.
        self.n_state, self.n_action = n_state, n_action
    def action(self, state, reward=None):
        '''Choose an action for `state`; `reward` is the payoff of the previous
        step. No-op in this base class.'''
        pass
    def reset(self):
        '''Clear any per-episode internal state. No-op in this base class.'''
        pass
class Environment(object):
    '''Abstract environment: records state/action space sizes and defines the
    enter/transit protocol implemented by subclasses.'''
    def __init__(self, n_state, n_action):
        # Sizes of the state and action spaces.
        self.n_state, self.n_action = n_state, n_action
    def enter(self):
        '''Return an initial state. No-op in this base class.'''
        pass
    def transit(self, action):
        '''Apply `action` and return the outcome. No-op in this base class.'''
        pass
class ContextBanditEnvironment(Environment):
    '''Contextual bandit over edible/poisonous items (mushroom-style task,
    presumably -- TODO confirm).

    Expects `self.X` (context rows) and `self.Z` (edibility labels, indexed in
    lockstep with X) to be assigned externally; __init__ does not set them.
    `action.argmax() == 0` is interpreted as "eat".
    '''
    def enter(self):
        '''Sample a uniformly random context row, remembering its index.'''
        self.last_idx = random.choice(range(self.X.shape[0]))
        return self.X[self.last_idx]
    def transit(self, action):
        '''Score `action` against the last sampled context, then sample a new
        one. Returns (new_state, reward, regret).'''
        # Reward for last state.
        edible = self.Z[self.last_idx]
        eat = action.argmax() == 0
        if eat:
            if edible:
                reward, regret = 5, 0
            else:
                # Eating a poisonous item: 50/50 between nothing and -35.
                reward = 0 if np.random.randint(0, 2) == 0 else -35
                regret = -reward
        else:
            # Declining costs nothing; regret is the missed payoff, if any.
            reward = 0
            regret = 5 if edible else 0
        new_state = self.enter()
        return new_state, reward, regret
def rollout(env, agent, n_timesteps):
    '''Run `agent` in `env` for `n_timesteps` steps.

    Resets the agent, enters the environment, and yields one transition tuple
    (prev_state, action, reward, next_state, regret) per step. The first
    action is chosen with a previous reward of 0.
    '''
    agent.reset()
    prev_state, prev_reward = env.enter(), 0
    for _step in range(n_timesteps):
        act = agent.action(prev_state, prev_reward)
        next_state, reward, regret = env.transit(act)
        yield prev_state, act, reward, next_state, regret
        prev_state, prev_reward = next_state, reward
| 1,152 | 27 | 308 |