hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e23831c5c1e4e4aaba38057ace81941b540c3a57 | 1,159 | py | Python | push.py | mikofski/dulwichPorcelain | 2e4aa751ed70f9c4167de5e8aa5297b5cc6f583f | [
"BSD-2-Clause"
] | 4 | 2015-07-13T17:47:51.000Z | 2017-09-10T02:57:07.000Z | push.py | mikofski/dulwichPorcelain | 2e4aa751ed70f9c4167de5e8aa5297b5cc6f583f | [
"BSD-2-Clause"
] | null | null | null | push.py | mikofski/dulwichPorcelain | 2e4aa751ed70f9c4167de5e8aa5297b5cc6f583f | [
"BSD-2-Clause"
] | null | null | null | from dulwich.repo import Repo
from dulwich.client import get_transport_and_path
import sys
def push(remote_url, repo_path='.'):
    """
    Push to a remote repository

    :param remote_url: <str> url of remote repository
    :param repo_path: <str> path of local repository
    :return refs: <dict> dictionary of ref-sha pairs
    """
    client, path = get_transport_and_path(remote_url)
    r = Repo(repo_path)
    objsto = r.object_store
    refs = r.get_refs()
    def update_refs(old):
        # TODO: Too complicated, not necessary to find the refs that
        # differ - it's fine to update a ref even if it already exists.
        # TODO: Also error out if there are non-fast forward updates
        # Refs present on both sides whose SHAs differ (need updating).
        same = set(refs).intersection(old)
        new = {k: refs[k] for k in same if refs[k] != old[k]}
        # Remaining local refs (not already in `new`), excluding HEAD.
        dfky = set(refs) - set(new)
        dfrnt = {k: refs[k] for k in dfky if k != 'HEAD'}
        # BUG FIX: on Python 3, dict.items() returns a view and views
        # cannot be concatenated with `+` (the old
        # `dict(new.items() + dfrnt.items())` raises TypeError).
        # The two dicts have disjoint keys, so a plain merge is equivalent.
        merged = dict(dfrnt)
        merged.update(new)
        return merged
    return client.send_pack(path,
                            update_refs,
                            objsto.generate_pack_contents,
                            sys.stdout.write)
| 38.633333 | 71 | 0.612597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 396 | 0.341674 |
e239b2ed1a202d8696559f1fb4f8dfa7181c561f | 711 | py | Python | test/test_regularization.py | rentainhe/glasses | 34300a76985c7fc643094fa8d617114926a0ee75 | [
"MIT"
] | 271 | 2020-10-20T12:30:23.000Z | 2022-03-17T03:02:38.000Z | test/test_regularization.py | rentainhe/glasses | 34300a76985c7fc643094fa8d617114926a0ee75 | [
"MIT"
] | 212 | 2020-07-25T13:02:23.000Z | 2022-02-20T10:33:32.000Z | test/test_regularization.py | rentainhe/glasses | 34300a76985c7fc643094fa8d617114926a0ee75 | [
"MIT"
] | 23 | 2021-01-03T13:53:36.000Z | 2022-03-17T05:40:34.000Z | import torch
from glasses.nn.regularization import DropBlock, StochasticDepth
def test_drop_block():
    """DropBlock must perturb its input in training mode and be a no-op in eval mode."""
    drop = DropBlock()
    x = torch.ones((1, 3, 28, 28))
    x_drop = drop(x)
    # Freshly-constructed modules are in training mode, where DropBlock
    # must actually modify the tensor.
    assert not torch.equal(x, x_drop)
    assert drop.training
    drop = drop.eval()
    x_drop = drop(x)
    # In eval mode the input passes through unchanged.
    assert torch.equal(x, x_drop)
    assert not drop.training
    # Idiom fix: use the repr() builtin instead of calling __repr__ directly.
    assert repr(drop) == "DropBlock(p=0.5)"
def test_stocastic_depth():
    """StochasticDepth must report its repr and drop/keep samples according to p."""
    stoc = StochasticDepth()
    # Idiom fix: use the repr() builtin instead of calling __repr__ directly.
    assert repr(stoc) == "StochasticDepth(p=0.5)"
    x = torch.ones((2, 3, 28, 28))
    # NOTE(review): p=1 is expected to yield non-zero output and p~=0
    # all-zero output, so `p` here appears to be the probability of
    # *keeping* a sample -- confirm against glasses' implementation.
    stoc = StochasticDepth(p=1)
    out = stoc(x)
    assert out.sum() > 0
    stoc = StochasticDepth(p=10e-6)
    out = stoc(x)
    assert out.sum() == 0
| 20.911765 | 64 | 0.630098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.059072 |
e23a891f09a542df416f2a9cde89a79b43479dfe | 27,200 | py | Python | dD_plots_lon_runbin.py | HannahSus/MercuryPolarCratersDepthDiameter | e96fc6cadfa5ebd0558ebea737c517d51fcb0d8a | [
"CC0-1.0"
] | null | null | null | dD_plots_lon_runbin.py | HannahSus/MercuryPolarCratersDepthDiameter | e96fc6cadfa5ebd0558ebea737c517d51fcb0d8a | [
"CC0-1.0"
] | null | null | null | dD_plots_lon_runbin.py | HannahSus/MercuryPolarCratersDepthDiameter | e96fc6cadfa5ebd0558ebea737c517d51fcb0d8a | [
"CC0-1.0"
] | null | null | null | #! /Users/susorhc1/anaconda/bin/python
##
##
##
# Program: dD_plots_lon_runbin
# Author: Hannah C.M. Susorney
# Date Created: 2020-03-03
#
# Purpose: To compare depth/diameter measurements in overlapping longitude bins
# Used in study
#
# Required Inputs: .csv of data
#
# Updates: 2021-08-31 - Clean and document code
#
#
##
##
##
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker as mticker
from matplotlib.ticker import FixedLocator
################################################################################
# Output directory for the generated PDF figures.
export_location = '../analysis/'
# Width (degrees) of the overlapping ("running") longitude bins used below.
longitude_bin_size = 15
# Only craters with min_diam < diameter (km) < max_diam are analyzed.
max_diam = 10.0
min_diam = 5.0
################################################################################
file_mla = '../crater_measurements/polar_hannah.csv'
data_source_mla = '_mla'
# Load the MLA-track crater spreadsheet as strings.  Columns used below:
# 1=latitude, 2=longitude, 3=radar-bright flag, 5=diameter (km), 7=depth (km)
# -- inferred from the indexing here; confirm against the CSV header.
dd_data_mla = np.loadtxt(file_mla, dtype='str', delimiter=',', skiprows=1)
# Keep only craters with min_diam < diameter < max_diam.
# BUG FIX: the np.float alias was removed in NumPy 1.24; the builtin
# float is the exact equivalent.
diam_km_mla = dd_data_mla[:, 5].astype(float)
index_diam = np.where((diam_km_mla < max_diam) & (diam_km_mla > min_diam))
dd_data_mla = dd_data_mla[index_diam, :]
# Indexing with the np.where tuple leaves a leading singleton axis; drop it.
dd_data_mla = dd_data_mla[0, :, :]
depth_mla = dd_data_mla[:, 7].astype(float)
diameter_mla = dd_data_mla[:, 5].astype(float)
longitude_mla = dd_data_mla[:, 2].astype(float)
latitude_mla = dd_data_mla[:, 1].astype(float)
radar_bright_mla = dd_data_mla[:, 3]
index_radar_bright_mla = np.where(radar_bright_mla == 'Yes')
# Wrap longitudes from [0, 360) into (-180, 180].
for k in range(0, len(longitude_mla)):
    if longitude_mla[k] > 180:
        longitude_mla[k] = longitude_mla[k] - 360
################################################################################
file_nancy = '../crater_measurements/depth_diameter_spreadsheet_nancy.csv'
data_source_nancy = '_nancy'
# Gridded-topography ("nancy") measurements.  Columns used below:
# 1=radar-bright flag, 6=diameter error, 8=depth error, 22=diameter (km),
# 23=depth (km), 35=latitude, 36=longitude -- inferred from the indexing
# here; confirm against the CSV header.
dd_data_nancy = np.loadtxt(file_nancy, dtype='str', delimiter=',', skiprows=1)
# BUG FIX: the np.float alias was removed in NumPy 1.24; the builtin
# float is the exact equivalent.
diam_km_nancy = dd_data_nancy[:, 22].astype(float)
index_diam = np.where((diam_km_nancy < max_diam) & (diam_km_nancy > min_diam))
dd_data_nancy = dd_data_nancy[index_diam, :]
# Drop the singleton axis introduced by np.where-tuple indexing.
dd_data_nancy = dd_data_nancy[0, :, :]
depth_nancy = dd_data_nancy[:, 23].astype(float)
depth_error_nancy = dd_data_nancy[:, 8].astype(float)
diameter_nancy = dd_data_nancy[:, 22].astype(float)
diameter_error_nancy = dd_data_nancy[:, 6].astype(float)
longitude_nancy = dd_data_nancy[:, 36].astype(float)
latitude_nancy = dd_data_nancy[:, 35].astype(float)
# Wrap longitudes from [0, 360) into (-180, 180].
for k in range(0, len(longitude_nancy)):
    if longitude_nancy[k] > 180:
        longitude_nancy[k] = longitude_nancy[k] - 360
radar_bright_nancy = dd_data_nancy[:, 1]
index_radar_bright_nancy = np.where(radar_bright_nancy == 'Yes')
################################################################################
file = '../crater_measurements/Rubanenko_mercury_data.csv'
# Rubanenko et al. (2019) dataset.  Columns: 0=latitude, 1=longitude,
# 2=depth (m), 3=diameter (m); depths/diameters are converted to km below.
dd_data_rub = np.loadtxt(file, dtype='str', delimiter=',', skiprows=1)
# BUG FIX: the np.float alias was removed in NumPy 1.24; the builtin
# float is the exact equivalent.
diam_km_rub = dd_data_rub[:, 3].astype(float) / 1000.0
index_diam = np.where((diam_km_rub < max_diam) & (diam_km_rub > min_diam))
dd_data_rub = dd_data_rub[index_diam, :]
# Drop the singleton axis introduced by np.where-tuple indexing.
dd_data_rub = dd_data_rub[0, :, :]
depth_rub = dd_data_rub[:, 2].astype(float) / 1000.0
diameter_rub = dd_data_rub[:, 3].astype(float) / 1000.0
longitude_rub = dd_data_rub[:, 1].astype(float)
latitude_rub = dd_data_rub[:, 0].astype(float)
# Wrap longitudes from [0, 360) into (-180, 180].
for k in range(0, len(longitude_rub)):
    if longitude_rub[k] > 180:
        longitude_rub[k] = longitude_rub[k] - 360
################################################################################
###### finding radar-bright data _mla ##########################################
# Split the MLA crater set into radar-bright craters (flag == 'Yes' in the
# spreadsheet) and all others; each subset keeps its own lon/lat/depth/diameter
# arrays for the binning and plotting sections below.
index_radar_bright_mla = np.where(radar_bright_mla=='Yes')
longitude_radar_bright_mla = longitude_mla[index_radar_bright_mla]
latitude_radar_bright_mla = latitude_mla[index_radar_bright_mla]
depth_radar_bright_mla = depth_mla[index_radar_bright_mla]
diameter_radar_bright_mla = diameter_mla[index_radar_bright_mla]
# Complement set: craters whose flag is anything other than 'Yes'.
index_not_radar_bright_mla = np.where(radar_bright_mla!='Yes')
longitude_not_radar_bright_mla = longitude_mla[index_not_radar_bright_mla]
latitude_not_radar_bright_mla = latitude_mla[index_not_radar_bright_mla]
depth_not_radar_bright_mla = depth_mla[index_not_radar_bright_mla]
diameter_not_radar_bright_mla = diameter_mla[index_not_radar_bright_mla]
################################################################################
###### finding radar-bright data _nancy ##########################################
# Same split for the gridded-topography ("nancy") measurements.
index_radar_bright_nancy = np.where(radar_bright_nancy=='Yes')
longitude_radar_bright_nancy = longitude_nancy[index_radar_bright_nancy]
latitude_radar_bright_nancy = latitude_nancy[index_radar_bright_nancy]
depth_radar_bright_nancy = depth_nancy[index_radar_bright_nancy]
diameter_radar_bright_nancy = diameter_nancy[index_radar_bright_nancy]
index_not_radar_bright_nancy = np.where(radar_bright_nancy!='Yes')
longitude_not_radar_bright_nancy = longitude_nancy[index_not_radar_bright_nancy]
latitude_not_radar_bright_nancy = latitude_nancy[index_not_radar_bright_nancy]
depth_not_radar_bright_nancy = depth_nancy[index_not_radar_bright_nancy]
diameter_not_radar_bright_nancy = diameter_nancy[index_not_radar_bright_nancy]
################################################################################
###### binning data in longitude bins _mla ##########################################
# Overlapping ("running") longitude bins: each bin is centered on
# middle_bins_lon_mla[i] and accepts craters within +/- longitude_bin_size
# of the center, so adjacent bins overlap.
total_lon_bins_mla = int(360/longitude_bin_size)
middle_bins_lon_mla = (np.arange(total_lon_bins_mla)*longitude_bin_size)+(longitude_bin_size/2.0)-(180+(longitude_bin_size/2.0))
mean_dd_bin_mla = np.empty(total_lon_bins_mla)
mean_dd_bin_radar_bright_mla = np.empty(total_lon_bins_mla)
mean_dd_bin_not_radar_bright_mla = np.empty(total_lon_bins_mla)
mean_dd_bin_rub = np.empty(total_lon_bins_mla)
median_dd_bin_mla = np.empty(total_lon_bins_mla)
median_dd_bin_radar_bright_mla = np.empty(total_lon_bins_mla)
median_dd_bin_not_radar_bright_mla = np.empty(total_lon_bins_mla)
median_dd_bin_rub = np.empty(total_lon_bins_mla)
std_dd_bin_mla = np.empty(total_lon_bins_mla)
std_dd_bin_radar_bright_mla = np.empty(total_lon_bins_mla)
std_dd_bin_not_radar_bright_mla = np.empty(total_lon_bins_mla)
std_dd_bin_rub = np.empty(total_lon_bins_mla)
count_dd_bin_mla = np.empty(total_lon_bins_mla)
count_dd_bin_radar_bright_mla = np.empty(total_lon_bins_mla)
count_dd_bin_not_radar_bright_mla = np.empty(total_lon_bins_mla)
count_dd_bin_rub = np.empty(total_lon_bins_mla)
for i in range(0,total_lon_bins_mla):
    # All MLA craters whose longitude falls inside this (overlapping) bin.
    index_lon_bin_mla = np.where((longitude_mla>(middle_bins_lon_mla[i]-longitude_bin_size)) & (longitude_mla<(middle_bins_lon_mla[i]+longitude_bin_size)))
    mean_dd_bin_mla[i] = np.mean(depth_mla[index_lon_bin_mla]/diameter_mla[index_lon_bin_mla])
    median_dd_bin_mla[i] = np.median(depth_mla[index_lon_bin_mla]/diameter_mla[index_lon_bin_mla])
    std_dd_bin_mla[i] = np.std(depth_mla[index_lon_bin_mla]/diameter_mla[index_lon_bin_mla])
    count_dd_bin_mla[i] = len(depth_mla[index_lon_bin_mla]/diameter_mla[index_lon_bin_mla])
    # Radar-bright subset.
    index_lon_bin_radar_bright_mla = np.where((longitude_radar_bright_mla>(middle_bins_lon_mla[i]-longitude_bin_size)) & (longitude_radar_bright_mla<(middle_bins_lon_mla[i]+longitude_bin_size)))
    mean_dd_bin_radar_bright_mla[i] = np.mean(depth_radar_bright_mla[index_lon_bin_radar_bright_mla]/diameter_radar_bright_mla[index_lon_bin_radar_bright_mla])
    median_dd_bin_radar_bright_mla[i] = np.median(depth_radar_bright_mla[index_lon_bin_radar_bright_mla]/diameter_radar_bright_mla[index_lon_bin_radar_bright_mla])
    std_dd_bin_radar_bright_mla[i] = np.std(depth_radar_bright_mla[index_lon_bin_radar_bright_mla]/diameter_radar_bright_mla[index_lon_bin_radar_bright_mla])
    count_dd_bin_radar_bright_mla[i] = len(depth_radar_bright_mla[index_lon_bin_radar_bright_mla]/diameter_radar_bright_mla[index_lon_bin_radar_bright_mla])
    # Non-radar-bright subset.
    index_lon_bin_not_radar_bright_mla = np.where((longitude_not_radar_bright_mla>(middle_bins_lon_mla[i]-longitude_bin_size)) & (longitude_not_radar_bright_mla<(middle_bins_lon_mla[i]+longitude_bin_size)))
    mean_dd_bin_not_radar_bright_mla[i] = np.mean(depth_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla]/diameter_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla])
    median_dd_bin_not_radar_bright_mla[i] = np.median(depth_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla]/diameter_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla])
    std_dd_bin_not_radar_bright_mla[i] = np.std(depth_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla]/diameter_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla])
    count_dd_bin_not_radar_bright_mla[i] = len(depth_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla]/diameter_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla])
    index_lon_bin_rub = np.where((longitude_rub>(middle_bins_lon_mla[i]-longitude_bin_size)) & (longitude_rub<(middle_bins_lon_mla[i]+longitude_bin_size)))
    # BUG FIX: these four statistics previously indexed the Rubanenko
    # arrays with index_lon_bin_mla (indices into the *MLA* arrays),
    # which selects the wrong craters and can index out of range.
    # They must use index_lon_bin_rub, computed just above.
    mean_dd_bin_rub[i] = np.mean(depth_rub[index_lon_bin_rub]/diameter_rub[index_lon_bin_rub])
    median_dd_bin_rub[i] = np.median(depth_rub[index_lon_bin_rub]/diameter_rub[index_lon_bin_rub])
    std_dd_bin_rub[i] = np.std(depth_rub[index_lon_bin_rub]/diameter_rub[index_lon_bin_rub])
    count_dd_bin_rub[i] = len(depth_rub[index_lon_bin_rub]/diameter_rub[index_lon_bin_rub])
################################################################################
###### binning data in longitude bins _nancy ##########################################
# Bin the gridded-topography ("nancy") d/D measurements in the same
# overlapping longitude bins used for the MLA data above.  Leftover debug
# print() calls from development have been removed.
total_lon_bins_nancy = int(360/longitude_bin_size)
# Consistency fix: build the centers from total_lon_bins_nancy (the value is
# identical to total_lon_bins_mla, so the result is unchanged).
middle_bins_lon_nancy = (np.arange(total_lon_bins_nancy)*longitude_bin_size)+(longitude_bin_size/2.0)-(180+(longitude_bin_size/2.0))
mean_dd_bin_nancy = np.empty(total_lon_bins_nancy)
mean_dd_bin_radar_bright_nancy = np.empty(total_lon_bins_nancy)
mean_dd_bin_not_radar_bright_nancy = np.empty(total_lon_bins_nancy)
median_dd_bin_nancy = np.empty(total_lon_bins_nancy)
median_dd_bin_radar_bright_nancy = np.empty(total_lon_bins_nancy)
median_dd_bin_not_radar_bright_nancy = np.empty(total_lon_bins_nancy)
std_dd_bin_nancy = np.empty(total_lon_bins_nancy)
std_dd_bin_radar_bright_nancy = np.empty(total_lon_bins_nancy)
std_dd_bin_not_radar_bright_nancy = np.empty(total_lon_bins_nancy)
count_dd_bin_nancy = np.empty(total_lon_bins_nancy)
count_dd_bin_radar_bright_nancy = np.empty(total_lon_bins_nancy)
count_dd_bin_not_radar_bright_nancy = np.empty(total_lon_bins_nancy)
for i in range(0,total_lon_bins_nancy):
    # All nancy craters whose longitude falls inside this (overlapping) bin.
    index_lon_bin_nancy = np.where((longitude_nancy>(middle_bins_lon_nancy[i]-longitude_bin_size)) & (longitude_nancy<(middle_bins_lon_nancy[i]+longitude_bin_size)))
    mean_dd_bin_nancy[i] = np.mean(depth_nancy[index_lon_bin_nancy]/diameter_nancy[index_lon_bin_nancy])
    median_dd_bin_nancy[i] = np.median(depth_nancy[index_lon_bin_nancy]/diameter_nancy[index_lon_bin_nancy])
    std_dd_bin_nancy[i] = np.std(depth_nancy[index_lon_bin_nancy]/diameter_nancy[index_lon_bin_nancy])
    count_dd_bin_nancy[i] = len(depth_nancy[index_lon_bin_nancy]/diameter_nancy[index_lon_bin_nancy])
    # Radar-bright subset.
    index_lon_bin_radar_bright_nancy = np.where((longitude_radar_bright_nancy>(middle_bins_lon_nancy[i]-longitude_bin_size)) & (longitude_radar_bright_nancy<(middle_bins_lon_nancy[i]+longitude_bin_size)))
    mean_dd_bin_radar_bright_nancy[i] = np.mean(depth_radar_bright_nancy[index_lon_bin_radar_bright_nancy]/diameter_radar_bright_nancy[index_lon_bin_radar_bright_nancy])
    median_dd_bin_radar_bright_nancy[i] = np.median(depth_radar_bright_nancy[index_lon_bin_radar_bright_nancy]/diameter_radar_bright_nancy[index_lon_bin_radar_bright_nancy])
    std_dd_bin_radar_bright_nancy[i] = np.std(depth_radar_bright_nancy[index_lon_bin_radar_bright_nancy]/diameter_radar_bright_nancy[index_lon_bin_radar_bright_nancy])
    count_dd_bin_radar_bright_nancy[i] = len(depth_radar_bright_nancy[index_lon_bin_radar_bright_nancy]/diameter_radar_bright_nancy[index_lon_bin_radar_bright_nancy])
    # Non-radar-bright subset.
    index_lon_bin_not_radar_bright_nancy = np.where((longitude_not_radar_bright_nancy>(middle_bins_lon_nancy[i]-longitude_bin_size)) & (longitude_not_radar_bright_nancy<(middle_bins_lon_nancy[i]+longitude_bin_size)))
    mean_dd_bin_not_radar_bright_nancy[i] = np.mean(depth_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy]/diameter_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy])
    median_dd_bin_not_radar_bright_nancy[i] = np.median(depth_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy]/diameter_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy])
    std_dd_bin_not_radar_bright_nancy[i] = np.std(depth_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy]/diameter_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy])
    count_dd_bin_not_radar_bright_nancy[i] = len(depth_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy]/diameter_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy])
################################################################################
###### Matplotlib formatting ######################################################
# Global font settings applied to every figure below.
tfont = {'family' : 'Times New Roman',
'size' : 18}
mpl.rc('font',**tfont)
###### mean d/D versus binned longitude -180 to 180 _mla####################################################
# Figure: mean d/D of all MLA craters per running longitude bin, with
# standard-deviation error bars.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,mean_dd_bin_mla,'ko',label='All craters')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_mla, yerr=std_dd_bin_mla,fmt='ko',capsize=5)
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_mla.pdf',format='pdf')
plt.close('all')
################################################################################
###### mean d/D versus longitude -180 to 180 _mla ###################################################
# NOTE(review): throughout these figures the *_radar_bright_* arrays hold
# craters whose spreadsheet flag is 'Yes' (see the filtering section), yet
# they are labelled 'Non-radar-bright' in the legends (and vice versa).
# The legend labels appear to be swapped -- confirm before publication.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,mean_dd_bin_radar_bright_mla,'ko',label='MLA Non-radar-bright craters')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_radar_bright_mla, yerr=std_dd_bin_radar_bright_mla,fmt='ko',capsize=5)
ax.plot(middle_bins_lon_mla,mean_dd_bin_not_radar_bright_mla,'b^',label='MLA Radar-bright craters')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_not_radar_bright_mla, yerr=std_dd_bin_not_radar_bright_mla,fmt='b^',capsize=5)
# Reference line: fresh-crater depth = 0.2 * Diameter.
ax.plot([-180,180],[0.2,0.2],':ko')
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':14})
ax.text(0, 0.202, 'depth=0.2Diameter',size=12)
ax.set_ylim(0.05,0.25)
ax.set_xlim(-40,130)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_mla.pdf',format='pdf')
plt.close('all')
################################################################################
###### median d/D versus longitude -180 to 180 _mla ####################################################
# Figure: median d/D per bin, radar-bright vs non-radar-bright (MLA).
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,median_dd_bin_radar_bright_mla,'ko',label='Non-radar-bright craters')
ax.errorbar(middle_bins_lon_mla,median_dd_bin_radar_bright_mla, yerr=std_dd_bin_radar_bright_mla,fmt='ko',capsize=5)
ax.plot(middle_bins_lon_mla,median_dd_bin_not_radar_bright_mla,'b^',label='Radar-bright craters')
ax.errorbar(middle_bins_lon_mla,median_dd_bin_not_radar_bright_mla, yerr=std_dd_bin_not_radar_bright_mla,fmt='b^',capsize=5)
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Median depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'mediandD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_mla.pdf',format='pdf')
plt.close('all')
################################################################################
###### count d/D versus longitude -180 to 180 _mla####################################################
# Figure: number of measured craters per bin, by radar-bright class (MLA).
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,count_dd_bin_radar_bright_mla,'ko',label='Non-radar-bright craters')
ax.plot(middle_bins_lon_mla,count_dd_bin_not_radar_bright_mla,'bo',label='Radar-bright craters')
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Number of craters measured')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'countD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_mla.pdf',format='pdf')
plt.close('all')
################################################################################
###### percentage radar-bright versus longitude -180 to 180 _mla####################################################
# Figure: fraction of measured craters in each bin that are radar-bright (MLA).
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,((count_dd_bin_radar_bright_mla/(count_dd_bin_not_radar_bright_mla+count_dd_bin_radar_bright_mla))*100),'ko',label='Percentage measured radar-bright')
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('% of measured craters that are radar-bright')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
plt.tight_layout()
plt.savefig(export_location+'percentage_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_mla.pdf',format='pdf')
plt.close('all')
###### mean d/D versus binned longitude -180 to 180 _nancy####################################################
# Figure: mean d/D of all gridded-topography ("nancy") craters per bin.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,mean_dd_bin_nancy,'ko',label='All craters')
ax.errorbar(middle_bins_lon_nancy,mean_dd_bin_nancy, yerr=std_dd_bin_nancy,fmt='ko',capsize=5)
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### mean d/D versus longitude -180 to 180 _nancy ###################################################
# NOTE(review): as in the MLA figures, the radar-bright / non-radar-bright
# legend labels here appear swapped relative to the 'Yes' filter above --
# confirm before publication.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,mean_dd_bin_radar_bright_nancy,'ro',label='Gridded Non-radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_nancy,mean_dd_bin_radar_bright_nancy, yerr=std_dd_bin_radar_bright_nancy,fmt='ro',capsize=5,alpha=0.5)
ax.plot(middle_bins_lon_nancy,mean_dd_bin_not_radar_bright_nancy,'m^',label='Gridded Radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_nancy,mean_dd_bin_not_radar_bright_nancy, yerr=std_dd_bin_not_radar_bright_nancy,fmt='m^',capsize=5,alpha=0.5)
# Reference line: fresh-crater depth = 0.2 * Diameter.
ax.plot([-180,180],[0.2,0.2],':ko')
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':14})
ax.text(0, 0.202, 'depth=0.2Diameter',size=12)
ax.set_ylim(0.05,0.25)
ax.set_xlim(-40,130)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### median d/D versus longitude -180 to 180 _nancy ####################################################
# Figure: median d/D per bin, by radar-bright class (gridded topography).
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,median_dd_bin_radar_bright_nancy,'ko',label='Non-radar-bright craters')
ax.errorbar(middle_bins_lon_nancy,median_dd_bin_radar_bright_nancy, yerr=std_dd_bin_radar_bright_nancy,fmt='ko',capsize=5)
ax.plot(middle_bins_lon_nancy,median_dd_bin_not_radar_bright_nancy,'bo',label='Radar-bright craters')
ax.errorbar(middle_bins_lon_nancy,median_dd_bin_not_radar_bright_nancy, yerr=std_dd_bin_not_radar_bright_nancy,fmt='bo',capsize=5)
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Median depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'mediandD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### count d/D versus longitude -180 to 180 _nancy####################################################
# Figure: number of measured craters per bin, by radar-bright class.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,count_dd_bin_radar_bright_nancy,'ko',label='Non-radar-bright craters')
ax.plot(middle_bins_lon_nancy,count_dd_bin_not_radar_bright_nancy,'bo',label='Radar-bright craters')
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Number of craters measured')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'countD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### percentage radar-bright versus longitude -180 to 180 _nancy####################################################
# Figure: fraction of measured craters in each bin that are radar-bright.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,((count_dd_bin_radar_bright_nancy/(count_dd_bin_not_radar_bright_nancy+count_dd_bin_radar_bright_nancy))*100),'ko',label='Percentage measured radar-bright')
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('% of measured craters that are radar-bright')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
plt.tight_layout()
plt.savefig(export_location+'percentage_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### mean d/D versus longitude -180 to 180 _mla _nancy###################################################
# Figure: mean d/D per bin for both data sets, split by radar-bright class.
# NOTE(review): the radar-bright / non-radar-bright legend labels appear
# swapped relative to the 'Yes' filter above -- confirm before publication.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,mean_dd_bin_radar_bright_nancy,'ro',label='Gridded Non-radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_nancy,mean_dd_bin_radar_bright_nancy, yerr=std_dd_bin_radar_bright_nancy,fmt='ro',capsize=5,alpha=0.5)
ax.plot(middle_bins_lon_nancy,mean_dd_bin_not_radar_bright_nancy,'mo',label='Gridded Radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_nancy,mean_dd_bin_not_radar_bright_nancy, yerr=std_dd_bin_not_radar_bright_nancy,fmt='mo',capsize=5,alpha=0.5)
ax.plot(middle_bins_lon_mla,mean_dd_bin_radar_bright_mla,'ko',label='MLA Non-radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_radar_bright_mla, yerr=std_dd_bin_radar_bright_mla,fmt='ko',capsize=5,alpha=0.5)
ax.plot(middle_bins_lon_mla,mean_dd_bin_not_radar_bright_mla,'bo',label='MLA Radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_not_radar_bright_mla, yerr=std_dd_bin_not_radar_bright_mla,fmt='bo',capsize=5,alpha=0.5)
# Reference line: fresh-crater depth = 0.2 * Diameter.
ax.plot([-180,180],[0.2,0.2],':ko')
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':14})
ax.text(0, 0.202, 'depth=0.2Diameter',size=12)
ax.set_ylim(0.05,0.25)
ax.set_xlim(-40,130)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_mla_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### mean d/D versus longitude -180 to 180 _mla _nancy###################################################
# Figure: mean d/D per bin for MLA tracks, gridded topography, and the
# Rubanenko et al. (2019) dataset.
fig = plt.figure()
ax = fig.add_subplot(111)
# NOTE(review): only the first 9 bins are plotted for the MLA markers while
# the errorbar call below draws all bins (errorbar with fmt='ko' also draws
# markers) -- the [0:9] slice looks like a leftover; confirm intent.
ax.plot(middle_bins_lon_mla[0:9],mean_dd_bin_mla[0:9],'ko',label='MLA track topography')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_mla, yerr=std_dd_bin_mla,fmt='ko',capsize=5)
ax.plot(middle_bins_lon_mla,mean_dd_bin_nancy,'ro',label='Gridded topography')
# BUG FIX: the gridded-topography series previously used the MLA standard
# deviations (yerr=std_dd_bin_mla); use its own std_dd_bin_nancy, matching
# the pattern of the MLA and Rubanenko series.
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_nancy, yerr=std_dd_bin_nancy,fmt='ro',capsize=5)
ax.plot(middle_bins_lon_mla,mean_dd_bin_rub,'bo',label='Rubanenko et al., 2019')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_rub, yerr=std_dd_bin_rub,fmt='bo',capsize=5)
ax.set_ylim(0.06,0.2)
ax.set_xlim(-35,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':12})
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_mla_nancy_rub.pdf',format='pdf')
plt.close('all')
################################################################################
###### median d/D versus longitude -180 to 180 _mla _nancy###################################################
# Figure: median d/D per bin for MLA tracks, gridded topography, and the
# Rubanenko et al. (2019) dataset.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,median_dd_bin_mla,'ko',label='MLA track topography')
ax.errorbar(middle_bins_lon_mla,median_dd_bin_mla, yerr=std_dd_bin_mla,fmt='ko',capsize=5)
ax.plot(middle_bins_lon_mla,median_dd_bin_nancy,'ro',label='Gridded topography')
# BUG FIX: the gridded-topography series previously used the MLA standard
# deviations (yerr=std_dd_bin_mla); use its own std_dd_bin_nancy.
ax.errorbar(middle_bins_lon_mla,median_dd_bin_nancy, yerr=std_dd_bin_nancy,fmt='ro',capsize=5)
ax.plot(middle_bins_lon_mla,median_dd_bin_rub,'bo',label='Rubanenko et al., 2019')
ax.errorbar(middle_bins_lon_mla,median_dd_bin_rub, yerr=std_dd_bin_rub,fmt='bo',capsize=5)
ax.set_xlim(-35,121)
ax.set_ylim(0.06,0.2)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
# BUG FIX: this figure plots medians, but the axis was labelled 'Mean'.
ax.set_ylabel('Median depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':12})
plt.tight_layout()
plt.savefig(export_location+'mediandD_v_runbinned_longitude_mla_nancy_rub.pdf',format='pdf')
plt.close('all')
| 50.746269 | 212 | 0.742096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,625 | 0.243566 |
e23afe1671f81cd4be3382e30d76243c69f070a9 | 24,158 | py | Python | docassemble/michildsupport/misc.py | jpylephilalegal/docassemble-michildsupport | 9bae912240d58d3d04e05abc879f9f66cbbd48a8 | [
"MIT"
] | null | null | null | docassemble/michildsupport/misc.py | jpylephilalegal/docassemble-michildsupport | 9bae912240d58d3d04e05abc879f9f66cbbd48a8 | [
"MIT"
] | null | null | null | docassemble/michildsupport/misc.py | jpylephilalegal/docassemble-michildsupport | 9bae912240d58d3d04e05abc879f9f66cbbd48a8 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup as Soup
import re
import json
import sys
import shutil
import tempfile
import os
import subprocess
from pathlib import Path
from docassemble.base.util import log, path_and_mimetype, validation_error, DADict, DAList, Individual, value, force_ask, space_to_underscore
__all__ = ['run_automation', 'noquote', 'number_with_max', 'retirement_index_increment', 'ParentDict', 'ChildrenList']
class ParentDict(DADict):
    """Dictionary of the parents in the case, with Individual values.

    Automatic gathering is disabled: the interview populates the entries
    explicitly rather than letting docassemble prompt for them.
    """
    def init(self, *pargs, **kwargs):
        super().init(*pargs, **kwargs)
        self.auto_gather = False
        self.object_type = Individual
class ChildrenList(DAList):
    """List of the children in the case, gathered by asking for a count first.

    After gathering, the children are ordered youngest-first (descending
    birthdate).  When support group 'C' is selected but no gathered child
    lives with a non-parent custodian, the interview is redirected to the
    'no_child_with_guardian' question.
    """
    def init(self, *pargs, **kwargs):
        super().init(*pargs, **kwargs)
        self.ask_number = True
        self.object_type = Individual
    def hook_on_gather(self):
        # Only relevant when group 'C' is part of the selected support groups.
        if 'C' not in value('child_support_group'):
            return
        any_with_guardian = any(kid.lives_with_non_parent_custodian for kid in self.elements)
        if not any_with_guardian:
            force_ask('no_child_with_guardian')
    def hook_after_gather(self):
        # Youngest child first.
        self.sort(key=lambda kid: kid.birthdate, reverse=True)
def retirement_index_increment(parent):
    """Return 1 if the parent has estimated taxes and at least one employer
    wage income source with a positive mandatory retirement contribution
    percentage; otherwise return 0.
    """
    if parent.tax_method != 'estimated':
        return 0
    has_mandatory_retirement = any(
        source.type == 'Employer Wages'
        and source.must_contribute_to_retirement
        and source.mandatory_percentage > 0
        for source in parent.income_sources
    )
    return 1 if has_mandatory_retirement else 0
def number_with_max(number, maximum):
    """Format ``number`` as a string, capping the display at ``maximum``.

    Values at or above the cap are shown as e.g. "5+".
    """
    return str(maximum) + '+' if number >= maximum else str(number)
def noquote(text):
    """Field validator: raise a validation error if ``text`` contains any
    character outside the allowed set (letters, digits, apostrophe, space,
    underscore, hyphen, newline/carriage return); otherwise return True.

    NOTE(review): the user-facing message only mentions A-Z, a-z, 0-9 and
    "-", but the regex additionally permits apostrophe, space and
    underscore -- confirm which list is intended.
    """
    if re.search(r'[^A-Za-z\' 0-9\_\-\n\r]', text):
        raise validation_error("You are only allowed to type characters A-Z, a-z, 0-9, and -.")
    return True
def run_automation(feature_file, html_file, png_file, json_file, base_name):
    """Run the packaged aloe browser automation for one feature file and
    collect its artifacts.

    The bundled ``data/sources/features`` directory is copied into a scratch
    directory, ``feature_file`` is dropped in as ``calculate.feature``, and
    ``aloe`` is executed there.  On success the automation's ``output.html``
    (and optional ``output.png`` screenshot) are saved into the supplied
    writable file objects, and structured data is extracted from the HTML
    into ``json_file``.

    :param feature_file: path to the Gherkin feature file to run.
    :param html_file: writable file object for the results page
        (presumably a DAFile -- confirm with callers); likewise png_file
        and json_file.
    :param base_name: base name for the saved artifacts (spaces become
        underscores).
    :return: tuple ``(success, output, output_data)`` where ``output`` is
        the accumulated log text and ``output_data`` is the extracted dict
        ({} on failure, or an {"error": ..., "message": ...} dict when
        extraction itself failed).
    """
    base_name = space_to_underscore(base_name)
    try:
        with tempfile.TemporaryDirectory(prefix='datemp') as temp_directory:
            output_file = os.path.join(temp_directory, 'output.html')
            output_png = os.path.join(temp_directory, 'output.png')
            features_directory = shutil.copytree(path_and_mimetype('data/sources/features')[0], os.path.join(temp_directory, 'features'))
            shutil.copyfile(feature_file, os.path.join(features_directory, 'calculate.feature'))
            # aloe expects the features directory and its steps to be packages.
            Path(os.path.join(features_directory, '__init__.py')).touch()
            Path(os.path.join(features_directory, 'steps', '__init__.py')).touch()
            # The log starts with the feature file's own contents.
            output = ''
            with open(feature_file, encoding='utf-8') as x:
                output += x.read()
            try:
                commands = ["aloe", "--stop", "--verbosity=3", "features/calculate.feature"]
                output += "\n\n" + ' '.join(commands) + "\n"
                #output += subprocess.check_output(["ls", "-lR"], cwd=temp_directory, stderr=subprocess.STDOUT).decode()
                output += subprocess.check_output(commands, cwd=temp_directory, stderr=subprocess.STDOUT).decode()
                success = True
            except subprocess.CalledProcessError as err:
                # A non-zero exit from aloe is a normal failure mode; keep its output.
                output += err.output.decode()
                success = False
            if success:
                if os.path.isfile(output_file):
                    html_file.initialize(filename=base_name + '.html')
                    html_file.copy_into(output_file)
                    html_file.commit()
                else:
                    success = False
                    output += "\nFile not found after process completed.\n"
                # The screenshot is optional; its absence is not a failure.
                if os.path.isfile(output_png):
                    png_file.initialize(filename=base_name + '.png')
                    png_file.copy_into(output_png)
                    png_file.commit()
                # else:
                #     success = False
                #     output += "\nPNG file not found after process completed.\n"
    except Exception as err:
        success = False
        output = err.__class__.__name__ + ": " + str(err)
    if success:
        try:
            output_data = extract_data(html_file.path())
            json_file.initialize(filename=base_name + '.json')
            json_file.write(json.dumps(output_data, indent=2))
            json_file.commit()
        except Exception as err:
            success = False
            output += err.__class__.__name__ + ": " + str(err)
            output_data = {"error": err.__class__.__name__, "message": str(err)}
    else:
        output_data = {}
    return success, output, output_data
def process_table(table):
    """Convert a BeautifulSoup <table> element into a plain dict.

    The result has keys 'title' (the table's title attribute, or None),
    'columns' (stripped <thead> texts), 'rows' (each <tr>'s <td> cells run
    through fixup()) and 'footer' (stripped <tfoot> texts).
    """
    parsed = {
        'title': table.get('title', None),
        'columns': [head.get_text().strip()
                    for head in table.find_all('thead', recursive=False)],
        'rows': [],
        'footer': [],
    }
    for body in table.find_all('tbody', recursive=False):
        for row in body.find_all('tr', recursive=False):
            cells = [fixup(cell) for cell in row.find_all('td', recursive=False)]
            parsed['rows'].append(cells)
    parsed['footer'] = [foot.get_text().strip()
                        for foot in table.find_all('tfoot', recursive=False)]
    return parsed
def fixup(elem):
    """Flatten one table cell (a BeautifulSoup element) into a plain value.

    Depending on the cell's non-<br> children this returns:
      - a list of parsed tables (via process_table) when the cell wraps
        one or two tables,
      - the ``value`` attribute for an <input> child,
      - otherwise collapsed, whitespace-normalized text; strings that look
        exactly like "$NN.NN" are converted to float.

    NOTE(review): the branch structure mirrors the specific markup of the
    calculator's results page; the commented-out log() calls are retained
    for debugging.
    """
    children = [item for item in elem.find_all(recursive=False) if item.name != 'br']
    if len(children) == 1:
        # Single wrapper child: descend into it but remember the original
        # element, whose full text is used for <output>/<div> wrappers.
        orig_elem = elem
        elem = children[0]
        #log("kids1: found a " + elem.name + " with " + repr(elem.get_text()))
        if elem.name == 'output':
            text = orig_elem.get_text().strip()
        elif elem.name == 'div':
            found = False
            tables = list()
            for table in elem.find_all('table'):
                found = True
                tables.append(process_table(table))
                # for head in table.find_all('thead', recursive=False):
                #     tables.append(head.get_text().strip())
            if found:
                return tables
            text = orig_elem.get_text().strip()
        elif elem.name == 'table':
            #tables = list()
            #for head in elem.find_all('thead', recursive=False):
            #    tables.append(head.get_text().strip())
            #return tables
            return process_table(elem)
        elif elem.name == 'input':
            text = elem.get('value')
        else:
            #log("doing get text and strip")
            text = elem.text.strip()
            #log("doing elem is" + repr(text))
        text = re.sub(r'<br/?>', ' ', text)
    elif len(children) == 2 and children[0].name == 'table' and children[1].name == 'table':
        return [process_table(children[0]), process_table(children[1])]
    elif len(children) == 2 and children[0].name == 'a' and children[1].name == 'label':
        text = children[1].get_text().strip()
    elif len(children) == 2 and children[0].name == 'output' and children[1].name == 'output':
        text = children[0].get_text().strip() + " " + children[1].get_text().strip()
    elif len(children) == 3 and children[0].name == 'div' and children[1].name == 'div' and children[2].name == 'div':
        #log("Triple div first kid is " + repr(str(children[0])))
        text = children[0].get_text().strip() + " " + children[1].get_text().strip() + " " + children[2].get_text().strip()
        #log("Triple div Got " + repr(text))
    elif len(children) == 2 and children[0].name == 'div' and children[1].name == 'div':
        text = children[0].get_text().strip() + " " + children[1].get_text().strip()
    elif len(children) == 2 and children[0].name == 'strong' and children[1].name == 'strong':
        text = children[0].get_text().strip() + " " + children[1].get_text().strip()
    elif len(children) == 2 and children[0].name == 'p' and children[1].name == 'p':
        text = children[0].get_text().strip() + " " + children[1].get_text().strip()
    elif len(children) == 2 and children[0].name == 'div' and children[1].name == 'p':
        text = children[1].get_text().strip()
    else:
        # Fallback: serialize the cell's inner markup as text.
        #log("found a " + elem.name + " with " + repr(elem.get_text()))
        #log("kids is " + ";".join(repr(item.name) for item in children))
        text = elem.decode_contents().strip()
        #log("elem is" + repr(text))
        text = re.sub(r'<br/?>', ' ', text)
    if not isinstance(text, str):
        return text
    # NOTE(review): the pattern below appears to target a non-breaking space
    # character (rendered identically to a normal space) -- confirm before editing.
    text = re.sub(r' ', ' ', text)
    text = re.sub(r' +', ' ', text)
    text = re.sub(r'\n\t+', ' ', text)
    text = text.strip()
    # Convert exact dollar amounts like "$12.34" to floats.
    m = re.search(r'^\$([0-9]+\.[0-9][0-9])$', text)
    if m:
        text = float(m.group(1))
    return text
def nulltruefalse(item):
    """Coerce a scraped table-cell string into a Python value.

    Maps 'false'/'No' to False, 'true'/'Yes' to True, and '-'/'' to None;
    converts integer-, float- and percentage-looking strings to numbers
    (percentages become fractions).  Anything else, including non-string
    input, is returned unchanged.
    """
    if not isinstance(item, str):
        return item
    if item in ('false', 'No'):
        return False
    if item in ('true', 'Yes'):
        return True
    if item in ('-', ''):
        return None
    if re.search(r'^\-?[0-9]+$', item):
        try:
            return int(item)
        except:
            pass
    if '.' in item and re.search(r'^\-?[0-9\.]+$', item):
        try:
            return float(item)
        except:
            # e.g. "1.2.3" matches the pattern but is not a float
            pass
    if re.search(r'^[0-9\.]+\%$', item):
        try:
            return float(item[0:-1])/100.0
        except:
            pass
    return item
def get_amount_potential(text):
    """Split a money cell into ``(amount, potential_flag)``.

    A cell containing "(PC)" is flagged as a "potential" amount.  A leading
    dollar amount is converted to float when possible; non-string input is
    passed through with the flag set to False.
    """
    if not isinstance(text, str):
        return (text, False)
    potential = '(PC)' in text
    m = re.search(r'^\$([0-9\.]+)', text)
    if m:
        try:
            text = float(m.group(1))
        except:
            # Malformed number such as "$1.2.3": keep the original string.
            pass
    return (text, potential)
def extract_data(filename):
    """Parse the results HTML page saved by run_automation() into a dict.

    The returned dict has three keys:

    - ``results``: a list with one dict per payment relationship; each dict
      maps a section name (e.g. "Financial", "Base Support Calculation") to
      the cleaned-up contents of that section's table.
    - ``information``: the values of the page's hidden <input> elements,
      keyed by element id (or name) and coerced via nulltruefalse().
    - ``summaries``: the text of the page's "Show Result N:" summary links.

    :param filename: path to the results HTML file.
    :raises Exception: if the page does not have the structure this parser
        expects.
    """
    results = {"parts": [], "hidden": {}, "summary": []}
    with open(filename) as fp:
        s = Soup(fp.read(), "html.parser")
        # Hidden inputs carry the raw calculated values.
        for inp in s.select('input[type="hidden"]'):
            results['hidden'][inp.get('id') or inp.get('name')] = inp.get('value')
        # The page can contain up to three payment relationships (0-2).
        for i in range(3):
            for div in s.select('#showResult' + str(i)):
                link_text = div.get_text().strip()
                link_text = re.sub(r'\s+', ' ', link_text)
                link_text = re.sub(r'Show Result [0-9]+: ', '', link_text)
                results['summary'].append(link_text)
            for div in s.select('#paymentRelationship' + str(i)):
                result = {}
                for table in div.find_all('table', recursive=False):
                    heading = None
                    for head in table.find_all('thead', recursive=False):
                        heading = head.get_text().strip()
                    if not heading:
                        raise Exception("Table has no heading")
                    heading = re.sub(r'^Section:\s*', '', heading)
                    result[heading] = []
                    for body in table.find_all('tbody', recursive=False):
                        for row in body.find_all('tr', recursive=False):
                            item = list()
                            for col in row.find_all('td', recursive=False):
                                item.append(fixup(col))
                            result[heading].append(item)
                results['parts'].append(result)
    #log("Raw:")
    #log(json.dumps(results, indent=2))
    main_output = {'results': [], 'information': {}, 'summaries': []}
    for part in results['parts']:
        output = dict()
        # Fail fast with a clear message if an expected section is absent.
        # ('Medical' added to this check because part['Medical'] is accessed below.)
        for item in ('General Information', 'Eliminate Ordinary Medical Expenses', 'Calculation Results', 'Children', 'Financial', 'Base Support Calculation', 'Child Care', 'Medical'):
            if item not in part:
                raise Exception(item + " not found")
        # --- General Information ---
        for item in part['General Information']:
            if item[0] == 'Court Case Number' and len(item) >= 4:
                output['Court Case Number'] = item[1]
                if item[2] == 'Court Case County':
                    output['Court Case County'] = item[3]
            elif item[0] == 'Calculation Parties' and len(item) >= 4:
                output['Calculation Parties'] = [item[1], item[3]]
            elif item[0] == 'Description' and len(item) > 1:
                output['Description'] = item[1]
            elif item[0] == 'Michigan Child Support Formula Year' and len(item) >= 6:
                output[item[0]] = item[1]
                output[item[2]] = item[3]
                output[item[4]] = item[5]
        # --- Eliminate Ordinary Medical Expenses ---
        headers = None
        for item in part['Eliminate Ordinary Medical Expenses']:
            if item[0] == "":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Eliminate Ordinary Medical Expenses")
        subout = dict()
        for item in part['Eliminate Ordinary Medical Expenses']:
            if item[0] == "":
                continue
            if len(item) == 1 + len(headers):
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                subout[item[0]] = subsubout
            if len(item) == 2 and item[0] == 'Select Reason for Eliminating the Ordinary Medical Expense(s):':
                subout[item[0]] = item[1]
        output['Eliminate Ordinary Medical Expenses'] = subout
        # --- Calculation Results ---
        headers = None
        for item in part['Calculation Results']:
            if item[0] == "":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Calculation Results")
        subout = dict()
        for item in part['Calculation Results']:
            if item[0] == "":
                continue
            if len(item) == 1 + len(headers):
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                subout[item[0]] = subsubout
            # NOTE(review): this two-column case appears copied from the
            # section above; it is harmless here but may be unintended.
            if len(item) == 2 and item[0] == 'Select Reason for Eliminating the Ordinary Medical Expense(s):':
                subout[item[0]] = item[1]
        output['Calculation Results'] = subout
        # --- Children ---
        headers = None
        for item in part['Children']:
            if item[0] == "Children's Overnights Spent Per Year":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Children")
        subout = dict()
        overnights = dict()
        for item in part['Children']:
            if item[0] == "Children's Overnights Spent Per Year":
                continue
            if len(item) == 1 + len(headers):
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                if item[0] in ('Additional Children from Other Relationships', 'Child Support Children in Other Payment Relationships', 'Total Other Children', 'Income Adjustment Percentage Multiplier'):
                    subout[item[0]] = subsubout
                else:
                    # Any other row is a per-child overnights count.
                    for i in range(len(headers)):
                        if headers[i] not in overnights:
                            overnights[headers[i]] = dict()
                        overnights[headers[i]][item[0]] = nulltruefalse(item[i + 1])
        subout["Children's Overnights Spent Per Year"] = overnights
        output["Children"] = subout
        # --- Financial ---
        subout = dict(notes=list())
        headers = None
        for item in part['Financial']:
            if item[0] == "See 2021 MCSF 2.01":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Financial")
        for item in part['Financial']:
            if len(item) > 0 and isinstance(item[0], list):
                # A row of nested per-party tables (one table per header column).
                if len(item[0]) > len(headers):
                    raise Exception("Unrecognized row of tables in Financial section. Expected " + str(len(headers)) + " and got " + str(len(item[0])) + " where content is " + repr(item[0]) + " and headers are " + repr(headers))
                for i in range(len(headers)):
                    if i >= len(item[0]):
                        continue
                    table = item[0][i]
                    if not isinstance(table, dict) or 'title' not in table or 'columns' not in table or 'rows' not in table:
                        raise Exception("Unrecognized table " + repr(table) + " in Financial section")
                    table_title = re.sub(r'^Party [0-9]+ ', '', table['title'])
                    if table_title not in subout:
                        subout[table_title] = dict()
                    subsubout = dict()
                    for subitem in table['rows']:
                        if not len(subitem) == 2:
                            raise Exception("Unrecognized row in table in Financial section")
                        subsubout[subitem[0]] = subitem[1]
                    subout[table_title][headers[i]] = subsubout
            elif len(item) == 1 and isinstance(item[0], str):
                subout['notes'].append(item[0])
            elif len(item) == 2:
                subout[item[0]] = item[1]
            elif len(item) == 1 + len(headers):
                if item[0] in ("See 2021 MCSF 2.01", "Additional Deductions"):
                    continue
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                label = re.sub(r' See 2021 MCSF 2.01', '', item[0])
                subout[label] = subsubout
        output["Financial"] = subout
        # --- Base Support Calculation ---
        subout = dict()
        headers = None
        for item in part['Base Support Calculation']:
            if item[0] == "See 2021 MCSF 3.02(A)":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Base Support Calculation")
        for item in part['Base Support Calculation']:
            if not len(item) == 1 + len(headers):
                raise Exception("Unrecognized row in Base Support Calculation")
            if item[0] == "See 2021 MCSF 3.02(A)":
                continue
            subsubout = dict()
            for i in range(len(headers)):
                subsubout[headers[i]] = nulltruefalse(item[i + 1])
            subout[item[0]] = subsubout
        output["Base Support Calculation"] = subout
        # --- Child Care ---
        subout = dict(notes=list())
        reimbursement_end_dates = list()
        headers = None
        for item in part['Child Care']:
            if len(item) and item[0] == "See 2021 MCSF 3.06(C) and 2021 MCSF 3.06(D)":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Child Care")
        for item in part['Child Care']:
            if len(item) > 0 and isinstance(item[0], list):
                if len(item[0]) != len(headers):
                    raise Exception("Unrecognized row of tables in Child Care section")
                for i in range(len(headers)):
                    table = item[0][i]
                    if not isinstance(table, dict) or 'title' not in table or 'columns' not in table or 'rows' not in table:
                        raise Exception("Unrecognized table " + repr(table) + " in Child Care section")
                    if len(table['rows']) == 1:
                        continue
                    table_title = re.sub(r'^Party [0-9]+ ', '', table['title'])
                    table_title = re.sub(r'Child Care Expense Information Table', 'Child Care Expenses Information Table', table_title)
                    if table_title not in subout:
                        subout[table_title] = dict()
                    subsubout = list()
                    for subitem in table['rows']:
                        if not len(subitem) == 2:
                            raise Exception("Unrecognized row in table in Child Care section")
                        if subitem[0] == 'Months':
                            # A "Months" row qualifies the preceding child row.
                            if len(subsubout) == 0:
                                raise Exception("Unrecognized Months row in Child Care section")
                            subsubout[-1]['months'] = subitem[1]
                        else:
                            amount, is_potential = get_amount_potential(subitem[1])
                            subsubout.append({'child': subitem[0], 'amount': amount, 'potential': is_potential})
                    subout[table_title][headers[i]] = subsubout
            elif len(item) == 0:
                continue
            elif len(item) == 1 and isinstance(item[0], str):
                subout['notes'].append(item[0])
            elif len(item) == 2:
                reimbursement_end_dates.append({'child': item[0], 'date': item[1]})
            elif len(item) == 1 + len(headers):
                if item[0] == "See 2021 MCSF 3.06(C) and 2021 MCSF 3.06(D)":
                    continue
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                subout[item[0]] = subsubout
        subout["Reimbursement End Dates"] = reimbursement_end_dates
        # Fixed: store the Child Care section under its own key.  It was
        # previously written to output["Medical"] and then overwritten by the
        # Medical section below, silently discarding the Child Care data.
        output["Child Care"] = subout
        # --- Medical ---
        subout = dict(notes=list())
        headers = None
        for item in part['Medical']:
            if len(item) and item[0] == "See 2021 MCSF 3.05(C) See 2021 MCSF 3.04(B)":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Medical")
        for item in part['Medical']:
            if len(item) > 0 and isinstance(item[0], list):
                if len(item[0]) != len(headers):
                    raise Exception("Unrecognized row of tables in Medical section")
                for i in range(len(headers)):
                    table = item[0][i]
                    if not isinstance(table, dict) or 'title' not in table or 'columns' not in table or 'rows' not in table:
                        raise Exception("Unrecognized table " + repr(table) + " in Medical section")
                    if len(table['rows']) == 1:
                        continue
                    table_title = re.sub(r'^Party [0-9]+ ', '', table['title'])
                    if table_title not in subout:
                        subout[table_title] = dict()
                    subsubout = list()
                    for subitem in table['rows']:
                        if not len(subitem) == 2:
                            raise Exception("Unrecognized row in table in Medical section")
                        # Fixed: previously appended a stale `amount` left over
                        # from the Child Care loop above (NameError if that loop
                        # never ran); parse this row's own value instead.
                        amount, is_potential = get_amount_potential(subitem[1])
                        subsubout.append({'child': subitem[0], 'amount': amount})
                    subout[table_title][headers[i]] = subsubout
                    if 'footer' in table:
                        subout[table_title + " Note"] = '\n'.join(table['footer'])
            elif len(item) == 0:
                continue
            elif len(item) == 1 and isinstance(item[0], str):
                subout['notes'].append(item[0])
            elif len(item) == 2:
                subout[item[0]] = item[1]
            elif len(item) == 1 + len(headers):
                if item[0] in ("See 2021 MCSF 3.05(C) See 2021 MCSF 3.04(B)", "Additional Out-of-pocket Medical Expenses Per Child"):
                    continue
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                subout[item[0]] = subsubout
        output["Medical"] = subout
        main_output['results'].append(output)
    for item, val in results['hidden'].items():
        main_output["information"][item] = nulltruefalse(val)
    for item in results['summary']:
        main_output['summaries'].append(item)
    return main_output
# if __name__ == "__main__":
# filename = 'mi-results.html'
# raw_data = extract_data('mi-results.html')
# print("Final:")
# print(json.dumps(raw_data, indent=2))
| 47.648915 | 228 | 0.528893 | 642 | 0.026574 | 0 | 0 | 0 | 0 | 0 | 0 | 5,169 | 0.213958 |
e23c6bed89aa6c1b377701c04838eafacf7af7c2 | 2,571 | py | Python | graphenetools/gt_c_one_third_commensurate_command_plot.py | nscottnichols/graphenetools-py | ff2b2ac302d1f81ddc8887036d1915a93cc2adda | [
"MIT"
] | null | null | null | graphenetools/gt_c_one_third_commensurate_command_plot.py | nscottnichols/graphenetools-py | ff2b2ac302d1f81ddc8887036d1915a93cc2adda | [
"MIT"
] | null | null | null | graphenetools/gt_c_one_third_commensurate_command_plot.py | nscottnichols/graphenetools-py | ff2b2ac302d1f81ddc8887036d1915a93cc2adda | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import sys
import argparse
from graphenetools import gt
def create_parser():
    """Build the command-line argument parser for the C1/3 plotting script."""
    description = ("Plot graphene lattice and C1/3 phase corresponding to printed "
                   "command line arguments for uniaxially strained graphene (for use "
                   "with QMC software located at https://code.delmaestro.org)")
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # (flags, options) pairs, registered in order below.
    argument_table = (
        (("m",), dict(type=int,
                      help="Simulation cell parameter to generate `2*m*n` C1/3 adsorption sites")),
        (("n",), dict(type=int,
                      help="Simulation cell parameter to generate `2*m*n` C1/3 adsorption sites")),
        (("--strain",), dict(type=float, default=0.0,
                             help="Value of strain in armchair direction")),
        (("--carbon_carbon_distance",), dict(type=float, default=1.42,
                                             help="Distance in angstrom between adjacent carbon atoms in isotropic graphene")),
        (("--poisson_ratio",), dict(type=float, default=0.165,
                                    help="Poisson's ratio, (the ratio of transverse contraction strain to longitudinal extension strain in the direction of the stretching force) for graphene")),
        (("--mplstylefile",), dict(type=str, default="default",
                                   help='Location of stylefile to use with plotting')),
        (("--dpi",), dict(type=float, default=None,
                          help="DPI of saved plot. Defaults to `rcParams[\"savefig.dpi\"]`")),
        (("--savefig",), dict(type=str, default="",
                              help="Location to save plot. Image type based on extension. Will not save if empty.")),
    )
    for flags, options in argument_table:
        parser.add_argument(*flags, **options)
    return parser
def main(argv=None):
    """Entry point: print the QMC command-line arguments for a uniaxially
    strained graphene C1/3 commensurate cell and display (and optionally
    save) the matching plot.

    The printed command targets the QMC software at
    https://code.delmaestro.org.  Returns 0 on success.
    """
    argv = sys.argv if argv is None else argv
    args = create_parser().parse_args(argv[1:])
    lattice_kwargs = dict(carbon_carbon_distance=args.carbon_carbon_distance,
                          poisson_ratio=args.poisson_ratio)
    # Print the command-line arguments, then build the companion figure.
    gt.c_one_third_commensurate_command(args.m, args.n, args.strain, **lattice_kwargs)
    fig, ax = gt.c_one_third_commensurate_command_plot(args.m, args.n, args.strain, **lattice_kwargs)
    if args.savefig:
        with gt.plt.style.context(args.mplstylefile):
            fig.savefig(args.savefig, dpi=args.dpi)
    fig.show()
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| 48.509434 | 236 | 0.686892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,056 | 0.410735 |
e23c850adf3d4c53ba65eb60ae258efc52f8112a | 341 | py | Python | profiles/admin.py | cotebarrientos/4th-milestone-project-your-argentine-shop | 8b098f4a0fea73174b064eacd2b1c53dc1a61bc1 | [
"OLDAP-2.2.1"
] | null | null | null | profiles/admin.py | cotebarrientos/4th-milestone-project-your-argentine-shop | 8b098f4a0fea73174b064eacd2b1c53dc1a61bc1 | [
"OLDAP-2.2.1"
] | 2 | 2021-04-27T16:04:31.000Z | 2021-05-04T22:02:26.000Z | profiles/admin.py | cotebarrientos/4th-milestone-project-your-argentine-shop | 8b098f4a0fea73174b064eacd2b1c53dc1a61bc1 | [
"OLDAP-2.2.1"
] | 1 | 2021-06-22T19:50:03.000Z | 2021-06-22T19:50:03.000Z | from django.contrib import admin
from .models import UserProfile
class ProfileAdmin(admin.ModelAdmin):
    """Display the user profiles created in the Admin panel."""
    # Columns shown on the UserProfile change-list page; each entry names a
    # field on the UserProfile model (the owning user plus the stored
    # "default_*" details).
    list_display = (
        'user',
        'default_full_name',
        'default_email',
        'default_country',
    )

# Make UserProfile manageable through the Django admin site.
admin.site.register(UserProfile, ProfileAdmin)
| 22.733333 | 63 | 0.680352 | 225 | 0.659824 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.340176 |
e23e5edeb769867120d432db8d1a63dd68cde4ce | 15,509 | py | Python | django_flex_user/models/user.py | ebenh/django-flex-user | efffb21e4ce33d2ea8665756334e2a391f4b5a72 | [
"MIT"
] | 1 | 2021-09-13T20:26:02.000Z | 2021-09-13T20:26:02.000Z | django_flex_user/models/user.py | ebenh/django-flex-user | efffb21e4ce33d2ea8665756334e2a391f4b5a72 | [
"MIT"
] | null | null | null | django_flex_user/models/user.py | ebenh/django-flex-user | efffb21e4ce33d2ea8665756334e2a391f4b5a72 | [
"MIT"
] | null | null | null | from django.db import models
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS
from phonenumber_field.modelfields import PhoneNumberField
from dirtyfields import DirtyFieldsMixin
from django_flex_user.validators import FlexUserUnicodeUsernameValidator
from django_flex_user.fields import CICharField
# Reference: https://docs.djangoproject.com/en/3.0/topics/auth/customizing/
# Reference: https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html
class FlexUserManager(BaseUserManager):
    """
    Custom manager for the flexible user model.

    Mirrors django.contrib.auth.models.UserManager, but lets a user be
    identified by any combination of username, email address and phone
    number.
    """

    @classmethod
    def normalize_email(cls, email):
        """
        Normalize an email address by lowercasing and IDNA-encoding its
        domain part.

        ``None`` passes through unchanged; addresses that cannot be split
        on "@" or whose domain cannot be IDNA-encoded are returned as
        supplied.

        :param email: The email address to normalize, or None.
        :return: The normalized email address, or the input unchanged.
        """
        if email is None:
            return None
        try:
            local_part, domain_part = email.strip().rsplit('@', 1)
        except ValueError:
            # No "@" present; leave the value as supplied.
            return email
        try:
            return local_part + '@' + domain_part.lower().encode('idna').decode('ascii')
        except UnicodeError:
            # Domain cannot be IDNA-encoded; leave the value as supplied.
            return email

    def _create_user(self, username=None, email=None, phone=None, password=None, **extra_fields):
        # Shared implementation for create_user/create_superuser:
        # build the instance, validate it fully, then persist it.
        user = self.model(username=username, email=email, phone=phone, **extra_fields)
        user.set_password(password)
        user.full_clean()
        user.save(using=self._db)
        return user

    def create_user(self, username=None, email=None, phone=None, password=None, **extra_fields):
        """
        Create an ordinary (non-staff, non-superuser) user. You must supply
        at least one of ``username``, ``email``, or ``phone``.

        When ``password`` is None the user's password is set using
        :meth:`~django.contrib.auth.models.User.set_unusable_password`.

        .. warning::
            :setting:`AUTH_PASSWORD_VALIDATORS` are NOT run against
            ``password`` here; it's the caller's responsibility to run
            password validators before calling this method.

        :param username: The username for the user, defaults to None.
        :type username: str, optional
        :param email: The email address for the user, defaults to None.
        :type email: str, optional
        :param phone: The phone number for the user, defaults to None.
        :type phone: str, optional
        :param password: The password for the user, defaults to None.
        :type password: str, optional
        :param extra_fields: Additional model fields to set on the user.
        :type extra_fields: dict, optional
        :raises ~django.core.exceptions.ValidationError: If any supplied value
            fails model-field validation (e.g. a duplicate email or phone
            number, an invalid username, etc.)
        :return: The newly created user.
        :rtype: ~django_flex_user.models.user.FlexUser
        """
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(username, email, phone, password, **extra_fields)

    def create_superuser(self, username=None, email=None, phone=None, password=None, **extra_fields):
        """
        Create a superuser. Same contract as :meth:`create_user`, except that
        ``is_staff`` and ``is_superuser`` default to True and must not be
        passed as anything other than True.

        :raises ValueError: If ``is_staff`` or ``is_superuser`` is explicitly
            set to something other than True.
        :raises ~django.core.exceptions.ValidationError: If any supplied value
            fails model-field validation.
        :return: The newly created user.
        :rtype: ~django_flex_user.models.user.FlexUser
        """
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)

        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')

        return self._create_user(username, email, phone, password, **extra_fields)

    def get_by_natural_key(self, username=None, email=None, phone=None):
        # Look up a user by any non-None combination of the three
        # natural-key fields.
        if username is None and email is None and phone is None:
            raise ValueError('You must supply at least one of username, email or phone number')
        criteria = {
            field_name: field_value
            for field_name, field_value in (('username', username), ('email', email), ('phone', phone))
            if field_value is not None
        }
        return self.get(**criteria)
class FlexUser(AbstractBaseUser, PermissionsMixin, DirtyFieldsMixin):
    """
    Our implementation of django.contrib.auth.models.User.

    This user model is designed to give users the flexibility to sign up and sign in using their choice of username,
    email address or phone number.

    Our implementation is identical to django.contrib.auth.models.User except in the following ways:

        username field sets null=True and blank=True.
        email field sets null=True and blank = True.
        phone field is introduced. It defines unique=True, null=True and blank=True.
        first_name and last_name fields are omitted.

        For each of username, email and phone we set blank = True to preserve the ordinary functioning of the
        admin site. Setting blank = True on model fields results in form fields which have required = False set,
        thereby enabling users to supply any subset of username, email and phone when configuring a user on the
        admin site. Furthermore, when null = True and blank = True are set together on model fields, the value of empty
        form fields are conveniently coerced to None. Unfortunately, setting blank = True on model fields has the
        undesirable consequence that empty string values will not by rejected by clean_fields/full_clean methods. To
        remedy this, we reject empty string values for username, email and phone in our clean method (see below).

        clean method:
            - Ensures that at least one of username, email or phone is defined for the user.
            - Ensures that none of username, email and phone are equal to the empty string. We must do this
            because we set blank = True for each of these fields (see above).
            - Normalizes email in addition to username.

        get_username method returns one of username, email, phone or id. This method evaluates each of these
        fields in order and returns the first truthy value.

        natural_key method returns a tuple of username, email and phone.

    We place the following restrictions on username, email and phone:

        - It shouldn't be possible to interpret username as an email address or phone number
        - It shouldn't be possible to interpret email as a username or phone number
        - It shouldn't be possible to interpret phone as a username or email address

    These restrictions are enforced by field validators which apply the constraints below:

        - username may not begin with "+" or a decimal number, nor may it contain "@"
        - email must contain "@"
        - phone must contain "+" and may not contain "@"

    These constraints make it possible to receive an unspecified user identifier and infer whether it is a username,
    email address or phone number.
    """
    username_validator = FlexUserUnicodeUsernameValidator()
    # All three identifier fields are nullable and optional; the clean()
    # method below guarantees at least one of them is supplied.
    email = models.EmailField(
        _('email address'),
        unique=True,
        null=True,  # new
        blank=True,  # new
        error_messages={
            'unique': _("A user with that email address already exists."),
        },
    )
    phone = PhoneNumberField(  # new
        _('phone number'),
        unique=True,
        null=True,
        blank=True,
        error_messages={
            'unique': _("A user with that phone number already exists."),
        },
    )
    # username = models.CharField(
    #     _('username'),
    #     max_length=150,
    #     unique=True,
    #     null=True,  # new
    #     blank=True,  # new
    #     help_text=_('150 characters or fewer. Letters, digits and ./-/_ only.'),
    #     validators=[username_validator],
    #     error_messages={
    #         'unique': _("A user with that username already exists."),
    #     },
    # )
    # Case-insensitive username field (CICharField) replacing the plain
    # CharField version commented out above.
    username = CICharField(
        _('username'),
        max_length=150,
        unique=True,
        null=True,  # new
        blank=True,  # new
        help_text=_('150 characters or fewer. Letters, digits and ./-/_ only.'),
        validators=[username_validator],
        error_messages={
            'unique': _("A user with that username already exists."),
        },
    )
    is_staff = models.BooleanField(
        _('staff status'),
        default=False,
        help_text=_('Designates whether the user can log into this admin site.'),
    )
    is_active = models.BooleanField(
        _('active'),
        default=True,
        help_text=_(
            'Designates whether this user should be treated as active. '
            'Unselect this instead of deleting accounts.'
        ),
    )
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    # We remove these fields from our user model implementation
    # first_name = models.CharField(_('first name'), max_length=30, blank=True)
    # last_name = models.CharField(_('last name'), max_length=150, blank=True)
    EMAIL_FIELD = 'email'
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = []
    objects = FlexUserManager()
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
    def clean(self):
        """Validate and normalize the identifier fields.

        Ensures at least one of username/email/phone is set, rejects empty
        strings for all three (necessary because blank=True disables that
        check at the field level), and normalizes username and email.
        """
        errors = {}
        if self.username is None and self.email is None and self.phone is None:
            errors[NON_FIELD_ERRORS] = 'You must supply at least one of {username}, {email} or {phone}.'.format(
                username=self._meta.get_field('username').verbose_name,
                email=self._meta.get_field('email').verbose_name,
                phone=self._meta.get_field('phone').verbose_name
            )
        # For fields which have blank = False:
        # django.db.models.fields.Field.clean first executes django.db.models.fields.Field.validate which raises an
        # exception if the field contains a blank value. If an exception is raised, the subsequent call to
        # django.db.models.fields.Field.run_validators is not made.
        #
        # For fields which have blank = True:
        # django.db.models.base.Model.clean_fields executes django.db.models.fields.Field.clean for each of its fields.
        # However, it skips this call for fields which contain a blank value.
        #
        # Therefore, validators are not run for blank values no matter what. So we cannot depend on validators to reject
        # empty values.
        if self.username == '':
            errors['username'] = 'This field may not be blank.'
        if self.email == '':
            errors['email'] = 'This field may not be blank.'
        if self.phone == '':
            errors['phone'] = 'This field may not be blank.'
        if errors:
            raise ValidationError(errors)
        # Normalize username and email
        self.username = self.normalize_username(self.username)
        self.email = FlexUser.objects.normalize_email(self.email)
    def get_username(self):
        """Return the identifying username for this user"""
        # First truthy identifier wins; the primary key is the last resort.
        return self.username or self.email or (str(self.phone) if self.phone else None) or str(self.id)
    def natural_key(self):
        # Serialization key consumed by FlexUserManager.get_by_natural_key.
        return self.username, self.email, self.phone
@receiver(pre_save, sender=FlexUser)
def my_pre__save_handler(sender, **kwargs):
    """Pre-save signal hook for FlexUser; intentionally a no-op placeholder."""
    pass
@receiver(post_save, sender=FlexUser)
def my_post_save_handler(sender, **kwargs):
    """Keep the user's email/phone token rows in sync after every save.

    On creation, a token row is created for each identifier that is set.
    On update, dirty-field tracking decides whether the matching token row
    must be deleted, created, or updated (with its verification state reset).
    """
    user = kwargs['instance']
    if kwargs['created']:
        # Fresh user: create one token row per identifier present.
        if user.email is not None:
            user.emailtoken_set.create(user_id=user.id, email=user.email)
        if user.phone is not None:
            user.phonetoken_set.create(user_id=user.id, phone=user.phone)
    else:
        # Existing user: only react to fields that actually changed.
        dirty_fields = user.get_dirty_fields(verbose=True)
        if 'email' in dirty_fields:
            if dirty_fields['email']['current'] is None:
                # If the new value for email is None, delete the token if it exists
                user.emailtoken_set.filter(user_id=user.id).delete()
            elif dirty_fields['email']['saved'] is None:
                # If the old value for email is None and its new value is not None, create a new token
                # todo: construct this instance manually?
                user.emailtoken_set.create(user=user, email=dirty_fields['email']['current'])
            else:
                # Otherwise, update the existing token
                email_token = user.emailtoken_set.get(user=user)
                email_token.email = user.email
                # Reset the password / verification state for the new address
                email_token.verified = False
                email_token.password = None
                email_token.expiration = None
                email_token.save(update_fields=['email', 'verified', 'password', 'expiration'])
        if 'phone' in dirty_fields:
            if dirty_fields['phone']['current'] is None:
                # If the new value for phone is None, delete the token if it exists
                user.phonetoken_set.filter(user_id=user.id).delete()
            elif dirty_fields['phone']['saved'] is None:
                # If the old value for phone is None and its new value is not None, create a new token
                # todo: construct this instance manually?
                user.phonetoken_set.create(user=user, phone=dirty_fields['phone']['current'])
            else:
                # Otherwise, update the existing token
                phone_token = user.phonetoken_set.get(user=user)
                phone_token.phone = user.phone
                # Reset the password / verification state for the new number
                phone_token.verified = False
                phone_token.password = None
                phone_token.expiration = None
                phone_token.save(update_fields=['phone', 'verified', 'password', 'expiration'])
| 43.687324 | 120 | 0.649945 | 12,139 | 0.782707 | 0 | 0 | 3,056 | 0.197047 | 0 | 0 | 8,869 | 0.571861 |
e240f8d4c7d37f70aa462fa6abf5e545e2313227 | 40 | py | Python | wiki_search/dataset/__init__.py | WikiMegrez/wikisearch | 89dcd07962bacf0dc3cce55bf529b8af44e8150e | [
"Apache-2.0"
] | null | null | null | wiki_search/dataset/__init__.py | WikiMegrez/wikisearch | 89dcd07962bacf0dc3cce55bf529b8af44e8150e | [
"Apache-2.0"
] | null | null | null | wiki_search/dataset/__init__.py | WikiMegrez/wikisearch | 89dcd07962bacf0dc3cce55bf529b8af44e8150e | [
"Apache-2.0"
] | null | null | null | from .dataset import Dataset, Document
| 13.333333 | 38 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e242645796e590100e62540f3f46eef329d4fb8e | 29,332 | py | Python | MedTAG_Dockerized/MedTAG_sket_dock_App/utils_upload_files.py | MedTAG/medtag-core | f2dae7b38230179d71babede7e4910631d91053f | [
"MIT"
] | 6 | 2021-12-20T12:15:17.000Z | 2022-02-02T15:28:42.000Z | MedTAG_Dockerized/MedTAG_sket_dock_App/utils_upload_files.py | MedTAG/medtag-core | f2dae7b38230179d71babede7e4910631d91053f | [
"MIT"
] | 1 | 2022-03-07T14:57:44.000Z | 2022-03-11T18:11:55.000Z | MedTAG_Dockerized/MedTAG_sket_dock_App/utils_upload_files.py | MedTAG/medtag-core | f2dae7b38230179d71babede7e4910631d91053f | [
"MIT"
] | 2 | 2021-05-29T09:44:38.000Z | 2021-12-28T03:53:40.000Z | import psycopg2
import re
import json
from MedTAG_sket_dock_App.models import *
import os
import pandas as pd
import numpy
from psycopg2.extensions import register_adapter, AsIs
def addapt_numpy_float64(numpy_float64):
    """psycopg2 adapter: render a numpy.float64 as-is in SQL parameters."""
    adapted_value = AsIs(numpy_float64)
    return adapted_value
def addapt_numpy_int64(numpy_int64):
    """psycopg2 adapter: render a numpy.int64 as-is in SQL parameters."""
    adapted_value = AsIs(numpy_int64)
    return adapted_value
# Register the adapters so psycopg2 can serialize numpy scalar types that
# leak out of pandas frames into SQL query parameters.
register_adapter(numpy.float64, addapt_numpy_float64)
register_adapter(numpy.int64, addapt_numpy_int64)
from django.db.models import Count
from django.db import transaction
import datetime
from MedTAG_sket_dock_App.utils import *
def check_uploaded_files(files):
    """Validate the csv files a user uploads to copy ground truths from.

    Each file must be a .csv, parseable by pandas, contain at least one data
    row, and use exactly one of the four accepted column layouts (labels,
    mentions, concepts, linking).

    Returns:
        dict: {'message': 'Ok'} when every file is valid, otherwise an
        {'message': 'ERROR - ...'} describing the first failure. An empty
        ``files`` list yields {'message': ''}.
    """
    json_resp = {'message': ''}
    # Identifying columns shared by every layout, plus the payload columns
    # that distinguish the four accepted layouts.
    common = ['username', 'annotation_mode', 'id_report', 'language', 'batch', 'institute', 'usecase']
    allowed_layouts = (
        set(common + ['label']),                                              # labels
        set(common + ['start', 'stop', 'mention_text']),                      # mentions
        set(common + ['concept_url', 'concept_name', 'area']),                # concepts
        set(common + ['start', 'stop', 'mention_text', 'concept_name',
                      'concept_url', 'area']),                                # linking
    )
    for uploaded in files:
        # Reject anything that is not a csv before trying to parse it.
        if not uploaded.name.endswith('csv'):
            json_resp['message'] = 'ERROR - ' + uploaded.name + ' - The file must be .csv'
            return json_resp
        try:
            df = pd.read_csv(uploaded)
            df = df.where(pd.notnull(df), None)
            df = df.reset_index(drop=True)  # Useful if the csv includes only commas
        except Exception as e:
            print(e)
            json_resp['message'] = ('ERROR - ' + uploaded.name +
                                    ' - An error occurred while parsing the csv. Check if it is well formatted. '
                                    'Check if it contains as many columns as they are declared in the header.')
            return json_resp
        else:
            cols = set(df.columns)
            # The header must match one of the accepted layouts exactly.
            if cols not in allowed_layouts:
                json_resp['message'] = ('ERROR - ' + uploaded.name +
                                        ' - The set of columns you inserted in the csv does not correspond to those we ask. '
                                        'Check the examples.')
                return json_resp
            if 'usecase' in cols:
                df['usecase'] = df['usecase'].str.lower()
            # Reject csv files that declare a header but contain no rows.
            if df.shape[0] == 0:
                json_resp['message'] = 'ERROR - ' + uploaded.name + ' - You must provide at least a row.'
                return json_resp
    if len(files) > 0 and json_resp['message'] == '':
        json_resp['message'] = 'Ok'
    return json_resp
def upload_files(files,user_to,overwrite):
    """Import annotations for ``user_to`` from uploaded csv files.

    Each csv row describes one annotation (label / mention / concept /
    concept-mention link) for a (report, language) pair; the whole import
    runs in a single DB transaction. ``overwrite`` controls whether any
    existing ground truth of the target user is replaced.

    Returns:
        dict: {'message': 'Ok'} on success, {'message': 'automatic missing'}
        when Robot-mode annotations are uploaded but no automatic ground
        truth exists, or an error message when anything raises.
    """
    json_resp = {'message':'Ok'}
    mode_rob = NameSpace.objects.get(ns_id='Robot')
    mode_hum = NameSpace.objects.get(ns_id='Human')
    print(user_to)
    username_rob = User.objects.get(username='Robot_user', ns_id=mode_rob)
    try:
        # All inserts/deletes succeed or fail together.
        with transaction.atomic():
            for i in range(len(files)):
                df = pd.read_csv(files[i])
                df = df.where(pd.notnull(df), None)
                df = df.reset_index(drop=True)  # Useful if the csv includes only commas
                # NOTE(review): sort_values is not in-place and its result is
                # discarded -- presumably inplace=True was intended; verify.
                df.sort_values(['id_report','language','annotation_mode'])
                cols = list(df.columns)
                # The four accepted column layouts (see check_uploaded_files).
                labels = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'label']
                mentions = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'start', 'stop',
                            'mention_text']
                concepts = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'concept_url',
                            'concept_name', 'area']
                linking = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'start', 'stop',
                           'mention_text', 'concept_name', 'concept_url', 'area']
                # One group per (report, language, annotation mode).
                for i, g in df.groupby(['id_report','language','annotation_mode']):
                    # NOTE(review): this loop variable `i` shadows the file
                    # index `i` of the enclosing loop.
                    count_rows = g.shape[0]
                    deleted_mentions = False
                    # Map csv annotation modes onto namespace ids.
                    if df.annotation_mode.unique()[0] == 'Manual':
                        a = 'Human'
                    else:
                        a = 'Robot'
                    report_cur = Report.objects.get(id_report = str(g.id_report.unique()[0]), language = g.language.unique()[0] )
                    mode = NameSpace.objects.get(ns_id =a)
                    anno_mode = mode
                    # Robot-mode uploads require an existing automatic ground truth.
                    if a == 'Robot' and GroundTruthLogFile.objects.filter(username = username_rob).count() == 0:
                        json_resp = {'message':'automatic missing'}
                        return json_resp
                    report = report_cur
                    g = g.reset_index()
                    action = ''
                    user = User.objects.get(username=user_to, ns_id=mode)
                    # --- Phase 1: clear the target user's existing annotations
                    # for this report, depending on layout and overwrite flag.
                    if set(cols) == set(labels):
                        user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                                       id_report=report, language=report.language,
                                                                       gt_type='labels')
                        if overwrite == False:
                            if mode.ns_id == 'Robot':
                                if not user_to_gt.exists():
                                    Associate.objects.filter(username=user, ns_id=mode, id_report=report,
                                                             language=report.language).delete()
                        else:
                            GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report,
                                                              language=report.language,
                                                              gt_type='labels').delete()
                            Associate.objects.filter(username=user, ns_id=mode, id_report=report,
                                                     language=report.language).delete()
                    elif set(cols) == set(mentions):
                        user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                                       id_report=report, language=report.language,
                                                                       gt_type='mentions')
                        robot_gt = GroundTruthLogFile.objects.filter(username=username_rob, ns_id=mode_rob,
                                                                     id_report=report, language=report.language,
                                                                     gt_type='mentions')
                        # ins_time = ''
                        # if robot_gt.exists():
                        #     rob_first_gt = robot_gt.first()
                        #     ins_time = rob_first_gt.insertion_time
                        if overwrite == False:
                            if mode.ns_id == 'Robot':
                                if not user_to_gt.exists():
                                    # user_to_gt_first = user_to_gt.first()
                                    # if user_to_gt_first.insertion_time == ins_time:
                                    #     GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                    #                                       id_report=report,
                                    #                                       language=report.language,
                                    #                                       gt_type='mentions').delete()
                                    # Mentions are referenced by links: drop the
                                    # dependent link/concept ground truth too.
                                    if Linked.objects.filter(username=user, ns_id=mode, id_report=report,
                                                             language=report.language).exists():
                                        GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                                          id_report=report,
                                                                          language=report.language,
                                                                          gt_type='concept-mention').delete()
                                        GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                                          id_report=report,
                                                                          language=report.language,
                                                                          gt_type='concepts').delete()
                                        Annotate.objects.filter(username=user, ns_id=mode, id_report=report,
                                                                language=report.language).delete()
                                        links = Linked.objects.filter(username=user, ns_id=mode, id_report=report,
                                                                      language=report.language)
                                        for e in links:
                                            concept = e.concept_url
                                            Contains.objects.filter(username=user, ns_id=mode, id_report=report,
                                                                    language=report.language, concept_url=concept).delete()
                                        links.delete()
                        else:
                            GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                              id_report=report,
                                                              language=report.language,
                                                              gt_type='mentions').delete()
                            if Linked.objects.filter(username=user, ns_id=mode,id_report=report,language=report.language).exists():
                                GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                                  id_report=report,
                                                                  language=report.language,
                                                                  gt_type='concept-mention').delete()
                                GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                                  id_report=report,
                                                                  language=report.language,
                                                                  gt_type='concepts').delete()
                                Annotate.objects.filter(username=user, ns_id=mode, id_report=report,
                                                        language=report.language).delete()
                                links = Linked.objects.filter(username=user, ns_id=mode, id_report=report,
                                                              language=report.language)
                                for e in links:
                                    concept = e.concept_url
                                    Contains.objects.filter(username=user, ns_id=mode, id_report=report,
                                                            language=report.language, concept_url=concept).delete()
                                links.delete()
                    elif set(cols) == set(concepts):
                        user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                                       id_report=report, language=report.language,
                                                                       gt_type='concepts')
                        robot_gt = GroundTruthLogFile.objects.filter(username=username_rob, ns_id=mode_rob,
                                                                     id_report=report, language=report.language,
                                                                     gt_type='concepts')
                        # ins_time = ''
                        # if robot_gt.exists():
                        #     rob_first_gt = robot_gt.first()
                        #     ins_time = rob_first_gt.insertion_time
                        if overwrite == False:
                            if mode.ns_id == 'Robot':
                                if not user_to_gt.exists():
                                    # user_to_gt_first = user_to_gt.first()
                                    # if user_to_gt_first.insertion_time == ins_time:
                                    #     GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                    #                                       id_report=report,
                                    #                                       language=report.language,
                                    #                                       gt_type='concepts').delete()
                                    Contains.objects.filter(username=user, ns_id=mode, id_report=report,
                                                            language=report.language).delete()
                        else:
                            GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report,
                                                              language=report.language,
                                                              gt_type='concepts').delete()
                            Contains.objects.filter(username=user, ns_id=mode, id_report=report,
                                                    language=report.language).delete()
                    elif set(cols) == set(linking):
                        user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                                       id_report=report, language=report.language,
                                                                       gt_type='concept-mention')
                        if overwrite == False:
                            if mode.ns_id == 'Robot':
                                if not user_to_gt.exists():
                                    GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                                      id_report=report,
                                                                      language=report.language,
                                                                      gt_type='concepts').delete()
                                    GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                                      id_report=report,
                                                                      language=report.language,
                                                                      gt_type='mentions').delete()
                                    links = Linked.objects.filter(username=user, ns_id=mode, id_report=report,
                                                                  language=report.language)
                                    for e in links:
                                        concept = e.concept_url
                                        area = e.name
                                        Contains.objects.filter(username=user, ns_id=mode, id_report=report,
                                                                language=report.language, name=area,
                                                                concept_url=concept).delete()
                                    links.delete()
                                    Annotate.objects.filter(username=user, ns_id=mode, id_report=report,
                                                            language=report.language).delete()
                        else:
                            GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                              id_report=report,
                                                              language=report.language,
                                                              gt_type='concepts').delete()
                            GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                              id_report=report,
                                                              language=report.language,
                                                              gt_type='concept-mention').delete()
                            GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
                                                              id_report=report,
                                                              language=report.language,
                                                              gt_type='mentions').delete()
                            links = Linked.objects.filter(username=user, ns_id=mode, id_report=report,
                                                          language=report.language)
                            for ll in links:
                                concept = ll.concept_url
                                area = ll.name
                                Contains.objects.filter(username=user, ns_id=mode, id_report=report,
                                                        language=report.language, concept_url=concept,name = area).delete()
                            Annotate.objects.filter(username=user, ns_id=mode, id_report=report,
                                                    language=report.language).delete()
                            links.delete()
                    # --- Phase 2: insert the annotations row by row.
                    for i in range(count_rows):
                        # NOTE(review): `usecase` is read from `df` while all
                        # other columns are read from the group `g` -- verify
                        # this mixed indexing is intended.
                        usecase = str(df.loc[i, 'usecase'])
                        usecase_obj = UseCase.objects.get(name=usecase)
                        mode = str(g.loc[i, 'annotation_mode'])
                        id_report = str(g.loc[i, 'id_report'])
                        language = str(g.loc[i, 'language'])
                        institute = str(g.loc[i, 'institute'])
                        # user_from = str(g.loc[i, 'username'])
                        if mode == 'Manual':
                            mode = 'Human'
                        elif mode == 'Automatic':
                            mode = 'Robot'
                        # username_from = User.objects.get(username=user_from, ns_id=mode)
                        mode = NameSpace.objects.get(ns_id = mode)
                        report = Report.objects.get(id_report=id_report, language=language, institute=institute)
                        if set(cols) == set(labels):
                            label = AnnotationLabel.objects.get(label = str(g.loc[i, 'label']),name = usecase_obj)
                            if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user,
                                                                                             ns_id=mode,
                                                                                             id_report=report,
                                                                                             language=report.language,
                                                                                             gt_type='labels').exists()) or overwrite == True:
                                if not Associate.objects.filter(username=user, ns_id=mode, id_report=report, label=label,seq_number=label.seq_number,
                                                                language=report.language).exists():
                                    Associate.objects.create(username=user, ns_id=mode, id_report=report, label=label,
                                                             seq_number=label.seq_number,
                                                             language=report.language, insertion_time=Now())
                                action = 'labels'
                        elif set(cols) == set(mentions):
                            mention = Mention.objects.get(id_report = report, language = language, start = int(g.loc[i, 'start']),
                                                          stop = int(g.loc[i, 'stop']))
                            if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user,
                                                                                             ns_id=mode,
                                                                                             id_report=report,
                                                                                             language=report.language,
                                                                                             gt_type='mentions').exists()) or overwrite == True:
                                if not Annotate.objects.filter(username=user, ns_id=mode, id_report=report,start = mention,stop = mention.stop,
                                                               language=report.language).exists():
                                    Annotate.objects.create(username=user, ns_id=mode, id_report=report,start = mention,stop = mention.stop,
                                                            language=report.language, insertion_time=Now())
                                action = 'mentions'
                        elif set(cols) == set(concepts):
                            concept = Concept.objects.get(concept_url = str(g.loc[i, 'concept_url']))
                            area = SemanticArea.objects.get(name=str(g.loc[i, 'area']))
                            if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user,
                                                                                             ns_id=mode,
                                                                                             id_report=report,
                                                                                             language=report.language,
                                                                                             gt_type='concepts').exists()) or overwrite == True:
                                if not Contains.objects.filter(username = user, ns_id =mode, id_report = report,concept_url = concept,name = area,
                                                               language = report.language).exists():
                                    Contains.objects.create(username = user, ns_id =mode, id_report = report,concept_url = concept,name = area,
                                                            language = report.language,insertion_time = Now())
                                action = 'concepts'
                        elif set(cols) == set(linking):
                            concept = Concept.objects.get(concept_url = str(g.loc[i, 'concept_url']))
                            area = SemanticArea.objects.get(name=str(g.loc[i, 'area']))
                            mention = Mention.objects.get(id_report=report, language=language,start=int(g.loc[i, 'start']),
                                                          stop=int(g.loc[i, 'stop']))
                            if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user,
                                                                                             ns_id=mode,
                                                                                             id_report=report,
                                                                                             language=report.language,
                                                                                             gt_type='concept-mention').exists()) or overwrite == True:
                                # Linking rows replace the user's mentions for this
                                # report, so existing ones are dropped once per group.
                                if not deleted_mentions:
                                    Annotate.objects.filter(username=user, ns_id=mode, id_report=report,language=report.language).delete()
                                    deleted_mentions = True
                                a = Annotate.objects.filter(username = user, ns_id = mode, id_report = report,
                                                            language = report.language,start=mention,stop = mention.stop)
                                c = Contains.objects.filter(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area,
                                                            language = report.language)
                                l = Linked.objects.filter(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area,
                                                          language = report.language,start=mention,stop = mention.stop)
                                if not a.exists():
                                    Annotate.objects.create(username=user, ns_id=mode, id_report=report,
                                                            language=report.language, start=mention, stop=mention.stop, insertion_time = Now())
                                if not c.exists():
                                    Contains.objects.create(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area,
                                                            language = report.language,insertion_time = Now())
                                if not l.exists():
                                    Linked.objects.create(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area,
                                                          language = report.language,start=mention,stop = mention.stop,insertion_time = Now())
                                action = 'concept-mention'
                    # --- Phase 3: record the ground-truth log entries implied
                    # by the kind of annotations that were inserted.
                    if action != '':
                        # gt_json = serialize_gt(action, usecase, user_to, report_cur.id_report, report_cur.language,
                        #                        anno_mode)
                        # GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type=action,gt_json=gt_json, insertion_time=Now(),id_report=report_cur, language=language)
                        if action == 'concept-mention':
                            gt_json = serialize_gt('mentions', usecase, user_to, report_cur.id_report, report_cur.language,
                                                   anno_mode)
                            GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type='mentions',
                                                              gt_json=gt_json, insertion_time=Now(),
                                                              id_report=report_cur, language=language)
                            gt_json = serialize_gt('concepts', usecase, user_to, report_cur.id_report, report_cur.language,
                                                   anno_mode)
                            GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type='concepts',
                                                              gt_json=gt_json, insertion_time=Now(),
                                                              id_report=report_cur, language=language)
                        if action == 'mentions':
                            gt_json = serialize_gt('concepts', usecase, user_to, report_cur.id_report, report_cur.language,
                                                   anno_mode)
                            if Contains.objects.filter(id_report=report_cur, language=language,username=user, ns_id=anno_mode).count()>0 and Linked.objects.filter(id_report=report_cur, language=language,username=user, ns_id=anno_mode).count()>0:
                                GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type='concepts',
                                                                  gt_json=gt_json, insertion_time=Now(),
                                                                  id_report=report_cur, language=language)
    except Exception as e:
        print(e)
        json_resp = {'message':'an error occurred, remember that your configuration must be the same of the one of the user you are uploading the annotations of.'}
    finally:
        # NOTE(review): the `return` inside `finally` supersedes the earlier
        # returns; it works because json_resp is rebound first, but it is a
        # fragile pattern.
        return json_resp
| 68.055684 | 245 | 0.420496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,909 | 0.133267 |
e243469fdf4c02806f27f1c408cf2cf6e88ea291 | 1,159 | py | Python | main.py | yang-233/mmsa | eed7b943746041b735d8a7af8d60b6457f0284f6 | [
"MIT"
] | 1 | 2021-04-20T07:03:50.000Z | 2021-04-20T07:03:50.000Z | main.py | yang-233/mmsa | eed7b943746041b735d8a7af8d60b6457f0284f6 | [
"MIT"
] | null | null | null | main.py | yang-233/mmsa | eed7b943746041b735d8a7af8d60b6457f0284f6 | [
"MIT"
] | null | null | null | import sys
sys.path.append("/home/ly/workspace/mmsa")  # make the project package importable
seed = 1938  # global RNG seed for reproducibility
import numpy as np
import torch
from torch import nn
from torch import optim
# Seed every RNG source (numpy, torch CPU, current CUDA device and all CUDA
# devices) so that runs are reproducible.
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
from models.bigru_rcnn_gate import *
from utils.train import *
from typing import *
from utils.load_raw_yelp import *
from utils.dataset import *
from utils.train import *
from utils.train import *
def main():
    """Assemble data loaders, model, loss and optimizer, then launch a
    Visdom-monitored training run on the CPU."""
    train_set, valid_set, test_set = load_glove_data(config)
    batch_size = 2
    workers = 2
    train_loader, valid_loader, test_loader = get_loader(
        batch_size, workers, get_collate_fn(config),
        train_set, valid_set, test_set)
    model = Model(config)
    criterion = nn.CrossEntropyLoss()
    viz = get_Visdom()
    lr = 1e-3
    epoches = 20
    optimizer = get_regal_optimizer(model, optim.AdamW, lr)
    k_batch_train_visdom(model, optimizer, criterion, valid_loader, viz, 30, 10, use_cuda=False)
if __name__ == "__main__":
# torch.cuda.set_device(1)
main() | 25.755556 | 102 | 0.707506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.124245 |
e243f2fb56034af4479821d1bde3670f31edfe71 | 2,113 | py | Python | back-end/www/model/timeception/core/const.py | yenchiah/deep-smoke-machine | 5f779f723a3c891145db43663c8825f9ab55dc74 | [
"BSD-3-Clause"
] | 88 | 2019-05-29T07:38:45.000Z | 2022-03-17T01:50:50.000Z | back-end/www/model/timeception/core/const.py | yenchiah/deep-smoke-machine | 5f779f723a3c891145db43663c8825f9ab55dc74 | [
"BSD-3-Clause"
] | 6 | 2019-05-30T08:47:07.000Z | 2021-09-01T07:45:54.000Z | back-end/www/model/timeception/core/const.py | yenchiah/deep-smoke-machine | 5f779f723a3c891145db43663c8825f9ab55dc74 | [
"BSD-3-Clause"
] | 22 | 2019-06-17T01:15:35.000Z | 2021-11-17T10:29:00.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
########################################################################
# GNU General Public License v3.0
# GNU GPLv3
# Copyright (c) 2019, Noureldien Hussein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
########################################################################
"""
Constants for project.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import platform
import numpy as np
# Supported deep-learning backends; DL_FRAMEWORK holds the active one
# (assigned elsewhere at runtime).
DL_FRAMEWORKS = np.array(['caffe', 'tensorflow', 'pytorch', 'keras', 'caffe2'])
DL_FRAMEWORK = None
# Default GPU index.
GPU_CORE_ID = 0
# Known CNN feature dimensionalities and the layer/model identifiers they
# come from.
CNN_FEATURE_SIZES = np.array([2048, 2048, 1000, 1024, 1000, 2048, 2048])
CNN_FEATURE_TYPES = np.array(['fc6', 'fc7', 'fc1000', 'fc1024', 'fc365', 'prob', 'pool5', 'fc8a', 'res3b7', 'res4b35', 'res5c'])
CNN_MODEL_TYPES = np.array(['resnet152', 'googlenet1k', 'vgg16', 'places365-resnet152', 'places365-vgg', 'googlenet13k'])
# Image preprocessing strategies.
RESIZE_TYPES = np.array(['resize', 'resize_crop', 'resize_crop_scaled', 'resize_keep_aspect_ratio_padded'])
ROOT_PATH_TYPES = np.array(['data', 'project'])
# Training schemes -- presumably 'ete' = end-to-end, 'tco' = timeception-only;
# TODO confirm the acronyms against the training code.
TRAIN_SCHEMES = np.array(['ete', 'tco'])
MODEL_CLASSIFICATION_TYPES = np.array(['ml', 'sl'])
MODEL_MULTISCALE_TYPES = np.array(['dl', 'ks'])
SOLVER_NAMES = np.array(['adam', 'sgd'])
DATASET_NAMES = np.array(['charades', 'kinetics400', 'breakfast_actions', 'you_cook_2', 'multi_thumos'])
# Filesystem roots and host identification.
DATA_ROOT_PATH = './data'
PROJECT_ROOT_PATH = '../'
MACHINE_NAME = platform.node()
| 39.867925 | 128 | 0.682915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,333 | 0.630857 |
e245ba7d684d92f61958b015b54789b09a890e6e | 262 | py | Python | Exercicios/ex012.py | KaioPlandel/Estudos-Python-3 | 21c3dfb73b9ef0420eac093434050e4aff8fd61e | [
"MIT"
] | null | null | null | Exercicios/ex012.py | KaioPlandel/Estudos-Python-3 | 21c3dfb73b9ef0420eac093434050e4aff8fd61e | [
"MIT"
] | null | null | null | Exercicios/ex012.py | KaioPlandel/Estudos-Python-3 | 21c3dfb73b9ef0420eac093434050e4aff8fd61e | [
"MIT"
] | null | null | null | #mostre quando a pessoa vai pagar com o desconto de 5%
preco = float(input('Digite o Preço do Produto R$: '))
desconto = preco * 0.05
print(f'O preço do Produto é de R${preco:.2f} com o Desconto de R${desconto:.2f} \nO Total vai ficar R$:{preco - desconto:.2f}') | 65.5 | 128 | 0.698473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.792453 |
e246ae17b63ba59e1c28d476250fb493117de794 | 20,534 | py | Python | shahryar_webscrapping_nlp2_WebCrawling.py | ShahryarZaidi/Web-Crawler-and-NLP- | 2dfaecfc20c4ab4a711a633c088113671ffc3a89 | [
"Apache-2.0"
] | null | null | null | shahryar_webscrapping_nlp2_WebCrawling.py | ShahryarZaidi/Web-Crawler-and-NLP- | 2dfaecfc20c4ab4a711a633c088113671ffc3a89 | [
"Apache-2.0"
] | null | null | null | shahryar_webscrapping_nlp2_WebCrawling.py | ShahryarZaidi/Web-Crawler-and-NLP- | 2dfaecfc20c4ab4a711a633c088113671ffc3a89 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[65]:
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn import metrics
from sklearn.metrics import roc_auc_score, accuracy_score
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import warnings
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from nltk.tokenize import word_tokenize
import re
import nltk
import emoji
import string
from textblob import TextBlob
import langid
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from gensim import models, corpora
from sklearn.model_selection import train_test_split
warnings.filterwarnings('ignore')
# In[66]:
from bs4 import BeautifulSoup
import jsonpickle
import requests
from datetime import datetime, timedelta
from textblob import TextBlob
from productClass import Product
def main():
    """Crawl Amazon search pages for 'Samsung SSD' products, collect every
    review of every product found, and dump the Product objects (with their
    reviews attached) as JSON to filepath.json.
    """
    baseUrl = "https://www.amazon.in"
    mainCategory = "electronics"
    productCategory = "Samsung SSD"
    pagesToFetch = 51
    productObjectDataset = []
    print("Processing...")
    # Iterate over the search result pages; the base query string never
    # changes, so build it once outside the loop.
    searchBase = baseUrl + "/s?k=" + productCategory + "&i=" + mainCategory
    for i in range(1, pagesToFetch + 1):
        urlToFetch = searchBase
        if i > 1:
            urlToFetch += "&page=" + str(i)
        res = requests.get(urlToFetch)
        soup = BeautifulSoup(res.text, 'html.parser')
        content = soup.find_all('a',
                                class_='a-link-normal a-text-normal',
                                href=True)
        print("Fetching: " + urlToFetch)
        # Stop as soon as a page yields no product links (end of results).
        if len(content) == 0:
            print("Nothing found in: " + str(i))
            break
        for title in content:
            productUrl = baseUrl + title.get('href')
            productTitle = title.text
            productObjectDataset.append(Product(productTitle, productUrl))
    # Walk every review page of every product until an empty page is returned.
    for productObject in productObjectDataset:
        reviews = []
        # The review endpoint shares the product URL with /dp/ swapped for
        # /product-reviews/; compute it once per product.
        reviewsUrl = extract_url(productObject).replace("/dp/", "/product-reviews/")
        for i in range(1, 1000000):
            res = requests.get(reviewsUrl + "?pageNumber=" + str(i))
            soup = BeautifulSoup(res.text, 'html.parser')
            content = soup.find_all(
                'span', class_='a-size-base review-text review-text-content')
            if len(content) == 0:
                break
            for title in content:
                reviews.append(title.text.strip())
        productObject.add_reviews(reviews)
        print(
            extract_url(productObject) +
            ": status completed!, review found :" + str(len(reviews)))
    print(len(productObjectDataset))
    # Fix: use a context manager so the output file is closed even when the
    # encode/write fails (the original open/write/close leaked the handle
    # on exception).
    with open('filepath.json', 'w') as outputFile:
        outputFile.write(jsonpickle.encode(productObjectDataset))
#enddef
def extract_title(productObject):
    """Accessor returning the product's title."""
    return getattr(productObject, "title")
#enddef
def extract_url(productObject):
    """Accessor returning the product's detail-page URL."""
    return getattr(productObject, "url")
#enddef
def extract_review_list(productObject):
    """Accessor returning the list of reviews collected for the product."""
    return getattr(productObject, "review_list")
#enddef
if __name__ == "__main__":
    # Run the crawler only when executed directly, not on import.
    main()
#############################################################################
import requests
from bs4 import BeautifulSoup
# links and Headers
# Browser-like User-Agent / Accept-Language so Amazon serves the regular
# HTML page instead of a bot-detection response.
HEADERS = ({'User-Agent':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
            'Accept-Language': 'en-US, en;q=0.5'})
# Link to the amazon product reviews
# Paginated review endpoint for one specific SSD; the page number is
# appended at request time.
url = 'https://www.amazon.in/Samsung-Internal-Solid-State-MZ-V7S500BW/product-reviews/B07MFBLN7K/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber='
# Accumulator for every review scraped across all pages.
review_list = []
def retrieve_reviews(soup):
    """Extract title, rating and body from every review card in *soup*
    and append them as dicts to the module-level review_list."""
    # Only <div data-hook="review"> elements are review cards.
    for card in soup.find_all("div", {'data-hook': "review"}):
        title_text = card.find("a", {'data-hook': "review-title"}).text.strip()
        # The rating reads like "4.5 out of 5 stars"; keep just the number.
        rating_text = card.find("i", {'data-hook': "review-star-rating"}).text.replace("out of 5 stars", "").strip()
        body_text = card.find("span", {'data-hook': "review-body"}).text.strip()
        review_list.append({
            'title': title_text,
            'rating': rating_text,
            'review_text': body_text,
        })
# Get the page content from amazon
# as we know we have 43 pages to visit and get content from
for pageNumber in range(1, 51):
    raw_text = requests.get(url=url + (str(pageNumber)), headers=HEADERS)
    soup = BeautifulSoup(raw_text.text, 'lxml')
    retrieve_reviews(soup)
# Print every scraped review numbered from 1. Fix: enumerate replaces the
# `for index in range(len(review_list))` anti-pattern with identical output.
for position, review_item in enumerate(review_list, start=1):
    print(f"{position}) {review_item}")
    print("")
import csv
import pandas as pd
# Create dataframe out of all the reviews from amazon
reviews_df = pd.DataFrame(review_list)
# Put that dataframe into an excel file
reviews_df.to_excel('samsung.xlsx', index=False)
print("Done.")
# In[67]:
def remove_emojis(text):
    """Strip all emoji characters from *text* and return the result.

    NOTE(review): the ``emoji`` package is only imported further down in this
    script, so this function must not be called before that import runs.
    """
    emoji_pattern = emoji.get_emoji_regexp()
    return emoji_pattern.sub(r'', text)
# Cleaning function
def preprocess(input_text):
    """Clean a single raw review string and return it as a list of tokens.

    Lower-cases the text, strips @mentions, digits, English stopwords,
    emojis and punctuation, then discards words shorter than 3 characters.

    Args:
        input_text: raw review string.

    Returns:
        list[str]: cleaned tokens.
    """
    # BUG FIX: the original read an undefined name ``review`` here; it
    # clearly meant the ``input_text`` parameter.
    lower_text = input_text.lower()
    punctuations = '''`!()-[]{};:'"\,<>./?@#$%^&*_~=+°'''
    lower_text = re.sub(r"@[A-Za-z0-9]+", "", lower_text)  # Removes the @mentions
    lower_text = re.sub(r"[0-9]+", "", lower_text)  # Removes the numbers
    # tokenization
    tokens = word_tokenize(lower_text)
    # BUG FIX: ``stopwords = stopwords.words("english")`` made ``stopwords``
    # a local read before assignment (UnboundLocalError).  Import locally and
    # bind to a fresh name instead; use a set for O(1) membership tests.
    from nltk.corpus import stopwords as nltk_stopwords
    stop_set = set(nltk_stopwords.words("english"))
    # Remove stopwords and empty tokens in one pass (the original mutated
    # the list while iterating it, which can skip elements).
    filtered_text = [word for word in tokens if word not in stop_set and word != ""]
    clean_text = remove_emojis(' '.join(filtered_text))
    # Removing punctuation characters.
    for ele in punctuations:
        clean_text = clean_text.replace(ele, "")
    # Removing small words with length less than 3.
    clean_text = ' '.join(t for t in clean_text.split() if len(t) >= 3)
    return word_tokenize(clean_text)
# In[70]:
reviews = pd.read_excel("samsung.xlsx")
reviews.head()
# In[71]:
reviews.shape
# In[72]:
plt.figure(figsize = (7, 7))
sns.countplot(reviews["rating"])
# In[73]:
rating_count = pd.DataFrame(reviews["rating"].value_counts().reset_index())
rating_count
# In[74]:
explode = [0.05, 0.04, 0, 0.02, 0]
names = ["Rating 5.0", "Rating 4.0", "Rating 1.0", "Rating 3.0", "Rating 2.0"]
plt.figure(figsize = (10, 10))
plt.pie(rating_count["rating"],
labels = names,
labeldistance=1.05,
wedgeprops = { 'linewidth' : 1.5, 'edgecolor' : 'white' },
explode = explode,
autopct = '%.2f%%',
shadow = True,
pctdistance = .85,
textprops = {"fontsize": 14, "color":'w'},
rotatelabels = True,
radius = 1.3
)
plt.show()
# The most common ratings for the product are 5.0 and 4.0, which suggests customers are largely satisfied with it.
# In[75]:
review_text = list(reviews["review_text"])
review_text[:5]
# In[76]:
reviews_df.shape
# In[77]:
product_review = list(reviews_df["review_text"])
# In[78]:
product_review[0]
# In[79]:
import emoji
def remove_emojis(text):
    """Return *text* with every emoji character removed."""
    # get_emoji_regexp() matches all known emoji; replace them with nothing.
    return emoji.get_emoji_regexp().sub(r'', text)
# In[80]:
# Cleaning function
def preprocess(reviews, stopwords):
    """Clean a list of raw review strings.

    For each review: lower-case, strip @mentions and digits, tokenize, drop
    stopwords and empty tokens, remove emojis and punctuation, and discard
    words shorter than 3 characters.

    Args:
        reviews: iterable of raw review strings.
        stopwords: collection of stopwords to filter out.

    Returns:
        list[str]: one cleaned string per input review.
    """
    punctuations = '''`!()-[]{};:'"\,<>./?@#$%^&*_~=+°'''
    # Build the set once: O(1) membership tests instead of scanning a list
    # of ~180 stopwords for every token of every review.
    stop_set = set(stopwords)
    cleaned_reviews = []
    for review in reviews:
        lower_text = review.lower()
        lower_text = re.sub(r"@[A-Za-z0-9]+", "", lower_text)  # drop @mentions
        lower_text = re.sub(r"[0-9]+", "", lower_text)  # drop numbers
        # tokenization
        tokens = word_tokenize(lower_text)
        # BUG FIX: the original removed items from ``filtered_text`` while
        # iterating it, which can skip elements; filter in one pass instead.
        filtered = [word for word in tokens if word not in stop_set and word != ""]
        clean_text = remove_emojis(' '.join(filtered))
        # Removing punctuation characters.
        for ele in punctuations:
            clean_text = clean_text.replace(ele, "")
        # Removing small words with length less than 3.
        clean_text = ' '.join(t for t in clean_text.split() if len(t) >= 3)
        cleaned_reviews.append(clean_text)
    return cleaned_reviews
# In[81]:
from nltk.corpus import stopwords
stopwords = stopwords.words("english")
len(stopwords)
# #### Call the preprocess function and pass the text string to clean data
# In[82]:
clean_reviews = preprocess(product_review, stopwords)
clean_reviews
# #### Stemming and Lemmatization
# In[83]:
wn_lem = nltk.wordnet.WordNetLemmatizer()
stemmer = nltk.stem.PorterStemmer()
def lemmatization(reviews):
    """Lemmatize then stem every token of every review.

    Uses the module-level ``wn_lem`` (WordNet lemmatizer) and ``stemmer``
    (Porter stemmer); returns the processed reviews re-joined as strings.
    """
    lemmatized_reviews = []
    for review in reviews:
        # Lemmatize first, then stem, exactly as the per-index loop did.
        processed = [stemmer.stem(wn_lem.lemmatize(tok))
                     for tok in word_tokenize(review)]
        lemmatized_reviews.append(' '.join(processed))
    return lemmatized_reviews
# In[84]:
clean_reviews = lemmatization(clean_reviews)
# 5 reviews from the list
for index in range(5):
print(f"{index+1}) {clean_reviews[index]}\n")
# ### Frequencies
# In[85]:
from collections import Counter
frequencies = Counter(' '.join([review for review in clean_reviews]).split())
frequencies.most_common(10)
# In[86]:
# Words with least frequency that is 1
singletons = [k for k, v in frequencies.items() if v == 1]
singletons[0:10]
# In[87]:
print(f"Total words used once are {len(singletons)} out of {len(frequencies)}") # 993 words that have been used only once
# In[88]:
# This function will remove words with less frequencies
def remove_useless_words(reviews, useless_words):
    """Drop every token that appears in *useless_words* from each review.

    Args:
        reviews: iterable of review strings.
        useless_words: collection of words to remove (e.g. words seen only once).

    Returns:
        list[str]: filtered reviews, re-joined as strings.
    """
    # Convert once to a set: membership tests drop from O(len(useless_words))
    # to O(1), which matters because ~1000 singleton words are passed in.
    useless_set = set(useless_words)
    filtered_reviews = []
    for single_review in reviews:
        tokens = word_tokenize(single_review)
        usefull_text = ' '.join(word for word in tokens if word not in useless_set)
        filtered_reviews.append(usefull_text)
    return filtered_reviews
# In[89]:
# Store a real copy so we don't need to redo the cleaning after a mistake.
# BUG FIX: a plain assignment only aliases the same list object; take a
# shallow copy so later in-place changes cannot leak into the backup.
clean_reviews_copy = list(clean_reviews)
# In[90]:
clean_reviews = remove_useless_words(clean_reviews, singletons)
# 5 reviews from the list
for index in range(5):
print(f"{index+1}) {clean_reviews[index]}\n")
# In[91]:
# count vectoriser tells the frequency of a word.
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df = 1, max_df = 0.9)
X = vectorizer.fit_transform(clean_reviews)
word_freq_df = pd.DataFrame({'term': vectorizer.get_feature_names(), 'occurrences':np.asarray(X.sum(axis=0)).ravel().tolist()})
word_freq_df['frequency'] = word_freq_df['occurrences']/np.sum(word_freq_df['occurrences'])
# In[92]:
word_freq_df = word_freq_df.sort_values(by="occurrences", ascending = False)
word_freq_df.head()
# #### TfidfVectorizer
# In[93]:
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(stop_words='english', max_df = 0.5, smooth_idf=True)
doc_vec = vectorizer.fit_transform(clean_reviews)
names_features = vectorizer.get_feature_names()
dense = doc_vec.todense()
denselist = dense.tolist()
df = pd.DataFrame(denselist, columns = names_features)
df.head()
# # N-gram
# In[94]:
#Bi-gram
def get_top_n2_words(corpus, n=None):
    """Return the *n* most frequent bi-grams in *corpus* as (bigram, count) pairs.

    When ``n`` is None the full descending-frequency list is returned.
    """
    # for tri-grams, use ngram_range=(3,3)
    vectorizer = CountVectorizer(ngram_range=(2, 2), max_features=2000)
    counts = vectorizer.fit_transform(corpus)          # document/bi-gram counts
    totals = counts.sum(axis=0)                        # total count per bi-gram
    freq_pairs = [(term, totals[0, col])
                  for term, col in vectorizer.vocabulary_.items()]
    freq_pairs.sort(key=lambda pair: pair[1], reverse=True)
    return freq_pairs[:n]
# In[95]:
top2_words = get_top_n2_words(clean_reviews, n=200) #top 200
top2_df = pd.DataFrame(top2_words)
top2_df.columns=["Bi-gram", "Freq"]
top2_df.head()
# In[96]:
#Bi-gram plot
import matplotlib.pyplot as plt
import seaborn as sns
top20_bigram = top2_df.iloc[0:20,:]
fig = plt.figure(figsize = (10, 5))
plot=sns.barplot(x=top20_bigram["Bi-gram"],y=top20_bigram["Freq"])
plot.set_xticklabels(rotation=45,labels = top20_bigram["Bi-gram"])
# In[97]:
#Tri-gram
def get_top_n3_words(corpus, n=None):
    """Return the *n* most frequent tri-grams in *corpus* as (trigram, count) pairs.

    When ``n`` is None the full descending-frequency list is returned.
    """
    vectorizer = CountVectorizer(ngram_range=(3, 3), max_features=2000)
    counts = vectorizer.fit_transform(corpus)          # document/tri-gram counts
    totals = counts.sum(axis=0)                        # total count per tri-gram
    freq_pairs = [(term, totals[0, col])
                  for term, col in vectorizer.vocabulary_.items()]
    freq_pairs.sort(key=lambda pair: pair[1], reverse=True)
    return freq_pairs[:n]
# In[98]:
top3_words = get_top_n3_words(clean_reviews, n=200)
top3_df = pd.DataFrame(top3_words)
top3_df.columns=["Tri-gram", "Freq"]
# In[99]:
top3_df
# In[100]:
#Tri-gram plot
import seaborn as sns
top20_trigram = top3_df.iloc[0:20,:]
fig = plt.figure(figsize = (10, 5))
plot=sns.barplot(x=top20_trigram["Tri-gram"],y=top20_trigram["Freq"])
plot.set_xticklabels(rotation=45,labels = top20_trigram["Tri-gram"])
# # WordCloud
# In[101]:
string_Total = " ".join(clean_reviews)
# In[102]:
#wordcloud for entire corpus
plt.figure(figsize=(20, 20))
from wordcloud import WordCloud
wordcloud_stw = WordCloud(
background_color= 'black',
width = 1800,
height = 1500
).generate(string_Total)
plt.imshow(wordcloud_stw)
plt.axis("off")
plt.show()
# #### Singularity and Polarity using the textblob
# In[103]:
from textblob import TextBlob
# In[104]:
# Get Subjectivity of each tweet
def getSubjectivity(tweet):
    """Return the TextBlob subjectivity score of *tweet*."""
    sentiment = TextBlob(tweet).sentiment
    return sentiment.subjectivity
# Get polarity of each tweet
def getPolarity(tweet):
    """Return the TextBlob polarity score of *tweet*."""
    blob = TextBlob(tweet)
    return blob.sentiment.polarity
# In[105]:
sentiment_df = pd.DataFrame(clean_reviews, columns=["reviews"])
# In[106]:
sentiment_df["Subjectivity"] = sentiment_df["reviews"].apply(getSubjectivity)
sentiment_df["Polarity"] = sentiment_df["reviews"].apply(getPolarity)
# In[107]:
sentiment_df.head()
# In[108]:
# Function to map a polarity score to a sentiment label
def getAnalysis(score):
    """Map a polarity score to a sentiment label.

    Negative scores map to "Negative", exactly zero to "Neutral",
    and positive scores to "Positive".
    """
    if score > 0:
        return "Positive"
    if score < 0:
        return "Negative"
    return "Neutral"
# In[109]:
sentiment_df["Analysis"] = sentiment_df["Polarity"].apply(getAnalysis)
sentiment_df.head()
# In[110]:
plt.figure(figsize=(3, 6))
sns.countplot(sentiment_df["Analysis"])
# In[111]:
# All Positive Reviews
pos_rvs = sentiment_df[sentiment_df["Analysis"] == "Positive"].sort_values(by = ["Polarity"])
print("All Positive Reviews are: \n")
for index in range(pos_rvs.shape[0]):
print(f"{index + 1} ) {pos_rvs.iloc[index, 0]} \n")
# In[112]:
# All Negative Reviews
neg_rvs = sentiment_df[sentiment_df["Analysis"] == "Negative"].sort_values(by = ["Polarity"])
print("All Negative Reviews are: \n")
for index in range(neg_rvs.shape[0]):
print(f"{index + 1} ) {neg_rvs.iloc[index, 0]} \n")
# In[113]:
token_reviews = []
for review in clean_reviews:
token_reviews.append(word_tokenize(review))
dictionary = corpora.Dictionary(token_reviews)
dictionary.items()
# In[114]:
dictionary = corpora.Dictionary(token_reviews)
for key in dictionary:
print(key, dictionary[key])
# In[115]:
corpus = [dictionary.doc2bow(review) for review in token_reviews]
corpus
# In[116]:
clean_reviews[200]
# In[117]:
corpus[200]
# ### Building a Tfidf model
# In[118]:
tfidf_model = models.TfidfModel(corpus)
corpus_tfidf = tfidf_model[corpus]
corpus_tfidf
# ### LSI Model (Latent Semantic Indexing)
# In[119]:
from gensim.models.lsimodel import LsiModel
from gensim import similarities
# In[120]:
lsi_model = LsiModel(corpus = corpus_tfidf, id2word = dictionary, num_topics = 400)
index = similarities.MatrixSimilarity(lsi_model[corpus])
# ### The function will return 10 similar reviews to a given review
# In[121]:
def text_lsi(new_text, num=10):
    """Return the *num* reviews most similar to *new_text* under the LSI model.

    Uses the module-level ``dictionary``, ``lsi_model``, ``index`` and
    ``clean_reviews``.  Each element of the result is
    ``((review_index, similarity), review_text)``.
    """
    bow = dictionary.doc2bow(word_tokenize(new_text))
    sims = index[lsi_model[bow]]
    # Rank every review by descending similarity to the query.
    ranked = sorted(enumerate(sims), key=lambda item: item[1], reverse=True)
    return [(pair, clean_reviews[pair[0]]) for pair in ranked[:num]]
# In[122]:
clean_reviews[100]
# In[123]:
text_lsi(clean_reviews[100])
# # ML Algorithm
# In[124]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(reviews['review_text'], reviews['rating'], test_size=0.1, random_state=0)
print('Load %d training examples and %d validation examples. \n' %(X_train.shape[0],X_test.shape[0]))
print('Show a review in the training set : \n', X_train.iloc[10])
X_train,y_train
# In[125]:
def cleanText(raw_text, remove_stopwords=False, stemming=False, split_text=False):
    '''
    Convert a raw review to a cleaned review.

    Strips HTML, keeps only ASCII letters, lower-cases, and optionally
    removes English stopwords and/or stems the words.

    Args:
        raw_text: raw review string (may contain HTML).
        remove_stopwords: drop English stopwords when True.
        stemming: apply a Snowball stemmer when True.
        split_text: return a list of words instead of a joined string.

    Returns:
        The cleaned review as a single string, or a list of words when
        ``split_text`` is True.
    '''
    text = BeautifulSoup(raw_text, 'html.parser').get_text()
    letters_only = re.sub("[^a-zA-Z]", " ", text)
    words = letters_only.lower().split()
    if remove_stopwords:
        # BUG FIX: the module-level ``stopwords`` name was already rebound to
        # the list returned by stopwords.words("english"), so calling
        # ``.words()`` on it again raised AttributeError; build the set
        # directly from the list instead.
        stops = set(stopwords)
        words = [w for w in words if w not in stops]
    if stemming:
        stemmer = SnowballStemmer('english')
        words = [stemmer.stem(w) for w in words]
    if split_text:
        return words
    return " ".join(words)
# In[126]:
X_train_cleaned = []
X_test_cleaned = []
for d in X_train:
X_train_cleaned.append(cleanText(d))
print('Show a cleaned review in the training set : \n', X_train_cleaned[10])
for d in X_test:
X_test_cleaned.append(cleanText(d))
# In[127]:
countVect = CountVectorizer()
X_train_countVect = countVect.fit_transform(X_train_cleaned)
mnb = MultinomialNB()
mnb.fit(X_train_countVect, y_train)
# In[128]:
def modelEvaluation(predictions):
    """Print accuracy and a classification report for *predictions* against
    the module-level ``y_test`` labels.

    NOTE(review): ``accuracy_score`` and ``metrics`` are not imported anywhere
    in this chunk of the file -- presumably imported earlier (sklearn);
    verify before relying on this function.
    """
    print ("\nAccuracy {:.4f}".format(accuracy_score(y_test, predictions)))
    print("\nClassification report : \n", metrics.classification_report(y_test, predictions))
# In[129]:
predictions = mnb.predict(countVect.transform(X_test_cleaned))
modelEvaluation(predictions)
# In[130]:
tfidf = TfidfVectorizer(min_df=5)
X_train_tfidf = tfidf.fit_transform(X_train)
# Logistic Regression
lr = LogisticRegression()
lr.fit(X_train_tfidf, y_train)
# In[131]:
feature_names = np.array(tfidf.get_feature_names())
sorted_coef_index = lr.coef_[0].argsort()
print('\nTop 10 features with smallest coefficients :\n{}\n'.format(feature_names[sorted_coef_index[:10]]))
print('Top 10 features with largest coefficients : \n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))
# In[132]:
predictions = lr.predict(tfidf.transform(X_test_cleaned))
modelEvaluation(predictions)
# In[ ]:
| 22.739756 | 177 | 0.665774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,633 | 0.274299 |
e2482cad6890fd01ea34b60d91d3b693d1873e07 | 5,808 | py | Python | src/model_generation/modeling.py | klingj3/subreddit_suggestor | 632a8bb59939978c19bbe5a6974ea3697a8dfedb | [
"MIT"
] | 8 | 2020-04-26T02:14:53.000Z | 2020-05-03T05:51:08.000Z | src/model_generation/modeling.py | klingj3/subreddit_suggestor | 632a8bb59939978c19bbe5a6974ea3697a8dfedb | [
"MIT"
] | 2 | 2021-08-25T16:04:27.000Z | 2022-02-10T01:45:46.000Z | src/model_generation/modeling.py | klingj3/subreddit_suggestor | 632a8bb59939978c19bbe5a6974ea3697a8dfedb | [
"MIT"
] | null | null | null | import json
import numpy as np
import os
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, Flatten, Activation, BatchNormalization
from sklearn.model_selection import train_test_split
class SuggestionModeler(object):
"""
A collection of functions to generate a model of subreddit suggestions from the data retreived in
data_retrieval.py
"""
def __init__(self, force_retrain=False):
self.session = tf.Session()
self.graph = tf.get_default_graph()
with open("model_generation/config.json", "r") as infile:
self.config = json.loads(infile.read())
if os.path.exists("config_override.json"):
with open("model_generation/config_override.json", "r") as infile:
self.config.update(json.loads(infile.read()))
self.subreddit_to_rank = dict()
with open(self.config["rank_to_subreddit_path"], 'r') as infile:
self.rank_to_subreddit = json.loads(infile.read())
self.rank_to_subreddit = {int(k): v for k, v in self.rank_to_subreddit.items()}
for rank, subreddit in self.rank_to_subreddit.items():
self.subreddit_to_rank[subreddit] = rank
with open(self.config['rank_to_sfw_status'], 'r') as infile:
self.rank_to_sfw_status = json.loads(infile.read())
self.rank_to_sfw_status = {int(k): v for k, v in self.rank_to_sfw_status.items()}
self.method = self.config["method"]
self.model_path = self.config['model_path'].format(method=self.method)
if self.method == "hot":
model = Sequential()
model.add(Dense(512, activation='relu',
input_shape=(self.config['max_subreddits_in_model'], )))
model.add(Dropout(0.5))
model.add(Dense(self.config['max_subreddits_in_model'], activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['acc'])
else:
raise ValueError("'method' in config not well defined")
self.model = model
if force_retrain or not os.path.exists(self.model_path):
model.summary()
print("Preparing train/test data...")
X, y = self.arrange_training_data(method=self.method)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.config['test_pct'])
train_data, test_data = (X_train, y_train), (X_test, y_test)
print("Starting training process...")
self.train_model(train_data, test_data)
with self.graph.as_default():
with self.session.as_default():
self.model.load_weights(self.model_path)
def arrange_training_data(self, method):
import random
with open(self.config["combined_user_to_subreddit_score_path"], 'r') as infile:
user_subreddit_scores = json.loads(infile.read())
for k, scores in user_subreddit_scores.items():
user_subreddit_scores[k] = sorted(scores, key=lambda x: x[1], reverse=True)
data_length, data_width = len(user_subreddit_scores), self.config['max_subreddits_in_model']
user_subreddit_scores = list(user_subreddit_scores.values())
random.shuffle(user_subreddit_scores)
if method == 'hot': # Input vector is one-hot encoding.
X = np.zeros((data_length, data_width), dtype=np.bool)
for i, scores in enumerate(user_subreddit_scores):
for subreddit_key, score in scores:
if subreddit_key <= data_width:
X[i][subreddit_key - 1] = True
else:
raise ValueError(f"Unhandled training data preparation method {method}")
y = np.zeros((data_length, data_width), dtype=np.bool)
for i, scores in enumerate(user_subreddit_scores):
for subreddit_key, score in scores:
if subreddit_key <= data_width:
y[i][subreddit_key-1] = score > 0
return X, y
def arrange_user_data(self, user_data):
user_data = {k: v for k, v in sorted(user_data.items(), key=lambda x: x[1], reverse=True)
if 0 < self.subreddit_to_rank.get(k, -1) < self.config['max_subreddits_in_model']}
if self.method == 'hot':
data = np.zeros((1, self.config['max_subreddits_in_model']), dtype=np.bool)
for subreddit_name, subreddit_score in user_data.items():
if subreddit_name in self.subreddit_to_rank:
data[0][self.subreddit_to_rank[subreddit_name]-1] = subreddit_score > 0
return data
def train_model(self, train_data, test_data):
X, y = train_data
self.model.fit(X, y, epochs=5, batch_size=256, verbose=1)
self.model.save(self.model_path)
X, y = test_data
scores = self.model.evaluate(X, y, verbose=1)
print(self.model.metrics_names)
print(scores)
def get_user_predictions(self, user_data):
arranged_data = self.arrange_user_data(user_data)
user_known_subreddits = set(list(user_data.keys()))
with self.graph.as_default():
with self.session.as_default():
predictions = self.model.predict(arranged_data)[0]
predictions = [(self.rank_to_subreddit[i+1], round(float(score), 5), i) for i, score
in enumerate(predictions) if self.rank_to_subreddit[i+1] not in user_known_subreddits \
and self.rank_to_sfw_status[i+1] and i > 200]
predictions.sort(key=lambda x: x[1], reverse=True)
return predictions
if __name__ == '__main__':
import os
os.chdir('..')
modeler = SuggestionModeler(True)
| 43.343284 | 110 | 0.635158 | 5,457 | 0.939566 | 0 | 0 | 0 | 0 | 0 | 0 | 744 | 0.128099 |
e24c0ec2c106545bbb10756e5231705f8b4872ad | 203 | py | Python | Curso_em_video/Mundo_1/exercicios/ex012.py | TCGamer123/python | 82ad1f84b52d6cc7253fb4c5522ae8389824930a | [
"MIT"
] | 1 | 2022-03-08T13:29:59.000Z | 2022-03-08T13:29:59.000Z | Curso_em_video/Mundo_1/exercicios/ex012.py | TCGamer123/python | 82ad1f84b52d6cc7253fb4c5522ae8389824930a | [
"MIT"
] | null | null | null | Curso_em_video/Mundo_1/exercicios/ex012.py | TCGamer123/python | 82ad1f84b52d6cc7253fb4c5522ae8389824930a | [
"MIT"
] | null | null | null | preço = float(input('Digite o preço do produto: R$'))
d = preço * 0.05
vD = preço - d
print('Valor original R${:.2f}, desconto de 5% é igual à R${:.2f}, seu novo preço é R${:.2f}'.format(preço, d, vD)) | 33.833333 | 115 | 0.62069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.580189 |
e24d2099347f041d5db988601fd20a61d4cec847 | 3,282 | py | Python | service/licensing.py | profesormig/quimica3a | a453f0d7485ebc4b2d7b06a72b44c6c179a3bbd4 | [
"BSD-3-Clause"
] | null | null | null | service/licensing.py | profesormig/quimica3a | a453f0d7485ebc4b2d7b06a72b44c6c179a3bbd4 | [
"BSD-3-Clause"
] | null | null | null | service/licensing.py | profesormig/quimica3a | a453f0d7485ebc4b2d7b06a72b44c6c179a3bbd4 | [
"BSD-3-Clause"
] | null | null | null | """
Logic related to License, PatternMatch, etc. for atmosphere.
"""
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from core.models.license import License, LicenseType
from core.models.match import PatternMatch, MatchType
from core.models.identity import Identity
from threepio import logger
def create_license(title, description, created_by, allow_imaging=True):
    """
    Create, save, and return a new License.

    The LicenseType is chosen from the description: "URL" when the
    description validates as a URL, "Raw Text" otherwise.
    """
    type_name = "URL" if is_url(description) else "Raw Text"
    license_type = LicenseType.objects.get(name=type_name)
    new_license = License(
        title=title,
        license_type=license_type,
        license_text=description,
        allow_imaging=allow_imaging,
        created_by=created_by)
    new_license.save()
    return new_license
def create_pattern_match(pattern, pattern_type, created_by):
    """
    Create, save, and return a PatternMatch.

    ``pattern_type`` is matched case-insensitively: any value containing
    "email" maps to the BasicEmail MatchType, any value containing "user"
    maps to Username; anything else raises ValueError.
    """
    normalized = pattern_type.lower()
    if "email" in normalized:
        match_name = "BasicEmail"
    elif "user" in normalized:
        match_name = "Username"
    else:
        raise ValueError("Received invalid pattern_type: %s" % normalized)
    match_type = MatchType.objects.get(name=match_name)
    new_match = PatternMatch(
        pattern=pattern,
        type=match_type,
        created_by=created_by)
    new_match.save()
    return new_match
def _test_license(license, identity):
    """
    If license has an access_list, verify that the identity passes at least
    one of its tests.

    Returns True when the access_list is empty (no restriction) or when any
    test matches the identity's user; False otherwise.
    """
    if not license.access_list.count():
        return True
    for test in license.access_list.iterator():
        # BUG FIX: default to no-match so an unrecognized type name can
        # neither raise NameError (first iteration) nor reuse a stale
        # result left over from the previous iteration.
        result = False
        # TODO: Add more 'type_name' logic here!
        if test.type.name == 'BasicEmail':
            result = _test_user_email(identity.created_by, test.pattern)
        elif test.type.name == "Username":
            result = _test_username(identity.created_by, test.pattern)
        if result:
            return True
    return False
def _simple_match(test_string, pattern, contains=False):
if contains:
return pattern in test_string
else:
return pattern == test_string
def _simple_glob_test(test_string, pattern):
from fnmatch import fnmatch
result = fnmatch(test_string, pattern)
return result
def _test_user_email(atmo_user, email_pattern):
    """Case-insensitively match the user's email against a pattern.

    Passes when the email matches the pattern as a shell glob OR contains
    the pattern as a substring.

    NOTE(review): this reads ``email_pattern.pattern``, but _test_license
    passes ``test.pattern`` (apparently already the string field), and the
    sibling _test_username uses its pattern argument directly as a string.
    Confirm which shape callers actually supply before changing either side.
    """
    email = atmo_user.email.lower()
    email_pattern = email_pattern.pattern.lower()
    result = _simple_glob_test(
        email,
        email_pattern) or _simple_match(
        email,
        email_pattern,
        contains=True)
    # Log every access-list decision for auditability.
    logger.info(
        "Email:%s Pattern:%s - Result:%s" %
        (email, email_pattern, result))
    return result
def _test_username(atmo_user, username_match):
    """Return True when *username_match* occurs within the user's username.

    NOTE(review): unlike _test_user_email, this uses the pattern argument
    directly as a string (no ``.pattern`` attribute access) -- confirm the
    intended argument type is consistent across both helpers.
    """
    username = atmo_user.username
    result = _simple_match(username, username_match, contains=True)
    # Log every access-list decision for auditability.
    logger.info(
        "Username:%s Match On:%s - Result:%s" %
        (username, username_match, result))
    return result
def is_url(test_string):
    """Return True when *test_string* is a valid URL per Django's URLValidator.

    Invalid URLs return False; any unexpected failure inside the validator
    itself is logged and treated as "not a URL".
    """
    val = URLValidator()
    try:
        val(test_string)
        return True
    except ValidationError:
        return False
    except Exception:
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; catch Exception so validator regressions are still
        # logged without hiding interpreter-level signals.
        logger.exception("URL Validation no longer works -- Code fix required")
        return False
| 28.53913 | 85 | 0.681597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 526 | 0.160268 |
e24dbdf6d703b006001d5a126067e866e809fe94 | 73 | py | Python | articles/imp/genfigs/tuning.py | parrt/stratx | c190ecc32ac7b8dd3f5532a5d5b0de34a3693a22 | [
"MIT"
] | 54 | 2019-07-17T04:59:39.000Z | 2022-03-18T15:25:00.000Z | articles/imp/genfigs/tuning.py | parrt/stratx | c190ecc32ac7b8dd3f5532a5d5b0de34a3693a22 | [
"MIT"
] | 5 | 2019-07-27T16:18:37.000Z | 2020-12-02T20:16:49.000Z | articles/imp/genfigs/tuning.py | parrt/stratx | c190ecc32ac7b8dd3f5532a5d5b0de34a3693a22 | [
"MIT"
] | 13 | 2019-08-08T22:17:50.000Z | 2022-02-11T10:19:23.000Z | import support
support.tune_all(pairs_to_tune=support.pairs, verbose=1)
| 18.25 | 56 | 0.835616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e24dc2e412714a0bc17cc1fafa7639e2e6028663 | 11,524 | py | Python | reliefparser/models/pointer_net.py | XuezheMax/ReLiefParser | 4ffb2495002809de70809689b84d80d2a59cd2ac | [
"MIT"
] | 6 | 2016-11-02T20:28:01.000Z | 2018-06-25T03:37:25.000Z | reliefparser/models/pointer_net.py | XuezheMax/ReLiefParser | 4ffb2495002809de70809689b84d80d2a59cd2ac | [
"MIT"
] | null | null | null | reliefparser/models/pointer_net.py | XuezheMax/ReLiefParser | 4ffb2495002809de70809689b84d80d2a59cd2ac | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from encoder import Encoder
from decoder import Decoder, TreeDecoder
import bisect
from time import time
class PointerNet(object):
def __init__(self, vsize, esize, hsize, asize, buckets, **kwargs):
super(PointerNet, self).__init__()
self.name = kwargs.get('name', self.__class__.__name__)
self.scope = kwargs.get('scope', self.name)
self.enc_vsize = vsize
self.enc_esize = esize
self.enc_hsize = hsize
self.dec_msize = self.enc_hsize * 2 # concatenation of bidirectional RNN states
self.dec_isize = self.enc_hsize * 2 # concatenation of bidirectional RNN states
self.dec_hsize = hsize
self.dec_asize = asize
self.buckets = buckets
self.max_len = self.buckets[-1]
self.max_grad_norm = kwargs.get('max_grad_norm', 100)
self.optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
# self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-2)
self.num_layer = kwargs.get('num_layer', 1)
self.rnn_class = kwargs.get('rnn_class', tf.nn.rnn_cell.BasicLSTMCell)
# self.rnn_class = kwargs.get('rnn_class', tf.nn.rnn_cell.GRUCell)
self.encoder = Encoder(self.enc_vsize, self.enc_esize, self.enc_hsize,
rnn_class=self.rnn_class, num_layer = self.num_layer)
if kwargs.get('tree_decoder', False):
self.decoder = TreeDecoder(self.dec_isize, self.dec_hsize, self.dec_msize, self.dec_asize, self.max_len,
rnn_class=self.rnn_class, num_layer = self.num_layer, epsilon=1.0)
else:
self.decoder = Decoder(self.dec_isize, self.dec_hsize, self.dec_msize, self.dec_asize, self.max_len,
rnn_class=self.rnn_class, num_layer = self.num_layer, epsilon=1.0)
self.baselines = []
self.bl_ratio = kwargs.get('bl_ratio', 0.95)
for i in range(self.max_len):
self.baselines.append(tf.Variable(0.0, trainable=False))
def __call__(self, enc_input, dec_input_indices, valid_indices, left_indices, right_indices, values, valid_masks=None):
batch_size = tf.shape(enc_input)[0]
# forward computation graph
with tf.variable_scope(self.scope):
# encoder output
enc_memory, enc_final_state_fw, _ = self.encoder(enc_input)
# decoder
dec_hiddens, dec_actions, dec_act_logps = self.decoder(
enc_memory, dec_input_indices,
valid_indices, left_indices, right_indices,
valid_masks, init_state=enc_final_state_fw)
# cost
costs = []
update_ops = []
for step_idx, (act_logp, value, baseline) in enumerate(zip(dec_act_logps, values, self.baselines)):
# costs.append(-tf.reduce_mean(act_logp * (value - baseline)))
new_baseline = self.bl_ratio * baseline + (1-self.bl_ratio) * tf.reduce_mean(value)
costs.append(-tf.reduce_mean(act_logp * value))
update_ops.append(tf.assign(baseline, new_baseline))
# gradient computation graph
self.params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope)
train_ops = []
for limit in self.buckets:
print '0 ~ %d' % (limit-1)
grad_params = tf.gradients(tf.reduce_sum(tf.pack(costs[:limit])), self.params)
if self.max_grad_norm is not None:
clipped_gradients, norm = tf.clip_by_global_norm(grad_params, self.max_grad_norm)
else:
clipped_gradients = grad_params
train_op = self.optimizer.apply_gradients(
zip(clipped_gradients, self.params))
with tf.control_dependencies([train_op] + update_ops[:limit]):
# train_ops.append(tf.Print(tf.constant(1.), [norm]))
train_ops.append(tf.constant(1.))
return dec_hiddens, dec_actions, train_ops
#### test script
if __name__ == '__main__':
# hyper-parameters
vsize = 1000
esize = 256
hsize = 256
asize = 256
isize = 333
buckets = [10]#, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]
max_len = buckets[-1]
####################
# symbolic section
####################
# model initialization
pointer_net = PointerNet(vsize, esize, hsize, asize, buckets, dec_isize=isize)
# placeholders
enc_input = tf.placeholder(dtype=tf.int32, shape=[None, None], name='enc_input')
input_indices, rewards = [], []
valid_indices, left_indices, right_indices = [], [], []
for i in range(max_len):
rewards.append(tf.placeholder(dtype=tf.float32, name='reward_%d'%i))
input_indices.append(tf.placeholder(dtype=tf.int32, shape=[None, 2], name='input_index_%d'%i))
valid_indices.append(tf.placeholder(dtype=tf.int32, name='valid_index_%d'%i))
left_indices.append (tf.placeholder(dtype=tf.int32, name='left_index_%d'%i))
right_indices.append(tf.placeholder(dtype=tf.int32, name='right_index_%d'%i))
# build computation graph
dec_hiddens, dec_actions, train_ops = pointer_net(enc_input, input_indices, valid_indices, left_indices, right_indices, rewards)
####################
# run-time section
####################
lsize = 10
bsize = 32
all_feeds = []
all_feeds.extend(rewards)
all_feeds.extend(input_indices)
all_feeds.extend(valid_indices)
all_feeds.extend(right_indices)
all_feeds.extend(left_indices)
all_feeds.append(enc_input)
all_fetches = []
all_fetches.extend(dec_hiddens)
all_fetches.extend(dec_actions)
    # get reward and next index arrays from the (dummy) environment
    def take_action(action):
        """Dummy environment step used only by this test script.

        Given the previous batch of actions (or None for the very first
        step), return a constant reward of 1 per action plus fixed index
        arrays built from the enclosing scope's ``bsize``/``lsize``.
        """
        input_idx = np.repeat(np.arange(2).astype(np.int32).reshape(1, -1), bsize, axis=0)
        valid_idx = np.repeat(np.arange(lsize).astype(np.int32).reshape(1, -1), bsize, axis=0)
        left_idx = np.repeat(np.arange(lsize).astype(np.int32).reshape(1, -1), bsize, axis=0)
        right_idx = np.repeat(np.arange(lsize).astype(np.int32).reshape(1, -1), bsize, axis=0)
        # No reward exists before the first action has been taken.
        if action is None:
            reward = None
        else:
            reward = np.ones(action.shape)
        return reward, input_idx, valid_idx, left_idx, right_idx
with tf.Session() as sess:
tf.initialize_all_variables().run()
enc_input_np = np.random.randint(0, vsize, size=[bsize, lsize]).astype(np.int32)
_, init_inidx_np, init_vdidx_np, init_ltidx_np, init_rtidx_np = take_action(None)
bucket_id = bisect.bisect_left(buckets, lsize)
train_op = train_ops[bucket_id]
print train_op
# bucket_id = bisect.bisect_left(buckets, lsize)
# grad_w = grad_params_buckets[bucket_id]
##############################
input_indices_np, valid_indices_np, left_indices_np, right_indices_np = [], [], [], []
hiddens_np, actions_np, rewards_np = [], [], []
input_indices_np.append(init_inidx_np)
valid_indices_np.append(init_vdidx_np)
left_indices_np.append(init_ltidx_np)
right_indices_np.append(init_rtidx_np)
# t = time()
# feed_dict={enc_input:enc_input_np}
# for i in range(lsize):
# # t_i = time()
# feed_dict.update({input_indices[i]:input_indices_np[i],
# valid_indices[i]:valid_indices_np[i],
# left_indices[i]:left_indices_np[i],
# right_indices[i]:right_indices_np[i]})
# h_i_np, a_i_np = sess.run([dec_hiddens[i], dec_actions[i]], feed_dict=feed_dict)
# hiddens_np.append(h_i_np)
# actions_np.append(a_i_np)
# reward_i, input_idx_np, valid_idx_np, left_idx_np, right_idx_np = take_action(actions_np[i])
# rewards_np.append(reward_i)
# input_indices_np.append(input_idx_np)
# valid_indices_np.append(valid_idx_np)
# left_indices_np.append(left_idx_np)
# right_indices_np.append(right_idx_np)
# # print i, time() - t_i
# print time() - t
# t = time()
# # feed_dict.update({go:go_np for go, go_np in zip(rewards, rewards_np)})
# # grad_w_np_2 = sess.run(grad_w, feed_dict=feed_dict)
# sess.run(train_op, feed_dict=feed_dict)
# print time() - t
##############################
##############################
input_indices_np, valid_indices_np, left_indices_np, right_indices_np = [], [], [], []
hiddens_np, actions_np, rewards_np = [], [], []
input_indices_np.append(init_inidx_np)
valid_indices_np.append(init_vdidx_np)
left_indices_np.append(init_ltidx_np)
right_indices_np.append(init_rtidx_np)
t = time()
# handle = sess.partial_run_setup(all_fetches+grad_w, all_feeds)
handle = sess.partial_run_setup(all_fetches+[train_op], all_feeds)
for i in range(lsize):
# t_i = time()
feed_dict = {input_indices[i]:input_indices_np[i],
valid_indices[i]:valid_indices_np[i],
left_indices[i]:left_indices_np[i],
right_indices[i]:right_indices_np[i]}
if i == 0:
feed_dict.update({enc_input:enc_input_np})
h_i_np, a_i_np = sess.partial_run(handle, [dec_hiddens[i], dec_actions[i]], feed_dict=feed_dict)
hiddens_np.append(h_i_np)
actions_np.append(a_i_np)
reward_i, input_idx_np, valid_idx_np, left_idx_np, right_idx_np = take_action(actions_np[i])
rewards_np.append(reward_i)
input_indices_np.append(input_idx_np)
valid_indices_np.append(valid_idx_np)
left_indices_np.append(left_idx_np)
right_indices_np.append(right_idx_np)
# print i, time() - t_i
print time() - t
p_before = sess.run(pointer_net.params[0])
t = time()
# grad_w_np_1 = sess.partial_run(handle, grad_w, feed_dict={go:go_np for go, go_np in zip(rewards, rewards_np)})
sess.partial_run(handle, train_op, feed_dict={go:go_np for go, go_np in zip(rewards, rewards_np)})
print time() - t
p_after = sess.run(pointer_net.params[0])
print np.allclose(p_before, p_after)
# # # print type(grad_w_np_1), type(grad_w_np_2)
# for g1, g2 in zip(grad_w_np_1, grad_w_np_2):
# if type(g1) != type(g2):
# print 'diff in type', type(g1), type(g2)
# continue
# elif not isinstance(g1, np.ndarray):
# print 'not numpy array', type(g1), type(g2)
# continue
# if not np.allclose(g1, g2):
# print 'g1', np.max(g1), np.min(g1)
# print 'g2', np.max(g2), np.min(g2)
# else:
# print 'Pass: g1 = g2', g1.shape, g2.shape
# if np.allclose(g1, np.zeros_like(g1)):
# print 'Fail: g1 != 0', np.max(g1), np.min(g1)
# if np.allclose(g2, np.zeros_like(g2)):
# print 'Fail: g2 != 0', np.max(g2), np.min(g2)
| 41.453237 | 132 | 0.596234 | 4,071 | 0.353263 | 0 | 0 | 0 | 0 | 0 | 0 | 3,011 | 0.261281 |
e24ed43a11e8467a1fca0b6860db937ea540c6a5 | 7,695 | py | Python | perceiver_io/flow_perceiver.py | JOBR0/PerceiverIO_Pytorch | 64947cfd998571c89d48e95e482ce50c43cbe2d9 | [
"Apache-2.0"
] | 2 | 2022-02-15T08:11:07.000Z | 2022-02-25T19:48:07.000Z | perceiver_io/flow_perceiver.py | JOBR0/PerceiverIO_Pytorch | 64947cfd998571c89d48e95e482ce50c43cbe2d9 | [
"Apache-2.0"
] | null | null | null | perceiver_io/flow_perceiver.py | JOBR0/PerceiverIO_Pytorch | 64947cfd998571c89d48e95e482ce50c43cbe2d9 | [
"Apache-2.0"
] | null | null | null | import itertools
from typing import Sequence
import torch.nn as nn
import torch
from perceiver_io.io_processors.preprocessors import ImagePreprocessor
from perceiver_io.io_processors.processor_utils import patches_for_flow
from perceiver_io.output_queries import FlowQuery
from perceiver_io.perceiver import PerceiverIO
from timm.models.layers import to_2tuple
import torch.nn.functional as F
from torch.cuda.amp import autocast
from perceiver_io.position_encoding import PosEncodingType
from perceiver_io.io_processors.postprocessors import FlowPostprocessor
class FlowPerceiver(nn.Module):
"""
FlowPerceiver: Perceiver for optical flow
Args:
img_size (Sequence[int]): Size of training images (height x width). Default: (368, 496)
flow_scale_factor (int): Factor by which the output is multiplied
https://github.com/deepmind/deepmind-research/issues/266. Default: 0.2
num_latents (int): Number of latent variables. Default: 2048
n_latent_channels (int): Number of latent channels. Default: 512
num_self_attends_per_block (int): Number of self attention layers. Default: 24
num_blocks (int): Number of blocks. All blocks share weights. Default: 1
mixed_precision (bool): Whether to run the perceiver in mixed precision. Default: False
"""
def __init__(
self,
img_size: Sequence[int] = (368, 496),
flow_scale_factor: int = 20/100,
num_latents: int = 2048,
num_latent_channels=512,
num_self_attends_per_block: int = 24,
num_blocks: int = 1,
mixed_precision: bool = False):
super().__init__()
self._flow_scale_factor = flow_scale_factor
self.mixed_precision = mixed_precision
channels = 3
patch_size = 3
preprocessor_channels = 64
input_preprocessor = ImagePreprocessor(
img_size=img_size,
input_channels=channels * patch_size ** 2,
position_encoding_type=PosEncodingType.FOURIER,
fourier_position_encoding_kwargs=dict(
num_bands=64,
max_resolution=img_size,
sine_only=False,
concat_pos=True,
),
n_extra_pos_mlp=0,
prep_type="patches",
spatial_downsample=1,
conv_after_patching=True,
temporal_downsample=2,
num_channels=preprocessor_channels)
perceiver_encoder_kwargs = dict(
num_self_attend_heads=16,
)
perceiver_decoder_kwargs = dict(
output_w_init="zeros",
)
output_query = FlowQuery(
preprocessed_input_channels=input_preprocessor.n_output_channels(),
output_img_size=img_size,
output_num_channels=2,
)
postprocessor = FlowPostprocessor(
img_size=img_size,
flow_scale_factor=flow_scale_factor
)
self.perceiver = PerceiverIO(
final_project_out_channels=2,
num_blocks=num_blocks,
num_self_attends_per_block=num_self_attends_per_block,
num_latents=num_latents,
num_latent_channels=num_latent_channels,
perceiver_encoder_kwargs=perceiver_encoder_kwargs,
perceiver_decoder_kwargs=perceiver_decoder_kwargs,
output_queries=output_query,
input_preprocessors=input_preprocessor,
output_postprocessors=postprocessor,)
self.H, self.W = to_2tuple(img_size)
def compute_grid_indices(self, image_shape: tuple, min_overlap: int):
"""
Compute top-left corner coordinates for patches
Args:
image_shape (tuple): Height and width of the input image
min_overlap (int): Minimum number of pixels that two patches overlap
"""
if min_overlap >= self.H or min_overlap >= self.W:
raise ValueError(
f"Overlap should be less than size of patch (got {min_overlap}"
f"for patch size {(self.H, self.W)}).")
ys = list(range(0, image_shape[0], self.H - min_overlap))
xs = list(range(0, image_shape[1], self.W - min_overlap))
# Make sure the final patch is flush with the image boundary
ys[-1] = image_shape[0] - self.H
xs[-1] = image_shape[1] - self.W
# Avoid predicting same patch multiple times
if image_shape[0] == self.H:
ys = [0]
if image_shape[1] == self.W:
xs = [0]
return itertools.product(ys, xs)
def _predict_patch(self, patch):
"""Predict flow for one image patch as big as training images"""
with autocast(enabled=self.mixed_precision):
# Extract overlapping 3x3 patches
patch = patches_for_flow(patch).movedim(-1, -3)
output = self.perceiver(patch)
return output
def forward(self, image1: torch.Tensor, image2: torch.Tensor, test_mode: bool = False, min_overlap: int = 20):
"""
Computes forward pass for flow perceiver
Args:
image1 (torch.Tensor): source images (N, C, H, W).
image2 (torch.Tensor): target images (N, C, H, W).
test_mode (bool): If in test mode. Default: False
min_overlap (int): Minimum overlap of patches if images are bigger than training size. Default: 20
"""
height = image1.shape[2]
width = image1.shape[3]
image1 = image1.contiguous()
image2 = image2.contiguous()
# Stack in time dimension
inputs = torch.stack([image1, image2], axis=1)
if height < self.H:
raise ValueError(
f"Height of image (shape: {image1.shape}) must be at least {self.H:}."
"Please pad or resize your image to the minimum dimension."
)
if width < self.W:
raise ValueError(
f"Width of image (shape: {image1.shape}) must be at least {self.W}."
"Please pad or resize your image to the minimum dimension."
)
if test_mode:
# in test_mode, image size can be arbitrary
# the flow is predicted for patches of training size and than stitched together
flows = 0
flow_count = 0
grid_indices = self.compute_grid_indices((height, width), min_overlap)
for y, x in grid_indices:
inp_piece = inputs[..., y: y + self.H, x: x + self.W]
flow_piece = self._predict_patch(inp_piece)
# weights should give more weight to flow from center of patches
weights_y, weights_x = torch.meshgrid(torch.arange(self.H), torch.arange(self.W), indexing="ij")
weights_x = torch.minimum(weights_x + 1, self.W - weights_x)
weights_y = torch.minimum(weights_y + 1, self.H - weights_y)
weights = torch.minimum(weights_x, weights_y)[None, None, :, :]
weights = weights / weights.max()
weights = weights.to(flow_piece.device)
padding = (x, width - x - self.W, y, height - y - self.H)
flows = flows + F.pad(flow_piece * weights, padding)
flow_count = flow_count + F.pad(weights, padding)
flows = flows / flow_count
output = flows
else:
assert height == self.H and width == self.W, \
f"In training mode images must have size equal to specified img_size {(self.H, self.W)}"
output = self._predict_patch(inputs)
return output
| 38.475 | 114 | 0.617024 | 7,126 | 0.926056 | 0 | 0 | 0 | 0 | 0 | 0 | 2,215 | 0.287849 |
e25019f223c74f6ce84bf87834f9d201b41fe7b3 | 489 | bzl | Python | third_party/commons_fileupload.bzl | wix-playground/rules_maven_third_party | ff0b486df194779d7d8e6c9102cd12138e3305c3 | [
"Apache-2.0"
] | null | null | null | third_party/commons_fileupload.bzl | wix-playground/rules_maven_third_party | ff0b486df194779d7d8e6c9102cd12138e3305c3 | [
"Apache-2.0"
] | null | null | null | third_party/commons_fileupload.bzl | wix-playground/rules_maven_third_party | ff0b486df194779d7d8e6c9102cd12138e3305c3 | [
"Apache-2.0"
] | null | null | null | load(":import_external.bzl", import_external = "import_external")
def dependencies():
import_external(
name = "commons_fileupload_commons_fileupload",
artifact = "commons-fileupload:commons-fileupload:1.4",
artifact_sha256 = "a4ec02336f49253ea50405698b79232b8c5cbf02cb60df3a674d77a749a1def7",
srcjar_sha256 = "2acfe29671daf8c94be5d684b8ac260d9c11f78611dff4899779b43a99205291",
excludes = [
"commons-io:commons-io",
],
)
| 37.615385 | 93 | 0.713701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.564417 |
e250f528fd76634c6baafa2a9c6f0344b23f0a5b | 8,654 | py | Python | old/old/model_tests.py | avigad/boole | 2a436c2967dbc968f6a5877c220b9757c3bc17c3 | [
"Apache-2.0"
] | 16 | 2015-01-01T18:21:35.000Z | 2021-11-20T00:39:25.000Z | old/old/model_tests.py | avigad/boole | 2a436c2967dbc968f6a5877c220b9757c3bc17c3 | [
"Apache-2.0"
] | null | null | null | old/old/model_tests.py | avigad/boole | 2a436c2967dbc968f6a5877c220b9757c3bc17c3 | [
"Apache-2.0"
] | 1 | 2021-05-14T11:12:31.000Z | 2021-05-14T11:12:31.000Z | ##################################################
#
# Tests for model.py
#
#
#
#
#
#
#
#
#
#
#
##################################################
from boole.core.model import *
from boole.core.language import clear_default_language
from nose.tools import *
def is_prime(x):
if x == 0 or x == 1:
return False
elif x == 2:
return True
else:
for i in range(2, x):
if x % i == 0:
return False
return True
def test_val_strict():
#It is annoying that types can not be redefined: turn into a warning?
clear_default_language()
x, y, z = Int('x y z')
p, q, r, s = Bool('p q r s')
People = EnumType('People', ['Alice', 'Bob', 'Carol'])
Alice, Bob, Carol = People.make_constants()
u1, u2, u3, u4, u5 = People('u1 u2 u3 u4 u5')
assert_equal(val_strict(ii(3)), 3)
assert_equal(val_strict(rr(4.5)), 4.5)
assert_equal(val_strict(-ii(3) + (4.5) * (2)), 6)
assert_equal(val_strict(Alice), 'Alice')
assert_equal(val_strict(Bob), 'Bob')
assert(val_strict(Forall(u1, (u1 == Alice) | (u1 == Bob) | (u1 == Carol))))
assert(not val_strict(Forall(u1, (u1 == Alice) | (u1 == Bob))))
assert(not val_strict(true != true))
assert(not val_strict(Exists([u1, u2, u3, u4], And(u1 != u2, u1 != u3, u1 != u4,
u2 != u3, u2 != u4, u3 != u4))))
assert(val_strict(true & (false >> true)))
assert(not val_strict(true & ~(false >> true)))
assert(val_strict(Abs([x, y], x + y)((5), (7))))
assert(val_strict(Exists(p, p)))
e = Exists([p, q, r], (p >> q & r) & ~(r >> p & q))
assert(val_strict(e))
assert(not val_strict(Forall([p,q], Exists(r, p >> r & q >> ~r))))
assert(val_strict(Forall([p,q], (((p >> q) >> p) >> p))))
a, b, c = Int('a, b, c')
Even = Const('Even', Int >> Bool)
Prime = Const('Prime', Int >> Bool)
suc, square = (Int >> Int)('suc, square')
a, b, c = Int('a, b, c')
Even = Const('Even', Int >> Bool)
Prime = Const('Prime', Int >> Bool)
suc, square = (Int >> Int)('suc, square')
M = Model({(a, 5), (b, 2), (c, 7)})
M[Int] = dom_range(0,20)
M[Even] = lambda x: x % 2 == 0
M[Prime] = is_prime
M[suc] = lambda x: x + 1
M[square] = lambda x: x * x
assert_equal(val_strict(a, M), 5)
assert_equal(val_strict(a + b * c, M), 19)
assert(val_strict(Exists(x, b + x == c), M))
assert(not val_strict(Even(a), M))
assert(val_strict(Prime((23)), M))
assert(not val_strict(Prime((22)), M))
assert(val_strict(And(Prime(a), Prime(b), Prime(c)), M))
assert(val_strict(Even(c) | And(Prime(a), Prime(b), Prime(c)), M))
assert(not val_strict(Even(c) | And(Prime(suc(a)), Prime(suc(b)), Prime(c)), M))
assert(val_strict(Exists(x, Even(x)), M))
assert(val_strict(Exists(x, And(Prime(x), Even(x))), M))
assert(not val_strict(Exists(x, And(Prime(x), Even(x), c < x)), M))
assert(val_strict(Exists([x, y], And(Prime(x), Prime(y), x < y)), M))
assert(val_strict(Exists([x, y], And(Prime(x), Prime(y), x != y)), M))
assert(not val_strict(Exists([x, y], And(Prime(x), Prime(y), x < y, Even(y))), M))
assert(val_strict(Exists([x, y], And(Prime(x), Prime(y), x < y, Even(x))), M))
assert(not val_strict(Forall(x, Even(x)), M))
assert(val_strict(Forall(x, Or(Even(x), ~Even(x))), M))
assert(val_strict(Forall(x, Even(x) >> ~Even(suc(x))), M))
assert(val_strict(Forall(x, Even(x) >> Even(square(x))), M))
assert(not val_strict(Exists(x, And(Even(x), ~Even(square(x)))), M))
assert(val_strict(Forall(x, Even(square(x)) >> Even(x)), M))
assert(not val_strict(Forall([x, y], And(Prime(x), Prime(y), x < y) >> Even(x)), M))
assert(val_strict(Forall([x, y], And(Prime(x), Prime(y), x < y) >> ~Even(y)), M))
assert(not val_strict(Forall(x, Exists(y, x < y)), M))
assert(not val_strict(Forall([x, y], x < y >> Exists(z, And(x < z, z < y))), M))
assert(val_strict(Forall([x, y], And(Even(x), Even(y), x < y) >>
Exists(z, (x < z) & (z < y))), M))
def precond(n):
return ((2) < n) & Even(n)
def goldbach(n):
return precond(n) >> Exists([x,y], Prime(x) & Prime(y) & (x + y == n))
Goldbach = Forall(z, goldbach(z))
assert(val_strict(Goldbach, M))
def test_val_non_strict():
clear_default_language()
x, y, z = Int('x y z')
p, q, r, s = Bool('p q r s')
People = EnumType('People', ['Alice', 'Bob', 'Carol'])
Alice, Bob, Carol = People.make_constants()
u1, u2, u3, u4, u5 = People('u1 u2 u3 u4 u5')
assert_equal(val_non_strict(ii(3)), 3)
assert_equal(val_non_strict(rr(4.5)), 4.5)
assert_equal(val_non_strict(-(3) + (4.5) * ii(2)), 6)
assert_equal(val_non_strict(Alice), 'Alice')
assert_equal(val_non_strict(Bob), 'Bob')
assert_equal(val_non_strict(x), None)
assert(val_non_strict(Forall(u1, (u1 == Alice) | (u1 == Bob) | (u1 == Carol))))
assert(not val_non_strict(Forall(u1, (u1 == Alice) | (u1 == Bob))))
assert(not val_non_strict(true != true))
assert(not val_non_strict(Exists([u1, u2, u3, u4], And(u1 != u2, u1 != u3, u1 != u4,
u2 != u3, u2 != u4, u3 != u4))))
assert(val_non_strict(true & (false >> true)))
assert(not val_non_strict(true & ~(false >> true)))
assert(val_non_strict(Abs([x, y], x + y)((5), (7))))
assert(val_non_strict(Exists(p, p)))
e = Exists([p, q, r], (p >> q & r) & ~(r >> p & q))
assert(val_non_strict(e))
assert(not val_non_strict(Forall([p,q], Exists(r, p >> r & q >> ~r))))
assert(val_non_strict(Forall([p,q], (((p >> q) >> p) >> p))))
assert(val_non_strict(true | p))
a, b, c = Int('a, b, c')
Even = Const('Even', Int >> Bool)
Prime = Const('Prime', Int >> Bool)
suc, square = (Int >> Int)('suc, square')
a, b, c = Int('a, b, c')
Even = Const('Even', Int >> Bool)
Prime = Const('Prime', Int >> Bool)
suc, square = (Int >> Int)('suc, square')
M = Model({(a, 5), (b, 2), (c, 7)})
M[Int] = dom_range(0,20)
M[Even] = lambda x: x % 2 == 0
M[Prime] = is_prime
M[suc] = lambda x: x + 1
M[square] = lambda x: x * x
assert_equal(val_non_strict(a, M), 5)
assert_equal(val_non_strict(a + b * c, M), 19)
assert(val_non_strict(Exists(x, b + x == c), M))
assert(not val_non_strict(Even(a), M))
assert(val_non_strict(Prime((23)), M))
assert(not val_non_strict(Prime((22)), M))
assert(val_non_strict(And(Prime(a), Prime(b), Prime(c)), M))
assert(val_non_strict(Even(c) | And(Prime(a), Prime(b), Prime(c)), M))
assert(not val_non_strict(Even(c) | And(Prime(suc(a)), Prime(suc(b)), Prime(c)), M))
assert(val_non_strict(Exists(x, Even(x)), M))
assert(val_non_strict(Exists(x, And(Prime(x), Even(x))), M))
assert(not val_non_strict(Exists(x, And(Prime(x), Even(x), c < x)), M))
assert(val_non_strict(Exists([x, y], And(Prime(x), Prime(y), x < y)), M))
assert(val_non_strict(Exists([x, y], And(Prime(x), Prime(y), x != y)), M))
assert(not val_non_strict(Exists([x, y], And(Prime(x), Prime(y), x < y, Even(y))), M))
assert(val_non_strict(Exists([x, y], And(Prime(x), Prime(y), x < y, Even(x))), M))
assert(not val_non_strict(Forall(x, Even(x)), M))
assert(val_non_strict(Forall(x, Or(Even(x), ~Even(x))), M))
assert(val_non_strict(Forall(x, Even(x) >> ~Even(suc(x))), M))
assert(val_non_strict(Forall(x, Even(x) >> Even(square(x))), M))
assert(not val_non_strict(Exists(x, And(Even(x), ~Even(square(x)))), M))
assert(val_non_strict(Forall(x, Even(square(x)) >> Even(x)), M))
assert(not val_non_strict(Forall([x, y], And(Prime(x), Prime(y), x < y) >> Even(x)), M))
assert(val_non_strict(Forall([x, y], And(Prime(x), Prime(y), x < y) >> ~Even(y)), M))
assert(not val_non_strict(Forall(x, Exists(y, x < y)), M))
assert(not val_non_strict(Forall([x, y], x < y >> Exists(z, And(x < z, z < y))), M))
assert(val_non_strict(Forall([x, y], And(Even(x), Even(y), x < y) >>
Exists(z, (x < z) & (z < y))), M))
def precond(n):
return ((2) < n) & Even(n)
def goldbach(n):
return precond(n) >> Exists([x,y], Prime(x) & Prime(y) & (x + y == n))
Goldbach = Forall(z, goldbach(z))
assert(val_non_strict(Goldbach, M))
def test_lazy_models():
clear_default_language()
def nats():
i = 0
while True:
yield i
i += 1
nat_dom = Domain('nat', nats)
Prime = Const('Prime', Int >> Bool)
M = Model()
M[Int] = nat_dom
M[Prime] = is_prime
x = Int('x')
assert(val_strict(Exists(x, Prime(x)), M))
| 38.633929 | 92 | 0.555119 | 0 | 0 | 344 | 0.03975 | 0 | 0 | 0 | 0 | 498 | 0.057546 |
e2519328f39cccd9a36d2bf001e0ce801bf7f290 | 3,739 | py | Python | sniffer.py | meecash/rf4ce-tools | 0f3d11bc14c3fb099c45a54edab2d525672432d5 | [
"MIT"
] | null | null | null | sniffer.py | meecash/rf4ce-tools | 0f3d11bc14c3fb099c45a54edab2d525672432d5 | [
"MIT"
] | null | null | null | sniffer.py | meecash/rf4ce-tools | 0f3d11bc14c3fb099c45a54edab2d525672432d5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Sniffs RF4CE packets. Supports encryption.
"""
from __future__ import (absolute_import,
print_function, unicode_literals)
from builtins import *
import argparse
from datetime import datetime
import binascii
from rf4ce import Dot15d4FCS, Dot15d4Data, Raw
from rf4ce import LinkConfig, Rf4ceNode, Rf4ceFrame, Rf4ceException, Rf4ceMakeFCS
from rf4ce.radio import RxFlow
from rf4ce.packetprocessor import PacketProcessor
import huepy as hue
class SnifferProcessor(PacketProcessor):
"""Sniffer Packet processor
Parses incoming packets
If possible, decode them
"""
def __init__(self, link_configs=[]):
PacketProcessor.__init__(self)
self.link_configs = link_configs
def process(self, data):
print(hue.bold(hue.green("\n------ {} ------".format(datetime.now()))))
print(hue.yellow("Full packet data: ") + hue.italic(binascii.hexlify(data)))
# Checks if the 802.15.4 packet is valid
if Rf4ceMakeFCS(data[:-2]) != data[-2:]:
print(hue.bad("Invalid packet"))
return
# Parses 802.15.4 packet
packet = Dot15d4FCS(data)
packet.show()
if packet.fcf_frametype == 2: # ACK
return
# Tries to match received packet with a known link
# configuration
matched = False
for link in self.link_configs:
if packet.dest_panid != link.dest_panid:
continue
if packet.fcf_srcaddrmode == 3: # Long addressing mode
if packet.src_addr != link.source.get_long_address():
continue
if packet.dest_addr != link.destination.get_long_address():
continue
else:
if packet.src_addr != link.source.get_short_address():
continue
if packet.dest_addr != link.destination.get_short_address():
continue
source = link.source
destination = link.destination
key = link.key
matched = True
if not matched:
if packet.fcf_srcaddrmode == 3:
source = Rf4ceNode(packet.src_addr, None)
destination = Rf4ceNode(packet.dest_addr, None)
else:
source = Rf4ceNode(None, packet.src_addr)
destination = Rf4ceNode(None, packet.dest_addr)
key = None
# Process RF4CE payload
frame = Rf4ceFrame()
try:
rf4ce_payload = bytes(packet[3])
except:
_, e, _ = sys.exc_info()
print(hue.bad("Raw payload not present: {}".format(e)))
return
try:
frame.parse_from_string(rf4ce_payload, source, destination, key)
except Rf4ceException:
_, e, _ = sys.exc_info()
print(hue.bad("Cannot parse RF4CE frame: {}".format(e)))
return
print("###[ " + hue.bold(hue.yellow("RF4CE")) + " ]###")
print(frame)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--link", help="JSON file containing link information")
parser.add_argument("-c", "--channel", help="RF4CE channel (default: 15)", type=int,
choices=[11, 15, 20, 25, 26], default=15)
parser.add_argument("-s", "--sdr", help="SDR Device to use (default: pluto-sdr)",
choices=["hackrf", "pluto-sdr"], default="pluto-sdr")
parser.add_argument("-o", "--output", help="File to store pacp dump")
args = parser.parse_args()
if args.link:
try:
link_config = LinkConfig(args.link)
except:
print(hue.bad("Cannot load configuration file"))
exit(-1)
if args.link:
print(link_config)
print(hue.info("Sniffing on channel {}".format(args.channel)))
if args.link:
sniffer_processor = SnifferProcessor([link_config])
else:
sniffer_processor = SnifferProcessor([])
tb = RxFlow(args.channel, sniffer_processor, args.sdr, args.output)
sniffer_processor.start()
tb.start()
try:
raw_input(hue.info('Sniffing...\n'))
except (EOFError, KeyboardInterrupt):
pass
print(hue.info("Exiting..."))
tb.stop()
tb.wait()
sniffer_processor.stop()
| 26.899281 | 85 | 0.693768 | 2,062 | 0.551484 | 0 | 0 | 0 | 0 | 0 | 0 | 802 | 0.214496 |
e257f22d28c17b249ec2d577cdbb9be878a8be8b | 213 | py | Python | plugins/plot/__init__.py | DeStream-dev/electrum-destream | 18bcaadaea65ff2fb0333787111d8e0ddf81a3d8 | [
"MIT"
] | 26 | 2017-06-09T04:13:13.000Z | 2021-11-15T11:35:30.000Z | plugins/plot/__init__.py | DeStream-dev/electrum-destream | 18bcaadaea65ff2fb0333787111d8e0ddf81a3d8 | [
"MIT"
] | 29 | 2017-05-07T05:08:06.000Z | 2021-02-19T13:15:03.000Z | plugins/plot/__init__.py | DeStream-dev/electrum-destream | 18bcaadaea65ff2fb0333787111d8e0ddf81a3d8 | [
"MIT"
] | 21 | 2017-05-31T14:24:20.000Z | 2021-01-30T17:35:43.000Z | # from electrum_stratis.i18n import _
# fullname = 'Plot History'
# description = _("Ability to plot transaction history in graphical mode.")
# requires = [('matplotlib', 'matplotlib')]
# available_for = ['qt']
| 30.428571 | 75 | 0.70892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.971831 |
e2590978c70875453f6ec382067fe3808d7c2f53 | 2,600 | py | Python | setup.py | mrsantos321/customhead | bb70867afc34abe7a2ef76bb0f92c08d7ff38214 | [
"MIT"
] | 3 | 2019-10-08T06:02:23.000Z | 2020-01-22T09:14:35.000Z | setup.py | mrsantos321/cushead | bb70867afc34abe7a2ef76bb0f92c08d7ff38214 | [
"MIT"
] | 297 | 2019-08-22T19:45:23.000Z | 2022-03-26T02:30:25.000Z | setup.py | mrsantos321/cushead | bb70867afc34abe7a2ef76bb0f92c08d7ff38214 | [
"MIT"
] | 5 | 2019-09-25T02:35:04.000Z | 2021-03-31T04:23:47.000Z | #!/usr/bin/env python3
"""
Host the setup function.
"""
import pathlib
import setuptools
from cushead import info
def setup() -> None:
"""
Execute the setup.
"""
assets_path = pathlib.Path(info.PACKAGE_NAME) / "console" / "assets" / "images"
templates_path = pathlib.Path(info.PACKAGE_NAME) / "generator" / "templates" / "jinja" / "templates"
setuptools.setup(
name=info.PACKAGE_NAME,
version=info.PACKAGE_VERSION,
entry_points={"console_scripts": [f"{info.PACKAGE_NAME}={info.PACKAGE_NAME}.console.console:main"]},
url=info.SOURCE,
project_urls={
"Documentation": info.DOCUMENTATION,
"Source": info.SOURCE,
},
python_requires=f">={info.PYTHON_MIN_VERSION[0]}.{info.PYTHON_MIN_VERSION[1]}",
packages=setuptools.find_packages(exclude=(str(file) for file in pathlib.Path("").iterdir() if str(file) != info.PACKAGE_NAME)),
include_package_data=True,
data_files=[
("", ["requirements.txt", "LICENSE.md", "README.md"]),
(assets_path, [str(file) for file in pathlib.Path(assets_path).iterdir()]),
(templates_path, [str(file) for file in pathlib.Path(templates_path).iterdir()]),
],
zip_safe=False,
install_requires=pathlib.Path("requirements.txt").read_text().split(),
author=info.AUTHOR,
author_email=info.EMAIL,
description=info.DESCRIPTION,
long_description=pathlib.Path("README.md").read_text(),
long_description_content_type="text/markdown",
license=info.PACKAGE_LICENSE,
keywords=info.KEYWORDS,
platforms="any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Utilities",
"Topic :: Software Development",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: User Interfaces",
],
)
if __name__ == "__main__":
setup()
| 37.681159 | 136 | 0.602692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,049 | 0.403462 |
e259758863b658de0af08d2a0152919880996008 | 414 | py | Python | firmware/camera.py | iasapomeavoc1/bike-computer | f1a2a0752bc0e4e745e104ab7f7bf27741d00ebe | [
"MIT"
] | null | null | null | firmware/camera.py | iasapomeavoc1/bike-computer | f1a2a0752bc0e4e745e104ab7f7bf27741d00ebe | [
"MIT"
] | 3 | 2021-06-08T22:38:16.000Z | 2022-01-13T03:28:03.000Z | firmware/camera.py | iasapomeavoc1/bike-computer | f1a2a0752bc0e4e745e104ab7f7bf27741d00ebe | [
"MIT"
] | null | null | null | #!~/bike-computer/.venv/bin python
import picamera
import datetime
path = '/home/pi/bike-computer/data/'
with picamera.PiCamera(resolution=(1640,1232),framerate=30) as camera:
try:
while True:
#start_time = datetime.datetime.now().timestamp()
for filename in camera.record_sequence(path+'%d.h264' % i for i in range(1, 6)):
camera.wait_recording(300)
finally:
camera.stop_recording()
| 24.352941 | 86 | 0.707729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.294686 |
e25abae44cf8de38d1a836f6f244ba6f1798442d | 2,687 | py | Python | tests/test_chars_duplicates.py | xv44586/pdfplumber | a20d4d1eea522753717ea09f30527efa519b6a91 | [
"MIT"
] | null | null | null | tests/test_chars_duplicates.py | xv44586/pdfplumber | a20d4d1eea522753717ea09f30527efa519b6a91 | [
"MIT"
] | null | null | null | tests/test_chars_duplicates.py | xv44586/pdfplumber | a20d4d1eea522753717ea09f30527efa519b6a91 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import unittest
import pytest
import sys, os
import logging
import pdfplumber
from pdfplumber import table
from pdfplumber.utils import Decimal
logging.disable(logging.ERROR)
HERE = os.path.abspath(os.path.dirname(__file__))
class Test(unittest.TestCase):
@classmethod
def setup_class(self):
path = os.path.join(HERE, "pdfs/issue-chars-duplicates.pdf")
self.pdf = pdfplumber.open(path)
@classmethod
def teardown_class(self):
self.pdf.close()
def test_extract_table(self):
page = self.pdf.pages[0]
table_without_drop_duplicates = page.extract_table(drop_duplicates=False)
table_with_drop_duplicates = page.extract_table(drop_duplicates=True)
last_line_without_drop = table_without_drop_duplicates[1][1].split('\n')[-1]
last_line_with_drop = table_with_drop_duplicates[1][1].split('\n')[-1]
assert last_line_without_drop == '微微软软 培培训训课课程程:: 名名模模意意义义一一些些有有意意义义一一些些'
assert last_line_with_drop == '微软 培训课程: 名模意义一些有意义一些'
def test_extract_words(self):
page = self.pdf.pages[0]
x0 = Decimal('440.143')
x1_without_drop = Decimal('534.992')
x1_with_drop = Decimal('534.719')
top_windows = Decimal('791.849')
top_linux = Decimal('794.357')
bottom = Decimal('802.961')
last_words_without_drop = page.extract_words(drop_duplicates=False)[-1]
last_words_with_drop = page.extract_words(drop_duplicates=True)[-1]
assert last_words_without_drop['x0'] == x0
assert last_words_without_drop['x1'] == x1_without_drop
assert last_words_without_drop['top'] == top_windows or last_words_without_drop['top'] == top_linux
assert last_words_without_drop['bottom'] == bottom
assert last_words_without_drop['upright'] == 1
assert last_words_without_drop['text'] == '名名模模意意义义一一些些有有意意义义一一些些'
assert last_words_with_drop['x0'] == x0
assert last_words_with_drop['x1'] == x1_with_drop
assert last_words_with_drop['top'] == top_windows or last_words_with_drop['top'] == top_linux
assert last_words_with_drop['bottom'] == bottom
assert last_words_with_drop['upright'] == 1
assert last_words_with_drop['text'] == '名模意义一些有意义一些'
def test_extract_text(self):
page = self.pdf.pages[0]
last_line_without_drop = page.extract_text(drop_duplicates=False).split('\n')[-1]
last_line_with_drop = page.extract_text(drop_duplicates=True).split('\n')[-1]
assert last_line_without_drop == '微微软软 培培训训课课程程:: 名名模模意意义义一一些些有有意意义义一一些些'
assert last_line_with_drop == '微软 培训课程: 名模意义一些有意义一些'
| 38.385714 | 107 | 0.696316 | 2,715 | 0.914449 | 0 | 0 | 216 | 0.072752 | 0 | 0 | 651 | 0.219266 |
e25cea03950fb0b1dd153a0a8cd9fe1c0925b105 | 546 | py | Python | project_1_1/src/model.py | jonassoebro/Deep-Learning-in-Computer-Vision | 3167ee9a4c433bf4491194e334b5c14de972c7cf | [
"MIT"
] | null | null | null | project_1_1/src/model.py | jonassoebro/Deep-Learning-in-Computer-Vision | 3167ee9a4c433bf4491194e334b5c14de972c7cf | [
"MIT"
] | null | null | null | project_1_1/src/model.py | jonassoebro/Deep-Learning-in-Computer-Vision | 3167ee9a4c433bf4491194e334b5c14de972c7cf | [
"MIT"
] | 1 | 2021-06-08T09:28:01.000Z | 2021-06-08T09:28:01.000Z | import torchvision.models as models
from torch.nn import Module, Sequential, Linear
class Model(Module):
def __init__(self, pretrained: bool = False, in_dim: int = 2048, out_dim: int = 256):
super(Model, self).__init__()
self.resnet = Sequential(*list(models.resnet50(pretrained=pretrained).children())[:-1])
self.linear = Linear(in_features=in_dim, out_features=out_dim, bias=True)
def forward(self, x):
x = self.resnet(x)
x = x.view(x.size(0), -1)
x = self.linear(x)
return x
| 32.117647 | 95 | 0.648352 | 459 | 0.840659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e25d5a2f9fb54ae920deb8742f67c3b04abb5bc0 | 17,625 | py | Python | load_data.py | UrbanInstitute/nccs-public | fcf45aa51f39645049ab8bee244f5971bc7beed9 | [
"MIT"
] | 7 | 2019-01-22T19:22:01.000Z | 2022-03-02T22:18:58.000Z | load_data.py | UrbanInstitute/nccs-public | fcf45aa51f39645049ab8bee244f5971bc7beed9 | [
"MIT"
] | 1 | 2021-07-20T04:17:50.000Z | 2022-03-02T22:23:43.000Z | load_data.py | UrbanInstitute/nccs-public | fcf45aa51f39645049ab8bee244f5971bc7beed9 | [
"MIT"
] | null | null | null | import os, sys
import re
import zipfile
import requests
import warnings
import logging
import pandas as pd
import numpy as np
from stat import S_IREAD, S_IRGRP, S_IROTH
import getpass
import pymysql
# Code by Jeff Levy (jlevy@urban.org), 2016-2017
class LoadData():
"""
This class is inherited by the Data class, and contains the methods related to retrieving data remotely.
From the web, that includes the raw 990 IRS data, the raw epostcard (990N) IRS data, and the raw BMR IRS
data. From NCCS MySQL, it has the methods for nteedocAllEins, lu_fipsmsa, and all of the prior NCCS
core file releases.
"""
def get_urls(self):
"""
Base method for loading the URLs necessary for downloads into memory.
Main core file URL: https://www.irs.gov/uac/soi-tax-stats-annual-extract-of-tax-exempt-organization-financial-data
ARGUMENTS
None
RETURNS
None
"""
main = self.main
path = main.path
entries = {'PF':{}, 'EZ':{}, 'Full':{}, 'BMF':{}, 'epostcard':{}}
entries = self.form_urls(entries, path)
entries = self.epost_urls(entries, path)
entries = self.bmf_urls(entries, path)
self.urls = entries
def form_urls(self, entries, path):
"""
Processes the text file in the "settings/urls" folder for EZ, Full and PF download paths.
ARGUMENTS
entries (dict) : A dictionary with keys=form and values=URLs or dict of URLs
path (str) : The base path on the local system
RETURNS
entries (dict) : Updated with the core file URLs as an entry.
"""
main = self.main
urlregex = re.compile(r'(\d{4})\s*=\s*(https?:\/\/.+\.(dat|zip|csv|txt))\s*')
skipline = re.compile(r'^#')
for form in main.forms:
with open(os.path.join(path, 'settings', 'urls', form.lower()+'.txt')) as f:
for line in f:
regex_match = urlregex.match(line)
skip_match = skipline.match(line)
if regex_match and not skip_match:
year = int(regex_match.group(1))
url = regex_match.group(2)
entries[form][year] = url
print('')
return entries
def epost_urls(self, entries, path):
"""
Processes the text file in the "settings/urls" folder for the epostcard (990N) download path.
ARGUMENTS
entries (dict) : A dictionary with keys=form and values=URLs or dict of URLs
path (str) : The base path on the local system
RETURNS
entries (dict) : Updated with the epostcard URLs as an entry.
"""
epostregex = re.compile(r'(epostcard)\s*=\s*(https?:\/\/.+\.(dat|zip|csv|txt))\s*')
skipline = re.compile(r'^#')
with open(os.path.join(path, 'settings', 'urls', 'epostcard.txt')) as f:
for line in f:
regex_match = epostregex.match(line)
skip_match = skipline.match(line)
if regex_match and not skip_match:
url = regex_match.group(2)
entries['epostcard'] = url
return entries
def bmf_urls(self, entries, path):
"""
Processes the text file in the "settings/urls" folder for BMF download path.
ARGUMENTS
entries (dict) : A dictionary with keys=form and values=URLs or dict of URLs
path (str) : The base path on the local system
RETURNS
entries (dict) : Updated with the BMF URLs as an entry.
"""
bmfregex = re.compile(r'(region\d)\s*=\s*(https?:\/\/.+\.(dat|zip|csv|txt))\s*')
skipline = re.compile(r'^#')
with open(os.path.join(path, 'settings', 'urls', 'bmf.txt')) as f:
for line in f:
regex_match = bmfregex.match(line)
skip_match = skipline.match(line)
if regex_match and not skip_match:
url = regex_match.group(2)
region = regex_match.group(1)
entries['BMF'][region] = url
return entries
def download(self):
"""
Base method for downloading the main core files from the IRS, setting the EIN as the index, and
updating the SOURCE column with the appropriate file name.
ARGUMENTS
None
RETURNS
None
"""
main = self.main
delim = self.irs_delim
current_yr = self.core_file_year #int
main.logger.info('Beginning any necessary downloads from the IRS.')
for form in main.forms:
try:
url = self.urls[form][current_yr]
except KeyError:
raise Exception('URL not found for core file year {}, form {}. Please check the "urls" folder.'.format(current_year, form))
df = pd.read_csv(self.download_file(url), sep=delim, dtype='str')
#Most IRS files have EIN in caps, but at least one (2012 EZ) has it in lowercase
if 'ein' in df.columns:
df.rename(columns={'ein':'EIN'}, inplace=True)
df.set_index('EIN', inplace=True)
#adds the source file name as a column
df['SOURCE'] = url.split('/')[-1]
self.data_dict[form] = df
main.logger.info('Downloading complete.\n')
def sql_auth(self):
"""
Handles logging into the NCCS MySQL server, including prompting for credentials.
ARGUMENTS
None
RETURNS
None
"""
if self.get_from_sql:
self.main.logger.info('Authenticating connection to MySQL server...')
un = input(' MySQL user name: ')
if sys.stdin.isatty():
#program is being run in an interactive interpreter, and the password echo can't be shut off
pw = input(' MySQL password: ')
else:
#system is running from the command line, and password echo can be off
pw = getpass.getpass(prompt=' MySQL password: ')
try:
self.sql_connection = pymysql.connect(host=self.sql_server_name, db='nccs', user=un, password=pw)
except pymysql.OperationalError:
self.main.logger.info(' failed to connect to server; will try to load from downloads/nccs folder.\n')
self.sql_connection = None
else:
self.main.logger.info(' login successful, will attempt to retrieve all necessary data from the SQL database.\n')
else:
self.main.logger.info('Without logging into NCCS MySQL server, will look for all files in downloads/nccs folder.\n')
self.sql_connection = None
def close_sql(self):
"""
Cleanly shuts down the NCCS MySQL connection.
ARGUMENTS
None
RETURNS
None
"""
if self.get_from_sql:
self.main.logger.info('Cosing MySQL connection.')
self.sql_connection.close()
    def get_sql(self, fname, dbase, cols='*', index_col='EIN', match_dtypes=None, force_sql_cols=False):
        """
        Method for downloading a file, passed as the "fname" argument, from the MySQL connection established
        in the sql_auth method.
        It will first check its own cache to see if it has already downloaded the file and is holding it in
        memory, then it will look in the "downloads/nccs" folder to see if that exact fname has already been
        downloaded. Only if both of those are false will it connect to MySQL to retrieve the file.
        For users off the Urban campus or without a login to the NCCS MySQL server, having all the necessary
        files as .csv documents in the "downloads/nccs" folder means the program can still build. See
        "folder instructions.txt" in that folder for more details.
        ARGUMENTS
        fname (str): Name of the SQL table / csv file (without extension) to load.
        dbase (str): MySQL database to select before issuing the query.
        cols (str or list): Default '*', used when only a subset of the data should be returned.
        index_col (str): Default 'EIN', specifies the column to use as the index.
        match_dtypes (DataFrame): Default None, if a dataframe is passed it will extract the schema from
                                  it and apply it to the data specified in fname; otherwise it uses the
                                  MySQL defaults.
        force_sql_cols (bool): Default False, If True it will force the columns specified in the cols argument
                               to become a part of the SQL statement; otherwise it downloads * in the SELECT
                               statement and then subsets it later. This is used, for example, in
                               nteedocAllEINS because the full file is 1.5 gigabytes but only 1/3rd of that is
                               needed.
        RETURNS
        DataFrame
        RAISES
        Exception: when there is no SQL connection and no local csv copy of fname exists.
        """
        file_path = os.path.join(self.main.path, self.nccs_download_folder)
        existing_downloads = os.listdir(file_path)
        existing_downloads = [f for f in existing_downloads if f.endswith('.csv')]
        # Source priority 1: the in-memory cache populated by previous calls.
        if fname in self.sql_cache:
            self.main.logger.info('File already cached; trying version in memory.')
            if isinstance(cols, list):
                try:
                    return self.sql_cache[fname][cols]
                except KeyError:
                    self.main.logger.info('   Specified columns not in memory.')
                    pass #if the dataframe is cached already but the desired cols are missing, continue with sql loading
            else:
                return self.sql_cache[fname]
        # Source priority 2: a csv previously written to downloads/nccs.
        if fname+'.csv' in existing_downloads:
            self.main.logger.info('File found in NCCS downloads; using already-downloaded version.')
            if match_dtypes is not None:
                # Reuse the reference frame's schema, but EIN must stay a string
                # (leading zeros are significant in EINs).
                dtype = match_dtypes.dtypes.to_dict()
                dtype['EIN'] = 'str'
            else:
                dtype = 'str'
            df = pd.read_csv(os.path.join(file_path, fname+'.csv'), dtype=dtype, low_memory=False, encoding='utf-8')
            if index_col is not None: df.set_index(index_col, inplace=True)
            if match_dtypes is None:
                # Everything was read as str; re-coerce the known numeric columns.
                num_cols = [c for c in self.numeric_columns if c in df]
                for col in num_cols:
                    df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0) #recast the str columns to float64 or int64
                str_cols = df.select_dtypes(include=[np.object_]).columns.values #fill string NA columns with empty strings
                df.loc[:, str_cols] = df.loc[:, str_cols].fillna('')
        # Source priority 3: live query against the MySQL server.
        elif self.sql_connection is not None:
            con = self.sql_connection
            con.select_db(dbase)
            if force_sql_cols:
                sql_cols = ', '.join(cols)
            else:
                sql_cols = '*'
            df = pd.read_sql('SELECT {} FROM {}'.format(sql_cols, fname), con=con, index_col=index_col)
            df.columns = [c.upper() for c in df.columns.values]
            if match_dtypes is not None:
                self.main.logger.info('   standardizing dtypes for {}...'.format(fname))
                def _dtype_matcher(c):
                    # Coerce a single column (Series) to the dtype of the same-named
                    # column in match_dtypes; anything unexpected becomes str.
                    if c.name in match_dtypes.columns:
                        desired_type = match_dtypes[c.name].dtype.type
                        if desired_type is np.object_:
                            return c.astype(str)
                        elif desired_type in [np.float64, np.int64, np.float32, np.int32]:
                            return pd.to_numeric(c, errors='coerce').fillna(0)
                        else:
                            return c.astype(str) #assume strings for anything else (e.g. dates)
                            #raise Exception('Unknown dtype: {}, {}'.format(c.name, desired_type))
                    else:
                        return c.astype(str)
                df = df.apply(_dtype_matcher) #this is not very efficient, but I haven't found a better way to make sure all dtypes match from SQL
            # Persist the download so future runs can skip the SQL round-trip.
            df.to_csv(os.path.join(file_path, fname+'.csv'), index=df.index.name is not None)
        else:
            raise Exception('No active connection to NCCS MySQL database, and file not found in downloads/nccs folder: {}'.format(fname))
        self.sql_cache[fname] = df #save all dataframes loaded from sql in case they are needed later, because sql load times are slow
        if cols == '*':
            return df
        else:
            # EIN is the index, so it is excluded from the column subset.
            return df.loc[:, [c.upper() for c in cols if c.upper() != 'EIN']]
def download_epostcard(self, usecols=[0, 1], names=['EIN', 'EPOSTCARD'], date_col='EPOSTCARD'):
"""
Method for downloading the epostcard (990N) data from the IRS.
ARGUMENTS
usecols (list) : Default [0, 1], this data comes without headers, so the subset needed is given as
indexes.
names (list) : Default ['EIN', 'EPOSTCARD'], provides the header names. Must be the same dimension
as usecols.
date_col (str) : Default 'EPOSTCARD', specifies the column to be converted to date dtype.
RETURNS
DataFrame
"""
url = self.urls['epostcard']
delim = self.epostcard_delim
#a df of 'EIN', 'YEAR' from the epostcard records
df = pd.read_csv(self.download_file(url, force=True),
skip_blank_lines=True,
sep=delim,
usecols=usecols,
names=names,
dtype='str')
df.set_index('EIN', inplace=True)
df = df[df[date_col] != ''] #drop null dates
assert(df.index.is_unique), 'Expected unique EINs in epostcard data.'
return df
def download_bmf(self):
"""
Accesses the stored URLs for the raw BMF files from the IRS, then passes the necessary information
into the download_file method.
ARGUMENTS
None
RETURNS
DataFrame
"""
bmf_data = {}
delim = self.bmf_delim
for region in self.urls['BMF'].keys():
url = self.urls['BMF'][region]
bmf_data[region] = pd.read_csv(self.download_file(url), sep=delim, dtype='str')
df = pd.concat(bmf_data).set_index('EIN')
assert(df.index.is_unique), 'Expected unique EINs in BMF data.'
return df
def download_file(self, url, force=False):
"""
Method for downloading the specified URL, then unzipping it if necessary. All newly-downloaded
files are set to read-only.
ARGUMENTS
url (str) : Any valid URL
force (bool) : Default False, when True it will ignore existing files in the "downloads/IRS" folder,
when False it will only download a new version if the file does not already exist.
RETURNS
str : Location on local file system of the downloaded (or pre-existing) file.
"""
main = self.main
output_path = os.path.join(main.path, self.irs_download_folder)
fname = url.split('/')[-1] #extracts the file name from the end of the url
output_file = os.path.join(output_path, fname) #full location of file to write to
if main.force_new_download or force or not os.path.exists(output_file):
r = requests.get(url, headers=self.headers)
#this catches invalid URLs entered into the url text files: the IRS website returns a
#page saying "404 error code" but since that page is a valid page, it returns an actual
#success code of 200. Simply searching for 'Page Not Found' in the body is very slow
#when it is an actual download link with a large file, so it first checks the headers
#to make sure it's not ['Content-Type'] = 'application/zip'
if 'text/html' in r.headers['Content-Type'] and 'Page Not Found' in r.text:
raise Exception('Warning: the url {} appears to be invalid.')
with open(output_file, 'wb') as ofile:
ofile.write(r.content)
os.chmod(output_file, S_IREAD|S_IRGRP|S_IROTH) #sets the download to read-only
main.logger.info('File {} downloaded.'.format(fname))
if fname.endswith('.zip'):
zip_ref = zipfile.ZipFile(output_file, 'r')
zip_ref.extractall(output_path+os.sep) #unzips into the download path
#looks at the list of unizpped items, warns if there is more than 1
unzipped_files = zip_ref.namelist()
zip_ref.close() #finished with the zip object
if len(unzipped_files) != 1:
main.logger.info('WARNING: More or less than one file in {}; system may not be using the right one as data.'.format(fname))
#sets the unzipped files to read-only
for nfile in unzipped_files:
output_file = os.path.join(output_path, nfile)
os.chmod(output_file, S_IREAD|S_IRGRP|S_IROTH)
main.logger.info('File {} extracted from zip.'.format(nfile))
#returns the contents of a zip file as the output file
return output_file
else:
#returns the output file if it's not zipped
return output_file
else:
main.logger.info('Using existing contents of {} in downloads.'.format(fname))
return output_file
| 43.41133 | 146 | 0.583603 | 17,374 | 0.985759 | 0 | 0 | 0 | 0 | 0 | 0 | 8,630 | 0.489645 |
e25d9a100686e9143d42ae75160f3586fc25f217 | 2,737 | py | Python | functions/finalize-lambda/finalize.py | aws-samples/aws-elemental-mediaconvert-visual-narrations | 5820b259b69ea6d65da533306fa6ba04e38804b7 | [
"MIT-0"
] | 4 | 2021-12-18T17:20:04.000Z | 2021-12-29T05:07:20.000Z | functions/finalize-lambda/finalize.py | giusedroid/aws-elemental-mediaconvert-visual-narrations | 5820b259b69ea6d65da533306fa6ba04e38804b7 | [
"MIT-0"
] | 1 | 2022-02-15T02:51:38.000Z | 2022-02-15T02:51:38.000Z | functions/finalize-lambda/finalize.py | giusedroid/aws-elemental-mediaconvert-visual-narrations | 5820b259b69ea6d65da533306fa6ba04e38804b7 | [
"MIT-0"
] | 1 | 2021-12-20T16:17:58.000Z | 2021-12-20T16:17:58.000Z | import boto3
import os
import json
from botocore.exceptions import ClientError
# Required configuration: the DynamoDB table name must come from the
# POLLY_METADATA_STORE environment variable. Failing fast at import time means
# a misconfigured Lambda aborts before handling any events.
try:
    POLLY_METADATA_STORE = os.environ['POLLY_METADATA_STORE']
except KeyError as e:
    print(f"Missing env variable: {e}")
    # NOTE(review): exit() is the site-module builtin; sys.exit() would be the
    # more robust choice here — confirm before changing.
    exit(1)
# Module-level AWS clients are created once per Lambda container and reused
# across invocations.
dynamo = boto3.resource("dynamodb")
polly_metadata_store = dynamo.Table(POLLY_METADATA_STORE)
def create_media_object(item):
    """Build a plain dict describing the media file in one S3 event record.

    Key layouts handled (the second path segment is the media type):
      output/full/hls/<asset-id>/template<asset-id>.m3u8
      output/preview/<asset-id>.mp4
    The derived media_id is the metadata object name '<asset-id>.json', or
    None for an unrecognized media type.
    """
    s3_info = item['s3']
    key = s3_info['object']['key']
    bucket = s3_info['bucket']['name']
    segments = key.split('/')
    kind = segments[1]
    if kind == "preview":
        asset_json = segments[2].replace(".mp4", ".json")
    elif kind == "full":
        asset_json = segments[3] + ".json"
    else:
        asset_json = None
    return {
        "media_id": asset_json,
        "media_type": kind,
        "media_key": key,
        "media_bucket": bucket
    }
def ddb_value(item):
    """Wrap a value in the legacy DynamoDB AttributeUpdates {'Value': ...} form."""
    return dict(Value=item)
def is_successful_ops(media_object):
    """Report whether the metadata store was updated for this media object."""
    flag = media_object["metadata_updated"]
    return flag
def is_failed_ops(media_object):
    """Report whether the metadata-store update failed for this media object."""
    return not media_object["metadata_updated"]
def update_metadata(media_object):
    """Write the media file's S3 URI into the asset's DynamoDB metadata row.

    Sets media_object['metadata_updated'] to True only when the DynamoDB
    update succeeds; unrecognized media types and ClientError failures leave
    it False. Returns the (mutated) media_object either way.
    """
    media_object['metadata_updated'] = False
    s3_uri = f"s3://{media_object['media_bucket']}/{media_object['media_key']}"
    kind = media_object['media_type']
    pending_updates = {}
    if kind == "preview":
        pending_updates['PreviewVideoFile'] = ddb_value(s3_uri)
    if kind == "full":
        pending_updates['FullVideoStream'] = ddb_value(s3_uri)
    print(pending_updates)
    if not pending_updates:
        # Unknown media type: nothing to write, leave metadata_updated False.
        return media_object
    try:
        polly_metadata_store.update_item(
            Key={"AssetId": media_object["media_id"]},
            AttributeUpdates=pending_updates,
        )
    except ClientError as e:
        print(e)
    else:
        media_object["metadata_updated"] = True
    return media_object
def handler(event, context):
    """Lambda entry point for S3 object-created notifications.

    For every record in the event, derives a media descriptor and writes its
    S3 URI into the DynamoDB metadata store, then reports which media objects
    succeeded and which failed.

    ARGUMENTS
        event (dict) : S3 notification payload with a 'Records' list.
        context      : Lambda context object (unused).
    RETURNS
        dict : API-Gateway-style response; the body lists the successful and
               failed media objects.
    """
    media_objects = [create_media_object(item) for item in event['Records']]
    print(media_objects)
    updates = [update_metadata(media_object) for media_object in media_objects]
    print(updates)
    # BUG FIX: these previously built parallel lists of booleans
    # ([True, False, ...]) over ALL updates, so the response body said nothing
    # about WHICH operations succeeded or failed. Filter instead.
    successful_ops = [update for update in updates if is_successful_ops(update)]
    failed_ops = [update for update in updates if is_failed_ops(update)]
    return {
        "statusCode": 200,
        "body": json.dumps({
            "SuccessfulOps": successful_ops,
            "FailedOps": failed_ops
        }, default=str)
    }
| 28.510417 | 110 | 0.648886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 622 | 0.227256 |
e25e27a47f5510c4ab4d334a3f28637aba126e3c | 888 | py | Python | src/Phidget22/Encoding.py | MikaSoftware/mikathing | 60621b01e2fcbeefe2181a58fb0f6104115d1ae6 | [
"BSD-3-Clause"
] | 1 | 2021-11-23T09:02:46.000Z | 2021-11-23T09:02:46.000Z | Phidget22/Encoding.py | keysie/phidget-python-interface | ebf392fe6d8d25bd5c178edbf095cf29e0daa4af | [
"MIT"
] | null | null | null | Phidget22/Encoding.py | keysie/phidget-python-interface | ebf392fe6d8d25bd5c178edbf095cf29e0daa4af | [
"MIT"
] | 1 | 2020-02-26T12:43:12.000Z | 2020-02-26T12:43:12.000Z | import sys
import ctypes
class Encoding:
    """IR signal encoding constants for Phidget IR devices, with a helper that
    maps a numeric value back to its constant name."""
    # Unknown - the default value
    IR_ENCODING_UNKNOWN = 1
    # Space encoding, or Pulse Distance Modulation
    IR_ENCODING_SPACE = 2
    # Pulse encoding, or Pulse Width Modulation
    IR_ENCODING_PULSE = 3
    # Bi-Phase, or Manchester encoding
    IR_ENCODING_BIPHASE = 4
    # RC5 - a type of Bi-Phase encoding
    IR_ENCODING_RC5 = 5
    # RC6 - a type of Bi-Phase encoding
    IR_ENCODING_RC6 = 6

    @classmethod
    def getName(self, val):
        """Return the constant name for *val*, or a placeholder string when
        the value does not match any known encoding."""
        known = (
            (self.IR_ENCODING_UNKNOWN, "IR_ENCODING_UNKNOWN"),
            (self.IR_ENCODING_SPACE, "IR_ENCODING_SPACE"),
            (self.IR_ENCODING_PULSE, "IR_ENCODING_PULSE"),
            (self.IR_ENCODING_BIPHASE, "IR_ENCODING_BIPHASE"),
            (self.IR_ENCODING_RC5, "IR_ENCODING_RC5"),
            (self.IR_ENCODING_RC6, "IR_ENCODING_RC6"),
        )
        for code, label in known:
            if val == code:
                return label
        return "<invalid enumeration value>"
| 27.75 | 47 | 0.748874 | 862 | 0.970721 | 0 | 0 | 472 | 0.531532 | 0 | 0 | 365 | 0.411036 |
e25eae1d244a1b206707499e17ed96145b02e14d | 13,525 | py | Python | PedSimulation/gui/panel.py | HDL951236874/PedSimulation | 153882289477c93306c38f9e3a41f9cfeb53f261 | [
"Apache-2.0"
] | 1 | 2018-08-15T17:42:58.000Z | 2018-08-15T17:42:58.000Z | PedSimulation/gui/panel.py | HDL951236874/PedSimulation | 153882289477c93306c38f9e3a41f9cfeb53f261 | [
"Apache-2.0"
] | null | null | null | PedSimulation/gui/panel.py | HDL951236874/PedSimulation | 153882289477c93306c38f9e3a41f9cfeb53f261 | [
"Apache-2.0"
] | null | null | null | from PedSimulation.scene import *
from PedSimulation.gui.ui.mainwindow_main import Ui_MainWindow_Main
from PedSimulation.gui.ui.mainwindow_setting import Ui_MainWindow_Setting, Dragebutton
from PedSimulation.gui.drawer.drawer_register import SceneDrawerRegister
from PyQt5 import QtCore
from PyQt5.QtWidgets import QDesktopWidget
from PedSimulation.generator import *
from PedSimulation.example.model.sfmodel import SFModel
from PyQt5.QtWidgets import QWidget
from PyQt5 import QtGui
from PedSimulation.example.strategy import NearestGoalStrategy
from PedSimulation.example.listener import PedestrianEscapeListener
from PyQt5.QtCore import Qt
class MainWindow(Ui_MainWindow_Main):
"""
Extends UI class with window and drawer methods
"""
def __init__(self, title, fps=16):
super().__init__()
self._translate = QtCore.QCoreApplication.translate
self.setupUi(self)
self.setWindowTitle(title)
self.center()
self.retranslateUi(self)
# init paint area and assigned to scroll area
self.area = PaintArea(self, fps)
self.scrollArea.setWidget(self.area)
self.settingwindow = SettingWindow("Setting", 16, self)
self.scenePool = self.settingwindow.scenePool
self.scene = None
self.enable = False
self.pause_flag = False
self.pushButton_11.clicked.connect(self.hide)
self.pushButton_11.clicked.connect(self.settingwindow.handle_click)
self.pushButton_13.clicked.connect(self.start)
self.pushButton_12.clicked.connect(self.pause)
self.pushButton_12.setEnabled(False)
self.pushButton_14.setEnabled(False)
self.horizontalSlider.valueChanged.connect(self.velocity_change)
self.comboBox.currentIndexChanged.connect(self.scene_select)
def center(self):
"""
move window to screen center
"""
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def start(self):
if self.enable == False:
self.scene.start()
self.pushButton_13.setText(self._translate("MainWindow", "Terminate"))
self.pushButton_12.setEnabled(True)
self.pushButton_14.setEnabled(True)
self.enable = not self.enable
return
if self.enable == True:
self.scene.stop()
self.pushButton_13.setText(self._translate("MainWindow", "Run"))
self.pushButton_12.setEnabled(False)
self.pushButton_12.setText(self._translate("MainWindow", "Pause"))
self.pushButton_14.setEnabled(False)
self.enable = not self.enable
def pause(self):
if self.pause_flag == False:
self.pushButton_12.setText(self._translate("MainWindow", "Resume"))
self.pause_flag = not self.pause_flag
return
if self.pause_flag == True:
self.pushButton_12.setText(self._translate("MainWindow", "Pause"))
self.pause_flag = not self.pause_flag
def velocity_change(self):
self.scene.model.time_per_step = self.horizontalSlider.value() * 0.0001919 + 0.001
def handle_click(self):
self.scene_select()
if not self.isVisible():
self.show()
def default_generate(self):
pass
def scene_select(self):
if self.scene != None and self.scene.is_alive():
self.scene.stop()
self.pushButton_13.setText(self._translate("MainWindow", "Run"))
self.pushButton_12.setEnabled(False)
self.pushButton_12.setText(self._translate("MainWindow", "Pause"))
self.pushButton_14.setEnabled(False)
self.enable = False
if self.scenePool != None and self.comboBox.currentIndex() != -1:
self.scene = self.scenePool[self.comboBox.currentIndex()]
class SettingWindow(Ui_MainWindow_Setting):
    """
    Scene-configuration window: creates scenes, generates entities (single,
    random, or grid placement) from the form fields, and supports dragging
    entity buttons onto the paint area. Owns the scene pool shared with the
    main window.
    """
    def __init__(self, title, fps, mainwindow):
        super(SettingWindow, self).__init__()
        # Qt translate helper (kept for parity with MainWindow).
        self._translate = QtCore.QCoreApplication.translate
        self.setupUi(self)
        self.setWindowTitle(title)
        self.center()
        self.retranslateUi(self)
        self.scene = None
        self.generator = Generator(self.scene)
        self.mainwindow = mainwindow
        # All created scenes; indices mirror the combo boxes in both windows.
        self.scenePool = []
        # Label of the entity button currently being dragged, set by Dragebutton.
        self.drag_entity = ''
        # init paint area and assigned to scroll area
        self.area = PaintArea(self, fps)
        self.scrollArea.setWidget(self.area)
        self.setAcceptDrops(True)
        # Draggable source buttons, positioned below the paint area.
        self.dragbutton = Dragebutton("Agent", self)
        self.dragbutton.setGeometry(820, 520, 81, 30)
        self.dragbutton_2 = Dragebutton("Wall", self)
        self.dragbutton_2.setGeometry(910, 520, 81, 30)
        self.dragbutton_3 = Dragebutton("Generate Region", self)
        self.dragbutton_3.setGeometry(1000, 520, 141, 30)
        self.dragbutton_4 = Dragebutton("Safe Region", self)
        self.dragbutton_4.setGeometry(1150, 520, 151, 30)
        # Button wiring (widget names come from the generated UI).
        self.pushButton_11.clicked.connect(self.hide)
        self.pushButton_11.clicked.connect(self.mainwindow.handle_click)
        self.pushButton_26.clicked.connect(self.common_generate)
        self.pushButton_25.clicked.connect(self.random_generate)
        self.pushButton_24.clicked.connect(self.grid_generate)
        self.pushButton_19.clicked.connect(self.remove_all_entity)
        self.pushButton_20.clicked.connect(self.create_scene)
        self.pushButton_21.clicked.connect(self.cancel)
        self.comboBox.currentIndexChanged.connect(self.scene_select)
        # self.comboBox_2.
    def create_scene(self):
        """
        Create a new Scene (a Thread) configured with the model chosen in
        comboBox_2, append it to the pool, and register it in both windows'
        scene combo boxes.
        """
        scene = Scene()
        # NOTE(review): SFModel is attached via `scene.model = ...` but CSVModel
        # via `scene.add_model(...)` — confirm both paths are intentional.
        if self.comboBox_2.currentText() == "SFModel":
            scene.model = SFModel(0.004)
            scene.add_listener(PedestrianEscapeListener())
            scene.add_listener(NearestGoalStrategy())
        if self.comboBox_2.currentText() == "CsvModel":
            # NOTE(review): hard-coded absolute path to a developer machine —
            # this will only work on that machine; should be configurable.
            scene.add_model(CSVModel(0.004,"/home/hdl/PycharmProjects/PedSimulation/PedSimulation/example/resources/ffffffffffff.csv", scene))
        self.scenePool.append(scene)
        self.comboBox.addItem(scene.getName())
        self.mainwindow.comboBox.addItem(scene.getName())
    def dfault_generator(self):
        # NOTE(review): name typo ('dfault') kept because it is part of the
        # public interface; the file handle is never closed and the row content
        # is unused — looks like work-in-progress.
        txt_r = open("PedSimulation/example/resources/original.txt","r")
        for row in txt_r:
            self.common_generate()
        # pass
    def scene_select(self):
        """Point self.scene at the scene chosen in the combo box."""
        # print(self.scene)
        if self.scene !=None:
            pass
            # print(self.scene.entities)
        self.scene = self.scenePool[self.comboBox.currentIndex()]
    def cancel(self):
        # Undo the most recent generation batch by removing those entities.
        for entity in self.generator.last_time_generate:
            self.scene.remove_entity(entity)
    def dragEnterEvent(self, e):
        # Accept every drag so dropEvent gets called for the entity buttons.
        e.accept()
    def dropEvent(self, e):
        """Create the dragged entity type at the drop position, using the
        shape/size fields from the 'common' section of the form."""
        radius = float(self.lineEdit_49.text())
        length = float(self.lineEdit_50.text())
        width = float(self.lineEdit_51.text())
        shape = self.comboBox_7.currentText()
        a = float(self.lineEdit_64.text())
        b = float(self.lineEdit_65.text())
        angle = float(self.lineEdit_66.text())
        position = e.pos()
        # The -20 offsets compensate for the cursor grabbing the button center.
        if self.drag_entity == "Agent":
            self.generator.common_generate(self.scene, "Ped", shape, position.x()-20, position.y()-20, radius, length, width,
                                           a, b,
                                           angle)
        if self.drag_entity == "Wall":
            self.generator.common_generate(self.scene, "Wall", shape, position.x()-20, position.y()-20, radius, length, width,
                                           a, b,
                                           angle)
        if self.drag_entity == "Generate Region":
            # Not implemented yet.
            pass
        if self.drag_entity == "Safe Region":
            self.generator.common_generate(self.scene, "Safe-Region", shape, position.x()-20, position.y()-20, radius, length,
                                           width,
                                           a, b,
                                           angle)
        e.setDropAction(Qt.MoveAction)
        e.accept()
    def center(self):
        """
        move window to screen center
        """
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
    def handle_click(self):
        """Show this window (called when the main window hands control over)."""
        if not self.isVisible():
            self.show()
    def grid_generate(self):
        """
        Generate `number` entities laid out on a grid inside the Box2D region
        defined by the grid-section form fields.
        """
        x = float(self.lineEdit_54.text())
        y = float(self.lineEdit_55.text())
        l = float(self.lineEdit_36.text())
        w = float(self.lineEdit_37.text())
        entity = self.comboBox_5.currentText()
        shape = self.comboBox_3.currentText()
        radius = float(self.lineEdit_43.text())
        length = float(self.lineEdit_44.text())
        width = float(self.lineEdit_45.text())
        number = int(self.lineEdit_56.text())
        intervel = int(self.lineEdit_52.text())
        a = float(self.lineEdit_38.text())
        b = float(self.lineEdit_59.text())
        angle = float(self.lineEdit_60.text())
        self.generator.grid_generate(self.scene, Box2D(Point2D(x, y), l, w), entity, shape, radius, length, width,
                                     number, a, b, angle, intervel)
    def random_generate(self):
        """
        Generate `number` entities at random positions inside the Box2D region
        defined by the random-section form fields.
        """
        x = float(self.lineEdit_39.text())
        y = float(self.lineEdit_41.text())
        l = float(self.lineEdit_40.text())
        w = float(self.lineEdit_42.text())
        entity = self.comboBox_6.currentText()
        shape = self.comboBox_4.currentText()
        radius = float(self.lineEdit_46.text())
        length = float(self.lineEdit_47.text())
        width = float(self.lineEdit_48.text())
        number = int(self.lineEdit_53.text())
        a = float(self.lineEdit_61.text())
        b = float(self.lineEdit_62.text())
        angle = float(self.lineEdit_63.text())
        self.generator.random_generate(self.scene, Box2D(Point2D(x, y), l, w), entity, shape, radius, length,
                                       width, number, a, b, angle)
    def common_generate(self):
        """
        Generate a single entity at the explicit center coordinates taken from
        the common-section form fields.
        """
        center_x = float(self.lineEdit_57.text())
        center_y = float(self.lineEdit_58.text())
        radius = float(self.lineEdit_49.text())
        length = float(self.lineEdit_50.text())
        width = float(self.lineEdit_51.text())
        entity = self.comboBox_9.currentText()
        shape = self.comboBox_7.currentText()
        a = float(self.lineEdit_64.text())
        b = float(self.lineEdit_65.text())
        angle = float(self.lineEdit_66.text())
        self.generator.common_generate(self.scene, entity, shape, center_x, center_y, radius, length, width, a, b,
                                       angle)
    def remove_all_entity(self):
        """
        Replace the main window scene's entity pool with a fresh, empty one,
        removing every entity at once.
        """
        self.mainwindow.scene._entities = EntityPool()
class PaintArea(QWidget):
    """
    A paint area widget that repaints the owning window's scene on a fixed
    timer, delegating the actual drawing to the scene's registered drawer.
    """
    def __init__(self, window, fps):
        super().__init__()
        # The owning window; its .scene attribute is what gets painted.
        self.window = window
        self.painter = QPainter()
        # Repaint timer: 'fps' here is the timer interval in milliseconds
        # (QTimer.start takes msec), not frames per second.
        self.checkThreadTimer = QtCore.QTimer(self)
        self.checkThreadTimer.start(fps)
        self.checkThreadTimer.timeout.connect(self.update)
        # View transform state; only zoom is applied in paintEvent so far.
        self.zoom = 1.0
        self.offset_x = 0.0
        self.offset_y = 0.0
        # Last mouse position; -1 means "no previous position recorded".
        self.last_x = -1
        self.last_y = -1
    @property
    def scene(self):
        # Always read the scene through the owning window so scene switches
        # are picked up automatically.
        return self.window.scene
    def paintEvent(self, e):
        """ Define that window will call scene's drawer to draw themselves (and it then will call entities
        & then shapes to draw)
        :param e: painter event, not yet used
        :return:
        """
        self.painter.begin(self)
        self.painter.scale(self.zoom, self.zoom)
        if self.scene is not None:
            # Lazily (re)register a drawer whenever the scene has none or its
            # drawer is bound to a different paint device.
            if self.scene.drawer is None or self.scene.drawer.device is not self.painter:
                self.register_drawer(SceneDrawerRegister(self.painter, mode="default"))  # fixme remove hard code
            self.scene.drawer.draw(self.scene)
        self.painter.end()
    def wheelEvent(self, event: QtGui.QWheelEvent):
        # Zooming via mouse wheel not implemented yet.
        pass
    def mousePressEvent(self, event: QtGui.QMouseEvent):
        # Panning/selection not implemented yet.
        pass
    def mouseReleaseEvent(self, a0: QtGui.QMouseEvent):
        # Panning/selection not implemented yet.
        pass
    def register_drawer(self, drawer_register):
        """ Give value to attribute 'drawer_register'.
        And call add_drawer_support() to register entities and shapes with drawer in self.scene.
        Shall be called after assigned a listening scene.
        Set drawer_register's device to self.painter
        :param drawer_register: a scene_drawer_register
        """
        self.drawer_register = drawer_register
        if self.drawer_register.device is not self.painter:
            self.drawer_register.device = self.painter
        self.drawer_register.add_drawer_support(self.scene)
| 38.976945 | 142 | 0.624473 | 12,873 | 0.951793 | 0 | 0 | 63 | 0.004658 | 0 | 0 | 1,910 | 0.14122 |
e2609871df4431162077f02f9f954e5270a79a11 | 1,608 | py | Python | emLam/corpus/component.py | DavidNemeskey/emLam | 89359e7eee5b7b9c596dec8ab6654591d4039e3e | [
"MIT"
] | 2 | 2018-03-31T10:00:11.000Z | 2018-09-15T19:38:19.000Z | emLam/corpus/component.py | DavidNemeskey/emLam | 89359e7eee5b7b9c596dec8ab6654591d4039e3e | [
"MIT"
] | 16 | 2017-02-28T13:58:28.000Z | 2018-03-14T11:42:01.000Z | emLam/corpus/component.py | dlt-rilmta/emLam | 2b7274dcda4080445698e10b34a3db2e2eed5112 | [
"MIT"
] | 1 | 2017-01-30T15:06:37.000Z | 2017-01-30T15:06:37.000Z | #!/usr/bin/env python3
"""An instantiable component. See the docstring for the class."""
from __future__ import absolute_import, division, print_function
from future.utils import with_metaclass
import logging
import inspect
class NamedClass(type):
    """
    Metaclass exposing read-only 'name' and 'description' properties on the
    class object itself, backed by the optional NAME / DESCRIPTION class
    attributes (None when absent). See
    http://stackoverflow.com/questions/3203286/how-to-create-a-read-only-class-property-in-python
    """
    @property
    def name(cls):
        try:
            return cls.NAME
        except AttributeError:
            return None

    @property
    def description(cls):
        try:
            return cls.DESCRIPTION
        except AttributeError:
            return None
class Component(with_metaclass(NamedClass, object)):
    """
    Base class for corpus and preprocessor objects. All corpus and preprocessor
    classes must be subclasses of Component. Also, multiple inheritance is
    discouraged, as it may break some parts of the code.
    """
    def __init__(self):
        # One logger per defining module; inherit the parent logger's level.
        # NOTE(review): inspect.getmodule() can return None for dynamically
        # created classes — confirm all components are defined in real modules.
        self.logger = logging.getLogger(inspect.getmodule(self).__name__)
        self.logger.setLevel(self.logger.parent.level)

    @classmethod
    def instantiate(cls, process_id=0, **kwargs):
        """
        Instantiates the class from keyword arguments. The process_id (not a
        real pid, but an ordinal starting from 0) is there so that components
        that use external resources can "plan" accordingly.
        """
        # BUG FIX: inspect.getargspec() was deprecated and removed in Python
        # 3.11; getfullargspec() exposes the same positional list via .args.
        argspec = inspect.getfullargspec(cls.__init__).args
        # Keep only the kwargs that __init__ actually accepts (skip 'self').
        component_args = {k: kwargs[k] for k in argspec[1:] if k in kwargs}
        logging.getLogger(cls.__module__).debug(
            'Instantiating with parameters {}'.format(component_args))
        return cls(**component_args)
e261e4b60cdb7a70713d7103ceda512e5e2a1812 | 2,288 | py | Python | kws/bin/export_onnx.py | ishine/wenet-kws | 7d142b952850416c4244d235a42b04fa4fc56952 | [
"Apache-2.0"
] | 81 | 2021-05-22T17:21:05.000Z | 2021-11-28T06:56:01.000Z | kws/bin/export_onnx.py | ishine/wenet-kws | 7d142b952850416c4244d235a42b04fa4fc56952 | [
"Apache-2.0"
] | 16 | 2021-11-30T08:56:15.000Z | 2022-03-23T03:17:28.000Z | kws/bin/export_onnx.py | ishine/wenet-kws | 7d142b952850416c4244d235a42b04fa4fc56952 | [
"Apache-2.0"
] | 31 | 2021-12-06T04:52:32.000Z | 2022-03-22T08:28:13.000Z | # Copyright (c) 2021 Binbin Zhang(binbzha@qq.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import yaml
import onnxruntime as ort
def get_args():
    """Parse command-line arguments for the ONNX export script."""
    parser = argparse.ArgumentParser(description='export to onnx model')
    required_opts = (
        ('--config', 'config file'),
        ('--jit_model', 'pytorch jit script model'),
        ('--onnx_model', 'output onnx model'),
    )
    for flag, help_text in required_opts:
        parser.add_argument(flag, required=True, help=help_text)
    return parser.parse_args()
def main():
    """Export the TorchScript model to ONNX and verify both runtimes agree on
    a random probe input."""
    args = get_args()
    with open(args.config, 'r') as fin:
        configs = yaml.load(fin, Loader=yaml.FullLoader)
    input_dim = configs['model']['input_dim']
    script_model = torch.jit.load(args.jit_model)
    print(script_model)
    # probe input: (batch, time, feature_dim); the time axis is exported as
    # the dynamic dimension 'T'
    probe = torch.randn(1, 100, input_dim, dtype=torch.float)
    torch.onnx.export(script_model,
                      probe,
                      args.onnx_model,
                      input_names=['input'],
                      output_names=['output'],
                      dynamic_axes={'input': {
                          1: 'T'
                      }})
    reference = script_model(probe)
    session = ort.InferenceSession(args.onnx_model)
    produced = session.run(None, {'input': probe.numpy()})
    if torch.allclose(reference, torch.tensor(produced[0])):
        print('Export to onnx succeed!')
    else:
        print('''Export to onnx succeed, but pytorch/onnx have different
              outputs when given the same input, please check!!!''')
# Script entry point: parse CLI args, export to ONNX, and sanity-check outputs.
if __name__ == '__main__':
    main()
| 34.666667 | 74 | 0.628934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 958 | 0.418706 |
e2658afba23005465bd6d4798e3cbe737e1308a6 | 5,711 | py | Python | omtk/core/className.py | renaudll/omtk | a7740d53a5587529773594bfd7c37e553787028f | [
"MIT"
] | 20 | 2015-09-30T16:07:02.000Z | 2022-03-12T06:57:59.000Z | omtk/core/className.py | nilouco/omtk | a7740d53a5587529773594bfd7c37e553787028f | [
"MIT"
] | 23 | 2015-12-22T15:41:02.000Z | 2018-04-13T02:52:41.000Z | omtk/core/className.py | nilouco/omtk | a7740d53a5587529773594bfd7c37e553787028f | [
"MIT"
] | 13 | 2015-07-10T16:06:26.000Z | 2021-08-21T20:09:41.000Z | from maya import cmds
import copy
# TODO: Find a way to have different naming for different production.
# Maybe handle it in the rig directly?
class BaseName(object):
    """
    This class handle the naming of object.
    Store a name as a list of 'tokens'
    When resolved, the tokens are joinned using a 'separator' (normally an underscore)
    Also some specific properties exists:
    - Side: Generally L/R token
    - Prefix: Always the first token
    - Suffix: Always the last token

    You can resolve a BaseName instance from a string.
    >>> name = BaseName('l_eye_jnt')
    >>> name.resolve()
    'l_eye_jnt'

    You can build a BaseName instance manually.
    >>> name = BaseName(tokens=('eye',), suffix='jnt', side=BaseName.SIDE_L)
    >>> name.resolve()
    'l_eye_jnt'

    You can add tokens at any time.
    >>> name.add_tokens('upp')
    >>> name.resolve()
    'l_eye_upp_jnt'

    You can override a BaseName public properties.
    >>> name = BaseName()
    >>> name.tokens = ('eye',)
    >>> name.resolve()
    'eye'
    >>> name.suffix = 'jnt'
    >>> name.resolve()
    'eye_jnt'
    >>> name.side = name.SIDE_L
    >>> name.resolve()
    'l_eye_jnt'
    """
    separator = '_'

    type_anm = 'anm'
    type_anm_grp = 'anm_grp'
    type_jnt = 'jnt'
    type_rig = 'rig'
    type_rig_grp = 'data_grp'

    root_anm_name = 'anms'
    root_geo_name = 'geos'
    root_jnt_name = 'jnts'
    root_rig_name = 'data'
    root_backup_name = 'backup'

    layer_anm_name = 'layer_anm'
    layer_rig_name = 'layer_rig'
    layer_geo_name = 'layer_geo'

    SIDE_L = 'l'
    SIDE_R = 'r'

    def __init__(self, name=None, tokens=None, prefix=None, suffix=None, side=None):
        self.tokens = []
        self.prefix = None
        self.suffix = None
        self.side = None

        if name:
            # Parse the string; this fills self.tokens and self.side.
            self.build_from_string(name)

        # Apply manual overrides.
        # Bugfix: build_from_string() returns None, and its result used to be
        # assigned back over the `tokens` argument, silently discarding an
        # explicit `tokens=` when `name` was also given. Explicit arguments
        # now always take precedence, as the overrides comment intended.
        if tokens:
            self.tokens = tokens
        if prefix:
            self.prefix = prefix
        if suffix:
            self.suffix = suffix
        if side:
            self.side = side

    def copy(self):
        """
        Return a copy of the name object.
        """
        inst = self.__class__()
        inst.tokens = copy.copy(self.tokens)
        inst.prefix = self.prefix
        inst.suffix = self.suffix
        # Bugfix: the side was previously not copied, so the L/R flag was
        # silently lost on every copy().
        inst.side = self.side
        return inst

    def rebuild(self, name):
        """Parse `name` into a new instance, keeping the current prefix/suffix.

        The side is re-parsed from `name`, not carried over from self.
        """
        return self.__class__(name, prefix=self.prefix, suffix=self.suffix)

    def get_basename(self):
        """
        Each name have one single token that represent it's part.
        ex: L_LegUpp_Ik_Ctrl -> LegUpp
        By default it is the first non-side token in the name.
        return: The part name.
        """
        for token in self.tokens:
            if not self.get_side_from_token(token):
                return token

    def remove_extra_tokens(self):
        """
        Remove any tokens that is not the base token or a side token.
        :return:
        """
        basename = self.get_basename()
        found_base_token = False
        new_tokens = []
        for token in self.tokens:
            if self.get_side_from_token(token):
                new_tokens.append(token)
            elif not found_base_token and token == basename:
                # Bugfix: this flag was never set, so duplicated base tokens
                # were all kept instead of only the first occurrence.
                found_base_token = True
                new_tokens.append(token)

        self.tokens = new_tokens

    def build_from_string(self, name):
        """Reset tokens and side by parsing a separator-delimited string."""
        raw_tokens = self._get_tokens(name)
        self.tokens = []
        #self.prefix = None
        #self.suffix = None
        self.side = None

        self.add_tokens(*raw_tokens)

    def _get_tokens(self, val):
        """Split a raw string into tokens using the class separator."""
        return val.split(self.separator)

    def _join_tokens(self, tokens):
        """Join tokens back into a single string using the class separator."""
        return self.separator.join(tokens)

    def add_tokens(self, *args):
        """Append tokens; any token recognized as a side is stored in self.side."""
        for arg in args:
            for token in arg.split(self.separator):
                side = self.get_side_from_token(token)
                if side:
                    self.side = side
                else:
                    self.tokens.append(token)

    def add_suffix(self, suffix):
        """Append a token at the end of the token list."""
        self.tokens.append(suffix)

    def add_prefix(self, prefix):
        """Insert a token at the beginning of the token list."""
        self.tokens.insert(0, prefix)

    def get_unique_name(self, name):
        """Return `name`, suffixed with a number if it already exists in the
        Maya scene (requires maya.cmds to be importable)."""
        if cmds.objExists(name):
            i = 1
            while cmds.objExists(name + str(i)):
                i += 1
            return name + str(i)
        return name

    @classmethod
    def get_side_from_token(cls, token):
        """Return SIDE_L/SIDE_R if `token` is a side token (case-insensitive),
        otherwise None."""
        token_lower = token.lower()
        if token_lower == cls.SIDE_L.lower():
            return cls.SIDE_L
        if token_lower == cls.SIDE_R.lower():
            return cls.SIDE_R

    def get_tokens(self):
        """
        :return: All token without the side tokens.
        """
        return [token for token in self.tokens if not self.get_side_from_token(token)]

    def resolve(self, *args):
        """Join prefix, side, tokens, extra args and suffix into the final name."""
        tokens = []
        if self.prefix:
            tokens.append(self.prefix)
        if self.side:
            tokens.append(self.side)
        tokens.extend(self.tokens)
        tokens.extend(args)
        if self.suffix:
            tokens.append(self.suffix)
        name = self._join_tokens(tokens)

        # If we have name conflicts, we WILL want to crash.
        '''
        # Prevent maya from crashing by guarantying that the name is unique.
        if cmds.objExists(name):
            name_old = name
            name = self.get_unique_name(name)
            cmds.warning("Name {0} already exist, using {1} instead.".format(
                name_old, name
            ))
        '''

        return name

    def rename(self, obj, *args):
        """Resolve the name (with optional extra tokens) and rename `obj`."""
        name = self.resolve(*args)
        obj.rename(name)

    def __repr__(self):
        return self.resolve()
| 26.686916 | 86 | 0.57468 | 5,565 | 0.974435 | 0 | 0 | 241 | 0.042199 | 0 | 0 | 2,107 | 0.368937 |
e2662d51f3f0f8d10ecce13084f3868fbba37200 | 2,185 | py | Python | qcp/algorithms/abstract_algorithm.py | RiddhiYadav/https-github.com-Tiernan8r-quantum_computing_project | 904896171895faf3613e9b8d64165a35c36f519e | [
"Apache-2.0"
] | null | null | null | qcp/algorithms/abstract_algorithm.py | RiddhiYadav/https-github.com-Tiernan8r-quantum_computing_project | 904896171895faf3613e9b8d64165a35c36f519e | [
"Apache-2.0"
] | null | null | null | qcp/algorithms/abstract_algorithm.py | RiddhiYadav/https-github.com-Tiernan8r-quantum_computing_project | 904896171895faf3613e9b8d64165a35c36f519e | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Tiernan8r
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constructs the quantum register, circuits of composite gates, and runs the
simulation of Grover's Algorithm
"""
import abc
from typing import Tuple
from qcp.matrices import SPARSE, DefaultMatrix, Matrix
class GeneralAlgorithm(abc.ABC):
    """Common base for the quantum algorithms: owns the register size, the
    state vector and the circuit, and provides the run/measure workflow."""

    def __init__(self, size: int):
        assert size > 1, "need minimum of two qbits"
        self.size = size
        self.state = self.initial_state()
        self.circuit = self.construct_circuit()

    def initial_state(self) -> Matrix:
        """Create the initial column state vector for the register.

        returns:
            Matrix: the state vector
        """
        dimension = 2 ** self.size
        entries: SPARSE = {}
        for row in range(dimension):
            entries[row] = {}
        entries[0][0] = 1
        return DefaultMatrix(entries)

    def construct_circuit(self) -> Matrix:
        """Build the matrix representing the algorithm's circuit.

        Concrete algorithms override this; the base implementation does
        nothing, leaving ``self.circuit`` as None.

        returns:
            Matrix: matrix representing the circuit for the algorithm
        """
        pass

    def run(self) -> Matrix:
        """Apply the circuit to the current state and return the new state.

        returns:
            Matrix: column matrix representation of the final state
        """
        if self.circuit is None:
            return self.state
        self.state = self.circuit * self.state
        return self.state

    def measure(self) -> Tuple[int, float]:
        """'measure' the state by selecting a basis state weighted by its
        (amplitude ** 2).

        Not implemented in the base class.

        returns:
            Tuple[int, float]: the observed state and its probability
        """
        pass
| 28.75 | 74 | 0.639359 | 1,401 | 0.64119 | 0 | 0 | 0 | 0 | 0 | 0 | 1,401 | 0.64119 |
e2676954f761940a1207f34851f49d36c66d622a | 2,090 | py | Python | code/tests/core/test_train_utils.py | ashesh-0/MultiZoomGaze | 24494a1346d09e21e4b6d999a742b5d31bbbeff0 | [
"MIT"
] | 1 | 2022-01-24T04:53:00.000Z | 2022-01-24T04:53:00.000Z | code/tests/core/test_train_utils.py | ashesh-0/MultiZoomGaze | 24494a1346d09e21e4b6d999a742b5d31bbbeff0 | [
"MIT"
] | null | null | null | code/tests/core/test_train_utils.py | ashesh-0/MultiZoomGaze | 24494a1346d09e21e4b6d999a742b5d31bbbeff0 | [
"MIT"
] | null | null | null | import math
import numpy as np
import torch
from mock import patch
from core.train_utils import compute_angular_error, compute_angular_error_xyz_arr, spherical2cartesial
def test_spherical2cartesial():
    """Check spherical->cartesian conversion on a handful of known angles."""
    angles = torch.Tensor([
        [0, 0],
        [math.pi / 2, 0],
        [-math.pi / 2, 0],
        [0, math.pi / 2],
        [math.pi / 2, math.pi / 2],
    ])
    expected = np.array([
        [0, 0, -1],
        [1, 0, 0],
        [-1, 0, 0],
        [0, 1, 0],
        [0, 1, 0],
    ])
    cartesian = spherical2cartesial(angles)
    assert isinstance(cartesian, torch.Tensor)
    assert cartesian.shape[0] == angles.shape[0]
    assert cartesian.shape[1] == 3
    assert np.linalg.norm(expected - cartesian.numpy(), axis=1).max() < 1e-5
@patch('core.train_utils.epsilon', 0)
def test_compute_angular_error_xyz_arr():
    """Angular error between pairs of unit xyz vectors, in degrees."""
    lhs = torch.Tensor([
        [0.8001 / math.sqrt(2), 0.6, 0.8 / math.sqrt(2)],
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1],
        [-1, 0, 0],
        [1 / math.sqrt(2), 0, 1 / math.sqrt(2)],
    ])
    rhs = torch.Tensor([
        [0.8 / math.sqrt(2), 0.6, 0.8 / math.sqrt(2)],
        [-1, 0, 0],
        [0, 0, 1],
        [0, 0, -1],
        [-1, 0, 0],
        [-1 / math.sqrt(2), 0, 1 / math.sqrt(2)],
    ], )
    expected = torch.Tensor([
        0,
        180,
        180 / 2,
        180,
        0,
        180 / 2,
    ])
    result = compute_angular_error_xyz_arr(lhs, rhs)
    deviation = np.abs(result.numpy() - expected.numpy())
    assert np.max(deviation) < 1e-5
@patch('core.train_utils.epsilon', 0)
def test_compute_angular_error():
    """Mean angular error between spherical (theta, phi) direction pairs."""
    lhs = torch.Tensor([
        [math.pi / 2, 0],
        [0, math.pi / 2],
        [math.pi, 0],
        [-math.pi / 2, 0],
        [math.pi / 4, 0],
    ])
    rhs = torch.Tensor([
        [-math.pi / 2, 0],
        [math.pi, 0],
        [0, 0],
        [-math.pi / 2, 0],
        [-math.pi / 4, 0],
    ], )
    expected = torch.Tensor([
        180,
        180 / 2,
        180,
        0,
        180 / 2,
    ])
    result = compute_angular_error(lhs, rhs)
    assert result == torch.mean(expected)
| 24.022989 | 102 | 0.488038 | 0 | 0 | 0 | 0 | 1,347 | 0.644498 | 0 | 0 | 52 | 0.02488 |
e26904d170e4e8c6e1dcb9ac5ffac8b016dc97a4 | 732 | py | Python | scripts/serializers.py | sul-cidr/scriptchart-backend | 38bb4139d77d683d85f31839a1a06096fe2fabbc | [
"MIT"
] | 1 | 2019-06-05T23:05:32.000Z | 2019-06-05T23:05:32.000Z | scripts/serializers.py | sul-cidr/scriptchart-backend | 38bb4139d77d683d85f31839a1a06096fe2fabbc | [
"MIT"
] | 42 | 2019-01-24T23:51:42.000Z | 2021-09-08T01:04:45.000Z | scripts/serializers.py | sul-cidr/scriptchart-backend | 38bb4139d77d683d85f31839a1a06096fe2fabbc | [
"MIT"
] | 1 | 2019-08-05T12:47:57.000Z | 2019-08-05T12:47:57.000Z | from rest_framework import serializers
from scripts.models import Manuscript
from scripts.models import Page
from scripts.models import Coordinates
class ManuscriptSerializer(serializers.ModelSerializer):
    """REST serializer exposing the basic Manuscript fields."""

    class Meta:
        model = Manuscript
        fields = ('id', 'slug', 'shelfmark', 'date', 'manifest')
class PageSerializer(serializers.ModelSerializer):
    """REST serializer exposing a Page with its parent manuscript and size."""

    class Meta:
        model = Page
        fields = ('id', 'manuscript', 'url', 'height', 'width')
class CoordinatesSerializer(serializers.ModelSerializer):
    """REST serializer for letter Coordinates, embedding the owning page.

    The related page is rendered as a nested read-only object instead of a
    bare primary key.
    """
    page = PageSerializer(read_only=True)

    class Meta:
        model = Coordinates
        # Fix: 'page' was listed twice in the original fields tuple; one
        # entry is sufficient for DRF to include the nested serializer.
        fields = ('id', 'page', 'letter', 'top', 'left', 'width', 'height',
                  'binary_url')
| 28.153846 | 75 | 0.669399 | 575 | 0.785519 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.184426 |
e26b0de3f1cabd89fc29afca7fa65f5643740f6e | 750 | py | Python | test/generators/utils.py | yanqd0/LeetCode | 8c669b954f4e4ae5e31a14727bf4ceedc58ea363 | [
"MIT"
] | null | null | null | test/generators/utils.py | yanqd0/LeetCode | 8c669b954f4e4ae5e31a14727bf4ceedc58ea363 | [
"MIT"
] | 3 | 2019-08-29T02:33:12.000Z | 2019-08-29T02:34:23.000Z | test/generators/utils.py | yanqd0/LeetCode | 8c669b954f4e4ae5e31a14727bf4ceedc58ea363 | [
"MIT"
] | null | null | null | import csv
import re
from os import makedirs
from os.path import abspath, basename, dirname, isdir, join
def generate_csv(path, fields, rows, quote_empty=False):
    """Write *rows* as a CSV file into a sibling ``cases`` directory.

    The output file name is ``basename(path)`` with a trailing ``py``
    rewritten to ``csv``; it is placed in ``<grandparent-of-path>/cases``,
    which is created on demand.

    :param path: path of the generator script (typically ``__file__``)
    :param fields: column names written as the header row
    :param rows: iterable of row sequences passed to ``csv.writer.writerows``
    :param quote_empty: if True quote all non-numeric values
        (``QUOTE_NONNUMERIC``) instead of the default minimal quoting
    """
    path = abspath(path)
    name = re.sub('py$', 'csv', basename(path))
    cases = join(dirname(dirname(path)), 'cases')
    if not isdir(cases):
        makedirs(cases)
    csv_path = join(cases, name)
    quoting = csv.QUOTE_NONNUMERIC if quote_empty else csv.QUOTE_MINIMAL
    # One open in 'w' mode replaces the original write-then-reopen-append
    # pair, which opened the file twice for no benefit. Output is identical:
    # header via DictWriter, data rows via a plain writer with `quoting`.
    with open(csv_path, 'w') as fobj:
        header_writer = csv.DictWriter(fobj, fieldnames=fields, lineterminator='\n')
        header_writer.writeheader()
        row_writer = csv.writer(fobj, quoting=quoting, lineterminator='\n')
        row_writer.writerows(rows)
| 31.25 | 77 | 0.669333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.041333 |
e26bcea95043f4e93feb6b1f624c1b506a9ab48a | 8,196 | py | Python | fig/backends/aws/__init__.py | LaudateCorpus1/falcon-integration-gateway | 5575c014d8315e0a7480f985d6e3e238ea7e1f61 | [
"Unlicense"
] | 10 | 2021-04-02T02:45:07.000Z | 2021-12-17T19:03:10.000Z | fig/backends/aws/__init__.py | LaudateCorpus1/falcon-integration-gateway | 5575c014d8315e0a7480f985d6e3e238ea7e1f61 | [
"Unlicense"
] | 50 | 2021-03-19T14:30:43.000Z | 2022-03-22T21:07:29.000Z | fig/backends/aws/__init__.py | LaudateCorpus1/falcon-integration-gateway | 5575c014d8315e0a7480f985d6e3e238ea7e1f61 | [
"Unlicense"
] | 11 | 2021-03-19T11:36:21.000Z | 2022-03-22T23:33:13.000Z | from datetime import datetime
import traceback
import boto3
from botocore.exceptions import ClientError
from ...config import config
from ...log import log
class Submitter():
    """Delivers one CrowdStrike Falcon detection event to AWS Security Hub.

    Workflow: locate the EC2 instance the detection refers to
    (find_instance), build an ASFF finding dict (create_payload), and
    import it through the Security Hub API (send_to_securityhub).
    """

    def __init__(self, event):
        # `event` is the Falcon detection wrapper produced upstream; only
        # its attributes (instance_id, device_details, original_event, ...)
        # are read here.
        self.event = event

    def find_instance(self, instance_id, mac_address):  # pylint: disable=R0201
        """Search every EC2 region for `instance_id` with a matching MAC.

        Returns a (region, instance) tuple on a match. When nothing matches,
        returns the configured reporting region and the last constructed
        instance object (possibly None) — callers must handle both.
        NOTE(review): if an ID exists in some region but the MAC differs,
        `ec2instance` keeps that stale object and may be returned with the
        report region; submit() relies on the later network_interfaces call
        raising ClientError to filter this case — confirm intended.
        """
        # Instance IDs are unique to the region, not the account, so we have to check them all
        report_region = config.get('aws', 'region')
        ec2instance = None
        ec2_client = boto3.client("ec2")
        regions = [region["RegionName"] for region in ec2_client.describe_regions()["Regions"]]
        for region in regions:
            ec2 = boto3.resource("ec2", region_name=region)
            try:
                ec2instance = ec2.Instance(instance_id)
                found = False
                # Confirm the mac address matches
                for iface in ec2instance.network_interfaces:
                    det_mac = mac_address.lower().replace(":", "").replace("-", "")
                    ins_mac = iface.mac_address.lower().replace(":", "").replace("-", "")
                    if det_mac == ins_mac:
                        found = True
                if found:  # pylint: disable=R1723
                    return region, ec2instance
            except ClientError:
                # Instance ID does not exist in this region; try the next one.
                continue
            except Exception:  # pylint: disable=W0703
                trace = traceback.format_exc()
                log.exception(str(trace))
                continue
        return report_region, ec2instance

    @staticmethod
    def send_to_securityhub(manifest):
        """Import `manifest` into Security Hub unless a finding with the same
        Id already exists.

        Returns the batch_import_findings response dict, or False when the
        finding was already present or the import call failed.
        """
        client = boto3.client('securityhub', region_name=config.get('aws', 'region'))
        check_response = {}
        found = False
        try:
            # Duplicate check: query for an existing finding with this Id.
            check_response = client.get_findings(Filters={'Id': [{'Value': manifest["Id"], 'Comparison': 'EQUALS'}]})
            for _ in check_response["Findings"]:
                found = True
        except ClientError:
            # Lookup failure is treated as "not found" and we try to import.
            pass
        import_response = False
        if not found:
            try:
                import_response = client.batch_import_findings(Findings=[manifest])
            except ClientError as err:
                # Boto3 issue communicating with SH, throw the error in the log
                log.exception(str(err))
        return import_response

    def submit(self):
        """Resolve the detection's instance, build the finding, and send it.

        Only detections whose instance (matched by ID + MAC) is visible to
        this account are forwarded; everything else is logged and dropped.
        """
        log.info("Processing detection: %s", self.event.detect_description)
        det_region = config.get('aws', 'region')
        send = False
        try:
            if self.event.instance_id:
                det_region, instance = self.find_instance(self.event.instance_id, self.event.device_details["mac_address"])
                if instance is None:
                    log.warning("Instance %s with MAC address %s not found in regions searched. Alert not processed.",
                                self.event.instance_id, self.event.device_details["mac_address"])
                    return
                try:
                    for _ in instance.network_interfaces:
                        # Only send alerts for instances we can find
                        send = True
                except ClientError:
                    # Not our instance
                    i_id = self.event.instance_id
                    mac = self.event.device_details["mac_address"]
                    log.info("Instance %s with MAC address %s not found in regions searched. Alert not processed.", i_id, mac)
        except AttributeError:
            # Instance ID was not provided by the detection
            log.info("Instance ID not provided by detection. Alert not processed.")
        if send:
            sh_payload = self.create_payload(det_region)
            response = self.send_to_securityhub(sh_payload)
            if not response:
                log.info("Detection already submitted to Security Hub. Alert not processed.")
            else:
                if response["SuccessCount"] > 0:
                    submit_msg = f"Detection submitted to Security Hub. (Request ID: {response['ResponseMetadata']['RequestId']})"
                    log.info(submit_msg)

    def create_payload(self, instance_region):
        """Build the ASFF finding dict for this detection.

        `instance_region` is the region the instance was located in; optional
        sections (TTP types, process, network) are added only when present in
        the original Falcon event.
        """
        region = config.get('aws', 'region')
        try:
            account_id = boto3.client("sts").get_caller_identity().get('Account')
        except KeyError:
            # Failed to get endpoint_resolver the first time, try it again
            account_id = boto3.client("sts").get_caller_identity().get('Account')
        # Security Hub normalized severity is 0-100; Falcon's scale is 0-5.
        severity_product = self.event.severity_value
        severity_normalized = severity_product * 20
        payload = {
            "SchemaVersion": "2018-10-08",
            "ProductArn": "arn:aws:securityhub:{}:517716713836:product/crowdstrike/crowdstrike-falcon".format(region),
            "AwsAccountId": account_id,
            "SourceUrl": self.event.falcon_link,
            "GeneratorId": "Falcon Host",
            "CreatedAt": datetime.utcfromtimestamp(float(self.event.event_create_time) / 1000.).isoformat() + 'Z',
            "UpdatedAt": ((datetime.utcfromtimestamp(datetime.timestamp(datetime.now()))).isoformat() + 'Z'),
            "RecordState": "ACTIVE",
            "Severity": {"Product": severity_product, "Normalized": severity_normalized}
        }
        # Instance ID based detail
        # NOTE(review): "AwsEc2Instnace" below looks like a typo of the ASFF
        # resource type "AwsEc2Instance" — changing it alters submitted data,
        # so it is only flagged here; confirm against Security Hub ASFF spec.
        try:
            payload["Id"] = f"{self.event.instance_id}{self.event.event_id}"
            payload["Title"] = "Falcon Alert. Instance: %s" % self.event.instance_id
            payload["Resources"] = [{"Type": "AwsEc2Instnace", "Id": self.event.instance_id, "Region": instance_region}]
        except AttributeError:
            payload["Id"] = f"UnknownInstanceID:{self.event.event_id}"
            payload["Title"] = "Falcon Alert"
            payload["Resources"] = [{"Type": "Other",
                                     "Id": f"UnknownInstanceId:{self.event.event_id}",
                                     "Region": region
                                     }]
        # Description
        aws_id = ""
        if self.event.cloud_provider_account_id:
            aws_id = f"| AWS Account for alerting instance: {self.event.cloud_provider_account_id}"
        payload["Description"] = f"{self.event.detect_description} {aws_id}"
        # TTPs
        try:
            payload["Types"] = ["Namespace: TTPs",
                                "Category: %s" % self.event.original_event["event"]["Tactic"],
                                "Classifier: %s" % self.event.original_event["event"]["Technique"]
                                ]
        except KeyError:
            payload.pop("Types", None)
        # Running process detail
        try:
            payload["Process"] = {}
            payload["Process"]["Name"] = self.event.original_event["event"]["FileName"]
            payload["Process"]["Path"] = self.event.original_event["event"]["FilePath"]
        except KeyError:
            # Partial assignment above is discarded when any key is missing.
            payload.pop("Process", None)
        # Network detail
        try:
            payload['Network'] = self.network_payload()
        except KeyError:
            pass
        return payload

    def network_payload(self):
        """Build the ASFF Network section from the first NetworkAccesses entry.

        Raises KeyError when the original event has no network data; the
        caller treats that as "no network section".
        """
        net = {}
        net['Direction'] = \
            "IN" if self.event.original_event['event']['NetworkAccesses'][0]['ConnectionDirection'] == 0 else 'OUT'
        net['Protocol'] = self.event.original_event['event']['NetworkAccesses'][0]['Protocol']
        net['SourceIpV4'] = self.event.original_event['event']['NetworkAccesses'][0]['LocalAddress']
        net['SourcePort'] = self.event.original_event['event']['NetworkAccesses'][0]['LocalPort']
        net['DestinationIpV4'] = self.event.original_event['event']['NetworkAccesses'][0]['RemoteAddress']
        net['DestinationPort'] = self.event.original_event['event']['NetworkAccesses'][0]['RemotePort']
        return net
class Runtime():
    """Backend entry point: routes AWS EC2 detections to the Security Hub
    submitter."""

    def __init__(self):
        log.info("AWS Backend is enabled.")

    def is_relevant(self, falcon_event):  # pylint: disable=R0201
        # Only events originating from EC2 instances belong to this backend.
        return falcon_event.cloud_provider == 'AWS_EC2'

    def process(self, falcon_event):  # pylint: disable=R0201
        # Fire-and-forget submission of a single event.
        Submitter(falcon_event).submit()


# Public API of this backend module.
__all__ = ['Runtime']
| 44.064516 | 130 | 0.578575 | 8,010 | 0.977306 | 0 | 0 | 789 | 0.096266 | 0 | 0 | 2,418 | 0.295022 |
e26c1de61513b9dd04386c2769fcd9c9212d69dd | 19,754 | py | Python | src/plugin.py | saitho/galaxy_blizzard_plugin | 9716c44e9db261d85baddc019d3f9e5eb1f28cf7 | [
"MIT"
] | null | null | null | src/plugin.py | saitho/galaxy_blizzard_plugin | 9716c44e9db261d85baddc019d3f9e5eb1f28cf7 | [
"MIT"
] | null | null | null | src/plugin.py | saitho/galaxy_blizzard_plugin | 9716c44e9db261d85baddc019d3f9e5eb1f28cf7 | [
"MIT"
] | null | null | null | import asyncio
import json
import os
import sys
import multiprocessing
import webbrowser
import requests
import requests.cookies
import logging as log
import subprocess
import time
from galaxy.api.consts import LocalGameState, Platform
from galaxy.api.plugin import Plugin, create_and_run_plugin
from galaxy.api.types import Achievement, Game, LicenseInfo, LocalGame
from galaxy.api.errors import ( AuthenticationRequired,
BackendTimeout, BackendNotAvailable, BackendError, NetworkError, UnknownError, InvalidCredentials
)
from version import __version__ as version
from process import ProcessProvider
from local_client_base import ClientNotInstalledError
from local_client import LocalClient
from backend import BackendClient, AccessTokenExpired
from definitions import Blizzard, DataclassJSONEncoder, License_Map, ClassicGame
from consts import SYSTEM
from consts import Platform as pf
from http_client import AuthenticatedHttpClient
class BNetPlugin(Plugin):
def __init__(self, reader, writer, token):
super().__init__(Platform.Battlenet, version, reader, writer, token)
self.local_client = LocalClient(self._update_statuses)
self.authentication_client = AuthenticatedHttpClient(self)
self.backend_client = BackendClient(self, self.authentication_client)
self.owned_games_cache = []
self.watched_running_games = set()
self.local_games_called = False
async def _notify_about_game_stop(self, game, starting_timeout):
if not self.local_games_called:
return
id_to_watch = game.info.id
if id_to_watch in self.watched_running_games:
log.debug(f'Game {id_to_watch} is already watched. Skipping')
return
try:
self.watched_running_games.add(id_to_watch)
await asyncio.sleep(starting_timeout)
ProcessProvider().update_games_processes([game])
log.info(f'Setuping process watcher for {game._processes}')
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, game.wait_until_game_stops)
finally:
self.update_local_game_status(LocalGame(id_to_watch, LocalGameState.Installed))
self.watched_running_games.remove(id_to_watch)
def _update_statuses(self, refreshed_games, previous_games):
if not self.local_games_called:
return
for blizz_id, refr in refreshed_games.items():
prev = previous_games.get(blizz_id, None)
if prev is None:
if refr.playable:
log.debug('Detected playable game')
state = LocalGameState.Installed
else:
log.debug('Detected installation begin')
state = LocalGameState.None_
elif refr.playable and not prev.playable:
log.debug('Detected playable game')
state = LocalGameState.Installed
elif refr.last_played != prev.last_played:
log.debug('Detected launched game')
state = LocalGameState.Installed | LocalGameState.Running
asyncio.create_task(self._notify_about_game_stop(refr, 5))
else:
continue
log.info(f'Changing game {blizz_id} state to {state}')
self.update_local_game_status(LocalGame(blizz_id, state))
for blizz_id, prev in previous_games.items():
refr = refreshed_games.get(blizz_id, None)
if refr is None:
log.debug('Detected uninstalled game')
state = LocalGameState.None_
self.update_local_game_status(LocalGame(blizz_id, state))
def log_out(self):
if self.backend_client:
asyncio.create_task(self.authentication_client.shutdown())
self.authentication_client.user_details = None
self.owned_games_cache = []
async def open_battlenet_browser(self):
url = self.authentication_client.blizzard_battlenet_download_url
log.info(f'Opening battle.net website: {url}')
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, lambda x: webbrowser.open(x, autoraise=True), url)
async def install_game(self, game_id):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
installed_game = self.local_client.get_installed_games().get(game_id, None)
if installed_game and os.access(installed_game.install_path, os.F_OK):
log.warning("Received install command on an already installed game")
return await self.launch_game(game_id)
if game_id in Blizzard.legacy_game_ids:
if SYSTEM == pf.WINDOWS:
platform = 'windows'
elif SYSTEM == pf.MACOS:
platform = 'macos'
webbrowser.open(f"https://www.blizzard.com/download/confirmation?platform={platform}&locale=enUS&version=LIVE&id={game_id}")
return
try:
self.local_client.refresh()
log.info(f'Installing game of id {game_id}')
self.local_client.install_game(game_id)
except ClientNotInstalledError as e:
log.warning(e)
await self.open_battlenet_browser()
except Exception as e:
log.exception(f"Installing game {game_id} failed: {e}")
def _open_battlenet_at_id(self, game_id):
try:
self.local_client.refresh()
self.local_client.open_battlenet(game_id)
except Exception as e:
log.exception(f"Opening battlenet client on specific game_id {game_id} failed {e}")
try:
self.local_client.open_battlenet()
except Exception as e:
log.exception(f"Opening battlenet client failed {e}")
async def uninstall_game(self, game_id):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
if game_id == 'wow_classic':
# attempting to uninstall classic wow through protocol gives you a message that the game cannot
# be uninstalled through protocol and you should use battle.net
return self._open_battlenet_at_id(game_id)
if SYSTEM == pf.MACOS:
self._open_battlenet_at_id(game_id)
else:
try:
installed_game = self.local_client.get_installed_games().get(game_id, None)
if installed_game is None or not os.access(installed_game.install_path, os.F_OK):
log.error(f'Cannot uninstall {Blizzard[game_id].uid}')
self.update_local_game_status(LocalGame(game_id, LocalGameState.None_))
return
if not isinstance(installed_game.info, ClassicGame):
if self.local_client.uninstaller is None:
raise FileNotFoundError('Uninstaller not found')
uninstall_tag = installed_game.uninstall_tag
client_lang = self.local_client.config_parser.locale_language
self.local_client.uninstaller.uninstall_game(installed_game, uninstall_tag, client_lang)
except Exception as e:
log.exception(f'Uninstalling game {game_id} failed: {e}')
async def launch_game(self, game_id):
if not self.local_games_called:
await self.get_local_games()
try:
if self.local_client.get_installed_games() is None:
log.error(f'Launching game that is not installed: {game_id}')
return await self.install_game(game_id)
game = self.local_client.get_installed_games().get(game_id, None)
if game is None:
log.error(f'Launching game that is not installed: {game_id}')
return await self.install_game(game_id)
if isinstance(game.info, ClassicGame):
log.info(f'Launching game of id: {game_id}, {game} at path {os.path.join(game.install_path, game.info.exe)}')
if SYSTEM == pf.WINDOWS:
subprocess.Popen(os.path.join(game.install_path, game.info.exe))
elif SYSTEM == pf.MACOS:
if not game.info.bundle_id:
log.warning(f"{game.name} has no bundle id, help by providing us bundle id of this game")
subprocess.Popen(['open', '-b', game.info.bundle_id])
self.update_local_game_status(LocalGame(game_id, LocalGameState.Installed | LocalGameState.Running))
asyncio.create_task(self._notify_about_game_stop(game, 6))
return
self.local_client.refresh()
log.info(f'Launching game of id: {game_id}, {game}')
await self.local_client.launch_game(game, wait_sec=60)
self.update_local_game_status(LocalGame(game_id, LocalGameState.Installed | LocalGameState.Running))
self.local_client.close_window()
asyncio.create_task(self._notify_about_game_stop(game, 3))
except ClientNotInstalledError as e:
log.warning(e)
await self.open_battlenet_browser()
except TimeoutError as e:
log.warning(str(e))
except Exception as e:
log.exception(f"Launching game {game_id} failed: {e}")
async def authenticate(self, stored_credentials=None):
try:
if stored_credentials:
auth_data = self.authentication_client.process_stored_credentials(stored_credentials)
try:
await self.authentication_client.create_session()
await self.backend_client.refresh_cookies()
auth_status = await self.backend_client.validate_access_token(auth_data.access_token)
except (BackendNotAvailable, BackendError, NetworkError, UnknownError, BackendTimeout) as e:
raise e
except Exception:
raise InvalidCredentials()
if self.authentication_client.validate_auth_status(auth_status):
self.authentication_client.user_details = await self.backend_client.get_user_info()
return self.authentication_client.parse_user_details()
else:
return self.authentication_client.authenticate_using_login()
except Exception as e:
raise e
async def pass_login_credentials(self, step, credentials, cookies):
if "logout&app=oauth" in credentials['end_uri']:
# 2fa expired, repeat authentication
return self.authentication_client.authenticate_using_login()
if self.authentication_client.attempted_to_set_battle_tag:
self.authentication_client.user_details = await self.backend_client.get_user_info()
return self.authentication_client.parse_auth_after_setting_battletag()
cookie_jar = self.authentication_client.parse_cookies(cookies)
auth_data = await self.authentication_client.get_auth_data_login(cookie_jar, credentials)
try:
await self.authentication_client.create_session()
await self.backend_client.refresh_cookies()
except (BackendNotAvailable, BackendError, NetworkError, UnknownError, BackendTimeout) as e:
raise e
except Exception:
raise InvalidCredentials()
auth_status = await self.backend_client.validate_access_token(auth_data.access_token)
if not ("authorities" in auth_status and "IS_AUTHENTICATED_FULLY" in auth_status["authorities"]):
raise InvalidCredentials()
self.authentication_client.user_details = await self.backend_client.get_user_info()
self.authentication_client.set_credentials()
return self.authentication_client.parse_battletag()
async def get_owned_games(self):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
def _parse_classic_games(classic_games):
for classic_game in classic_games["classicGames"]:
log.info(f"looking for {classic_game} in classic games")
try:
blizzard_game = Blizzard[classic_game["localizedGameName"].replace(u'\xa0', ' ')]
log.info(f"match! {blizzard_game}")
classic_game["titleId"] = blizzard_game.uid
classic_game["gameAccountStatus"] = "Good"
except KeyError:
continue
return classic_games
def _get_not_added_free_games(owned_games):
owned_games_ids = []
for game in owned_games:
if "titleId" in game:
owned_games_ids.append(str(game["titleId"]))
return [{"titleId": game.blizzard_id,
"localizedGameName": game.name,
"gameAccountStatus": "Free"}
for game in Blizzard.free_games if game.blizzard_id not in owned_games_ids]
try:
games = await self.backend_client.get_owned_games()
classic_games = _parse_classic_games(await self.backend_client.get_owned_classic_games())
owned_games = games["gameAccounts"] + classic_games["classicGames"]
# Add wow classic if retail wow is present in owned games
for owned_game in owned_games.copy():
if 'titleId' in owned_game:
if owned_game['titleId'] == 5730135:
owned_games.append({'titleId': 'wow_classic',
'localizedGameName': 'World of Warcraft Classic',
'gameAccountStatus': owned_game['gameAccountStatus']})
free_games_to_add = _get_not_added_free_games(owned_games)
owned_games += free_games_to_add
self.owned_games_cache = owned_games
return [
Game(
str(game["titleId"]),
game["localizedGameName"],
[],
LicenseInfo(License_Map[game["gameAccountStatus"]]),
)
for game in self.owned_games_cache if "titleId" in game
]
except Exception as e:
log.exception(f"failed to get owned games: {repr(e)}")
raise
async def get_local_games(self):
timeout = time.time() + 2
try:
translated_installed_games = []
while not self.local_client.games_finished_parsing():
await asyncio.sleep(0.1)
if time.time() >= timeout:
break
running_games = self.local_client.get_running_games()
installed_games = self.local_client.get_installed_games()
log.info(f"Installed games {installed_games.items()}")
log.info(f"Running games {running_games}")
for id_, game in installed_games.items():
if game.playable:
state = LocalGameState.Installed
if id_ in running_games:
state |= LocalGameState.Running
else:
state = LocalGameState.None_
translated_installed_games.append(LocalGame(id_, state))
self.local_client.installed_games_cache = installed_games
return translated_installed_games
except Exception as e:
log.exception(f"failed to get local games: {str(e)}")
raise
finally:
self.local_games_called = True
async def _get_wow_achievements(self):
achievements = []
try:
characters_data = await self.backend_client.get_wow_character_data()
characters_data = characters_data["characters"]
wow_character_data = await asyncio.gather(
*[
self.backend_client.get_wow_character_achievements(character["realm"], character["name"])
for character in characters_data
],
return_exceptions=True,
)
for data in wow_character_data:
if isinstance(data, requests.Timeout) or isinstance(data, requests.ConnectionError):
raise data
wow_achievement_data = [
list(
zip(
data["achievements"]["achievementsCompleted"],
data["achievements"]["achievementsCompletedTimestamp"],
)
)
for data in wow_character_data
if type(data) is dict
]
already_in = set()
for char_ach in wow_achievement_data:
for ach in char_ach:
if ach[0] not in already_in:
achievements.append(Achievement(achievement_id=ach[0], unlock_time=int(ach[1] / 1000)))
already_in.add(ach[0])
except (AccessTokenExpired, BackendError) as e:
log.exception(str(e))
with open('wow.json', 'w') as f:
f.write(json.dumps(achievements, cls=DataclassJSONEncoder))
return achievements
async def _get_sc2_achievements(self):
account_data = await self.backend_client.get_sc2_player_data(self.authentication_client.user_details["id"])
# TODO what if more sc2 accounts?
assert len(account_data) == 1
account_data = account_data[0]
profile_data = await self.backend_client.get_sc2_profile_data(
account_data["regionId"], account_data["realmId"],
account_data["profileId"]
)
sc2_achievement_data = [
Achievement(achievement_id=achievement["achievementId"], unlock_time=achievement["completionDate"])
for achievement in profile_data["earnedAchievements"]
if achievement["isComplete"]
]
with open('sc2.json', 'w') as f:
f.write(json.dumps(sc2_achievement_data, cls=DataclassJSONEncoder))
return sc2_achievement_data
# async def get_unlocked_achievements(self, game_id):
# if not self.website_client.is_authenticated():
# raise AuthenticationRequired()
# try:
# if game_id == "21298":
# return await self._get_sc2_achievements()
# elif game_id == "5730135":
# return await self._get_wow_achievements()
# else:
# return []
# except requests.Timeout:
# raise BackendTimeout()
# except requests.ConnectionError:
# raise NetworkError()
# except Exception as e:
# log.exception(str(e))
# return []
async def launch_platform_client(self):
if self.local_client.is_running():
log.info("Launch platform client called but client is already running")
return
self.local_client.open_battlenet()
await self.local_client.prevent_battlenet_from_showing()
    async def shutdown_platform_client(self):
        """Ask the local Battle.net client process to exit."""
        await self.local_client.shutdown_platform_client()

    async def shutdown(self):
        """Plugin teardown hook: close the authentication client's resources."""
        log.info("Plugin shutdown.")
        await self.authentication_client.shutdown()
def main():
    """Entry point: start the plugin with the arguments Galaxy passes in."""
    # freeze_support() is required for multiprocessing in frozen builds.
    multiprocessing.freeze_support()
    create_and_run_plugin(BNetPlugin, sys.argv)
# Run only when executed as a standalone plugin process, not on import.
if __name__ == "__main__":
    main()
| 42.665227 | 136 | 0.621849 | 18,669 | 0.945074 | 0 | 0 | 0 | 0 | 15,327 | 0.775893 | 2,992 | 0.151463 |
e26f9851f519998d86c2d83b72b1bb841fd008d3 | 649 | py | Python | buyer/tests/test_serializers.py | uktrade/directory-api | 45a9024a7ecc2842895201cbb51420ba9e57a168 | [
"MIT"
] | 2 | 2017-06-02T09:09:08.000Z | 2021-01-18T10:26:53.000Z | buyer/tests/test_serializers.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | 629 | 2016-10-10T09:35:52.000Z | 2022-03-25T15:04:04.000Z | buyer/tests/test_serializers.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | 5 | 2017-06-22T10:02:22.000Z | 2022-03-14T17:55:21.000Z | import pytest
from buyer import serializers
@pytest.mark.django_db
def test_buyer_deserialization():
    """Deserializing a valid payload creates a Buyer with matching fields."""
    payload = {
        'email': 'jim@example.com',
        'name': 'Jim Exampleson',
        'sector': 'AEROSPACE',
        'company_name': 'Example corp',
        'country': 'China',
    }
    serializer = serializers.BuyerSerializer(data=payload)
    assert serializer.is_valid()
    instance = serializer.save()
    for field, expected in payload.items():
        assert getattr(instance, field) == expected
| 25.96 | 56 | 0.651772 | 0 | 0 | 0 | 0 | 601 | 0.92604 | 0 | 0 | 153 | 0.235747 |
e27110053843a24a09a5e561022d265c1c30eb63 | 637 | py | Python | moai/utils/arguments/__init__.py | tzole1155/moai | d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180 | [
"Apache-2.0"
] | null | null | null | moai/utils/arguments/__init__.py | tzole1155/moai | d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180 | [
"Apache-2.0"
] | null | null | null | moai/utils/arguments/__init__.py | tzole1155/moai | d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180 | [
"Apache-2.0"
] | null | null | null | from moai.utils.arguments.common import (
assert_numeric,
assert_non_negative,
assert_negative,
)
from moai.utils.arguments.choices import (
assert_choices,
ensure_choices,
)
from moai.utils.arguments.list import (
ensure_numeric_list,
ensure_string_list,
assert_sequence_size,
)
from moai.utils.arguments.path import (
assert_path,
ensure_path,
)
# Public API of this package: the validation/coercion helpers re-exported
# from the submodules above.
__all__ = [
    "assert_numeric",
    "ensure_numeric_list",
    "ensure_string_list",
    "assert_choices",
    "ensure_choices",
    "assert_sequence_size",
    "assert_non_negative",
    "assert_negative",
    "assert_path",
    "ensure_path",
]
e272629bf1764e47dd0429ebae5dabad29cdb7f2 | 1,258 | py | Python | tools/harness/tests/aos_tests.py | Frankie8472/advanced-operating-systems | bff03e668c76886781e5fdb24dfab5880f79941d | [
"MIT"
] | 5 | 2020-06-12T11:47:21.000Z | 2022-02-27T14:39:05.000Z | tools/harness/tests/aos_tests.py | Frankie8472/advanced-operating-systems | bff03e668c76886781e5fdb24dfab5880f79941d | [
"MIT"
] | 3 | 2020-06-04T20:11:26.000Z | 2020-07-26T23:16:33.000Z | tools/harness/tests/aos_tests.py | Frankie8472/advanced-operating-systems | bff03e668c76886781e5fdb24dfab5880f79941d | [
"MIT"
] | 3 | 2020-06-12T18:06:29.000Z | 2022-03-13T17:19:02.000Z | ##########################################################################
# Copyright (c) 2009, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import re, tests, barrelfish
from common import TestCommon
from results import PassFailResult
@tests.add_test
class AosTest(TestCommon):
    '''Base class for AOS tests'''
    name = "aos_test"

    def get_modules(self, build, machine):
        """Boot the generic ARMv8 boot/cpu drivers with the AOS init module."""
        m = barrelfish.BootModules(self, prefix="armv8/sbin/")
        m.set_boot_driver("boot_armv8_generic")
        m.set_cpu_driver("cpu_imx8x")
        m.add_module("init", ["g:ira0=4096"])
        return m

    def get_finish_string(self):
        # The harness stops collecting console output once this marker appears.
        return "<grading> TEST"

    def process_data(self, testdir, rawiter):
        """Pass iff any console line matches the grading PASSED marker."""
        # Fixed: removed the dead `lastline` local (it was never updated or
        # read) and made the regex a raw string so the \s escapes are explicit.
        for line in rawiter:
            if re.match(r"<grading>\s*TEST\s*ira\s*PASSED", line):
                return PassFailResult(True)
        return PassFailResult(False)
| 34.944444 | 80 | 0.59062 | 715 | 0.568362 | 0 | 0 | 731 | 0.581081 | 0 | 0 | 632 | 0.502385 |
e2727885961948aa3428682ba096be8e0ec9a20d | 2,243 | py | Python | examples/one_d_test.py | jamesvuc/autograd-mcmc | 0650fb32b6bc0205b2feb09ff7957eca8b696a5b | [
"MIT"
] | null | null | null | examples/one_d_test.py | jamesvuc/autograd-mcmc | 0650fb32b6bc0205b2feb09ff7957eca8b696a5b | [
"MIT"
] | null | null | null | examples/one_d_test.py | jamesvuc/autograd-mcmc | 0650fb32b6bc0205b2feb09ff7957eca8b696a5b | [
"MIT"
] | null | null | null | #1d_tests.py
import autograd.numpy as np
import autograd.scipy.stats.norm as norm
from copy import copy
import datetime as dt
from matplotlib import pyplot as plt
import seaborn as sns
import sys; sys.path.append('..')
from mcmc import langevin, MALA, RK_langevin, RWMH, HMC
def bimodal_logprob(z):
    """Multi-modal unnormalized log-density on R, flattened to a 1-D array."""
    sin_term = np.log(np.sin(z) ** 2)
    double_sin_term = np.log(np.sin(2 * z) ** 2)
    return (sin_term + double_sin_term + norm.logpdf(z)).ravel()
def main():
    """Run each MCMC sampler on the 1-d bimodal target and plot histograms."""
    # ====== Setup =======
    n_iters, n_samples = 2500, 500
    init_vals = np.random.randn(n_samples, 1)
    logprob = bimodal_logprob

    # (label, sampler, kwargs, restart_timer) -- the timer is only restarted
    # where the original timings restarted it (before RW MH and HMC).
    runs = [
        ('langevin', langevin,
         dict(num_iters=n_iters, num_samples=n_samples, step_size=0.05), False),
        ('MALA', MALA,
         dict(num_iters=n_iters, num_samples=n_samples, step_size=0.05), False),
        ('langevin_RK', RK_langevin,
         dict(num_iters=n_iters, num_samples=n_samples, step_size=0.01), False),
        ('RW MH', RWMH,
         dict(num_iters=n_iters, num_samples=n_samples, sigma=0.5), True),
        ('HMC', HMC,
         dict(num_iters=n_iters // 5, num_samples=n_samples,
              step_size=0.05, num_leap_iters=5), True),
    ]

    # ====== Tests =======
    allsamps = []
    t = dt.datetime.now()
    print('running 1d tests ...')
    for label, sampler, kwargs, restart_timer in runs:
        if restart_timer:
            t = dt.datetime.now()
        samps = sampler(logprob, copy(init_vals), **kwargs)
        print('done {} in'.format(label), dt.datetime.now() - t, '\n')
        allsamps.append(samps)

    # ====== Plotting =======
    lims = [-5, 5]
    fig, axes = plt.subplots(len(runs), sharex=True)
    zs = np.linspace(*lims, num=250)
    for ax, (label, _, _, _), samps in zip(axes, runs, allsamps):
        sns.distplot(samps, bins=1000, kde=False, ax=ax)
        scatter_ax = ax.twinx()
        scatter_ax.scatter(samps, np.ones(len(samps)), alpha=0.1, marker='x', color='red')
        scatter_ax.set_yticks([])
        ax.twinx().plot(zs, np.exp(bimodal_logprob(zs)), color='orange')
        ax.set_xlim(*lims)
        ax.set_title(label)
    plt.show()
# Allow running this example directly as a script.
if __name__ == '__main__':
    main()
e27404f1e9416d7b05bddb353f28ac49feb953fb | 195 | py | Python | main.py | NawrasseDahman/Qr-Code-Generator | 0f1bb8b0979f887c980cec3a241457176515b1b9 | [
"MIT"
] | 1 | 2021-12-31T07:12:09.000Z | 2021-12-31T07:12:09.000Z | main.py | NawrasseDahman/Qr-Code-Generator | 0f1bb8b0979f887c980cec3a241457176515b1b9 | [
"MIT"
] | null | null | null | main.py | NawrasseDahman/Qr-Code-Generator | 0f1bb8b0979f887c980cec3a241457176515b1b9 | [
"MIT"
] | null | null | null | import qrcode
# data example
data = "www.google.com"
# file name
file_name = "qrcode.png"
# generate qr code
img = qrcode.make(data=data)
# save generated qr code as img
img.save(file_name)
| 13 | 31 | 0.717949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.523077 |
e2740d3f04d16f6560cb0b5dc5a3ad68e5d09f12 | 7,845 | py | Python | onecodex/models/helpers.py | onecodex/onecodex | c02da5dcaa43497eb5f8bf62bdc9fb6780f06597 | [
"MIT"
] | 19 | 2016-06-09T03:55:15.000Z | 2021-05-13T11:45:45.000Z | onecodex/models/helpers.py | onecodex/onecodex | c02da5dcaa43497eb5f8bf62bdc9fb6780f06597 | [
"MIT"
] | 336 | 2016-02-01T19:10:15.000Z | 2022-03-30T16:14:36.000Z | onecodex/models/helpers.py | onecodex/onecodex | c02da5dcaa43497eb5f8bf62bdc9fb6780f06597 | [
"MIT"
] | 9 | 2017-07-28T21:03:14.000Z | 2021-02-27T16:30:10.000Z | import click
import inspect
import os
import requests
from onecodex.exceptions import OneCodexException, UnboundObject
def as_uri(uuid, base_class):
    """Build a full potion URI for *uuid* from *base_class*'s schema URI."""
    schema_uri = base_class._resource._schema._uri
    return "{}/{}".format(schema_uri.rstrip("#"), uuid)
def coerce_search_value(search_value, field_name, base_class):
    """Convert *search_value* into the form the potion API expects.

    ``$uri`` fields get the bare id expanded into a full schema URI, and
    OneCodex model instances are turned into ``{"$ref": ...}`` references;
    anything else is passed through unchanged.
    """
    from onecodex.models import OneCodexBase  # in here to prevent circular import

    if field_name == "$uri":
        # Fixed: expand the value being searched for into a URI -- the
        # previous code mistakenly passed `field_name` ("$uri") to as_uri.
        return as_uri(search_value, base_class)
    elif isinstance(search_value, OneCodexBase):
        return {"$ref": search_value._resource._uri}
    return search_value
def check_bind(self_or_cls):
    """Raise UnboundObject unless *self_or_cls* carries an API binding."""
    if hasattr(self_or_cls, "_resource"):
        return
    kind = "class" if inspect.isclass(self_or_cls) else "instance"
    raise UnboundObject("This {} is not associated with an API binding.".format(kind))
def generate_potion_sort_clause(sort_items, sort_schema):
    """Translate sort field names into a potion sort clause.

    A leading ``^`` on a field name requests descending order (mapped to
    ``False``); plain names sort ascending (``True``).  ``None`` means no
    sorting at all.
    """
    if sort_items is None:
        return {}
    if not isinstance(sort_items, list):
        sort_items = [sort_items]
    clause = {}
    for item in sort_items:
        if item.lstrip("^") not in sort_schema:
            raise AttributeError("Attribute {} can not be sorted on".format(item.lstrip("^")))
        if item.startswith("^"):
            clause[item[1:]] = False  # descending
        else:
            clause[item] = True  # ascending
    return clause
def generate_potion_keyword_where(keyword_filters, where_schema, base_class):
    """Build a potion ``where`` clause from keyword filters.

    Fields that only support ``$containsall`` get their values wrapped in
    that operator; list values otherwise become ``$in`` queries, and scalar
    values are coerced directly.
    """
    where = {}
    for keyword, search_value in keyword_filters.items():
        if keyword == "id":
            keyword = "$uri"  # the API addresses records by URI, not id
        if keyword not in where_schema:
            raise AttributeError(
                "{} can not be searched on {}".format(base_class.__name__, keyword)
            )
        # Flatten the schema's "anyOf" variants into the flat list of
        # operators this field supports.
        avail_searches = [
            op
            for variant in where_schema[keyword]["anyOf"]
            if "required" in variant
            for op in variant["required"]
        ]
        # TODO: do schema type checking here too?
        if "$eq" not in avail_searches and "$containsall" in avail_searches:
            values = search_value if isinstance(search_value, list) else [search_value]
            where[keyword] = {
                "$containsall": [coerce_search_value(v, keyword, base_class) for v in values]
            }
        elif isinstance(search_value, list):
            where[keyword] = {
                "$in": [coerce_search_value(v, keyword, base_class) for v in search_value]
            }
        else:
            where[keyword] = coerce_search_value(search_value, keyword, base_class)
    return where
def truncate_string(s, length=24):
    """Shorten *s* to at most roughly *length* characters with an ellipsis."""
    if len(s) < length - 3:
        return s
    truncated = s[: length - 3]
    # Avoid a four-dot run when the cut happens to land on a period.
    suffix = ".." if truncated[-1] == "." else "..."
    return truncated + suffix
class ResourceDownloadMixin(object):
    """Mixin adding file-download behaviour to One Codex resource models."""

    def download(self, path=None, file_obj=None, progressbar=False):
        """Download files from One Codex.

        Parameters
        ----------
        path : `string`, optional
            Full path to save the file to. If omitted, defaults to the original filename
            in the current working directory.
        file_obj : file-like object, optional
            Rather than save the file to a path, write it to this file-like object.
        progressbar : `bool`
            Display a progress bar using Click for the download?

        Returns
        -------
        `string`
            The path the file was downloaded to, if applicable. Otherwise, None.

        Notes
        -----
        If no arguments specified, defaults to download the file as the original filename
        in the current working directory. If `file_obj` given, will write data into the
        passed file-like object. If `path` given, will download the file to the path provided,
        but will not overwrite any existing files.
        """
        return self._download(
            "download_uri",
            self.filename,
            use_potion_session=False,
            path=path,
            file_obj=file_obj,
            progressbar=progressbar,
        )

    def _download(
        self,
        _resource_method,
        _filename=None,
        use_potion_session=False,
        path=None,
        file_obj=None,
        progressbar=False,
    ):
        """Shared download implementation.

        ``_resource_method`` names the potion resource method that returns the
        download link info; the rest of the arguments mirror :meth:`download`.
        Returns the destination path (or None when writing to ``file_obj``).
        """
        # Imported lazily so importing the models module stays cheap.
        from requests.adapters import HTTPAdapter
        from requests.packages.urllib3.util.retry import Retry

        if hasattr(self._resource, "visibility") and self._resource.visibility == "awaiting data":
            raise OneCodexException("Sample has not finished processing. Please try again later.")

        if path and file_obj:
            raise OneCodexException("Please specify only one of: path, file_obj")

        try:
            # Ask the API for a (signed) download link for this resource.
            method_to_call = getattr(self._resource, _resource_method)
            download_link_info = method_to_call()

            # No explicit destination: fall back to the server-suggested or
            # caller-supplied filename in the current working directory.
            if path is None and file_obj is None:
                if _filename is None:
                    if "save_as_filename" not in download_link_info:
                        raise OneCodexException(
                            "Please specify `path`, `file_obj`, or `_filename`."
                        )
                    _filename = download_link_info["save_as_filename"]
                path = os.path.join(os.getcwd(), _filename)

            if path and os.path.exists(path):
                raise OneCodexException("{} already exists! Will not overwrite.".format(path))

            if use_potion_session:
                session = self._resource._client.session
            else:
                session = requests.Session()

            link = download_link_info["download_uri"]

            # Retry up to 5 times with backoff timing of 2s, 4s, 8s, 16s, and 32s (applies to all
            # HTTP methods). 404 is included for cases where the file is being asynchronously
            # uploaded to S3 and is expected to be available soon.
            # NOTE(review): `method_whitelist` is the legacy urllib3 spelling
            # (renamed `allowed_methods` in newer urllib3) -- confirm the
            # pinned urllib3 version still accepts it.
            retry_strategy = Retry(
                total=5,
                backoff_factor=2,
                status_forcelist=[404, 429, 500, 502, 503, 504],
                method_whitelist=False,
            )
            adapter = HTTPAdapter(max_retries=retry_strategy)
            session.mount("http://", adapter)
            session.mount("https://", adapter)
            resp = session.get(link, stream=True)

            # Stream the response body to disk (or to the caller's file-like
            # object) in 1 KiB chunks, optionally driving a click progressbar.
            with (open(path, "wb") if path else file_obj) as f_out:
                if progressbar:
                    progress_label = os.path.basename(path) if path else self.filename
                    with click.progressbar(length=self.size, label=progress_label) as bar:
                        for data in resp.iter_content(chunk_size=1024):
                            bar.update(len(data))
                            f_out.write(data)
                else:
                    for data in resp.iter_content(chunk_size=1024):
                        f_out.write(data)
        except KeyboardInterrupt:
            # Don't leave a half-written file behind when the user aborts.
            if path:
                os.remove(path)
            raise
        except requests.exceptions.HTTPError as exc:
            # Map the common HTTP failure codes onto friendlier messages.
            if exc.response.status_code == 401:
                raise OneCodexException("You must be logged in to download files.")
            elif exc.response.status_code == 402:
                raise OneCodexException(
                    "You must either have a premium platform account or be in "
                    "a notebook environment to download files."
                )
            elif exc.response.status_code == 403:
                raise OneCodexException("You are not authorized to download this file.")
            else:
                raise OneCodexException(
                    "Download failed with an HTTP status code {}.".format(exc.response.status_code)
                )

        return path
| 36.830986 | 99 | 0.589038 | 4,991 | 0.636201 | 0 | 0 | 0 | 0 | 0 | 0 | 2,079 | 0.26501 |
e275623fdee4109dc56435c825bd183c1bd1b9da | 932 | py | Python | aoc-2021/day-11/day_11.py | bsamseth/advent-of-code-2018 | bdd3969e61fac3f1543c20983260aa9fda912e99 | [
"MIT"
] | 1 | 2021-01-03T17:09:13.000Z | 2021-01-03T17:09:13.000Z | aoc-2021/day-11/day_11.py | bsamseth/advent-of-code-2018 | bdd3969e61fac3f1543c20983260aa9fda912e99 | [
"MIT"
] | 3 | 2021-06-01T23:09:26.000Z | 2022-03-12T01:08:40.000Z | aoc-2021/day-11/day_11.py | bsamseth/advent-of-code | 65d40548057a86b6fda37aec8e7d5d473d627124 | [
"MIT"
] | null | null | null | from itertools import count, product
import numpy as np
grid = np.genfromtxt("input.txt", delimiter=1, dtype=int)
def propagate_flash(grid, i, j, flash_mask):
    """Flash cell (i, j): bump un-flashed neighbours, recursing on chain flashes."""
    flash_mask[i, j] = 1
    rows, cols = grid.shape
    for di, dj in product(range(-1, 2), range(-1, 2)):
        ni, nj = i + di, j + dj
        if not (0 <= ni < rows and 0 <= nj < cols):
            continue
        if flash_mask[ni, nj]:
            continue  # already flashed this step; its energy stays untouched
        grid[ni, nj] += 1
        if grid[ni, nj] > 9:
            propagate_flash(grid, ni, nj, flash_mask)
flash_count = 0
for step in count(1):
    # Fresh all-False mask for this step, then every cell gains 1 energy.
    flash_mask = grid != grid
    grid += 1
    # Any cell pushed above 9 flashes and propagates energy outwards.
    for i, j in np.ndindex(*grid.shape):
        if not flash_mask[i, j] and grid[i, j] > 9:
            propagate_flash(grid, i, j, flash_mask)
    # Flashed cells reset to energy 0.
    grid[flash_mask] = 0
    if step <= 100:
        # Part 1: total flashes during the first 100 steps.
        flash_count += np.sum(flash_mask)
    elif flash_mask.all():
        # Part 2: first step (checked only after step 100) where every
        # cell flashes simultaneously.
        break

print("Part 1:", flash_count)
print("Part 2:", step)
| 27.411765 | 71 | 0.553648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.031116 |
e27585ce7f82ce155f8b78f9a6d5c4a5e52b8168 | 122 | py | Python | notebooks/_solutions/05-spatial-operations-overlays11.py | jorisvandenbossche/DS-python-geospatial | 893a12edc5c203a75815f6dcb5f1e18c577c8cd5 | [
"BSD-3-Clause"
] | 58 | 2020-10-09T10:10:59.000Z | 2022-03-07T14:58:07.000Z | notebooks/_solutions/05-spatial-operations-overlays11.py | amitkb3/DS-python-geospatial | 5f156ebff67e06d59b2a7ef446d1fed746ce0650 | [
"BSD-3-Clause"
] | 24 | 2020-09-30T19:57:14.000Z | 2021-10-05T07:21:09.000Z | notebooks/_solutions/05-spatial-operations-overlays11.py | amitkb3/DS-python-geospatial | 5f156ebff67e06d59b2a7ef446d1fed746ce0650 | [
"BSD-3-Clause"
] | 19 | 2020-10-05T09:32:18.000Z | 2022-03-20T00:09:14.000Z | # Calculate the intersection of the land use polygons with Muette
land_use_muette = land_use.geometry.intersection(muette) | 61 | 65 | 0.844262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.532787 |
e275d33de5c822230fb24286db48967204245526 | 7,754 | py | Python | lib/cbutils/misc.py | civicboom/civicboom | 239cb9c2f1e8afa42ea2736c171e6043b04a7ff0 | [
"MIT"
] | null | null | null | lib/cbutils/misc.py | civicboom/civicboom | 239cb9c2f1e8afa42ea2736c171e6043b04a7ff0 | [
"MIT"
] | null | null | null | lib/cbutils/misc.py | civicboom/civicboom | 239cb9c2f1e8afa42ea2736c171e6043b04a7ff0 | [
"MIT"
] | null | null | null | """
Low level miscilanious calls
"""
import UserDict
import types
import random
import datetime
import pprint
import re
import unicodedata
import logging
log = logging.getLogger(__name__)
# Module-level override installed by set_now() so automated tests can fake
# the current datetime.
now_override = None


def now():
    """Return the current datetime, honouring any test override.

    A passthrough to ``datetime.datetime.now()`` unless :func:`set_now`
    has installed a fake "now" for automated tests.
    """
    return now_override if now_override else datetime.datetime.now()
def set_now(new_now_override=None):
    """Install (or clear) the datetime returned by :func:`now`.

    Any value that is not a ``datetime.datetime`` instance clears the override.
    """
    global now_override
    if isinstance(new_now_override, datetime.datetime):
        now_override = new_now_override
    else:
        now_override = None
def timedelta_from_str(string_args):
    """
    Convert a string containing comma separted timedelta kwargs into a timedeta object

    >>> timedelta_from_str( "hours=10" ) == datetime.timedelta( hours=10)
    True
    >>> timedelta_from_str("days = 10, hours = 10" ) == datetime.timedelta(days=10, hours=10)
    True
    >>> timedelta_from_str(datetime.timedelta(minutes=1)) == datetime.timedelta(minutes=1 )
    True
    """
    if not isinstance(string_args, basestring):
        # Non-strings (e.g. an existing timedelta) pass straight through.
        return string_args
    kwargs = {}
    for kwarg in string_args.split(','):
        parts = kwarg.split('=')
        kwargs[parts[0].strip()] = int(parts[1].strip())
    d = datetime.timedelta(**kwargs)
    # Only positive durations are meaningful here; anything else becomes None.
    return d if d.total_seconds() > 0 else None
def timedelta_to_str(t):
    """
    Convert a timedelta object to a string representation

    >>> timedelta_to_str(datetime.timedelta( hours=10))
    'hours=10'
    >>> timedelta_to_str(datetime.timedelta(days=5, hours=10)) in ['days=5,hours=10', 'hours=10,days=5']
    True
    >>> timedelta_to_str(datetime.timedelta(minutes=1 ))
    'minutes=1'
    >>> timedelta_to_str(datetime.timedelta(milliseconds=500))
    'milliseconds=500'
    """
    # Work in integer milliseconds so sub-second components come out as whole
    # milliseconds.  The previous version emitted the fractional seconds as
    # e.g. 'milliseconds=0.5', which timedelta_from_str (which uses int())
    # could not parse back.
    remaining = int(round(t.total_seconds() * 1000))
    parts = {}
    for key, div in [('milliseconds', 1000), ('seconds', 60), ('minutes', 60),
                     ('hours', 24), ('days', 7), ('weeks', None)]:
        if div:
            value, remaining = remaining % div, remaining // div
        else:
            # Weeks absorb whatever is left over.
            value, remaining = remaining, 0
        if value:
            parts[key] = value
    return ",".join('='.join((key, str(value))) for key, value in parts.items())
def random_string(length=8):
    """
    Generate a random string of a-z A-Z 0-9
    (Without vowels to stop bad words from being generated!)

    >>> len(random_string())
    8
    >>> len(random_string(10))
    10
    """
    random_symbols = '1234567890bcdfghjklmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ'
    return ''.join(
        random_symbols[random.randint(0, len(random_symbols) - 1)]
        for _ in range(length)
    )
def str_to_int(text, default=0):
    """
    Best-effort int() conversion; returns *default* when conversion fails.

    >>> str_to_int("3")
    3
    >>> str_to_int("moo")
    0
    >>> str_to_int(None)
    0
    >>> str_to_int("cake", default=6)
    6
    """
    try:
        result = int(text)
    except (ValueError, TypeError):
        result = default
    return result
def calculate_age(born):
    """Return the age in whole years of someone born on *born* (a date).

    Based on http://www.fabent.co.uk/blog/2007/08/04/calculating-age-in-python/
    A 29-February birthday in a non-leap year is treated as 28 February.

    >>> calculate_age(datetime.date.today())
    0
    """
    today = datetime.date.today()
    try:
        birthday_this_year = datetime.date(today.year, born.month, born.day)
    except ValueError:
        # Raised when the person was born on 29 February and the current
        # year is not a leap year.
        birthday_this_year = datetime.date(today.year, born.month, born.day - 1)
    years = today.year - born.year
    # Not yet had the birthday this year -> one year younger.
    return years - 1 if birthday_this_year > today else years
def update_dict(dict_a, dict_b):
    """
    Because dict.update(d) does not return the new dict

    >>> a = {'a': 1, 'b': 2}
    >>> update_dict(a, {'b': 3, 'c': 3})
    {'a': 1, 'c': 3, 'b': 3}
    """
    # Mutates dict_a in place, then returns it so calls can be chained.
    dict_a.update(dict_b)
    return dict_a
def obj_to_dict(obj, dict_fields):
    """
    Used to convert a python object to a python dict of strings, but only including requested fields
    dict_fields is a dictionary of functions
      if a key is set will a null function - it will check if it is a primitive type
      if a key is set with a function     - that function is used to convert the object to a primitive type
    TODO: currenly does not follow lists or dict, just string dumps .. could be useful in future to recusivly call obj_to_dict

    >>> class a:
    ...     foo = "bar"
    ...     def __unicode__(self):
    ...         raise Exception('asdf')
    ...
    >>> b = a()
    >>> b.c = a()
    >>> obj_to_dict(b, {'foo': None})
    {'foo': u'bar'}
    >>> obj_to_dict(b, {'c': None})
    Traceback (most recent call last):
    ...
    Exception: Object types are not allowed in object dictionaries [c]
    """
    d = {}
    for field_name in dict_fields:
        field_processor = dict_fields[field_name]
        field_value = None
        # No processor: read the attribute directly (missing attrs become '').
        if field_processor == None:
            field_value = getattr(obj,field_name,'')
        # A function processor derives the value from the whole object.
        elif type(field_processor)==types.FunctionType:
            field_value = field_processor(obj)
        if field_value:
            field_value_type = type(field_value)
            # Containers (dicts/iterables) are passed through unchanged.
            if hasattr(field_value,'keys') or hasattr(field_value, '__iter__'):
                pass
            # Numeric/boolean primitives are already JSON-friendly.
            elif field_value_type in [types.IntType, types.FloatType, types.BooleanType]:
                pass
            # Datetimes are rendered in a fixed sortable format.
            elif field_value_type == datetime.datetime:
                field_value = field_value.strftime("%Y-%m-%d %H:%M:%S")
            else:
                # Anything else must be string-convertible, or it is an
                # arbitrary object and therefore rejected.
                try:
                    field_value = unicode(field_value)
                except:
                    raise Exception('Object types are not allowed in object dictionaries [%s]' % (field_name, ))
        d[field_name] = field_value
    return d
def args_to_tuple(*args, **kwargs):
    """
    Capture positional and keyword arguments as an (args, kwargs) pair.

    >>> args_to_tuple()
    ((), {})
    >>> args_to_tuple("hello?")
    (('hello?',), {})
    >>> args_to_tuple("hello", name="dave")
    (('hello',), {'name': 'dave'})
    """
    return args, kwargs
def make_username(title):
    """
    Turn a display name into a url-safe username.

    >>> make_username("Bob's Cake Factory")
    'bob-s-cake-factory'
    """
    # Normalise accented characters to their ascii equivalents before the
    # regex replace, so an accented letter survives as its base letter
    # instead of collapsing to '-'.
    ascii_title = unicodedata.normalize("NFKD", unicode(title)).encode("ascii", "ignore")
    return re.sub(r"[^\w-]", "-", ascii_title.lower()).strip("-")
def debug_type(var):
    """Return '<type>:<repr>' for *var* -- handy in log/debug messages."""
    return "%s:%s" % (type(var), repr(var))
def substring_in(substrings, string_list):
    """
    Return True if any of *substrings* occurs inside any string in *string_list*.

    Think of it as
        is 'bc' in ['abc', 'def']

    >>> substring_in( 'bc' , ['abc','def','ghi'])
    True
    >>> substring_in( 'jkl' , ['abc','def','ghi'])
    False
    >>> substring_in(['zx','hi'], ['abc','def','ghi'])
    True
    >>> substring_in(['zx','yw'], ['abc','def','ghi'])
    False
    """
    if isinstance(substrings, basestring):
        substrings = [substrings]
    if not isinstance(string_list, list) or not isinstance(substrings, list):
        raise TypeError('params mustbe lists')
    return any(
        needle in haystack
        for haystack in string_list
        for needle in substrings
    )
e2770fdbc3011a18fb2c25303fdd40f81c8ec0d1 | 6,421 | py | Python | c_comp/nodes.py | Commodoreprime/Command-Block-Assembly | b54c2afee3ea7bdfddfe619b9b207ce30d160e45 | [
"MIT"
] | 223 | 2017-05-10T18:27:44.000Z | 2022-03-06T22:44:18.000Z | c_comp/nodes.py | Commodoreprime/Command-Block-Assembly | b54c2afee3ea7bdfddfe619b9b207ce30d160e45 | [
"MIT"
] | 25 | 2017-12-07T15:37:37.000Z | 2021-02-05T14:28:59.000Z | c_comp/nodes.py | Commodoreprime/Command-Block-Assembly | b54c2afee3ea7bdfddfe619b9b207ce30d160e45 | [
"MIT"
] | 30 | 2017-12-07T15:16:36.000Z | 2022-03-16T03:29:59.000Z |
class Node:
    """Base class for AST nodes; subclasses declare their fields in ``props``."""

    props = ()

    def __init__(self, **kwargs):
        # Only declared properties may be set; anything else is a caller bug.
        for prop, value in kwargs.items():
            if prop not in self.props:
                raise Exception('Invalid property %r, allowed only: %s' %
                                (prop, self.props))
            self.__dict__[prop] = value
        # Declared-but-unsupplied properties default to None.
        for prop in self.props:
            self.__dict__.setdefault(prop, None)
        self.attrs = {}

    def print_node(self, indent=0, indent_size=4, extra=0):
        """Pretty-print this node (and its children) as an indented tree."""
        pad = ' ' * (indent + indent_size)
        pieces = [self.__class__.__name__, '(\n']
        for prop in self.props:
            rendered = self._print_val(self.__dict__[prop], indent + indent_size,
                                       indent_size, (len(prop) + 3) - indent_size)
            pieces.append(pad + prop + ' = ' + rendered + '\n')
        pieces.append(' ' * (indent + extra) + ')')
        return ''.join(pieces)

    def _print_val(self, val, indent, indent_size, extra=0):
        # Nodes and lists recurse; everything else falls back to str().
        if isinstance(val, Node):
            return val.print_node(indent + indent_size, indent_size, extra)
        if type(val) == list:
            pad = ' ' * (indent + indent_size)
            pieces = ['[\n']
            for item in val:
                pieces.append(pad + self._print_val(item, indent, indent_size) + ',\n')
            pieces.append(' ' * (indent + extra) + ']')
            return ''.join(pieces)
        return str(val)
# --- AST node taxonomy: every node is a Statement, an Expression, or a
# --- declaration-related helper.  `props` lists each node's child fields.
class Statement(Node): pass
class Expression(Node): pass

# Stateless nodes expose a shared singleton via .INSTANCE.
class EmptyStatement(Statement): pass
EmptyStatement.INSTANCE = EmptyStatement()

# Declarations and declarator specifiers.
class FunctionDeclaration(Statement): props = ('type', 'decl', 'body')
class Declaration(Statement): props = ('type', 'init')
class ParamDeclaration(Node): props = ('type', 'decl')
class StructTypeRef(Node): props = ('name',)
class DeclarationSpecifier(Node): props = ('store', 'qual', 'type')
class InitSpec(Node): props = ('decl', 'val')
class DeclaratorSpec(Node): props = ('pointer_depth', 'name_spec')
class ArrayDeclSpec(Node): props = ('name', 'dim')
class FuncDeclSpec(Node): props = ('name', 'params')
class VarArgs(Node): pass
VarArgs.INSTANCE = VarArgs()
class StructSpec(Node): props = ('name', 'decl')
class StructMemberDecl(Node): props = ('spec', 'decl')
class MemberReference(Node): props = ('child', 'idx', 'name')
class TypeName(Node): props = ('type', 'spec')

# Statements.
class LabelledStmt(Statement): props = ('label', 'stmt')
class WhileStmt(Statement): props = ('cond', 'body')
class DoWhileStmt(Statement): props = ('body', 'cond')
class ForStmt(Statement): props = ('init', 'cond', 'after', 'body')
class IfStmt(Statement): props = ('cond', 'true', 'false')
class SwitchStmt(Statement): props = ('expr', 'cases')
class ContinueStmt(Statement): pass
ContinueStmt.INSTANCE = ContinueStmt()
class BreakStmt(Statement): pass
BreakStmt.INSTANCE = BreakStmt()
class ReturnStmt(Statement): props = ('expr',)
class GotoStmt(Statement): props = ('label',)
class CaseStmt(Statement): props = ('choice', 'body')
class SyncStmt(Statement): pass
class ExpressionStmt(Statement): props = ('expr',)

# Expressions.
class SizeofExpr(Expression): props = ('expr',)
class ConditionalExpr(Expression): props = ('cond', 'true', 'false')
class FunctionCallExpr(Expression): props = ('ref', 'args')
class IdentifierExpr(Expression): props = ('val',)
class AssignmentExpr(Expression): props = ('left', 'right')
class AssignmentOperatorExpr(Expression): props = ('left', 'op', 'right')
class UnaryExpr(Expression): props = ('op', 'expr')
class BinaryOperatorExpr(Expression): props = ('left', 'op', 'right')
class IncrementExpr(Expression): props = ('dir', 'post', 'expr')
class MemberAccessExpr(Expression): props = ('expr', 'prop', 'deref')
class ArraySubscriptExpr(Expression): props = ('expr', 'sub')

# Literals.
class Literal(Expression): props = ('val',)
class IntLiteral(Literal): pass
class StringLiteral(Literal): pass

class Pragma(Node): props = ('val',)
class Token:
    """A lexical token: a value paired with one of the ``Token.Type`` categories."""

    class Type:
        IDENTIFIER = 'identifier'
        OPERATOR = 'operator'
        NUMBER = 'number'
        STRING = 'string'

    def __init__(self, val, type=None):
        self.val = val
        # Operators are the common case, so they are the default category.
        self.type = type or Token.Type.OPERATOR

    def __str__(self):
        return 'Token(%r, %s)' % (self.val, self.type)
class Keyword(Token):
    # Maps keyword text -> its singleton Keyword token; populated as each
    # keyword constant below is constructed.
    REGISTRY = {}

    def __init__(self, val):
        # Keywords are lexed as identifiers that match a registered word.
        super().__init__(val, Token.Type.IDENTIFIER)
        Keyword.REGISTRY[val] = self
# --- Singleton token instances: the lexer compares against these rather
# --- than allocating a new Token per occurrence.
Token.EOF = Token('<eof>')

# Punctuation.
Token.OPEN_PAREN = Token('(')
Token.CLOSE_PAREN = Token(')')
Token.OPEN_BRACE = Token('{')
Token.CLOSE_BRACE = Token('}')
Token.OPEN_SQUARE = Token('[')
Token.CLOSE_SQUARE = Token(']')
Token.COMMA = Token(',')
Token.SEMICOLON = Token(';')
Token.QUESTION = Token('?')
Token.COLON = Token(':')
Token.DOT = Token('.')
Token.ARROW = Token('->')
Token.VARARG = Token('...')

# Assignment operators.
Token.OP_ASSIGN = Token('=')
Token.OP_MUL_ASSIGN = Token('*=')
Token.OP_DIV_ASSIGN = Token('/=')
Token.OP_MOD_ASSIGN = Token('%=')
Token.OP_PLUS_ASSIGN = Token('+=')
Token.OP_MINUS_ASSIGN = Token('-=')
Token.OP_LSHIFT_ASSIGN = Token('<<=')
Token.OP_RSHIFT_ASSIGN = Token('>>=')
Token.OP_AND_ASSIGN = Token('&=')
Token.OP_XOR_ASSIGN = Token('^=')
Token.OP_OR_ASSIGN = Token('|=')

# Arithmetic, logical and bitwise operators.
Token.OP_PLUS = Token('+')
Token.OP_PLUS_PLUS = Token('++')
Token.OP_MINUS = Token('-')
Token.OP_MINUS_MINUS = Token('--')
Token.OP_STAR = Token('*')
Token.OP_DIV = Token('/')
Token.OP_MOD = Token('%')
Token.OP_AND = Token('&')
Token.OP_OR = Token('|')
Token.OP_AND_AND = Token('&&')
Token.OP_OR_OR = Token('||')
Token.OP_XOR = Token('^')
Token.OP_NOT = Token('!')
Token.OP_BITNOT = Token('~')
Token.OP_SHIFT_LEFT = Token('<<')
Token.OP_SHIFT_RIGHT = Token('>>')

# Comparison operators.
Token.OP_EQUAL = Token('==')
Token.OP_NOT_EQUAL = Token('!=')
Token.OP_LESS_THAN = Token('<')
Token.OP_LESS_OR_EQUAL = Token('<=')
Token.OP_GREATER_THAN = Token('>')
Token.OP_GREATER_OR_EQUAL = Token('>=')

# Language keywords (each registers itself in Keyword.REGISTRY).
Keyword.DO = Keyword('do')
Keyword.WHILE = Keyword('while')
Keyword.FOR = Keyword('for')
Keyword.IF = Keyword('if')
Keyword.ELSE = Keyword('else')
Keyword.SIZEOF = Keyword('sizeof')
Keyword.SYNC = Keyword('sync')
Keyword.SWITCH = Keyword('switch')
Keyword.CASE = Keyword('case')
Keyword.DEFAULT = Keyword('default')
Keyword.GOTO = Keyword('goto')
Keyword.CONTINUE = Keyword('continue')
Keyword.BREAK = Keyword('break')
Keyword.RETURN = Keyword('return')
Keyword.CONST = Keyword('const')
Keyword.STATIC = Keyword('static')
Keyword.TYPEDEF = Keyword('typedef')
Keyword.STRUCT = Keyword('struct')
| 31.47549 | 86 | 0.634636 | 4,163 | 0.648341 | 0 | 0 | 0 | 0 | 0 | 0 | 895 | 0.139386 |
e277978869ba969473b22353a021df73d2ed4b99 | 1,381 | py | Python | backend/ReceiptProcessor/data_generator.py | shrey-bansal/ABINBEV | 09d0eaca6e7edf1820aa79b88a56d1ed39b6300f | [
"Apache-2.0"
] | 1 | 2020-08-17T01:26:27.000Z | 2020-08-17T01:26:27.000Z | backend/ReceiptProcessor/data_generator.py | shrey-bansal/ABINBEV | 09d0eaca6e7edf1820aa79b88a56d1ed39b6300f | [
"Apache-2.0"
] | 1 | 2020-10-20T01:40:24.000Z | 2020-11-05T17:38:53.000Z | backend/ReceiptProcessor/data_generator.py | shrey-bansal/ABINBEV | 09d0eaca6e7edf1820aa79b88a56d1ed39b6300f | [
"Apache-2.0"
] | 2 | 2021-12-14T16:57:58.000Z | 2021-12-23T11:51:10.000Z | import os
import cv2
from ReceiptGenerator.draw_receipt import create_crnn_sample
NUM_OF_TRAINING_IMAGES = 3000
NUM_OF_TEST_IMAGES = 1000
TEXT_TYPES = ['word', 'word_column', 'word_bracket', 'int', 'float', 'price_left', 'price_right', 'percentage']
# TEXT_TYPES = ['word']
with open('./ReceiptProcessor/training_images/Train/sample.txt', 'w') as input_file:
for type in TEXT_TYPES:
if not os.path.exists('./ReceiptProcessor/training_images/Train/{}'.format(type)):
os.mkdir('./ReceiptProcessor/training_images/Train/{}'.format(type))
for i in range(0, NUM_OF_TRAINING_IMAGES):
img, label = create_crnn_sample(type)
cv2.imwrite('./ReceiptProcessor/training_images/Train/{}/{}.jpg'.format(type, i), img)
input_file.write('{}/{}.jpg {}\n'.format(type, i, label))
with open('./ReceiptProcessor/training_images/Test/sample.txt', 'w') as input_file:
for type in TEXT_TYPES:
if not os.path.exists('./ReceiptProcessor/training_images/Test/{}'.format(type)):
os.mkdir('./ReceiptProcessor/training_images/Test/{}'.format(type))
for i in range(0, NUM_OF_TEST_IMAGES):
img, label = create_crnn_sample(type)
cv2.imwrite('./ReceiptProcessor/training_images/Test/{}/{}.jpg'.format(type, i), img)
input_file.write('{}/{}.jpg {}\n'.format(type, i, label))
| 47.62069 | 111 | 0.672701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 529 | 0.383056 |
e278e8c7930da117f96d8cb6f36e05b643813f57 | 1,487 | py | Python | src/web/settings.py | iwwxiong/fastapi-box | 3be11a6c0451467c9cac09b9a9c59ad509bb834e | [
"MIT"
] | 8 | 2021-02-24T11:28:57.000Z | 2022-02-11T08:58:39.000Z | src/web/settings.py | iwwxiong/fastapi-box | 3be11a6c0451467c9cac09b9a9c59ad509bb834e | [
"MIT"
] | null | null | null | src/web/settings.py | iwwxiong/fastapi-box | 3be11a6c0451467c9cac09b9a9c59ad509bb834e | [
"MIT"
] | null | null | null | import os
from pydantic import BaseSettings
class AppSettings(BaseSettings):
debug: bool = False
time_zone: str = "Asia/Shanghai"
logger_level: str = "INFO"
logger_formatter: str = "%(asctime)s [%(name)s] %(funcName)s[line:%(lineno)d] %(levelname)-7s: %(message)s"
secret_key: str = "1@3$5^7*9)"
class DatabaseSettings(BaseSettings):
_db_port = os.getenv("POSTGRESQL_PORT") or "5432"
_db_password = os.getenv("REDIS_PASSWORD") or "password"
fastapi_uri: str = f"postgresql+asyncpg://postgres:{_db_password}@fastapi-postgresql:{_db_port}/fastapi"
class RedisSettings(BaseSettings):
_redis_port = os.getenv("REDIS_PORT") or "6379"
_redis_password = os.getenv("REDIS_PASSWORD") or "password"
fastapi_redis_uri: str = f"redis://:{_redis_password}@fastapi-redis:{_redis_port}/0?encoding=utf-8"
class DataFileSettings(BaseSettings):
basedir: str = "/app/web/data"
runtimedir: str = "/app/runtimedir"
class Settings(AppSettings, DatabaseSettings, RedisSettings, DataFileSettings):
pass
settings = Settings()
env = os.getenv("FASTAPI_ENV")
print(f"FASTAPI_ENV = {env}")
if env == "development":
from settings_dev import settings as dev_settings
for k, v in dev_settings:
if hasattr(settings, k):
setattr(settings, k, v)
elif env == "test":
from settings_test import settings as test_settings
for k, v in test_settings:
if hasattr(settings, k):
setattr(settings, k, v)
| 29.74 | 111 | 0.696032 | 988 | 0.664425 | 0 | 0 | 0 | 0 | 0 | 0 | 454 | 0.305313 |
e279297fcb080b95532ae07e3e2f6c0eedfa2d13 | 4,867 | py | Python | layint_api/models/alert_events.py | LayeredInsight/layint_api_python | a5c9a5b24098bd823c5102b7ab9e4745432f19b4 | [
"Apache-2.0"
] | null | null | null | layint_api/models/alert_events.py | LayeredInsight/layint_api_python | a5c9a5b24098bd823c5102b7ab9e4745432f19b4 | [
"Apache-2.0"
] | null | null | null | layint_api/models/alert_events.py | LayeredInsight/layint_api_python | a5c9a5b24098bd823c5102b7ab9e4745432f19b4 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Layered Insight Assessment, Compliance, Witness & Control
LI Assessment & Compliance performs static vulnerability analysis, license and package compliance. LI Witness provides deep insight and analytics into containerized applications. Control provides dynamic runtime security and analytics for containerized applications. You can find out more about the Layered Insight Suite at [http://layeredinsight.com](http://layeredinsight.com).
OpenAPI spec version: 0.10
Contact: help@layeredinsight.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class AlertEvents(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'float',
'events': 'float',
'event_i_ds': 'list[str]'
}
attribute_map = {
'id': 'ID',
'events': 'Events',
'event_i_ds': 'EventIDs'
}
def __init__(self, id=None, events=None, event_i_ds=None):
"""
AlertEvents - a model defined in Swagger
"""
self._id = None
self._events = None
self._event_i_ds = None
if id is not None:
self.id = id
if events is not None:
self.events = events
if event_i_ds is not None:
self.event_i_ds = event_i_ds
@property
def id(self):
"""
Gets the id of this AlertEvents.
Position of this event (basically an array key)
:return: The id of this AlertEvents.
:rtype: float
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this AlertEvents.
Position of this event (basically an array key)
:param id: The id of this AlertEvents.
:type: float
"""
self._id = id
@property
def events(self):
"""
Gets the events of this AlertEvents.
Number of events in this time period
:return: The events of this AlertEvents.
:rtype: float
"""
return self._events
@events.setter
def events(self, events):
"""
Sets the events of this AlertEvents.
Number of events in this time period
:param events: The events of this AlertEvents.
:type: float
"""
self._events = events
@property
def event_i_ds(self):
"""
Gets the event_i_ds of this AlertEvents.
12 character internal hexadecimal identifiers for Events in this time period
:return: The event_i_ds of this AlertEvents.
:rtype: list[str]
"""
return self._event_i_ds
@event_i_ds.setter
def event_i_ds(self, event_i_ds):
"""
Sets the event_i_ds of this AlertEvents.
12 character internal hexadecimal identifiers for Events in this time period
:param event_i_ds: The event_i_ds of this AlertEvents.
:type: list[str]
"""
self._event_i_ds = event_i_ds
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, AlertEvents):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 26.741758 | 383 | 0.56729 | 4,189 | 0.860694 | 0 | 0 | 1,643 | 0.33758 | 0 | 0 | 2,678 | 0.550236 |
e27a33ff138d20a46d8f453fc5dd0073093d82d3 | 540 | py | Python | htsworkflow/settings/felcat.py | detrout/htsworkflow | 99d3300e2533d79428ad49aaf10b9429b175da2d | [
"BSD-3-Clause"
] | null | null | null | htsworkflow/settings/felcat.py | detrout/htsworkflow | 99d3300e2533d79428ad49aaf10b9429b175da2d | [
"BSD-3-Clause"
] | 1 | 2018-02-26T18:30:05.000Z | 2018-02-26T18:30:05.000Z | htsworkflow/settings/felcat.py | detrout/htsworkflow | 99d3300e2533d79428ad49aaf10b9429b175da2d | [
"BSD-3-Clause"
] | null | null | null | # configure debugging
import os
from .local import *
DEBUG=True
TEMPLATE_DEBUG = True
INTERNAL_IPS = ('127.0.0.1',)
MIDDLEWARE_CLASSES.extend([
#'debug_toolbar.middleware.DebugToolbarMiddleware',
])
DATABASES = {
'fctracker': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/var/htsworkflow/htsworkflow/fctracker.db',
},
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': 'felcat.caltech.edu',
'USER': 'jumpgate',
'NAME': 'htsworkflow',
}
}
| 19.285714 | 60 | 0.624074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.55 |
e27d1ce66a0fb5eca31113991113207c943becc1 | 4,343 | py | Python | PROJ/LEVY/Barrier_Options/Script_DoubleBarrierOptions.py | mattslezak-shell/PROJ_Option_Pricing_Matlab | 6105bd00ba3471802180c122fdf81e90833a91c4 | [
"MIT"
] | null | null | null | PROJ/LEVY/Barrier_Options/Script_DoubleBarrierOptions.py | mattslezak-shell/PROJ_Option_Pricing_Matlab | 6105bd00ba3471802180c122fdf81e90833a91c4 | [
"MIT"
] | null | null | null | PROJ/LEVY/Barrier_Options/Script_DoubleBarrierOptions.py | mattslezak-shell/PROJ_Option_Pricing_Matlab | 6105bd00ba3471802180c122fdf81e90833a91c4 | [
"MIT"
] | 1 | 2022-01-07T15:31:45.000Z | 2022-01-07T15:31:45.000Z | # Generated with SMOP 0.41-beta
try:
from smop.libsmop import *
except ImportError:
raise ImportError('File compiled with `smop3`, please install `smop3` to run it.') from None
# Script_DoubleBarrierOptions.m
##################################################################
### DOUBLE BARRIER OPTION PRICER
##################################################################
# Descritpion: Script to Price Double Barrier options in Levy Models
# using the PROJ method
# Author: Justin Kirkby
# References: (1) Efficient Option Pricing By Frame Duality with The Fast
# Fourier Transform, SIAM J. Financial Math., 2015
# (2) Robust Barrier Option Pricing by Frame Projection under
# Exponential Levy Dynamics, App. Math. Finance, 2017
##################################################################
folder,name,ext=fileparts(which(mfilename('fullpath')),nargout=3)
# Script_DoubleBarrierOptions.m:13
cd(folder)
addpath('../RN_CHF')
addpath('../Helper_Functions')
##############################################
### Step 1) CHOOSE CONTRACT/GENERAL PARAMETERS
##############################################
S_0=100
# Script_DoubleBarrierOptions.m:21
W=100
# Script_DoubleBarrierOptions.m:22
r=0.05
# Script_DoubleBarrierOptions.m:23
q=0.02
# Script_DoubleBarrierOptions.m:24
T=1
# Script_DoubleBarrierOptions.m:25
call=1
# Script_DoubleBarrierOptions.m:26
L=80
# Script_DoubleBarrierOptions.m:27
U=120
# Script_DoubleBarrierOptions.m:28
M=52
# Script_DoubleBarrierOptions.m:29
##############################################
### Step 2) CHOOSE MODEL PARAMETERS
##############################################
model=1
# Script_DoubleBarrierOptions.m:35
params=cellarray([])
# Script_DoubleBarrierOptions.m:36
if model == 1:
params.sigmaBSM = copy(0.2)
# Script_DoubleBarrierOptions.m:39
else:
if model == 2:
params.C = copy(0.02)
# Script_DoubleBarrierOptions.m:42
params.G = copy(5)
# Script_DoubleBarrierOptions.m:43
params.MM = copy(15)
# Script_DoubleBarrierOptions.m:44
params.Y = copy(1.2)
# Script_DoubleBarrierOptions.m:45
else:
if model == 3:
params.alpha = copy(15)
# Script_DoubleBarrierOptions.m:48
params.beta = copy(- 5)
# Script_DoubleBarrierOptions.m:49
params.delta = copy(0.5)
# Script_DoubleBarrierOptions.m:50
else:
if model == 4:
params.sigma = copy(0.12)
# Script_DoubleBarrierOptions.m:53
params.lam = copy(0.4)
# Script_DoubleBarrierOptions.m:54
params.muj = copy(- 0.12)
# Script_DoubleBarrierOptions.m:55
params.sigmaj = copy(0.18)
# Script_DoubleBarrierOptions.m:56
else:
if model == 5:
params.sigma = copy(0.15)
# Script_DoubleBarrierOptions.m:59
params.lam = copy(3)
# Script_DoubleBarrierOptions.m:60
params.p_up = copy(0.2)
# Script_DoubleBarrierOptions.m:61
params.eta1 = copy(25)
# Script_DoubleBarrierOptions.m:62
params.eta2 = copy(10)
# Script_DoubleBarrierOptions.m:63
##############################################
### Step 3) CHOOSE PROJ PARAMETERS
##############################################
logN=13
# Script_DoubleBarrierOptions.m:71
L1=8
# Script_DoubleBarrierOptions.m:72
##############################################
### PRICE
##############################################
### Note: rnCHF is the risk netural CHF, c1,c2,c4 are the cumulants
modelInput=getModelInput(model,T / M,r,q,params)
# Script_DoubleBarrierOptions.m:78
alpha=getTruncationAlpha(T,L1,modelInput,model)
# Script_DoubleBarrierOptions.m:80
N=2 ** logN
# Script_DoubleBarrierOptions.m:81
tic
price=PROJ_Double_Barrier(N,alpha,call,L,U,S_0,W,M,T,r,modelInput.rnCHF)
# Script_DoubleBarrierOptions.m:84
toc
fprintf('%.8f \n',price) | 32.901515 | 97 | 0.53143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,630 | 0.605572 |
e27f7aa8f3b09c4a3cfeb2e39b74ae35d8c6e4d4 | 870 | py | Python | instances/migrations/0001_initial.py | glzjin/webvirtcloud | ecaf11e02aeb57654257ed502d3da6fd8405f21b | [
"Apache-2.0"
] | 1 | 2020-11-06T00:50:06.000Z | 2020-11-06T00:50:06.000Z | instances/migrations/0001_initial.py | qmutz/webvirtcloud | 159e06221af435700047a8e5ababe758a12d7579 | [
"Apache-2.0"
] | null | null | null | instances/migrations/0001_initial.py | qmutz/webvirtcloud | 159e06221af435700047a8e5ababe758a12d7579 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.10 on 2020-01-28 07:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('computes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Instance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
('uuid', models.CharField(max_length=36)),
('is_template', models.BooleanField(default=False)),
('created', models.DateField(auto_now_add=True)),
('compute', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='computes.Compute')),
],
),
]
| 31.071429 | 115 | 0.595402 | 743 | 0.854023 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.173563 |
e28018dbe4a52211a237799c1957a41d91902266 | 1,145 | py | Python | GEOS_Util/coupled_diagnostics/g5lib/plots.py | GEOS-ESM/GMAO_Shared | 022af23abbc7883891006b57379be96d9a50df23 | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-02-01T17:36:53.000Z | 2020-02-01T17:36:53.000Z | GEOS_Util/coupled_diagnostics/g5lib/plots.py | GEOS-ESM/GMAO_Shared | 022af23abbc7883891006b57379be96d9a50df23 | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 105 | 2019-07-08T19:27:23.000Z | 2022-03-22T02:12:16.000Z | GEOS_Util/coupled_diagnostics/g5lib/plots.py | GEOS-ESM/GMAO_Shared | 022af23abbc7883891006b57379be96d9a50df23 | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 10 | 2019-07-05T18:00:44.000Z | 2022-03-11T16:26:29.000Z | '''
Different utils used by plotters.
'''
import mpl_toolkits.basemap as bm
import matplotlib.pyplot as pl
from matplotlib import colors, cm
import scipy as sp
# Make spectral plot
def my_psd(x):
P,f=pl.psd(x); pl.clf()
T=2./f; ind= T>=12.
pl.plot(T[ind]/12,P[ind]);
ax=pl.gca();
ax.set_xscale('log');
#ax.set_xlim(1,20); ax.set_ylim(0,20);
#tick=[1,2,3,5,10,20]; tickl=[str(i) for i in tick]
#ax.set_xticks(tick); ax.set_xticklabels(tickl);
ax.set_xlabel('Period, years'); ax.set_ylabel('Power');
ax.set_title('Power spectral density');
# Contour plot
def contour(x,y,z,func=pl.contourf,black=None,**opts):
'''
Adds a "black" functionality to default contour function
'''
if black!=None:
clevs=opts.get('levels',None)
if clevs != None:
min=clevs[0]; max=clevs[-1]
else:
min=sp.ma.minimum(z); max=sp.ma.maximum(z)
norm=opts.get('norm',colors.normalize(min,max));
cmap=opts.get('cmap',MyCmap(cm.get_cmap(),black=norm(black)))
opts['norm']=norm; opts['cmap']=cmap
cs=func(x,y,z,**opts)
return cs
| 27.926829 | 69 | 0.609607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 368 | 0.321397 |
e28084f908dc1fcc33c7dd3386b97c41eee47a44 | 23,708 | py | Python | lib/TrainingUtils.py | bo9zbo9z/MachineLearning | eae74837e1c98c44b9a6b2c1c16c019dd1fba069 | [
"MIT"
] | null | null | null | lib/TrainingUtils.py | bo9zbo9z/MachineLearning | eae74837e1c98c44b9a6b2c1c16c019dd1fba069 | [
"MIT"
] | null | null | null | lib/TrainingUtils.py | bo9zbo9z/MachineLearning | eae74837e1c98c44b9a6b2c1c16c019dd1fba069 | [
"MIT"
] | 1 | 2020-06-25T01:48:18.000Z | 2020-06-25T01:48:18.000Z | """
These are various mehtods that can be used in the training process. Some
return values, some display images. Best used in Jupyter Notebooks.
"""
from __future__ import division, print_function, absolute_import
# Use one of these based on the version of skimage loaded
from skimage.util import montage
# from skimage.util.montage import montage2d as montage
import os
from random import sample
import numpy as np
import csv
import matplotlib.pyplot as plt
import itertools
from sklearn.metrics import confusion_matrix, classification_report, \
accuracy_score
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.models import Model
from tqdm import tqdm
##############################################################
class GlobalParms(object):
    """
    Class that contains global variables. In other notebooks you will see
    global vars as upper case variables, dictionaries or classes. I picked
    a modified class approach because I felt it gave the best balance between
    structure and flexibility.

    These are all possible global vars, most notebooks will only use a subset.
    Unknown keyword arguments are silently ignored; only keys that appear in
    keys_and_defaults can be overridden from the constructor.
    """
    def __init__(self, **kwargs):
        """Store configuration defaults, apply overrides from **kwargs,
        then derive the computed settings (scaled dims, paths, etc.)."""
        self.keys_and_defaults = {
            "MODEL_NAME": "",  # if you leave .h5 off, puts into a subdirectory
            "ROOT_PATH": "",  # Location of the data for storing any data or files
            "TRAIN_DIR": "",  # Subdirectory in the Root for Training files
            "TEST_DIR": "",  # Optional subdirectory in Root for Testing file
            "SUBMISSION_PATH": None,  # Optional subdirectory for Contest files
            "MODEL_PATH": None,  # Optional, subdirectory for saving/loading model
            "TRAIN_PATH": None,  # Subdirectory in the Root for Training files
            "TEST_PATH": None,  # Optional subdirectory in Root for Testing file
            "SMALL_RUN": False,  # Optional, run size will be reduced
            "NUM_CLASSES": 0,  # Number of classes
            "CLASS_NAMES": [],  # list of class names
            "IMAGE_ROWS": 0,  # Row size of the image
            "IMAGE_COLS": 0,  # Col size of the image
            "IMAGE_CHANNELS": 0,  # Num of Channels, 1 for Greyscale, 3 for color
            "BATCH_SIZE": 0,  # Number of images in each batch
            "EPOCS": 0,  # Max number of training EPOCS
            "ROW_SCALE_FACTOR": 1,  # Optional, allows scaling of an image.
            "COL_SCALE_FACTOR": 1,  # Optional, allows scaling of an image.
            "IMAGE_EXT": ".jpg",  # Extent of the image file_ext
            # Optional, default is np.float64, reduce memory by using np.float32
            # or np.float16
            "IMAGE_DTYPE": np.float32,
            # Optional, change default if needed, can save memory space.
            # Builtin int is used here: np.int was a deprecated alias for it
            # and was removed in NumPy 1.24 (would raise AttributeError).
            "Y_DTYPE": int,
            "LOAD_MODEL": False,  # Optional, If you want to load a saved model
            "SUBMISSION": "submission.csv",  # Optional, Mainly used for Kaggle
            "METRICS": ['accuracy'],  # ['categorical_accuracy'], ['accuracy']
            "FINAL_ACTIVATION": 'sigmoid',  # sigmoid, softmax
            "LOSS": ""  # 'binary_crossentropy', 'categorical_crossentropy'
            }

        self.__dict__.update(self.keys_and_defaults)
        # Only accept overrides for known keys; everything else is ignored.
        self.__dict__.update((k, v) for k, v in kwargs.items()
                             if k in self.keys_and_defaults)

        # Automatically reduce the training parms, change as needed
        if self.__dict__["SMALL_RUN"]:
            self.__dict__["BATCH_SIZE"] = 1
            self.__dict__["EPOCS"] = 2
            self.__dict__["ROW_SCALE_FACTOR"] = 1
            self.__dict__["COL_SCALE_FACTOR"] = 1

        # Use configuration items to create real ones.
        # int(...) replaces np.int(...): identical result, but still works on
        # NumPy >= 1.24 where the np.int alias was removed.
        self.__dict__["SCALED_ROW_DIM"] = \
            int(self.__dict__["IMAGE_ROWS"] /
                self.__dict__["ROW_SCALE_FACTOR"])
        self.__dict__["SCALED_COL_DIM"] = \
            int(self.__dict__["IMAGE_COLS"] /
                self.__dict__["COL_SCALE_FACTOR"])

        if self.__dict__["TRAIN_PATH"] is None:  # Not passed, so set it
            self.__dict__["TRAIN_PATH"] = \
                os.path.join(self.__dict__["ROOT_PATH"],
                             self.__dict__["TRAIN_DIR"])

        if self.__dict__["TEST_PATH"] is None:  # Not passed, so set it
            self.__dict__["TEST_PATH"] = \
                os.path.join(self.__dict__["ROOT_PATH"],
                             self.__dict__["TEST_DIR"])

        if self.__dict__["SUBMISSION_PATH"] is None:  # Not passed, so set
            self.__dict__["SUBMISSION_PATH"] = \
                os.path.join(self.__dict__["ROOT_PATH"],
                             self.__dict__["SUBMISSION"])
        else:
            self.__dict__["SUBMISSION_PATH"] = \
                os.path.join(self.__dict__["SUBMISSION_PATH"],
                             self.__dict__["SUBMISSION"])

        if self.__dict__["MODEL_PATH"] is None:  # Not passed, so set it
            self.__dict__["MODEL_PATH"] = \
                os.path.join(self.__dict__["ROOT_PATH"],
                             self.__dict__["MODEL_NAME"])
        else:
            self.__dict__["MODEL_PATH"] = \
                os.path.join(self.__dict__["MODEL_PATH"],
                             self.__dict__["MODEL_NAME"])

        # (rows, cols, channels) tuple used when building model input shapes.
        self.__dict__["IMAGE_DIM"] = \
            (self.__dict__["SCALED_ROW_DIM"],
             self.__dict__["SCALED_COL_DIM"],
             self.__dict__["IMAGE_CHANNELS"])

        if self.__dict__["IMAGE_CHANNELS"] == 1:
            self.__dict__["COLOR_MODE"] = "grayscale"
        else:
            self.__dict__["COLOR_MODE"] = "rgb"

    def set_train_path(self, train_path):
        """Override TRAIN_PATH after construction."""
        self.__dict__["TRAIN_PATH"] = train_path

    def set_class_names(self, class_name_list):
        """Replace CLASS_NAMES.

        Raises:
            ValueError: if the list length does not equal NUM_CLASSES.
        """
        self.__dict__["CLASS_NAMES"] = class_name_list
        if self.__dict__["NUM_CLASSES"] != \
                len(self.__dict__["CLASS_NAMES"]):
            raise ValueError("ERROR number of classses do not match, Classes: "
                             + str(self.__dict__["NUM_CLASSES"])
                             + " Class List: "
                             + str(self.__dict__["CLASS_NAMES"]))

    def print_contents(self):
        """Dump the whole configuration dict on one line."""
        print(self.__dict__)

    def print_key_value(self):
        """Print each configuration key and value on its own line."""
        for key, value in self.__dict__.items():
            print(key, ":", value)
##############################################################
def load_file_names_Util(file_path,
                         image_ext,
                         full_file_path=True):
    """
    Return a list of file names from a directory (non-recursive).

    Args:
        file_path : directory to scan
        image_ext : file extension to filter on, e.g. ".jpg"
        full_file_path : True -> return fully qualified paths,
                         False -> return bare file names
    Returns:
        file_list : list of matching file names
    """
    file_list = []
    for fn in os.listdir(file_path):
        if fn.endswith(image_ext):
            if full_file_path:
                # os.listdir() yields bare names only, so the full path must
                # be built explicitly (the original appended the bare name
                # here, so both branches returned the same thing).
                file_list.append(os.path.join(file_path, fn))
            else:
                # fn is already a bare name; the original's
                # os.path.split(fn)[1] was a no-op.
                file_list.append(fn)
    return file_list
##############################################################
def load_file_names_labeled_subdir_Util(file_path,
                                        file_ext,
                                        override_dirs=None,
                                        max_dir_files=1000000):
    """
    Get all subdirectories of file_path. Each represents a label.

    Args:
        file_path : path containing one subdirectory per label
        file_ext : file extension to filter on, e.g. ".jpg"
        override_dirs : if given, only these subdirectories are loaded
        max_dir_files : maximum number of files to keep per directory
            (a random sample is taken when a directory holds more). Can
            be set small for quick test runs.
    Returns:
        file_list : list of fully qualified file names
        directories : sorted list of directory (label) names processed
    """
    file_list = []
    directories = [d for d in os.listdir(file_path)
                   if os.path.isdir(os.path.join(file_path, d))]
    if len(directories) == 0:
        print("Error with path, no subdirectories: ", file_path)
    else:
        if override_dirs is not None:
            directories = override_dirs
        directories = sorted(directories)
        # Loop through the label directories and collect the data in a list
        for subdir in directories:
            label_dir = os.path.join(file_path, subdir)
            print("loading subdir ", subdir, " ", label_dir)
            # Filter macOS ".DS_Store" artifacts *before* sampling; the
            # original filtered after sample(), so a capped directory could
            # silently return fewer than max_dir_files usable files.
            file_names = [os.path.join(label_dir, f)
                          for f in os.listdir(label_dir)
                          if f.endswith(file_ext) and ".DS_Store" not in f]
            if len(file_names) > max_dir_files:
                file_names_random = sample(file_names, max_dir_files)
                print("Reducing files to ", str(max_dir_files), ": ",
                      subdir, " Actual ", len(file_names))
            else:
                file_names_random = file_names
                print("Adding ", subdir, len(file_names))
            file_list.extend(file_names_random)
    return file_list, directories
##############################################################
def string2image(string, shape=(96, 96)):
    """Convert a whitespace-separated string of numbers to a numpy array
    of the given shape."""
    pixels = np.fromiter((int(tok) for tok in string.split()), dtype=int)
    return pixels.reshape(shape)
##############################################################
def file_exists(file_id):
    """Return True if *file_id* names an existing regular file."""
    return os.path.isfile(file_id)
##############################################################
def save_list(list_name, file_name):
    """Save a list to *file_name* as a single fully-quoted CSV row.

    Args:
        list_name : list of values to write (one CSV row)
        file_name : destination path (overwritten if it exists)
    """
    # newline='' is required by the csv module when writing files; without
    # it Windows output gets an extra blank line after every row.
    with open(file_name, 'w', newline='') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(list_name)
##############################################################
def predictions_using_dataset(model_actual,
                              dataset,
                              steps,
                              batch_size,
                              create_bad_results_list=False):
    """
    Run predictions over a batched dataset and collect results.

    Iterates `steps` batches from `dataset`, predicts each image one at a
    time with `model_actual`, and accumulates actual labels, predicted
    labels and the raw probability vectors. Prints a good/bad summary at
    the end.

    Args:
        model_actual : trained Keras model to use for predictions
        dataset : batched dataset yielding (image_batch, label_batch)
            pairs; assumed to be a tf.data.Dataset (it must support
            .take(steps)) -- TODO confirm
        steps : number of batches to process
        batch_size : number of images in each batch; assumed to match the
            dataset's actual batch size (a smaller final batch would make
            image_batch[j] fail) -- verify against caller
        create_bad_results_list : bool, default False. Turns on/off
            the collection of mispredicted samples.
    Returns:
        actual_labels : list of actual labels
        predict_labels : list of predicted labels (argmax indices)
        predict_probabilities : list of predicted probability arrays
        bad_results : list of [actual_label], [predict_label],
            probability array, image -- only populated when
            create_bad_results_list is True
    """
    bad_cnt = 0.0
    good_cnt = 0.0
    total_cnt = 0
    actual_labels = []
    predict_labels = []
    predict_probabilities = []
    bad_results = []
    for image_batch, label_batch in tqdm(dataset.take(steps)):
        for j in range(batch_size):
            image = image_batch[j]
            label = label_batch[j]
            total_cnt += 1
            # if a single label, then use it, otherwise find argmax()
            if label.shape[0] == 1:
                actual_label = label
            else:
                actual_label = np.argmax(label)

            # predict() expects a batch, so add a leading batch axis of 1.
            image = np.expand_dims(image, axis=0)
            # image = tf.reshape(image, (1, *image.shape))
            predict_probabilities_tmp = model_actual.predict(image)[0]
            predict_label = np.argmax(predict_probabilities_tmp)

            actual_labels.append(actual_label)
            predict_labels.append(predict_label)
            predict_probabilities.append(predict_probabilities_tmp)

            correct_flag = actual_label == predict_label
            if correct_flag:
                good_cnt = good_cnt + 1
            else:
                bad_cnt = bad_cnt + 1
                # Only mispredicted samples are kept; the batch axis added
                # above is squeezed back out before storing the image.
                if create_bad_results_list:
                    bad_results.append([[actual_label],
                                        [predict_label],
                                        predict_probabilities_tmp,
                                        np.squeeze(image)])

    print(" ")
    # NOTE(review): raises ZeroDivisionError when no batches are processed
    # (steps == 0 or empty dataset).
    print("total: ", total_cnt, " Good: ", good_cnt, " Bad: ",
          bad_cnt, " percent good: ", str(good_cnt/total_cnt))

    return actual_labels, predict_labels, predict_probabilities, \
        bad_results
##############################################################
def plot_results(arr1,
                 arr2,
                 labels,
                 x_label,
                 y_label,
                 title,
                 bar_labels,
                 num_labels):
    """Render the grouped bar chart used by display_prediction_results.

    Args:
        arr1, arr2 : per-class counts for the first/second bar series
        labels : tick labels, one per class
        x_label, y_label, title : axis and figure text
        bar_labels : legend labels for the two series
        num_labels : number of classes (length of arr1/arr2)
    """
    plt.figure(figsize=(18, 4))
    positions = np.arange(num_labels)
    width = 0.35
    alpha = 0.8

    plt.bar(positions, arr1, width,
            alpha=alpha,
            color='g',
            label=bar_labels[0])
    plt.bar(positions + width, arr2, width,
            alpha=alpha,
            color='r',
            label=bar_labels[1])

    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(title)
    # Rotate tick labels only when they are longer than one character.
    if len(labels[0]) == 1:
        plt.xticks(positions + width, labels, fontsize=12)
    else:
        plt.xticks(positions + width, labels, rotation=90, fontsize=12)
    plt.legend()
def display_prediction_results(file_labels,
                               pred_labels,
                               pred_probabilities,
                               num_classes,
                               class_names_list):
    """
    Graph original vs. predicted results. Two charts are shown: good/bad
    counts per class, and good/bad counts bucketed by prediction
    confidence.

    Args:
        file_labels : original labels
        pred_labels : predicted labels
        pred_probabilities : predicted probability arrays
        num_classes : number of classes
        class_names_list : list of class names
    Returns:
        nothing
    """
    total_cnt = len(pred_probabilities)
    good_cnt = 0.0
    bad_cnt = 0.0
    good_by_class = np.zeros(num_classes)   # correct predictions per class
    bad_by_class = np.zeros(num_classes)    # wrong predictions per class
    good_by_conf = np.zeros(11)             # correct, bucketed by confidence
    bad_by_conf = np.zeros(11)              # wrong, bucketed by confidence

    for actual, predicted, probs in zip(file_labels, pred_labels,
                                        pred_probabilities):
        # Confidence bucket 0-10 (probability 1.0 lands in bucket 10).
        bucket = int(probs[predicted] * 10)
        if actual == predicted:
            good_cnt = good_cnt + 1
            good_by_class[predicted] = good_by_class[predicted] + 1
            good_by_conf[bucket] = good_by_conf[bucket] + 1
        else:
            bad_cnt = bad_cnt + 1
            bad_by_class[predicted] = bad_by_class[predicted] + 1
            bad_by_conf[bucket] = bad_by_conf[bucket] + 1

    print("total: ", total_cnt, " Good: ", good_cnt, " Bad: ", bad_cnt,
          " percent good: ", str(good_cnt/total_cnt))

    plot_results(good_by_class, bad_by_class, class_names_list,
                 "Classes", "Count", "Prediction Counts by Class",
                 ["Good", "Bad"], num_classes)

    conf_labels = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
    plot_results(good_by_conf, bad_by_conf, conf_labels,
                 "Segments", "Count", "Predictions by Segments",
                 ["Good", "Bad"], 11)
##############################################################
def plot_confusion_matrix(cm,
                          classes,
                          normalize=False,
                          cmap=plt.cm.Blues):
    """
    Plot a confusion matrix as an annotated heat map.

    Args:
        cm : confusion matrix (2-D array of counts)
        classes : tick labels, one per class
        normalize : if True, show row-normalized fractions instead of counts
        cmap : matplotlib colormap for the heat map
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        title = 'Normalized confusion matrix'
    else:
        title = 'Confusion matrix'

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    cell_fmt = '.2f' if normalize else 'd'
    threshold = cm.max() / 2.
    # Annotate every cell; switch to white text on dark backgrounds.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > threshold else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
def show_confusion_matrix(labels,
                          predict_labels,
                          class_names,
                          show_graph=True):
    """
    Show accuracy measurements: overall accuracy, a per-class
    classification report, and (optionally) a confusion-matrix plot.

    Args:
        labels : actual labels
        predict_labels : predicted labels
        class_names : list of class names
        show_graph : set False to skip the plot (useful when the number
            of classes is large)
    Returns:
        nothing
    """
    # Accuracy score
    print(f"Accuracy : {accuracy_score(labels, predict_labels)}")
    print("")

    # Classification report
    print("Classification Report")
    report = classification_report(np.array(labels),
                                   np.array(predict_labels), digits=5)
    print(report)

    if show_graph:
        # Plot confusion matrix
        cnf_matrix = confusion_matrix(labels, predict_labels)
        print(cnf_matrix)
        plot_confusion_matrix(cnf_matrix, classes=class_names)
##############################################################
# Displays the Training and Validation files in a montage format
def batch_montage_display_using_generator(image_batch, img_channels):
    """
    Display a batch of images (already loaded as a 4-D array) as one
    montage. Useful to sanity-check the output of a generator.

    Args:
        image_batch : batch of images, shape (batch, rows, cols, channels)
        img_channels : number of channels (1 = grayscale)
    Returns:
        nothing
    """
    grayscale = img_channels == 1
    if grayscale:
        # Drop the trailing channel axis so montage sees 2-D images.
        image_batch = np.reshape(image_batch,
                                 (image_batch.shape[0],
                                  image_batch.shape[1],
                                  image_batch.shape[2]))

    batch_montage = montage(image_batch, multichannel=not grayscale)

    fig, (ax1) = plt.subplots(1, 1, figsize=(30, 10))
    if grayscale:
        ax1.imshow(batch_montage, cmap="gray")
    else:
        ax1.imshow(batch_montage)
    ax1.set_title('Batch images: '+str(len(image_batch)))
    # Uncomment if you want to save the figure
    # fig.savefig('overview.png')
##############################################################
def image_show_seq_model_layers_BETA(image_path,
                                     model,
                                     image_dim,
                                     layers_num=4,
                                     activation_layer_num=0,
                                     activation_channel_num=9):
    """
    BETA, works but not fully tested. Displays the activation layers_num
    for a model. Model can be fully trained or just initial weights.
    This is a merging of these two articles:
    Orig: https://colab.research.google.com/github/google/eng-edu/blob/master/ml/pc/exercises/image_classification_part1.ipynb#scrollTo=-5tES8rXFjux
    Another example https://github.com/gabrielpierobon/cnnshapes/blob/master/README.md
    Args:
        image_path : fully qualified path to the image to be used
        model : model to use, can be trained or just initialized
        image_dim : dimension of the image (r, c, d)
        layers_num : number of layers to show, should be > 2
        activation_layer_num : number of the activation layer
        activation_channel_num : number of the channel
        (layer and channel are used to display the image)
    Returns:
        nothing
    """
    # Let's define a new Model that will take an image as input, and will
    # output intermediate representations for all layers in the previous model
    # after the first.
    successive_outputs = [layer.output for layer in model.layers[:layers_num]]
    visualization_model = Model(model.input, successive_outputs)
    # Load the input image, grayscale or color depending on image_dim[2],
    # and show it immediately.
    if image_dim[2] == 1:
        img = load_img(image_path,
                       color_mode="grayscale",
                       target_size=(image_dim[0], image_dim[1]))
        cmap = "gray"
        plt.matshow(img, cmap=cmap)
    else:
        img = load_img(image_path, target_size=image_dim)
        cmap = "viridis"
        plt.matshow(img)
    print("Loaded image: ", image_path)
    # Add a leading batch axis: the model expects (1, rows, cols, channels).
    x = img_to_array(img)
    x = x.reshape((1,) + x.shape)
    # Rescale by 1/255
    x /= 255
    # Let's run our image through our network, thus obtaining all
    # intermediate representations for this image.
    successive_feature_maps = visualization_model.predict(x)
    # These are the names of the layers, so can have them as part of our plot
    layer_names = [layer.name for layer in model.layers]
    # Plots the "activation_channel" of the "activation_layer"
    layer_activation = successive_feature_maps[activation_layer_num]
    print("Layer: ", activation_layer_num,
          " Channel: ", activation_channel_num,
          " Shape: ", layer_activation.shape)
    plt.matshow(layer_activation[0, :, :, activation_channel_num], cmap=cmap)
    # Now let's display our representations
    for layer_name, feature_map in zip(layer_names, successive_feature_maps):
        if len(feature_map.shape) == 4:
            # Just do this for the conv / maxpool layers, not the
            # fully-connected layers
            n_features = feature_map.shape[-1]  # number of features in map
            # The feature map has shape (1, size, size, n_features)
            # NOTE(review): the grid below assumes square feature maps
            # (height == width); non-square maps would break the
            # display_grid assignment -- confirm the model only produces
            # square activations.
            size = feature_map.shape[1]
            # We will tile our images in this matrix
            display_grid = np.zeros((size, size * n_features))
            for i in range(n_features):
                # Postprocess the feature to make it visually palatable
                # by standardizing then mapping to the 0-255 byte range.
                x = feature_map[0, :, :, i]
                x -= x.mean()
                # NOTE(review): divides by zero (-> NaN/inf) when the
                # activation channel is constant; consider guarding.
                x /= x.std()
                x *= 64
                x += 128
                x = np.clip(x, 0, 255).astype('uint8')
                # We'll tile each filter into this big horizontal grid
                display_grid[:, i * size:(i + 1) * size] = x
            # Display the grid
            scale = 20. / n_features
            plt.figure(figsize=(scale * n_features, scale))
            plt.title(layer_name)
            plt.grid(False)
            plt.imshow(display_grid, aspect='auto', cmap=cmap)
##############################################################
| 37.101721 | 152 | 0.572718 | 5,525 | 0.233044 | 0 | 0 | 0 | 0 | 0 | 0 | 9,555 | 0.403029 |
e28108106479e557e24126e1a37268ee719ce945 | 1,125 | py | Python | setup.py | shamilbi/lyrebird | 57aa822b456ecbf2a39d40c5c810dd781e3a823e | [
"MIT"
] | null | null | null | setup.py | shamilbi/lyrebird | 57aa822b456ecbf2a39d40c5c810dd781e3a823e | [
"MIT"
] | null | null | null | setup.py | shamilbi/lyrebird | 57aa822b456ecbf2a39d40c5c810dd781e3a823e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Lyrebird Voice Changer
Simple and powerful voice changer for Linux, written in GTK 3
(c) Charlotte 2020
"""
import sys
import re
from setuptools import setup
# Extract the package version from app/__init__.py without importing the
# package (avoids pulling in GTK at install time).
version_regex = r'__version__ = ["\']([^"\']*)["\']'
with open('app/__init__.py', 'r') as f:
    text = f.read()
    match = re.search(version_regex, text)
    if match:
        VERSION = match.group(1)
    else:
        raise RuntimeError("No version number found!")
# Runtime dependencies come straight from requirements.txt; lines starting
# with '#' are comments and are skipped.
with open("requirements.txt") as f:
    required = [l for l in f.read().splitlines() if not l.startswith("#")]
# Keyword arguments for setuptools.setup().
extra_options = dict(
    name = 'Lyrebird',
    version=VERSION,
    author = 'Charlotte',
    # author_email = '',
    url = 'https://github.com/charpointer/lyrebird',
    description = 'Simple and powerful voice changer for Linux, written in GTK 3',
    download_url = 'https://github.com/charpointer/lyrebird/releases',
    license = 'MIT License',
    install_requires=required,
    entry_points = {
        'console_scripts': [
            'lyrebird = app.__main__:main']},
    packages = ['app',
                'app.core'],
)
setup(**extra_options)
| 26.162791 | 82 | 0.635556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 508 | 0.451556 |
e283e6e707ad35c8a4b77ae151e15d9f613278be | 1,617 | py | Python | silkpy/symbolic/curve/transform.py | jiaxin1996/silkpy | 7720d47b33b731d9e11e67d99c8574514b8f177b | [
"MIT"
] | null | null | null | silkpy/symbolic/curve/transform.py | jiaxin1996/silkpy | 7720d47b33b731d9e11e67d99c8574514b8f177b | [
"MIT"
] | null | null | null | silkpy/symbolic/curve/transform.py | jiaxin1996/silkpy | 7720d47b33b731d9e11e67d99c8574514b8f177b | [
"MIT"
] | null | null | null | from .curve import ParametricCurve as _ParametricCurve
from sympy import Symbol as _Symbol
def curve_normalization(
    other:_ParametricCurve, 
    new_var=_Symbol('s', real=True)):
    """Re-parametrize a curve by its arc length.

    Integrates |dr/dt| to obtain s(t), symbolically inverts it to t(s),
    and returns a new curve parametrized by ``new_var`` (default ``s``).

    Raises RuntimeError when sympy cannot produce a unique inverse.
    """
    from sympy import S, solveset, Eq
    from sympy import integrate
    from silkpy.sympy_utility import norm
    # Speed along the curve: |dr/dt|.
    drdt = norm(other.expr().diff(other.sym(0)))
    # Arc length as a function of the old parameter: s(t) = int |dr/dt| dt.
    new_var_in_old = integrate(drdt, other.sym(0)).simplify()
    # Invert s(t) -> t(s); solveset may return zero, several, or an
    # unevaluated/infinite solution set.
    solset = solveset(Eq(new_var, new_var_in_old), other.sym(0), domain=S.Reals).simplify()
    try:
        # len() raises on unevaluated/infinite sets; handled below.
        if len(solset) != 1:
            raise RuntimeError(f"We have not yet succedded in inverse s(t) into t(s).\
                It found these solutions: {solset}.\
                Users need to choose from them or deduce manually, and then set it by obj.param_norm(s_symbol, t_expressed_by_s")
    except:
        # NOTE(review): this bare except also swallows the RuntimeError
        # raised just above, so its detailed message (listing the
        # candidate solutions) is never surfaced -- consider catching
        # TypeError only.
        raise RuntimeError(f"We have not yet succedded in inverse s(t) into t(s). Try the curve_param_transform function instead and set the transform relation manually.")
    else:
        # Unique inverse t(s): substitute it into the coordinates and
        # map the old parameter limits through s(t) to get new limits.
        old_var_in_new = next(iter(solset))
        return _ParametricCurve(
            other.expr().subs(other.sym(0), old_var_in_new),
            (new_var, 
            new_var_in_old.subs(other.sym(0), other.sym_limit(0)[0]), 
            new_var_in_old.subs(other.sym(0), other.sym_limit(0)[1])))
def curve_param_transform(old_curve, newt, t_expr=None):
    """Re-parametrize ``old_curve`` with a new parameter ``newt``.

    Parameters
    ----------
    old_curve : ParametricCurve
        Curve parametrized by ``old_curve._t``.
    newt : Symbol
        Symbol of the new parameter.
    t_expr : sympy expression
        The transform relation substituted for the old parameter in the
        curve's coordinate expressions.

    Returns
    -------
    ParametricCurve
        The same curve expressed in terms of ``newt``.
    """
    # Bug fix: the original body referenced an undefined name
    # ``newt_expr`` when computing the new parameter limits, raising
    # NameError on every call; the supplied ``t_expr`` is used instead.
    # NOTE(review): evaluating the limits from t_expr assumes it plays
    # the role of the transform relation in both directions -- confirm
    # against curve_normalization, which uses the s(t)/t(s) pair.
    return _ParametricCurve(
        old_curve._r.applyfunc(lambda x: x.subs(old_curve._t, t_expr)),
        (newt,
         t_expr.subs(old_curve._t, old_curve._t_limit[0]),
         t_expr.subs(old_curve._t, old_curve._t_limit[1])),
        old_curve._sys)
e2850c2cd171c65701a7c0ab8f349d8b562b539c | 556 | py | Python | venv/Lib/site-packages/pyroute2/netlink/rtnl/riprsocket.py | kalymgr/Project-T-Cryptocurrencies | 5dbb679a76bcf07b913036e7b44ba4247c39482d | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyroute2/netlink/rtnl/riprsocket.py | kalymgr/Project-T-Cryptocurrencies | 5dbb679a76bcf07b913036e7b44ba4247c39482d | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyroute2/netlink/rtnl/riprsocket.py | kalymgr/Project-T-Cryptocurrencies | 5dbb679a76bcf07b913036e7b44ba4247c39482d | [
"MIT"
] | null | null | null | from pyroute2.netlink import rtnl
from pyroute2.netlink import NETLINK_ROUTE
from pyroute2.netlink.nlsocket import NetlinkSocket
from pyroute2.netlink.rtnl.marshal import MarshalRtnl
class RawIPRSocketMixin(object):
    """Mixin that configures a NetlinkSocket for the NETLINK_ROUTE family
    and installs the rtnetlink message marshaller."""
    def __init__(self, fileno=None):
        # Open (or wrap, when fileno is given) a NETLINK_ROUTE socket.
        super(RawIPRSocketMixin, self).__init__(NETLINK_ROUTE, fileno=fileno)
        # MarshalRtnl parses raw rtnetlink messages into message objects.
        self.marshal = MarshalRtnl()
    def bind(self, groups=rtnl.RTMGRP_DEFAULTS, **kwarg):
        """Bind the socket, subscribing to the given rtnetlink multicast
        groups (defaults to the standard set)."""
        super(RawIPRSocketMixin, self).bind(groups, **kwarg)
class RawIPRSocket(RawIPRSocketMixin, NetlinkSocket):
    """Concrete raw NETLINK_ROUTE socket: the mixin's setup applied to a
    plain NetlinkSocket, with no extra behavior."""
    pass
| 29.263158 | 77 | 0.767986 | 367 | 0.660072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e285889f2e3b2ffcbbc82294049dd0be72f9d5f1 | 1,013 | py | Python | zhixuewang/teacher/urls.py | lihaoze123/zhixuewang-python | 7a54bb1ae96f74d3bb3a0845f3b084bb5942f758 | [
"MIT"
] | 22 | 2019-01-21T03:49:44.000Z | 2020-02-13T08:43:01.000Z | zhixuewang/teacher/urls.py | lihaoze123/zhixuewang-python | 7a54bb1ae96f74d3bb3a0845f3b084bb5942f758 | [
"MIT"
] | 10 | 2019-01-21T03:50:23.000Z | 2020-01-03T13:06:49.000Z | zhixuewang/teacher/urls.py | lihaoze123/zhixuewang-python | 7a54bb1ae96f74d3bb3a0845f3b084bb5942f758 | [
"MIT"
] | 3 | 2019-02-17T06:12:35.000Z | 2019-10-29T13:24:06.000Z | from zhixuewang.urls import BASE_URL
class Url:
    """URL constants for the Zhixue teacher-side web API endpoints,
    all rooted at BASE_URL."""
    INFO_URL = f"{BASE_URL}/container/container/student/account/"
    CHANGE_PASSWORD_URL = f"{BASE_URL}/portalcenter/home/updatePassword/"
    TEST_URL = f"{BASE_URL}/container/container/teacher/teacherAccountNew"
    GET_EXAM_URL = f"{BASE_URL}/classreport/class/classReportList/"
    GET_AcademicTermTeachingCycle_URL = f"{BASE_URL}/classreport/class/getAcademicTermTeachingCycle/"
    GET_REPORT_URL = f"{BASE_URL}/exportpaper/class/getExportStudentInfo"
    GET_MARKING_PROGRESS_URL = f"{BASE_URL}/marking/marking/markingProgressDetail"
    GET_EXAM_DETAIL_URL = f"{BASE_URL}/scanmuster/cloudRec/scanrecognition"
    GET_EXAM_SCHOOLS_URL = f"{BASE_URL}/exam/marking/schoolClass"
    GET_EXAM_SUBJECTS_URL = f"{BASE_URL}/configure/class/getSubjectsIncludeSubAndGroup"
    # Must be followed by a paperId
    # ORIGINAL_PAPER_URL = f"{BASE_URL}/classreport/class/student/checksheet/?userId="
    ORIGINAL_PAPER_URL = f"{BASE_URL}/classreport/class/student/checksheet/"
e2858d86f914bf75d274567c879ac15e007d7753 | 229 | py | Python | 2 semester/PP/9/Code/1.3.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | [
"MIT"
] | null | null | null | 2 semester/PP/9/Code/1.3.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | [
"MIT"
] | null | null | null | 2 semester/PP/9/Code/1.3.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | [
"MIT"
] | null | null | null | sort = lambda array: [sublist for sublist in sorted(array, key=lambda x: x[1])]
if __name__ == "__main__":
print(sort([
("English", 88),
("Social", 82),
("Science", 90),
("Math", 97)
]))
| 20.818182 | 79 | 0.50655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.183406 |
e2867f314d7004069b6df31d48702399fd7727ef | 451 | py | Python | users/models.py | jannetasa/haravajarjestelma | 419f23656306d94ae4d9a8d3477a6325cc80b601 | [
"MIT"
] | null | null | null | users/models.py | jannetasa/haravajarjestelma | 419f23656306d94ae4d9a8d3477a6325cc80b601 | [
"MIT"
] | 79 | 2018-11-26T09:43:41.000Z | 2022-02-10T08:19:11.000Z | users/models.py | jannetasa/haravajarjestelma | 419f23656306d94ae4d9a8d3477a6325cc80b601 | [
"MIT"
] | 3 | 2018-11-27T08:08:22.000Z | 2022-03-25T08:30:34.000Z | from django.db import models
from django.utils.translation import ugettext_lazy as _
from helusers.models import AbstractUser
class User(AbstractUser):
is_official = models.BooleanField(verbose_name=_("official"), default=False)
class Meta:
verbose_name = _("user")
verbose_name_plural = _("users")
ordering = ("id",)
def can_view_contract_zone_details(user):
return user.is_authenticated and user.is_official
| 26.529412 | 80 | 0.738359 | 224 | 0.496674 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.059867 |
e287c088eb04f012860164c20f94da353ad49546 | 3,904 | py | Python | src/main/python/tranquilitybase/gcpdac/main/core/terraform/terraform_utils.py | tranquilitybase-io/tb-gcp-dac | 1d65afced1ab7427262dcdf98ee544370201439a | [
"Apache-2.0"
] | 2 | 2020-04-23T16:50:26.000Z | 2021-05-09T11:30:42.000Z | src/main/python/tranquilitybase/gcpdac/main/core/terraform/terraform_utils.py | tranquilitybase-io/tb-gcp-dac | 1d65afced1ab7427262dcdf98ee544370201439a | [
"Apache-2.0"
] | 156 | 2020-04-08T14:08:47.000Z | 2021-07-01T14:48:15.000Z | src/main/python/tranquilitybase/gcpdac/main/core/terraform/terraform_utils.py | tranquilitybase-io/tb-gcp-dac | 1d65afced1ab7427262dcdf98ee544370201439a | [
"Apache-2.0"
] | 2 | 2020-06-24T11:19:58.000Z | 2020-06-24T13:27:22.000Z | import time
import traceback
from python_terraform import Terraform
from src.main.python.tranquilitybase.gcpdac.configuration.helpers.eaglehelper import EagleConfigHelper
from src.main.python.tranquilitybase.gcpdac.configuration.helpers.envhelper import EnvHelper
from src.main.python.tranquilitybase.gcpdac.main.core.terraform.terraform_config import get_terraform_path
from src.main.python.tranquilitybase.lib.common.FileUtils import FileUtils
from src.main.python.tranquilitybase.lib.common.StringUtils import is_none_or_empty
# --- Logger ---
import inspect
from src.main.python.tranquilitybase.lib.common.local_logging import get_logger, get_frame_name
logger = get_logger(get_frame_name(inspect.currentframe()))
def validate_terraform_path():
    """Check that the folder-creation Terraform module exists and plans
    cleanly. Raises when the module directory is missing; skips the
    plan when running inside an IDE."""
    source_path = get_terraform_path('folder_creation')
    if not FileUtils.dir_exists(source_path):
        raise Exception("terraform directory not found: " + source_path)
    if EnvHelper.is_ide():
        logger.warn("running in IDE skipping terraform validation")
        return
    terraform_plan(Terraform(working_dir=source_path))
def validate_terraform_config():
    """Check that the Eagle config provides a non-empty Terraform state
    bucket and TB discriminator; raises otherwise."""
    config = EagleConfigHelper.config_dict
    state_bucket = config['terraform_state_bucket']
    discriminator = config['tb_discriminator']
    if is_none_or_empty(state_bucket) or is_none_or_empty(discriminator):
        raise Exception("terraform value from ec_config found to be invalid")
def terraform_plan(tf: Terraform):
    """Run ``terraform plan`` on the given Terraform wrapper and log the
    return code, stdout and stderr at debug level."""
    plan_code, plan_out, plan_err = tf.plan(capture_output=True)
    logger.debug('Terraform plan return code is {}'.format(plan_code))
    logger.debug('Terraform plan stdout is {}'.format(plan_out))
    logger.debug('Terraform plan stderr is {}'.format(plan_err))
def terraform_init(backend_prefix, terraform_state_bucket, tf: Terraform):
    """Run ``terraform init`` against a remote state backend (bucket +
    prefix) and log the return code, stdout and stderr."""
    backend = {'bucket': terraform_state_bucket,
               'prefix': backend_prefix}
    init_code, init_out, init_err = tf.init(capture_output=True,
                                            backend_config=backend)
    logger.debug('Terraform init return code is {}'.format(init_code))
    logger.debug('Terraform init stdout is {}'.format(init_out))
    logger.debug('Terraform init stderr is {}'.format(init_err))
def terraform_apply(env_data, tf: Terraform):
    """Run ``terraform apply`` with up to 5 attempts, 30 s apart.

    Args:
        env_data: path to the Terraform var file passed to apply.
        tf: python_terraform ``Terraform`` wrapper to run commands on.

    Returns:
        dict with keys ``tf_return_code`` (last apply return code),
        ``tf_outputs`` (terraform outputs, empty on failure) and
        ``tf_state`` (parsed ``terraform show`` state, empty on failure).
    """
    max_attempts = 5
    return_code = 0
    for attempt in range(max_attempts):
        logger.debug("Try {}".format(attempt))
        return_code, stdout, stderr = tf.apply(skip_plan=True, var_file=env_data, capture_output=True)
        logger.debug('Terraform apply return code is {}'.format(return_code))
        logger.debug('Terraform apply stdout is {}'.format(stdout))
        logger.debug("Terraform apply stderr is {}".format(stderr))
        if return_code == 0:
            break
        if attempt < max_attempts - 1:
            # Back off before retrying. Bug fix: the original also slept
            # 30 s after the final failed attempt, delaying the error
            # path for no benefit.
            time.sleep(30)
    if return_code == 0:
        show_return_code, tf_state, stdout = tf.show(json=True)
        logger.debug('Terraform show return code is {}'.format(show_return_code))
        logger.debug('Terraform show stdout is {}'.format(stdout))
        tf_outputs = tf.output()
        for output_value in tf_outputs:
            logger.debug('Terraform output value is {}'.format(output_value))
    else:
        # TODO get output for errors
        tf_state = {}
        tf_outputs = {}
        traceback.print_stack()
    return {"tf_return_code": return_code, "tf_outputs": tf_outputs, "tf_state": tf_state}
def terraform_destroy(env_data, tf):
    """Run ``terraform destroy`` with the given var file, log its output
    and return a dict holding the return code."""
    destroy_code, destroy_out, destroy_err = tf.destroy(var_file=env_data, capture_output=True)
    logger.debug('Terraform destroy return code is {}'.format(destroy_code))
    logger.debug('Terraform destroy stdout is {}'.format(destroy_out))
    logger.debug('Terraform destroy stderr is {}'.format(destroy_err))
    return {"tf_return_code": destroy_code}
| 41.094737 | 106 | 0.71542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 785 | 0.201076 |
e288295cb9e018ae0fba4c6ba89f7da0720d2b7a | 2,692 | py | Python | jsonfile.py | jason0x43/jcalfred | 66e5b3d1ffc7529aeda925695b8fb32fd7fa383c | [
"MIT"
] | 6 | 2015-05-08T19:52:04.000Z | 2020-10-21T14:56:32.000Z | jsonfile.py | jason0x43/jcalfred | 66e5b3d1ffc7529aeda925695b8fb32fd7fa383c | [
"MIT"
] | 1 | 2016-05-25T19:10:07.000Z | 2016-05-25T19:10:07.000Z | jsonfile.py | jason0x43/jcalfred | 66e5b3d1ffc7529aeda925695b8fb32fd7fa383c | [
"MIT"
] | null | null | null | import logging
import json
import os.path
LOG = logging.getLogger(__name__)
class JsonFile(object):
    """A dict-like JSON store that persists every mutation to disk.

    Lines starting with ``//`` in the file are treated as comments:
    they are skipped on load, and an optional ``header`` is written
    back as such comment lines at the top of the file.
    """

    def __init__(self, path, default_data=None, ignore_errors=False,
                 header=None):
        '''Construct a new JsonFile.

        Parameters
        ----------
        path : str
            Location of the backing JSON file; loaded if it exists.
        default_data : dict
            This is a dictionary of data to initialize the JsonFile with.
            Any data already present in the file will override this data.
        ignore_errors : boolean
            Set to True to ignore errors when loading data from an
            existing file.
        header : string
            A comment header to include with the file. This should be a
            string or a list of strings. Necessary comment tags will be
            added automatically.
        '''
        self._data = {}
        self._path = path
        self._header = header

        if os.path.exists(path):
            try:
                with open(path, 'rt') as cfile:
                    # Strip '//' comment lines before handing the text
                    # to the JSON parser.
                    lines = [n for n in cfile.readlines() if not
                             n.strip().startswith('//')]
                    self._data = json.loads(''.join(lines))
            except ValueError:
                if ignore_errors:
                    LOG.warning('ignoring corrupt JsonFile %s', path)
                    self._data = {}
                else:
                    LOG.warning('corrupt JsonFile %s', path)
                    raise
        elif default_data:
            # No file on disk yet: seed it with the defaults.
            self._data = default_data
            self._save()

    def __contains__(self, key):
        return key in self._data

    def __getitem__(self, key):
        # NOTE: returns None for missing keys (no KeyError) by design.
        return self._data.get(key, None)

    def __delitem__(self, key):
        del self._data[key]
        self._save()

    def __setitem__(self, key, value):
        self._data[key] = value
        self._save()

    def __iter__(self):
        return self._data.__iter__()

    def iterkeys(self):
        return self._data.__iter__()

    def items(self):
        return self._data.items()

    @property
    def path(self):
        return self._path

    @property
    def header(self):
        return self._header

    @header.setter
    def header(self, value):
        self._header = value
        self._save()

    def get(self, key, default=None):
        return self._data.get(key, default)

    def _save(self):
        """Write the header (as ``//`` comments) and the data to disk."""
        with open(self._path, 'wt') as cfile:
            if self._header:
                if not isinstance(self._header, (list, tuple)):
                    # Normalize a plain string into a list of lines.
                    # Bug fix: assign _header directly -- the original
                    # went through the ``header`` property setter, which
                    # re-enters _save() and writes the file twice
                    # through two concurrently open handles.
                    self._header = self._header.split('\n')
                for line in self._header:
                    cfile.write('// {0}\n'.format(line))
            json.dump(self._data, cfile, indent=2)
e2890f61a50cee88e2d36d96b2e7ecb300c712a7 | 982 | py | Python | twitch.py | blueben/twitchalyze | daa5e4ea40a7959ee5b2fdf82f86a70db1800511 | [
"Apache-2.0"
] | 1 | 2017-07-14T07:14:36.000Z | 2017-07-14T07:14:36.000Z | twitch.py | blueben/twitchalyze | daa5e4ea40a7959ee5b2fdf82f86a70db1800511 | [
"Apache-2.0"
] | 1 | 2017-07-07T18:40:40.000Z | 2017-07-07T18:40:40.000Z | twitch.py | blueben/twitchalyze | daa5e4ea40a7959ee5b2fdf82f86a70db1800511 | [
"Apache-2.0"
] | null | null | null | """
Twitch API Module.
This module implements only those parts of the Twitch API needed for the
twitchalyze app to function. It is not a general purpose SDK.
This module is written against Version 5 of the Twitch API.
"""
import json
import requests
# Read in user configuration
# Keys read by this module: client_id, record_limit (here) and
# api_url (in _url below).
with open('.twitchalyze') as data_file:
    CONFIG = json.load(data_file)
# Set up a new request session
# Every request made through S carries the Client-ID header, the v5
# Accept header, and default paging parameters (limit/offset).
S = requests.Session()
S.headers.update({
    'Client-ID': CONFIG["client_id"],
    'Accept': 'application/vnd.twitchtv.v5+json'
})
S.params.update({
    'limit': CONFIG["record_limit"],
    'offset': 0
})
def _url(path):
    """Build a full Twitch API URL by prefixing *path* with the
    configured base URL."""
    base_url = CONFIG["api_url"]
    return base_url + path
##
# Twitch API calls
def streams(channel, game, stream_type):
    """
    Get live streams.
    https://dev.twitch.tv/docs/v5/reference/streams/#get-live-streams
    """
    channel_csv = ','.join(channel)
    filters = {
        'channel': channel_csv,
        'game': game,
        'type': stream_type
    }
    return S.get(_url('/streams/'), params=filters)
| 20.458333 | 72 | 0.660896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 556 | 0.566191 |
e28a67a7643e93d68a4bd7360926046da59daf02 | 12,956 | py | Python | code/formatting.py | MaryumSayeed/TheSwan | 3e186e15acb41faec7dd508d8a8cd250659eba9c | [
"MIT"
] | null | null | null | code/formatting.py | MaryumSayeed/TheSwan | 3e186e15acb41faec7dd508d8a8cd250659eba9c | [
"MIT"
] | null | null | null | code/formatting.py | MaryumSayeed/TheSwan | 3e186e15acb41faec7dd508d8a8cd250659eba9c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import glob,os,csv,re,math
import shutil, time
from astropy.io import ascii
import matplotlib.pyplot as plt
# Load all data files:
# --- Paths and input catalogues (hard-coded to the author's machine) ---
psdir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/'
hrdir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/'
# Original directories:
pande_dir='/Users/maryumsayeed/Desktop/pande/pande_lcs/'
ast_dir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/data/large_train_sample/'
# Directories when testing sections of lightcurves:
# pande_dir='/Users/maryumsayeed/Desktop/pande/pande_lcs_third/'
# ast_dir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/data/large_train_sample_third/'
pande_lcs =glob.glob(pande_dir+'*.fits')
ast_lcs =glob.glob(ast_dir+'*.fits')
print('# of Pande .ps files:',len(glob.glob(pande_dir+'*.ps')))
print('# of Pande .fits files:',len(glob.glob(pande_dir+'*.fits')))
print('# of Astero. .ps files:',len(glob.glob(ast_dir+'*.ps')))
print('# of Astero. .fits files:',len(glob.glob(ast_dir+'*.fits')))
# Load Berger+ stellar properties catalogues:
gaia =ascii.read('/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/DR2PapTable1.txt',delimiter='&')
gaia =gaia[gaia['binaryFlag']==0] #remove any binaries
kepler_catalogue=pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/GKSPC_InOut_V4.csv')#,skiprows=1,delimiter=',',usecols=[0,1])
# Get Kps for all stars:
kpfile ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/KIC_Kepmag_Berger2018.csv'
kp_all =pd.read_csv(kpfile,usecols=['KIC','kic_kepmag'])
# # Load Asteroseismic Samples:
# Don't want to include any Mathur sample:
mathur_header=['KIC','loggi','e_loggi','r_loggi','n_loggi','logg','E_logg','e_logg','Mass','E_Mass','e_Mass']
mathur_2017 =pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/mathur_2017.txt',delimiter=';',skiprows=54,names=mathur_header)
mathur_2017 =mathur_2017[mathur_2017['n_loggi']=='AST'] #include only asteroseismic measurements
yu_header=['KICID','Teff','err','logg','logg_err','Fe/H','err','M_noCorrection','M_nocorr_err','R_noCorrection','err','M_RGB','M_RGB_err','R_RGB','err','M_Clump','M_Clump_err','R_Clump','err','EvoPhase']
yu_2018 =pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/rg_yu.txt',delimiter='|',names=yu_header,skiprows=1,index_col=False)#,names=yu_header)
#chaplin_2014=pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/Chaplin_2014.txt',skiprows=47,delimiter='\t',names=chaplin_header)
#huber_2013 =pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/Huber_2013.txt',delimiter='\t',skiprows=37,names=['KIC','Mass','Mass_err'])
mathur_kics=np.array(mathur_2017['KIC'])
yu_kics=np.array(yu_2018['KICID'])
#chaplin_kics=np.array(chaplin_2014['KIC'])
#huber_kics=np.array(huber_2013['KIC'])
print('# of stars in Yu+2018:',len(yu_kics))
print('# of stars in Mathur+17:',len(mathur_kics))
# Union of the two seismic catalogues, de-duplicated.
astero_kics=np.concatenate([mathur_kics,yu_kics])
astero_kics=np.array(list(set(astero_kics)))
print('Total seismic stars:',len(astero_kics))
# # Load Pande sample:
pande =pd.read_csv('/Users/maryumsayeed/Desktop/pande/pande_granulation.txt')#,skiprows=1,usecols=[0],dtype=int,delimiter=',')
pande_kics=list(pande['#KIC'])
print('# of stars in Pande+2018:',len(pande))
# If star in both sample, treat it as asteroseismic star to increase ast. sample.
# If star only in Pande sample, keep it there.
# If star only in ast. sample, keep it there.
pande_stars0=(set(pande_kics) - set(astero_kics))
print('# stars only in Pande+ and not astero',len(pande_stars0))
print('# total astero. stars:',len(astero_kics))
print('# stars in both Pande+ and astero catalogues:',len(list(set(pande_kics) & set(astero_kics))))
# # Get catalogues of non-granulation stars:
# Each of these files lists KICs to exclude (pulsators, binaries,
# planet hosts, flare stars, rotators, RR Lyrae, ...).
not_dir='/Users/maryumsayeed/Desktop/HuberNess/mlearning/ACFcannon-master/not_granulation_star/'
dscutis =np.loadtxt(not_dir+'murphy_dscuti.txt',usecols=[0,-1],delimiter=',',skiprows=1,dtype=int)
idx=np.where(dscutis[:,1]==1)[0] #stars that have dSct flag
dscutis =dscutis[idx][:,0]
binaries =np.loadtxt(not_dir+'ebinary.txt',usecols=[0],dtype=int,delimiter=',')
exoplanets =pd.read_csv(not_dir+'koi_planethosts.csv',skiprows=53,usecols=['kepid','koi_disposition','koi_pdisposition'])
#exoplanets=exoplanets[exoplanets['koi_pdisposition']!='FALSE POSITIVE'] # Remove false positive exoplanets:
exoplanets =[int(i) for i in list(exoplanets['kepid'])]
superflares=np.loadtxt(hrdir+'superflares_shibayama2013.txt',skiprows=33,usecols=[0],dtype=int)
superflares=[int(i) for i in list(superflares)]
flares =list(np.loadtxt(not_dir+'flares_davenport2016.txt',usecols=[0],skiprows=1,delimiter=',',dtype=int))
rotating =list(np.loadtxt(not_dir+'mcquillan_rotation.txt',usecols=[0],skiprows=1,delimiter=',',dtype=int))
clas =ascii.read(not_dir+'debosscher2011.dat')
gdor =clas[(clas['V1'] == 'GDOR') | (clas['V1'] == 'SPB')]
gdor =[int(i) for i in list(gdor['KIC'])]
dscutis2 =clas[clas['V1'] == 'DSCUT']
dscutis2 =[int(i) for i in list(dscutis2['KIC'])]
rrlyr =pd.read_csv(not_dir+'rrlyr.txt')
rrlyr =[int(i) for i in list(rrlyr['kic'])]
# # Remove non-granulation stars:
pande_stars=list(set(pande_stars0)-set(binaries)-set(exoplanets)-set(flares)-set(rotating)                 -set(superflares)-set(dscutis)-set(dscutis2)-set(gdor)-set(rrlyr))
astero_stars=list(set(astero_kics)-set(binaries)-set(exoplanets)-set(flares)-set(rotating)                 -set(superflares)-set(dscutis)-set(dscutis2)-set(gdor)-set(rrlyr))
print('# of non-granulation stars removed from astero sample:',len(astero_kics)-len(astero_stars))
print('# of non-granulation stars removed from pande sample:',len(pande_stars0)-len(pande_stars))
# Only get stars in Gaia catalogue (Berger+2018):
print('(before cross-referenced with Gaia) # of Pande stars:',len(pande_stars))
print('(before cross-referenced with Gaia) # of Astero. stars:',len(astero_stars))
pande_stars = list((set(pande_stars) & set(gaia['KIC'])))
astero_stars = list((set(astero_stars) & set(gaia['KIC'])))
print('final # of Pande stars:',len(pande_stars))
print('final # of asteroseismic stars:',len(astero_stars))
# Check if all Pande stars have a light curve downloaded :
print('\n','===== PANDE =====')
# KIC IDs are parsed out of filenames of the form kplr<zero-padded-KIC>-...
pande_kics_downloaded=[]
for file in pande_lcs:
    kic=re.search('kplr(.*)-', file).group(1)
    kic=int(kic.lstrip('0'))
    pande_kics_downloaded.append(kic)
print('These should be the same:')
print('---Stars downloaded:',len(pande_kics_downloaded))
print('---Stars needed:',len(pande_stars))
if len(pande_kics_downloaded) > len(pande_stars):
    print('We have more stars downloaded than we need from Pande+18.')
else:
    print("Don't have all the stars that we need. Download more!")
# Only use Pande stars we have downloaded:
#pande_stars = list(set(set(pande_stars)-set(pande_not_downloaded)))
pande_below_dc=ascii.read(psdir+'LLR_gaia/pande_kics_below_duty_cycle.txt',names=['KICID'])
pande_below_89=ascii.read(psdir+'LLR_gaia/pande_kics_below_89_days.txt',names=['KICID'])
pande_below_dc,pande_below_89=pande_below_dc['KICID'],pande_below_89['KICID']
# Split the needed Pande stars into downloaded vs. missing.
pande_not_downloaded =[]
pande_stars_downloaded=[]
for kic in pande_stars:
    if kic in pande_kics_downloaded:
        pande_stars_downloaded.append(kic)
    else:
        pande_not_downloaded.append(kic)
print('Need from Pande+18',len(pande_stars))
print('Downloaded',len(pande_stars_downloaded))
print('Have but removed aka:')
print('---# of Pande stars below 89 days',len(pande_below_89))
print('---# of Pande stars below duty cycle',len(pande_below_dc))
print('Pande not downloaded',len(pande_not_downloaded))
print('Good pande stars',len(pande_stars))
# Check if all astero. stars have a light curve downloaded :
print('\n','===== ASTERO. =====')
ast_kics_downloaded=[]
for file in ast_lcs:
    kic=re.search('kplr(.*)-', file).group(1)
    kic=int(kic.lstrip('0'))
    ast_kics_downloaded.append(kic)
print('These should be the same:')
print('---Stars downloaded:',len(ast_kics_downloaded))
print('---Stars needed:',len(astero_stars))
if len(ast_kics_downloaded) > len(astero_stars):
    print('We have more stars downloaded than we need from astero catalogues.')
else:
    print("Don't have all the stars that we need. Download more!")
astero_below_dc=ascii.read(psdir+'LLR_seismic/astero_kics_below_duty_cycle.txt',names=['KICID'])
astero_below_89=ascii.read(psdir+'LLR_seismic/astero_kics_below_89_days.txt',names=['KICID'])
astero_below_dc,astero_below_89=astero_below_dc['KICID'],astero_below_89['KICID']
# Same downloaded/missing split for the asteroseismic sample.
astero_not_downloaded =[]
astero_stars_downloaded=[]
for kic in astero_stars:
    if kic in ast_kics_downloaded:
        astero_stars_downloaded.append(kic)
    else:
        astero_not_downloaded.append(kic)
print('Need from catalogues',len(astero_stars))
print('Downloaded',len(ast_kics_downloaded))
print('Have but removed aka:')
print('---# of astero stars below 89 days',len(astero_below_89))
print('---# of astero stars below duty cycle',len(astero_below_dc))
print('Astero not downloaded',len(astero_not_downloaded))
print('Good astero stars',len(astero_stars))
# In[13]:
# ascii.write([astero_stars],psdir+'astero_stars_we_need.txt',overwrite=True)
# ascii.write([ast_kics_downloaded],psdir+'astero_stars_downloaded.txt',overwrite=True)
# ascii.write([good_astero_stars],psdir+'good_stars_downloaded.txt',overwrite=True)
fn='/Users/maryumsayeed/Downloads/'
# np.savetxt(fn+'pande_not_downloaded.txt',pande_not_downloaded,fmt='%s')
# np.savetxt(fn+'astero_not_downloaded.txt',astero_not_downloaded,fmt='%s')
# # Find logg for Pande:
# For each Pande .ps file, take the logg from the Kepler catalogue
# (iso_logg) and keep [file, logg] pairs for stars with a finite value.
print('\n','Getting logg for Pande. stars...')
pande_ps=glob.glob(pande_dir+'*.ps')
pande_no_logg=0
pande_final_sample=[]
pande_loggs=[]
check_dups=[]
for file in pande_ps:
    kic=re.search('kplr(.*)-', file).group(1)
    kic=int(kic.lstrip('0'))
    if kic in pande_stars:
        row=kepler_catalogue.loc[kepler_catalogue['KIC']==kic]
        logg=row['iso_logg'].item()
        if math.isnan(logg) is False: # check to see there are no nan loggs
            logg_pos_err=row['iso_logg_err1']
            logg_neg_err=row['iso_logg_err2']
            pande_final_sample.append([file,logg])
            pande_loggs.append(logg)
        else:
            pande_no_logg+=1
    else:
        continue
print('Pande w/ no logg:',pande_no_logg)
# Double check all these stars are in Pande:
kic_not_in_pande=[]
for i in pande_final_sample:
    file=i[0]
    kic=re.search('kplr(.*)-', file).group(1)
    kic=int(kic.lstrip('0'))
    if kic not in pande_kics:
        kic_not_in_pande.append(kic)
print('# stars not in Pande.',len(kic_not_in_pande))
print('# Pande stars to save:',len(pande_final_sample))
diff=2000
# np.savetxt(psdir+'pande_final_sample_full.txt',pande_final_sample,fmt='%s')
# np.savetxt(psdir+'pande_pickle_1.txt',pande_final_sample[0:2000],fmt='%s')
# np.savetxt(psdir+'pande_pickle_2.txt',pande_final_sample[2000:4000],fmt='%s')
# np.savetxt(psdir+'pande_pickle_3.txt',pande_final_sample[4000:],fmt='%s')
# # Find logg for Astero. stars:
# logg comes from Yu+2018 when available, else Mathur+2017.
print('\n','Getting logg for Astero. stars...')
astero_ps=glob.glob(ast_dir+'*.ps')
files,loggs=[],np.zeros(len(astero_ps))
c1,c2,c3,none=0,0,0,0
for i in range(0,len(astero_ps)):
    file=astero_ps[i]
    kic=re.search('kplr(.*)-', file).group(1)
    kic=int(kic.lstrip('0'))
    if kic in astero_stars:
        if kic in yu_kics:
            row=yu_2018.loc[yu_2018['KICID']==kic]
            logg =row['logg'].item()
            c1+=1
        elif kic in mathur_kics:
            row =mathur_2017.loc[mathur_2017['KIC']==kic]
            logg =row['loggi'].item()
            c2+=1
        else:
            none+=1
        # NOTE(review): when neither catalogue matches, 'logg' below
        # keeps its value from a previous iteration (NameError for the
        # first such star) -- confirm this path never fires in practice.
        loggs[i]=logg
        files.append(file)
        # astero_final_sample.append([file,logg])
        # astero_loggs.append(logg)
    else:
        continue
files,loggs=np.array(files),np.array(loggs).astype(float)
print('Yu+:',c1,'Mathur+',c2,'None',none)
idx=np.where(loggs>0)[0] #aka select valid stars
# NOTE(review): 'files' holds only matched stars while 'loggs' spans all
# astero_ps entries, so indexing files[idx] with positions from the
# full-length loggs array can misalign or go out of range -- verify.
astero_files,astero_loggs=files[idx],loggs[idx]
astero_final_sample=np.array([astero_files,astero_loggs]).T
# Double check all these stars are in the astero sample:
kic_not_in_astero=[]
for i in astero_final_sample:
    file=i[0]
    kic=re.search('kplr(.*)-', file).group(1)
    kic=int(kic.lstrip('0'))
    if kic not in astero_stars:
        kic_not_in_astero.append(kic)
print('# stars not in Astero.',len(kic_not_in_astero))
print('# Astero. stars to save:',len(astero_final_sample))
diff=4000
# np.savetxt(psdir+'astero_final_sample_full.txt',astero_final_sample,fmt='%s')
# np.savetxt(psdir+'astero_final_sample_1.txt',astero_final_sample[0:4000],fmt='%s')
# np.savetxt(psdir+'astero_final_sample_2.txt',astero_final_sample[4000:4000+diff],fmt='%s')
# np.savetxt(psdir+'astero_final_sample_3.txt',astero_final_sample[8000:8000+diff],fmt='%s')
# np.savetxt(psdir+'astero_final_sample_4.txt',astero_final_sample[12000:12000+diff],fmt='%s')
e28ae1ecff19d7a967621ff35f6bf1e80a303f2f | 646 | py | Python | problems/g2_academic/LangfordBin.py | cprudhom/pycsp3 | 980927188f4262c9ea48a6534795712f09d731d6 | [
"MIT"
] | 28 | 2019-12-14T09:25:52.000Z | 2022-03-24T08:15:13.000Z | problems/g2_academic/LangfordBin.py | cprudhom/pycsp3 | 980927188f4262c9ea48a6534795712f09d731d6 | [
"MIT"
] | 7 | 2020-04-15T11:02:07.000Z | 2022-01-20T12:48:54.000Z | problems/g2_academic/LangfordBin.py | cprudhom/pycsp3 | 980927188f4262c9ea48a6534795712f09d731d6 | [
"MIT"
] | 3 | 2020-04-15T08:23:45.000Z | 2021-12-07T14:02:28.000Z | """
See Ian P. Gent, Christopher Jefferson, Ian Miguel: Watched Literals for Constraint Propagation in Minion. CP 2006: 182-197
Examples of Execution:
python3 LangfordBin.py
python3 LangfordBin.py -data=10
"""
from pycsp3 import *
n = data or 8
# v[i] is the ith value of the Langford's sequence
v = VarArray(size=2 * n, dom=range(1, n + 1))
# p[j] is the first (resp., second) position of 1+j/2 in v if j is even (resp., odd)
p = VarArray(size=2 * n, dom=range(2 * n))
satisfy(
[v[p[2 * i]] == i + 1 for i in range(n)],
[v[p[2 * i + 1]] == i + 1 for i in range(n)],
[p[2 * i] == i + 2 + p[2 * i + 1] for i in range(n)]
)
| 24.846154 | 123 | 0.611455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 348 | 0.5387 |
e28e4be1e1115462a9f71610a6de2f1ea15e9d02 | 3,912 | py | Python | momentumnet-main/momentumnet/exact_rep_pytorch.py | ZhuFanCheng/Thesis | eba9a7567a5c254acb2e78fdac0cda7dddabb327 | [
"MIT"
] | null | null | null | momentumnet-main/momentumnet/exact_rep_pytorch.py | ZhuFanCheng/Thesis | eba9a7567a5c254acb2e78fdac0cda7dddabb327 | [
"MIT"
] | null | null | null | momentumnet-main/momentumnet/exact_rep_pytorch.py | ZhuFanCheng/Thesis | eba9a7567a5c254acb2e78fdac0cda7dddabb327 | [
"MIT"
] | null | null | null | # Authors: Michael Sander, Pierre Ablin
# License: MIT
"""
Original code from
Maclaurin, Dougal, David Duvenaud, and Ryan Adams.
"Gradient-based hyperparameter optimization through reversible learning."
International conference on machine learning. PMLR, 2015.
"""
import numpy as np
import torch
RADIX_SCALE = 2 ** 52
class TorchExactRep(object):
def __init__(
self,
val,
from_intrep=False,
shape=None,
device=None,
from_representation=None,
):
if from_representation is not None:
intrep, store = from_representation
self.intrep = intrep
self.aux = BitStore(0, 0, store=store)
else:
if device is None:
device = val.device.type
if shape is not None:
self.intrep = torch.zeros(
*shape, dtype=torch.long, device=device
)
else:
shape = val.shape
if from_intrep:
self.intrep = val
else:
self.intrep = self.float_to_intrep(val)
self.aux = BitStore(shape, device)
def __imul__(self, a):
self.mul(a)
return self
def __iadd__(self, a):
self.add(a)
return self
def __isub__(self, a):
self.sub(a)
return self
def __itruediv__(self, a):
self.div(a)
return self
def add(self, A):
"""Reversible addition of vector or scalar A."""
self.intrep += self.float_to_intrep(A)
return self
def sub(self, A):
self.add(-A)
return self
def rational_mul(self, n, d):
self.aux.push(self.intrep % d, d) # Store remainder bits externally
# self.intrep //= d # Divide by denominator
self.intrep = torch.div(self.intrep, d, rounding_mode="trunc")
self.intrep *= n # Multiply by numerator
self.intrep += self.aux.pop(n) # Pack bits into the remainder
def mul(self, a):
n, d = self.float_to_rational(a)
self.rational_mul(n, d)
return self
def div(self, a):
n, d = self.float_to_rational(a)
self.rational_mul(d, n)
return self
def float_to_rational(self, a):
d = 2 ** 16 // int(a + 1)
n = int(a * d + 1)
return n, d
def float_to_intrep(self, x):
if type(x) is torch.Tensor:
return (x * RADIX_SCALE).long()
return int(x * RADIX_SCALE)
def __repr__(self):
return repr(self.val)
def n_max_iter(self, beta):
d, n = self.float_to_rational(beta)
return int((64 - np.log2(n)) / np.abs(np.log2(n) - np.log2(d)))
@property
def val(self):
return self.intrep.float() / RADIX_SCALE
def copy(self):
v = TorchExactRep(self.val)
v.intrep = torch.clone(self.intrep)
v.aux.store = torch.clone(self.aux.store)
return v
def reset(self):
self.intrep.fill_(0)
self.aux.store.fill_(0)
class BitStore(object):
"""
Efficiently stores information with non-integer number of bits (up to 16).
"""
def __init__(self, shape, device, store=None):
# Use an array of Python 'long' ints which conveniently grow
# as large as necessary. It's about 50X slower though...
if store is not None:
self.store = store
else:
self.store = torch.zeros(shape, dtype=torch.long).to(device)
def push(self, N, M):
"""Stores integer N, given that 0 <= N < M"""
self.store *= M
self.store += N
def pop(self, M):
"""Retrieves the last integer stored."""
N = self.store % M
# self.store //= M
self.store = torch.div(self.store, M, rounding_mode="trunc")
return N
def __repr__(self):
return repr(self.store)
| 26.612245 | 78 | 0.562117 | 3,584 | 0.916155 | 0 | 0 | 77 | 0.019683 | 0 | 0 | 763 | 0.195041 |
e28edceaeb28b473911c6cbe149b7885fa3fc75c | 531 | py | Python | lp_mongodb/loaders/loader.py | TechLaProvence/lp_mongodb | 723e7a9599442405e915fa26c361e337c80784a9 | [
"MIT"
] | null | null | null | lp_mongodb/loaders/loader.py | TechLaProvence/lp_mongodb | 723e7a9599442405e915fa26c361e337c80784a9 | [
"MIT"
] | null | null | null | lp_mongodb/loaders/loader.py | TechLaProvence/lp_mongodb | 723e7a9599442405e915fa26c361e337c80784a9 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/TechLaProvence/lp_mongodb
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 figarocms dhardy@figarocms.fr
from tornado.concurrent import return_future
from lp_mongodb.storages.mongo_storage import Storage
from thumbor.context import Context
from thumbor.config import Config
@return_future
def load(context, path, callback):
storage = Storage(context)
callback(storage.get(path)) | 26.55 | 53 | 0.781544 | 0 | 0 | 0 | 0 | 108 | 0.20339 | 0 | 0 | 242 | 0.455744 |
e29079653a4fe276a51c79065567b452a9f627d4 | 857 | py | Python | efficientEigensolvers/Adaptive_PageRank_Algo.py | ICERM-Efficient-Eigensolvers-2020/Implimentation | c5856afaeaa234946fb5151226e4f3f5a60a1018 | [
"MIT"
] | null | null | null | efficientEigensolvers/Adaptive_PageRank_Algo.py | ICERM-Efficient-Eigensolvers-2020/Implimentation | c5856afaeaa234946fb5151226e4f3f5a60a1018 | [
"MIT"
] | null | null | null | efficientEigensolvers/Adaptive_PageRank_Algo.py | ICERM-Efficient-Eigensolvers-2020/Implimentation | c5856afaeaa234946fb5151226e4f3f5a60a1018 | [
"MIT"
] | 3 | 2020-07-02T15:33:58.000Z | 2020-07-03T16:32:40.000Z | import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
import Page_Rank_Utils as pru
def detectedConverged(y,x,epsilon):
C = set()
N = set()
for i in range(len(y)):
if abs(y[i] - x[i])/abs(x[i]) < epsilon:
C.add(i)
else:
N.add(i)
return N, C
def filter(A_prime, x_prime, N, C):
n = N.shape[0]
for i in range(n):
if i in C:
x_prime[i] = 0
for j in range(n):
A_prime[i][j] = 0
return A_prime, x_prime
def Filter_APR(G, weight, period):
P = pru.stochastic_transition_matrix(G, weight, True)
n = P.shape[0]
# initialize eigenvectors
v_list = []
idx = 0
v_init = np.zeros(n)
v_init[-1] = 1
v_list.append(v_init)
converged = True
while not converged:
return
| 17.489796 | 57 | 0.555426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.029172 |
e291856a8670ce43899869355a2184217b3d5cc1 | 272 | py | Python | demo_random_pixels.py | insolor/micropython-troyka-led-matrix | d72905e1b89fcaa1e334434c713e5a6468634ef4 | [
"MIT"
] | 1 | 2021-12-10T08:01:12.000Z | 2021-12-10T08:01:12.000Z | demo_random_pixels.py | insolor/micropython-troyka-led-matrix | d72905e1b89fcaa1e334434c713e5a6468634ef4 | [
"MIT"
] | null | null | null | demo_random_pixels.py | insolor/micropython-troyka-led-matrix | d72905e1b89fcaa1e334434c713e5a6468634ef4 | [
"MIT"
] | null | null | null | from troyka_led_matrix import TroykaLedMatrix
from urandom import getrandbits
import time
matrix = TroykaLedMatrix()
while True:
matrix.draw_pixel(getrandbits(3), getrandbits(3))
matrix.clear_pixel(getrandbits(3), getrandbits(3))
time.sleep_ms(50)
| 24.727273 | 55 | 0.753676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e291c18803387c40527b5444ebd69389689cae06 | 1,560 | py | Python | enip_backend/export/bulk.py | vote/enip-backend | 0261765db6c793ea276521d049ace8fe145cd98f | [
"MIT"
] | 2 | 2020-10-29T20:10:18.000Z | 2020-10-30T04:46:05.000Z | enip_backend/export/bulk.py | vote/enip-backend | 0261765db6c793ea276521d049ace8fe145cd98f | [
"MIT"
] | 3 | 2020-10-17T02:51:06.000Z | 2020-11-04T16:08:28.000Z | enip_backend/export/bulk.py | vote/enip-backend | 0261765db6c793ea276521d049ace8fe145cd98f | [
"MIT"
] | null | null | null | import json
import logging
from datetime import datetime, timezone
from ..enip_common.pg import get_ro_cursor
from .run import export_all_states, export_national
# Bulk-exports a range of ingests for testing purposes. Prints out a JSON
# blob describing the exports.
START_TIME = datetime(2020, 10, 15, 16, 0, 0, tzinfo=timezone.utc)
END_TIME = datetime(2020, 10, 15, 18, 0, 0, tzinfo=timezone.utc)
def export_bulk():
ingests = []
with get_ro_cursor() as cursor:
cursor.execute(
"SELECT ingest_id, ingest_dt FROM ingest_run WHERE ingest_dt >= %s AND ingest_dt <= %s AND waypoint_30_dt IS NOT NULL",
(START_TIME, END_TIME),
)
ingests = [(res.ingest_id, res.ingest_dt) for res in cursor]
logging.info(f"Running {len(ingests)} exports...")
summary = []
for i, (ingest_id, ingest_dt) in enumerate(ingests):
logging.info(f"[[[ INGEST {i+1} OF {len(ingests)} ]]]")
summary.append(
{
"ingest_dt": ingest_dt.isoformat(),
"exports": {
"national": export_national(
ingest_id, ingest_dt, ingest_dt.strftime("%Y%m%d%H%M%S")
),
"states": export_all_states(
ingest_id, ingest_dt, ingest_dt.strftime("%Y%m%d%H%M%S")
),
},
}
)
return summary
if __name__ == "__main__":
out_json = export_bulk()
with open("./bulk.json", "w") as f:
json.dump(out_json, f)
| 30.588235 | 131 | 0.578846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.25 |
e291cea8a93d8a6104d653557c8f3d00a9497fd4 | 11,032 | py | Python | src/genie/libs/parser/junos/show_chassis.py | noziwatele/genieparser | 4d7a62a870b30dfc8e2a41bf1ad81218bca6f5e9 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/junos/show_chassis.py | noziwatele/genieparser | 4d7a62a870b30dfc8e2a41bf1ad81218bca6f5e9 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/junos/show_chassis.py | noziwatele/genieparser | 4d7a62a870b30dfc8e2a41bf1ad81218bca6f5e9 | [
"Apache-2.0"
] | null | null | null |
''' show_chassis.py
Parser for the following show commands:
* show chassis fpc detail
* show chassis environment routing-engine
* show chassis firmware
* show chassis firmware no-forwarding
'''
# python
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import (Any,
Optional, Use, SchemaTypeError, Schema)
class ShowChassisFpcDetailSchema(MetaParser):
schema = {
Optional("@xmlns:junos"): str,
"fpc-information": {
Optional("@junos:style"): str,
Optional("@xmlns"): str,
"fpc": {
"fips-capable": str,
"fips-mode": str,
"memory-ddr-dram-size": str,
"memory-dram-size": str,
"memory-rldram-size": str,
"slot": str,
"start-time": {
"#text": str,
Optional("@junos:seconds"): str
},
"state": str,
"temperature": {
"#text": str,
Optional("@junos:celsius"): str
},
"up-time": {
"#text": str,
Optional("@junos:seconds"): str
}
}
}
}
class ShowChassisFpcDetail(ShowChassisFpcDetailSchema):
""" Parser for:
* show chassis fpc detail
"""
cli_command = 'show chassis fpc detail'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
#Slot 0 information:
p1 = re.compile(r'^Slot (?P<slot>\d+) +information:$')
#State Online
p2 = re.compile(r'^State +(?P<state>\S+)$')
#Temperature Testing
p3 = re.compile(r'^Temperature +(?P<temperature>\S+)$')
#Total CPU DRAM 511 MB
p4 = re.compile(r'^Total CPU DRAM +(?P<memory_dram_size>\d+)\sMB$')
#Total RLDRAM 10 MB
p5 = re.compile(r'^Total RLDRAM +(?P<memory_rldram_size>\d+)\sMB$')
#Total DDR DRAM 0 MB
p6 = re.compile(r'^Total DDR DRAM +(?P<memory_ddr_dram_size>\d+)\sMB$')
#FIPS Capable False
p7 = re.compile(r'^FIPS Capable +(?P<fips_capable>\S+)$')
#FIPS Mode False
p8 = re.compile(r'^FIPS Mode +(?P<fips_mode>\S+)$')
#Start time 2019-08-29 09:09:16 UTC
p9 = re.compile(r'^Start time +(?P<start_time>[\d\-\:A-Za-z ]+)$')
#Uptime 208 days, 22 hours, 50 minutes, 26 seconds
p10 = re.compile(r'^Uptime +(?P<up_time>[\d\-\,A-Za-z ]+)$')
ret_dict = {}
for line in out.splitlines():
line = line.strip()
#Slot 0 information:
m = p1.match(line)
if m:
ospf_area = ret_dict.setdefault("fpc-information", {})\
.setdefault("fpc", {})
group = m.groupdict()
ospf_area.update({'slot' : group['slot']})
continue
#State Online
m = p2.match(line)
if m:
group = m.groupdict()
ospf_area.update({'state' : group['state']})
continue
#Temperature Testing
m = p3.match(line)
if m:
group = m.groupdict()
temperature_dict = {}
temperature_dict["#text"] = group["temperature"]
ospf_area.update({'temperature' : temperature_dict})
continue
#Total CPU DRAM 511 MB
m = p4.match(line)
if m:
group = m.groupdict()
ospf_area.update({'memory-dram-size' : group['memory_dram_size']})
continue
#Total RLDRAM 10 MB
m = p5.match(line)
if m:
group = m.groupdict()
ospf_area.update({'memory-rldram-size' : group['memory_rldram_size']})
continue
#Total DDR DRAM 0 MB
m = p6.match(line)
if m:
group = m.groupdict()
ospf_area.update({'memory-ddr-dram-size' : group['memory_ddr_dram_size']})
continue
#FIPS Capable False
m = p7.match(line)
if m:
group = m.groupdict()
ospf_area.update({'fips-capable' : group['fips_capable']})
continue
#FIPS Mode False
m = p8.match(line)
if m:
group = m.groupdict()
ospf_area.update({'fips-mode' : group['fips_mode']})
continue
#Start time 2019-08-29 09:09:16 UTC
m = p9.match(line)
if m:
group = m.groupdict()
start_time_dict = {}
start_time_dict["#text"] = group["start_time"]
ospf_area.update({'start-time' : start_time_dict})
continue
#Uptime 208 days, 22 hours, 50 minutes, 26 seconds
m = p10.match(line)
if m:
group = m.groupdict()
up_time_dict = {}
up_time_dict["#text"] = group["up_time"]
ospf_area.update({'up-time' : up_time_dict})
continue
return ret_dict
class ShowChassisEnvironmentRoutingEngineSchema(MetaParser):
schema = {
Optional("@xmlns:junos"): str,
"environment-component-information": {
Optional("@xmlns"): str,
"environment-component-item": {
"name": str,
"state": str
}
}
}
class ShowChassisEnvironmentRoutingEngine(ShowChassisEnvironmentRoutingEngineSchema):
""" Parser for:
* show chassis environment routing-engine
"""
cli_command = 'show chassis environment routing-engine'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
#Routing Engine 0 status:
p1 = re.compile(r'^(?P<name>[\S\s]+) +status:$')
#State Online Master
p2 = re.compile(r'^State +(?P<name>[\S\s]+)$')
ret_dict = {}
for line in out.splitlines():
line = line.strip()
#Routing Engine 0 status:
m = p1.match(line)
if m:
ospf_area = ret_dict.setdefault("environment-component-information", {})\
.setdefault("environment-component-item", {})
group = m.groupdict()
ospf_area.update({'name' : group['name']})
continue
#State Online Master
m = p2.match(line)
if m:
group = m.groupdict()
ospf_area.update({'state' : group['name']})
continue
return ret_dict
class ShowChassisFirmwareSchema(MetaParser):
""" schema = {
Optional("@xmlns:junos"): str,
"firmware-information": {
Optional("@xmlns"): str,
"chassis": {
Optional("@junos:style"): str,
"chassis-module": {
"firmware": [
{
"firmware-version": str,
"type": str
}
],
"name": str
}
}
}
} """
def validate_chassis_firmware_list(value):
# Pass firmware list as value
if not isinstance(value, list):
raise SchemaTypeError('firmware is not a list')
chassis_firmware_schema = Schema({
"firmware-version": str,
"type": str
})
# Validate each dictionary in list
for item in value:
chassis_firmware_schema.validate(item)
return value
schema = {
"firmware-information": {
Optional("@xmlns"): str,
"chassis": {
Optional("@junos:style"): str,
"chassis-module": {
"firmware": Use(validate_chassis_firmware_list),
"name": str
}
}
}
}
class ShowChassisFirmware(ShowChassisFirmwareSchema):
""" Parser for:
* show chassis firmware
"""
cli_command = 'show chassis firmware'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
#Part Type Version
p0 = re.compile(r'^Part +Type +Version$')
#FPC 0 ROM PC Bios
p1 = re.compile(r'^(?P<name>\S+\s+\d+) +(?P<type>\S+) +(?P<firmware>\S+\s+\S+)$')
#O/S Version 19.2R1.8 by builder on 2019-06-21 17:52:23 UTC
p2 = re.compile(r'^(?P<type>\S+) +(?P<firmware>[\s\S]+)$')
ret_dict = {}
for line in out.splitlines()[1:]:
line = line.strip()
#Part Type Version
m = p0.match(line)
if m:
continue
#FPC 0 ROM PC Bios
m = p1.match(line)
if m:
firmware_chassis_dict = ret_dict.setdefault("firmware-information", {})\
.setdefault("chassis", {}).setdefault("chassis-module", {})
firmware_entry_list = firmware_chassis_dict.setdefault("firmware", [])
group = m.groupdict()
entry_dict = {}
entry_dict["firmware-version"] = group["firmware"]
entry_dict["type"] = group["type"]
firmware_chassis_dict["name"] = group["name"]
firmware_entry_list.append(entry_dict)
continue
#O/S Version 19.2R1.8 by builder on 2019-06-21 17:52:23 UTC
m = p2.match(line)
if m:
group = m.groupdict()
entry_dict = {}
entry_dict["firmware-version"] = group["firmware"]
entry_dict["type"] = group["type"]
firmware_entry_list.append(entry_dict)
continue
return ret_dict
class ShowChassisFirmwareNoForwarding(ShowChassisFirmware):
""" Parser for:
- show chassis firmware no-forwarding
"""
cli_command = [
'show chassis firmware no-forwarding'
]
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command[0])
else:
out = output
return super().cli(output=out) | 30.307692 | 91 | 0.472263 | 10,642 | 0.964648 | 0 | 0 | 0 | 0 | 0 | 0 | 4,074 | 0.369289 |
e291fcd825f96b9724727a3f8d3acd4cb312efa4 | 2,009 | py | Python | mp_sort/virtenv/lib/python3.6/site-packages/transcrypt/demos/pysteroids_demo/org/theodox/__init__.py | ang-jason/fip_powerx_mini_projects-foxtrot | 37e3671969b516369e2d1c7cab5890b75c489f56 | [
"MIT"
] | 2,200 | 2016-10-12T16:47:13.000Z | 2022-03-30T16:40:35.000Z | mp_sort/virtenv/lib/python3.6/site-packages/transcrypt/demos/pysteroids_demo/org/theodox/__init__.py | ang-jason/fip_powerx_mini_projects-foxtrot | 37e3671969b516369e2d1c7cab5890b75c489f56 | [
"MIT"
] | 672 | 2016-10-12T16:36:48.000Z | 2022-03-25T00:57:04.000Z | mp_sort/virtenv/lib/python3.6/site-packages/transcrypt/demos/pysteroids_demo/org/theodox/__init__.py | ang-jason/fip_powerx_mini_projects-foxtrot | 37e3671969b516369e2d1c7cab5890b75c489f56 | [
"MIT"
] | 230 | 2016-10-20T14:31:40.000Z | 2022-03-16T15:57:15.000Z |
import math
import itertools
class Vector:
"""
Generic vector operations.
"""
def _apply(self,op, other):
pairwise = None
if type(other) is Vector:
pairwise = zip(self.vals, other.vals)
else:
pairwise = zip(self.vals, [other for _ in self.vals])
return Vector(*itertools.starmap(op, pairwise))
def __init__(self, *args):
self.vals = args
def __add__(self, other):
return self._apply(lambda a, b: a + b, other)
def __sub__(self, other):
return self._apply(lambda a, b: a - b, other)
def __mul__(self, other):
return self._apply(lambda a, b: a*b, other)
def __div__(self, other):
return self._apply(lambda a, b: a / b, other)
def length(self):
total = sum(map(lambda a: math.pow(a, 2), self.vals))
return math.sqrt(total)
def normalized(self):
divisor = [self.length()] * len(self)
return Vector(*(self / divisor))
def __iter__(self):
return py_iter(self.vals)
@classmethod
def map(cls, *args):
return args[0].map(args[1:])
def __getitem__(self, item):
return self.values[item]
def __str__(self):
return str(self.vals)
def __len__(self):
return len(self.vals)
@classmethod
def add(cls, a, b):
return Vector(*a) + Vector(*b)
@classmethod
def sub(cls, a, b):
return Vector(*a) - Vector(*b)
@classmethod
def mul(cls, a, b):
return Vector(*a) * Vector(*b)
@classmethod
def div(cls, a, b):
return Vector(*a) / Vector(*b)
@classmethod
def dot(cls, left, right):
return sum(Vector.mul(left, right))
@classmethod
def norm_dot(Vector, left, right):
left = Vector(*left).normalized()
right = Vector(*right).normalized()
return sum(Vector.mul(left, right))
| 22.829545 | 66 | 0.548034 | 1,964 | 0.977601 | 0 | 0 | 660 | 0.328522 | 0 | 0 | 44 | 0.021901 |
e29236ebdd1c173cccb70069b6ef58e0d5f8c5b3 | 1,230 | py | Python | gae/settings.py | fredsa/instant-tty | 31778228f08b26109b23d2d5bd2f72393228ef7b | [
"Apache-2.0"
] | 1 | 2015-09-15T23:01:34.000Z | 2015-09-15T23:01:34.000Z | gae/settings.py | fredsa/instant-tty | 31778228f08b26109b23d2d5bd2f72393228ef7b | [
"Apache-2.0"
] | null | null | null | gae/settings.py | fredsa/instant-tty | 31778228f08b26109b23d2d5bd2f72393228ef7b | [
"Apache-2.0"
] | null | null | null | """Module containing global playground constants and functions."""
import os
from google.appengine.api import app_identity
from google.appengine.api import backends
DEBUG = True
COMPUTE_IDLE_INSTANCES_TARGET = 0
COMPUTE_INSTANCE_TTL_MINUTES = 10
COMPUTE_PROJECT_ID = app_identity.get_application_id()
COMPUTE_ZONE = 'us-central1-a'
COMPUTE_SCOPE = 'https://www.googleapis.com/auth/compute'
STORAGE_SCOPE_READ_ONLY = 'https://www.googleapis.com/auth/devstorage.read_only'
COMPUTE_MACHINE_TYPE = 'f1-micro'
# whether or not we're running in the dev_appserver
DEV_MODE = os.environ['SERVER_SOFTWARE'].startswith('Development/')
# RFC1113 formatted 'Expires' to prevent HTTP/1.0 caching
LONG_AGO = 'Mon, 01 Jan 1990 00:00:00 GMT'
JSON_MIME_TYPE = 'application/json'
# ACCESS_KEY_SET_COOKIE_PARAM_NAME = 'set_access_key_cookie'
# ACCESS_KEY_HTTP_HEADER = 'X-App-Access-Key'
# ACCESS_KEY_COOKIE_NAME = 'access_key'
# ACCESS_KEY_COOKIE_ARGS = {
# 'httponly': True,
# 'secure': not DEV_MODE,
# }
# name for the session cookie
SESSION_COOKIE_NAME = 'session'
SESSION_COOKIE_ARGS = {
'httponly': True,
'secure': not DEV_MODE,
}
XSRF_COOKIE_ARGS = {
'httponly': False,
'secure': not DEV_MODE,
}
| 21.578947 | 80 | 0.753659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 675 | 0.54878 |
e292ecca162d2532e3d87a925cd18ac31886440b | 4,984 | py | Python | experiments/ucf101.py | srph25/videoonenet | a935ef62c53d43bf52e0089b3b2078ece9f3b014 | [
"MIT"
] | null | null | null | experiments/ucf101.py | srph25/videoonenet | a935ef62c53d43bf52e0089b3b2078ece9f3b014 | [
"MIT"
] | null | null | null | experiments/ucf101.py | srph25/videoonenet | a935ef62c53d43bf52e0089b3b2078ece9f3b014 | [
"MIT"
] | null | null | null | import numpy as np
import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH']='true'
import datetime
from sacred import Experiment
from sacred.observers import FileStorageObserver
from datasets.ucf101 import UCF101Dataset
from algorithms.kerasvideoonenet import KerasVideoOneNet
from algorithms.kerasvideoonenet_admm import KerasVideoOneNetADMM
from algorithms.numpyvideowaveletsparsity_admm import NumpyVideoWaveletSparsityADMM
name = os.path.basename(__file__).split('.')[0]
ex = Experiment(name)
dt = datetime.datetime.now()
results_dir = 'results/' + name + '/' + '{y:04d}{mo:02d}{d:02d}{h:02d}{mi:02d}{s:02d}_{p:05d}'.format(y=dt.year, mo=dt.month, d=dt.day, h=dt.hour, mi=dt.minute, s=dt.second, p=os.getpid()) + '_' + os.uname()[1]
ex.observers.append(FileStorageObserver.create(results_dir))
@ex.config
def cfg():
_data = {'path': 'datasets/UCF-101',
'path_split': 'datasets/ucfTrainTestlist',
'split': 1,
'batch_size': 2,
'frames': 4,
'size': 32,
'dtype': 'float32',
'problems_test': [('inpaint', {'drop_prob': 0.5}),
('center', {'box_size_ratio': 0.4}),
('block', {'box_size_ratio': 0.2,
'total_box': 10}),
('superres', {'resize_ratio': 0.5}),
('superres', {'resize_ratio': 0.25}),
('cs', {'compress_ratio': 0.1}),
('videocs', {'compress_ratio': 0.1}),
('blurdisk', {'size': 4,
'radius': 2.}),
('blurmotion', {'size': 7}),
('videoblurdisk', {'size': 4,
'radius': 2.}),
('frameinterp', {'interp_ratio': 0.5}),
('frameinterp', {'interp_ratio': 0.25}),
('prediction', {'predict_ratio': 0.75}),
('prediction', {'predict_ratio': 0.5}),
('prediction', {'predict_ratio': 0.25}),
('colorization', {})]}
_data['problems_train'] = _data['problems_test']
_algo = {'batch_size': _data['batch_size'],
'shape1': _data['frames'],
'shape2': _data['size'],
'shape3': _data['size'],
'shape4': 3,
'max_iter': 13,
'filters': 256,
'filter_size_enc': 5,
'filter_size_dec': 7,
'rnn': True,
'l2': 0.,
'epochs': 50,
'patience': 5,
'lr': 1e-4,
'clipnorm': 1.,
'dtype': _data['dtype'],
'workers': 14,
'max_queue_size': 2,
'gpus': 2}
@ex.named_config
def videoonenet():
_algo = {'mode': 'videoonenet',
'rho': 0.3}
@ex.named_config
def videoonenetadmm():
_algo = {'mode': 'videoonenetadmm',
'rho': 0.3}
@ex.named_config
def videowaveletsparsityadmm():
_algo = {'mode': 'videowaveletsparsityadmm',
'rho': 0.3,
'lambda_l1': 0.05}
@ex.named_config
def rnn():
_algo = {'rnn': True}
@ex.named_config
def nornn():
_algo = {'rnn': False}
@ex.automain
def run(_data, _algo, _rnd, _seed):
_data_train = _data.copy()
_data_train['problems'] = _data['problems_train']
data = UCF101Dataset(config=_data_train, seed=_seed)
if _algo['mode'] == 'videoonenet':
alg = KerasVideoOneNet(results_dir=results_dir, config=_algo)
elif _algo['mode'] == 'videoonenetadmm':
alg = KerasVideoOneNetADMM(results_dir=results_dir, config=_algo)
elif _algo['mode'] == 'videowaveletsparsityadmm':
alg = NumpyVideoWaveletSparsityADMM(results_dir=results_dir, config=_algo)
alg.build()
result = []
alg.train(data.seq_train, X_val=data.seq_val)
# remove threading from generator for reproducibility in testing
_algo_problem = _algo.copy()
_algo_problem['workers'] = 1
_algo_problem['max_queue_size'] = 1
alg.set_config(_algo_problem)
for problem in _data['problems_test']:
_data_test = _data.copy()
_data_test['problems'] = [problem]
# generate some test images
data.generate_sequences(config=_data_test, seed=_seed)
for b, batch in enumerate(np.linspace(0, len(data.seq_test), 32, endpoint=False).astype(np.int64)):
alg.plot_predictions(data.seq_test[batch], problem, filepath=(results_dir + '/videos_test_%04d.png') % b)
# evaluate mean loss on test images
data.generate_sequences(config=_data_test, seed=_seed)
result_test = alg.test(data.seq_test)
result.append([problem[0], problem[1], result_test])
print(result[-1])
return result
| 36.647059 | 210 | 0.541734 | 0 | 0 | 0 | 0 | 4,173 | 0.837279 | 0 | 0 | 1,280 | 0.256822 |
e2943c239d9c2e7a22ad3b9b20bec4da90c41c08 | 594 | py | Python | master/fresh-samples-master/fresh-samples-master/python_samples/create_contact.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 4 | 2018-09-07T15:35:24.000Z | 2019-03-27T09:48:12.000Z | master/fresh-samples-master/fresh-samples-master/python_samples/create_contact.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | master/fresh-samples-master/fresh-samples-master/python_samples/create_contact.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 3 | 2019-06-18T19:57:17.000Z | 2020-11-06T03:55:08.000Z | ## This script requires "requests": http://docs.python-requests.org/
## To install: pip install requests
import requests
import json
FRESHDESK_ENDPOINT = "http://YOUR_DOMAIN.freshdesk.com" # check if you have configured https, modify accordingly
FRESHDESK_KEY = "YOUR_API_TOKEN"
user_info = {"user":{"name":"Example User", "email":"example@example.com"}}
my_headers = {"Content-Type": "application/json"}
r = requests.post(FRESHDESK_ENDPOINT + '/contacts.json',
auth=(FRESHDESK_KEY, "X"), data=json.dumps(user_info),
headers=my_headers)
print r.status_code
print r.content
| 31.263158 | 112 | 0.734007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.52862 |
e294c03490a934e79c6f93eaa739cbcd7738d18b | 1,102 | py | Python | ordenes/migrations/0003_auto_20200307_0359.py | Omar-Gonzalez/echangarro-demo | a7a970d9793c5e467ca117e9f515a9da423fac14 | [
"MIT"
] | null | null | null | ordenes/migrations/0003_auto_20200307_0359.py | Omar-Gonzalez/echangarro-demo | a7a970d9793c5e467ca117e9f515a9da423fac14 | [
"MIT"
] | 9 | 2021-03-19T11:25:28.000Z | 2022-03-12T00:35:18.000Z | ordenes/migrations/0003_auto_20200307_0359.py | Omar-Gonzalez/echangarro-demo | a7a970d9793c5e467ca117e9f515a9da423fac14 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2020-03-07 03:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ordenes', '0002_auto_20200305_0056'),
]
operations = [
migrations.AddField(
model_name='orden',
name='guia_de_envio',
field=models.CharField(blank=True, max_length=640, null=True),
),
migrations.AlterField(
model_name='orden',
name='estado',
field=models.CharField(choices=[('TENTATIVA', 'TENTATIVA'), ('PENDIENTE PAGO', 'PENDIENTE PAGO'), ('PAGADO', 'PAGADO'), ('ENVIADO', 'ENVIADO'), ('ENTREGADO', 'ENTREGADO'), ('CANCELADO', 'CANCELADO'), ('DEVUELTO', 'DEVUELTO')], default='INICIADO', max_length=110),
),
migrations.AlterField(
model_name='orden',
name='preferencia_de_pago',
field=models.CharField(choices=[('MERCADO PAGO', 'MERCADO PAGO'), ('PAYPAL', 'PAYPAL'), ('TRANSFERENCIA BANCARIA', 'TRANSFERENCIA BANCARIA')], default='MERCADO LIBRE', max_length=110),
),
]
| 38 | 275 | 0.606171 | 1,009 | 0.915608 | 0 | 0 | 0 | 0 | 0 | 0 | 415 | 0.376588 |
e294deddbae6ebac7150aad7bc72409c4cbb3ce7 | 8,311 | py | Python | fantasyfootball/tiersweekly.py | Robert-F-Mulligan/fantasy-football | 2e13142abd371dfaebebd0babb110a5324702e55 | [
"MIT"
] | 1 | 2020-12-18T00:10:13.000Z | 2020-12-18T00:10:13.000Z | fantasyfootball/tiersweekly.py | Robert-F-Mulligan/fantasy-football | 2e13142abd371dfaebebd0babb110a5324702e55 | [
"MIT"
] | 7 | 2021-01-17T21:28:08.000Z | 2022-01-29T20:07:43.000Z | fantasyfootball/tiersweekly.py | Robert-F-Mulligan/fantasy-football | 2e13142abd371dfaebebd0babb110a5324702e55 | [
"MIT"
] | null | null | null | #tiersweekly.py
from fantasyfootball import tiers
from fantasyfootball import fantasypros as fp
from fantasyfootball import config
from fantasyfootball import ffcalculator
from fantasyfootball.config import FIGURE_DIR
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from matplotlib import pyplot as plt
from matplotlib import patches as mpatches
from matplotlib.lines import Line2D
import numpy as np
import matplotlib.style as style
from datetime import date
from os import path
from collections import OrderedDict
# Watch list of FLEX-eligible players (RB/WR/TE) used to filter the tier chart.
flex_list = [
    'Clyde Edwards-Helaire', 'Allen Robinson II', 'Adam Thielen',
    'Robert Woods', 'Austin Ekeler', 'Joe Mixon',
    'Terry McLaurin', 'Todd Gurley II', 'Chris Carson',
    'Stefon Diggs', 'Miles Sanders', 'Diontae Johnson',
    'Jarvis Landry', 'CeeDee Lamb', 'Melvin Gordon III',
    'John Brown', 'Hunter Henry', 'Mark Ingram II',
    'James White', 'Hayden Hurst', 'Sammy Watkins',
    'Tarik Cohen', 'Christian Kirk', 'Chris Herndon IV',
    'Leonard Fournette', 'Boston Scott', 'Frank Gore',
    'Chris Thompson', 'Michael Thomas', 'George Kittle',
    'Jack Doyle',
]
# Roster watch list ("work" team) of player names, in draft/priority order.
work_list = [
    'Robert Woods', 'CeeDee Lamb', 'Chris Carson',
    'Hunter Henry', 'Stefon Diggs', 'Todd Gurley II',
    'John Brown', 'Miles Sanders', 'Hayden Hurst',
    'Leonard Fournette', 'Michael Thomas', 'Golden Tate',
    'Rob Gronkowski',
]
# Roster watch list for the "sean" team, in draft/priority order.
sean_list = [
    'Adam Thielen', 'Robert Woods', 'Joe Mixon',
    'Todd Gurley II', 'Jarvis Landry', 'Melvin Gordon III',
    'Tarik Cohen', 'Christian Kirk', 'Jordan Reed',
    'Chris Thompson', 'George Kittle', 'Giovani Bernard',
    'Carlos Hyde',
]
# Roster watch list for the "justin" team, in draft/priority order.
# NOTE: these names are matched against FantasyPros `player_name` values via
# `.isin(...)`, so a misspelled name is silently dropped from the chart.
# Fixed: 'Mercole Hardman' -> 'Mecole Hardman' (correct NFL spelling).
justin_list = [
    'Clyde Edwards-Helaire',
    'Allen Robinson II',
    'Robert Woods',
    'Austin Ekeler',
    'Terry McLaurin',
    'Diontae Johnson',
    'Hunter Henry',
    'Mark Ingram II',
    'James White',
    'Sammy Watkins',
    'Frank Gore',
    'Jack Doyle',
    'Denzel Mims',
    'Justin Jackson',
    'Mecole Hardman'
]
# Player names whose suffix differs between data sources
# (e.g. 'Todd Gurley' vs 'Todd Gurley II').
different_spelling = [
    'Todd Gurley', 'Chris Herndon', 'Melvin Gordon',
    'Allen Robinson', 'Mark Ingram',
]
def make_clustering_viz_flex(tiers=15, kmeans=False, league=config.sean, player_cutoff=150, player_per_chart=50, x_size=20, y_size=15, covariance_type='diag', save=True, export=False, player_list=None):
    """Plot weekly FLEX (RB/WR/TE) expert-consensus ranks as colored tiers.

    Players are clustered on their (best, worst, avg) expert ranks with either
    KMeans (kmeans=True) or a GaussianMixture (default), then drawn across
    multiple figures of ``player_per_chart`` players each.

    tiers            -- number of clusters/tiers to fit
    kmeans           -- if True use KMeans, else GaussianMixture
    league           -- league scoring config passed to the FantasyPros scraper
    player_cutoff    -- keep only the top-N ranked FLEX players
    player_per_chart -- players drawn per figure
    x_size, y_size   -- figure dimensions (presumably applied later in the
                        function — confirm against the full source)
    covariance_type  -- GaussianMixture covariance structure
    save, export     -- output flags (handled later in the function)
    player_list      -- optional list of names; if given, only those players
                        are plotted
    """
    pos = 'FLEX'
    # One color per tier (supports up to 20 tiers) and one marker per position.
    palette = ['red', 'blue', 'green', 'orange', '#900C3F', 'maroon', 'cornflowerblue', 'greenyellow', 'coral', 'orchid', 'firebrick', 'lightsteelblue', 'palegreen', 'darkorange', 'crimson', 'darkred', 'aqua', 'forestgreen', 'navajowhite', 'mediumpurple']
    pos_shape = {
        'RB': 'o',
        'WR': 's',
        'TE': '^'
    }
    df = fp.create_fantasy_pros_ecr_df(league)
    # Derive each FLEX player's true position by mapping names from the
    # non-FLEX (position-specific) rows of the same rankings frame.
    pos_df = df.loc[df['pos'] != pos]
    pos_map = dict(zip(pos_df['player_name'].to_list(), pos_df['pos'].to_list()))
    df['pos_map'] = df['player_name'].map(pos_map)
    df = (df.loc[df['pos'] == pos]
            .sort_values('rank')
            .reset_index(drop=True)
            .head(player_cutoff)
    )
    df['rank'] = df['rank'].astype('int')
    today = date.today()
    date_str = today.strftime('%m.%d.%Y')
    # Cluster players on their best/worst/average expert ranks.
    x = df.loc[:, ['best', 'worst', 'avg']].copy()
    if kmeans:
        kmm = KMeans(n_clusters=tiers).fit(x)
        labels = kmm.predict(x)
    else: # Gaussian mixture (default)
        gmm = GaussianMixture(n_components=tiers, covariance_type=covariance_type, random_state=0).fit(x)
        labels = gmm.predict(x)
    # Re-number raw cluster labels 1..tiers in order of first appearance, so
    # tier 1 always corresponds to the top-ranked group of players.
    unique_labels = list(OrderedDict.fromkeys(labels))
    rank_dict = dict(zip(unique_labels, range(1,len(unique_labels)+1)))
    df['tiers'] = labels
    df['tiers'] = df['tiers'].map(rank_dict)
    style.use('ggplot')
    colors = dict(zip(range(1, tiers+1), palette[:tiers]))
    tier_lookup = dict(zip(palette[:tiers], range(1, tiers+1)))
    # Ceiling division: number of separate figures needed for player_cutoff.
    chart_n = (player_cutoff // player_per_chart) + (player_cutoff % player_per_chart > 0)
    # Optionally restrict the chart to a roster watch list.
    if isinstance(player_list, list):
        df = df.loc[df['player_name'].isin(player_list)].copy()
    for ix, chunk_df in enumerate(np.array_split(df, chart_n)):
        fig, ax = plt.subplots();
        min_tier = min(chunk_df['tiers'])
        max_tier = max(chunk_df['tiers'])
        patches = []
        # Legend handles: one colored patch per tier present in this chunk,
        # plus gray markers identifying the RB/WR/TE shapes.
        color_chunk = [colors[i] for i in range(min_tier, max_tier + 1)]
        patches = [mpatches.Patch(color=color, alpha=0.5, label=f'Tier {tier_lookup[color]}') for color in color_chunk]
        pos_patches = [Line2D([0], [0], color='gray', label=pos, marker=shape, lw=0, markersize=12) for pos, shape in pos_shape.items()]
        for _, row in chunk_df.iterrows():
            xmin = row['best']
            xmax = row['worst']
            ymin, ymax = row['rank'], row['rank']
            center = row['avg']
            player = row['player_name'] + ', ' +row['tm'] + ' (' + row['pos_map'] + ')'
            tier = row['tiers']
            # Gray marker at the average rank; marker shape encodes position.
            plt.scatter(center, ymax, color='gray', zorder=2, s=100, marker=pos_shape[row['pos_map']])
            # Tier-colored tick at the best (leftmost) expert rank.
            plt.scatter(xmin, ymax, marker= "|", color=colors.get(tier, 'moccasin'), alpha=0.5, zorder=1)
plt.scatter(xmax, ymax, marker= "|", color=colors.get(tier, 'moccasin'), alpha=0.5, zorder=1)
plt.plot((xmin, xmax), (ymin, ymax), color=colors.get(tier, 'moccasin'), alpha=0.5, zorder=1, linewidth=5.0)
plt.annotate(player, xy=(xmax+1, ymax))
#first legend
first_legend = plt.legend(handles=pos_patches, loc='lower left', borderpad=1, fontsize=12)
ax = plt.gca().add_artist(first_legend)
#second legend
plt.legend(handles=patches, borderpad=1, fontsize=12)
if player_list is not None:
league_name = league['name']
plt.title(f'{date_str} Fantasy Football Weekly - {pos} - {league_name} - {ix+1}')
else:
plt.title(f'{date_str} Fantasy Football Weekly - {pos} {ix+1}')
plt.xlabel('Average Expert Overall Rank')
plt.ylabel('Expert Consensus Position Rank')
fig.set_size_inches(x_size, y_size)
plt.gca().invert_yaxis()
#plt.tight_layout()
if save:
if kmeans:
if player_list is not None:
plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_kmeans__FLEX_{league_name}_{ix+1}.png'))
else:
plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_kmeans__{pos}_{ix+1}.png'))
else:
if player_list is not None:
plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_gmm__FLEX_list{league_name}_{ix+1}.png'))
else:
plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_gmm_{pos}_{ix+1}.png'))
if export:
df.to_csv(path.join(FIGURE_DIR,fr'{date_str}_ecr_tiers.csv'), index=False)
#return plt.show()
if __name__ == "__main__":
#run elbow chart or AIC/BIC chart to estimate optimal number of k for each pos
#revisit week 1 to see if URL changes for each week - if so, refactor viz func and fp df func
sean = config.sean
work = config.work
justin = config.justin
pos_tier_dict_viz = {
'RB' : 8,
'QB' : 6,
'WR' : 5,
'TE' : 5,
'DST' : 6,
'K' : 7
}
tiers.make_clustering_viz(tier_dict=pos_tier_dict_viz, league=sean, pos_n=35, covariance_type='diag', draft=False, save=True)
make_clustering_viz_flex(export=True)
make_clustering_viz_flex(league=sean, player_list=sean_list)
make_clustering_viz_flex(league=work, player_list=work_list)
make_clustering_viz_flex(league=justin, player_list=justin_list)
| 35.823276 | 255 | 0.611599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,831 | 0.340633 |
e2971d2e298fe27edbcd67a69085c01471fc7263 | 2,629 | py | Python | makememe/generator/prompts/types/ineffective_solution.py | OthersideAI/makememe_ai | d5cbf598e09375862930b5d8548b37c8b2371bad | [
"MIT"
] | null | null | null | makememe/generator/prompts/types/ineffective_solution.py | OthersideAI/makememe_ai | d5cbf598e09375862930b5d8548b37c8b2371bad | [
"MIT"
] | null | null | null | makememe/generator/prompts/types/ineffective_solution.py | OthersideAI/makememe_ai | d5cbf598e09375862930b5d8548b37c8b2371bad | [
"MIT"
] | null | null | null | from makememe.generator.prompts.prompt import Prompt
import datetime
from PIL import Image
from makememe.generator.design.image_manager import Image_Manager
class Ineffective_Solution(Prompt):
name = "Ineffective_Solution"
description = "the solution was a poor way of doing it"
def __init__(self):
self.instruction = '''
###
Message: There is a bunch of traffic in town. The government decided to make the roads wider, but that's not the problem
Meme:{"attempted_solution":"more roads", "failure":"traffic"}
###
Message: Some people who brush their hair still get messy hair.
Meme:{"attempted_solution":"brush", "failure":"messy hair"}
###
Message: I go for a walk daily, but then I end up eating a donut. Pretty ineffective
Meme:{"attempted_solution":"walk daily", "failure":"eating a donut"}
###
Message: I drink coffee to be more awake, but then I can't sleep and I am tired the next day
Meme:{"attempted_solution":"drink coffee", "failure":"can't sleep and I am tired the next day"}
###
Message: I try to read a book to spend less time on my phone, but I end up googling concepts on my phone
Meme:{"attempted_solution":"read a book to spend less time on my phone", "failure":"end up googling concepts on my phone"}
###
Message: bralkajsd;
Meme:{"attempted_solution":"bralkajsd;", "failure":"bralkajsd;"}
###
Message: I wish AI could help me make memes
Meme:{"attempted_solution":"AI making memes", "failure":"The memes are beyond my sense of humor"}
###
'''
def create(self, meme_text):
with Image.open(f"makememe/static/meme_pics/{self.name.lower()}.jpg").convert("RGBA") as base:
overlay_image = Image_Manager.add_text(base=base, text=meme_text['attempted_solution'], position=(75, 75), font_size=50, text_color="white", wrapped_width=14)
overlay_image_2 = Image_Manager.add_text(base=base, text=meme_text['failure'], position=(125, 725), font_size=50, text_color="white", wrapped_width=15, rotate_degrees=350)
watermark = Image_Manager.add_text(base=base, text='makememe.ai', position=(20, 1150), font_size=25, text_color="white")
base = Image.alpha_composite(base, watermark)
base = Image.alpha_composite(base, overlay_image_2)
out = Image.alpha_composite(base, overlay_image)
if out.mode in ("RGBA", "P"):
out = out.convert("RGB")
date = datetime.datetime.now()
image_name = f'{date}.jpg'
file_location = f'makememe/static/creations/{image_name}'
out.save(file_location)
return image_name
| 47.8 | 183 | 0.691137 | 2,468 | 0.93876 | 0 | 0 | 0 | 0 | 0 | 0 | 1,396 | 0.531 |
e298277ea2466de2883498bb7a044f52d8a88109 | 665 | py | Python | dodo_commands/extra/dodo_standard_commands/decorators/pause.py | mnieber/dodo-commands | 82330006af2c6739b030ce932ba1ff9078b241ee | [
"MIT"
] | 8 | 2016-12-01T16:45:45.000Z | 2020-05-05T20:56:57.000Z | dodo_commands/extra/dodo_standard_commands/decorators/pause.py | mnieber/dodo-commands | 82330006af2c6739b030ce932ba1ff9078b241ee | [
"MIT"
] | 75 | 2017-01-29T19:25:45.000Z | 2020-01-28T09:40:47.000Z | dodo_commands/extra/dodo_standard_commands/decorators/pause.py | mnieber/dodo-commands | 82330006af2c6739b030ce932ba1ff9078b241ee | [
"MIT"
] | 2 | 2017-06-01T09:55:20.000Z | 2017-06-08T14:45:08.000Z | """Pauses the execution."""
import time
from dodo_commands.framework.decorator_utils import uses_decorator
class Decorator:
def is_used(self, config, command_name, decorator_name):
return uses_decorator(config, command_name, decorator_name)
def add_arguments(self, parser): # override
parser.add_argument(
"--pause-ms", type=int, help="Pause in milliseconds before continuing"
)
def modify_args(self, command_line_args, args_tree_root_node, cwd): # override
if getattr(command_line_args, "pause_ms", 0):
time.sleep(command_line_args.pause_ms / 1000)
return args_tree_root_node, cwd
| 33.25 | 83 | 0.709774 | 554 | 0.833083 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.165414 |
e29976708d7430c0f899314db03ca3391999d114 | 310 | py | Python | 1313decompressRunLength.py | vkaushik189/ltcode_solutions | 035a74ee51d636989aa183e16c6ebb81dfccf74c | [
"MIT"
] | null | null | null | 1313decompressRunLength.py | vkaushik189/ltcode_solutions | 035a74ee51d636989aa183e16c6ebb81dfccf74c | [
"MIT"
] | null | null | null | 1313decompressRunLength.py | vkaushik189/ltcode_solutions | 035a74ee51d636989aa183e16c6ebb81dfccf74c | [
"MIT"
] | null | null | null | class Solution:
def decompressRLElist(self, nums: List[int]) -> List[int]:
de = []
for i in range(0, len(nums), 2):
pair = []
pair.append(nums[i])
pair.append(nums[i + 1])
arr = [nums[i + 1]] * nums[i]
de += arr
return de
| 28.181818 | 62 | 0.448387 | 309 | 0.996774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e299cca2abbc2b500fc05b59b682135e9c4e7a88 | 4,060 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/lib/schemas/user.py | thecoderstudio/cookiecutter-pyramid-api | b3122c0d98be7179bcf726c6527096c0327d7bb7 | [
"MIT"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/lib/schemas/user.py | thecoderstudio/cookiecutter-pyramid-api | b3122c0d98be7179bcf726c6527096c0327d7bb7 | [
"MIT"
] | 1 | 2021-12-17T15:10:21.000Z | 2021-12-17T15:10:21.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/lib/schemas/user.py | thecoderstudio/cookiecutter-pyramid-api | b3122c0d98be7179bcf726c6527096c0327d7bb7 | [
"MIT"
] | 1 | 2021-03-01T14:27:10.000Z | 2021-03-01T14:27:10.000Z | from marshmallow import (fields, post_load, Schema, validate, validates,
validates_schema, ValidationError)
from {{cookiecutter.project_slug}}.lib.hash import compare_plaintext_to_hash, hash_plaintext
from {{cookiecutter.project_slug}}.models.user import User, get_user_by_email_address
MIN_PASSWORD_LENGTH = 8
class UserSchema(Schema):
id = fields.UUID(dump_only=True)
email_address = fields.Email(required=True)
password = fields.Str(required=True, load_only=True,
validate=validate.Length(min=MIN_PASSWORD_LENGTH))
verified = fields.Bool(dump_only=True)
@validates('email_address')
def check_if_email_unique(self, value):
if get_user_by_email_address(value):
raise ValidationError(
"A user with this email address already exists."
)
def hash_password(self, data, **kwargs):
data['password_hash'], data['password_salt'] = hash_plaintext(
data.pop('password')
)
return data
class CreateUserSchema(UserSchema):
@post_load
def create_user(self, data, **kwargs):
return User(**self.hash_password(data))
class UpdateUserSchema(UserSchema):
current_password = fields.Str(load_only=True)
verification_token = fields.Str(load_only=True)
def __init__(
self,
user: User = None,
requires_current_password: bool = True,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.context['user'] = user
self.context['requires_current_password'] = requires_current_password
@validates_schema
def validate_current_password_if_required(self, data, **kwargs):
if (not data.get('password') or
not self.context['requires_current_password']):
return
try:
current_password = data['current_password']
except KeyError:
raise ValidationError(
"This field is required when setting a new password",
'current_password'
)
self.validate_current_password(current_password)
def validate_current_password(self, current_password):
context_user = self.context['user']
if not compare_plaintext_to_hash(
current_password,
context_user.password_hash,
context_user.password_salt
):
raise ValidationError(
"Given password is incorrect",
'current_password'
)
@validates('verification_token')
def validate_verification_token(self, token):
context_user = self.context['user']
if context_user.verified:
raise ValidationError("This user is already verified.")
verification_token = context_user.active_verification_token
try:
if compare_plaintext_to_hash(token,
verification_token.token_hash,
verification_token.token_salt):
verification_token.used = True
return
except AttributeError:
# No token, continue to raise
pass
raise ValidationError("The given token is incorrect or expired.")
@post_load
def hash_password(self, data, **kwargs):
try:
return super().hash_password(data)
except KeyError:
# No password, which is fine since it's not required.
return data
@post_load
def set_verified(self, data, **kwargs):
if data.pop('verification_token', None):
data['verified'] = True
return data
class VerifyUserSchema(Schema):
def __init__(self, user: User = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.context['user'] = user
@validates_schema
def check_if_already_verified(self, data, **kwargs):
if self.context['user'].verified:
raise ValidationError("This user is already verified.")
| 32.222222 | 92 | 0.624384 | 3,710 | 0.913793 | 0 | 0 | 2,160 | 0.53202 | 0 | 0 | 570 | 0.140394 |
e299fc88d0fedbfb54b4403ee7bcb78d6d49e345 | 568 | py | Python | selfbot/types/sub_command.py | TibebeJS/tg-selfbot | ad36399597b7277768649d6645d57611a2928259 | [
"MIT"
] | 1 | 2021-03-05T12:03:53.000Z | 2021-03-05T12:03:53.000Z | selfbot/types/sub_command.py | TibebeJS/tg-selfbot | ad36399597b7277768649d6645d57611a2928259 | [
"MIT"
] | null | null | null | selfbot/types/sub_command.py | TibebeJS/tg-selfbot | ad36399597b7277768649d6645d57611a2928259 | [
"MIT"
] | 1 | 2021-01-14T18:03:11.000Z | 2021-01-14T18:03:11.000Z | class SubCommand:
def __init__(self, command, description="", arguments=[], mutually_exclusive_arguments=[]):
self._command = command
self._description = description
self._arguments = arguments
self._mutually_exclusive_arguments = mutually_exclusive_arguments
def getCommand(self):
return self._command
def get_description(self):
return self._description
def getArguments(self):
return self._arguments
def getMutuallyExclusiveArguments(self):
return self._mutually_exclusive_arguments | 40.571429 | 95 | 0.71831 | 568 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0.003521 |
e29a8632b4bdbd80579ffbed9fa3602b87d880cb | 1,304 | py | Python | src/anaplan_api/Model.py | jeswils-ap/anaplan_transactional_api | 54ef377df8f885906c911b302d21b8927602cc6c | [
"BSD-2-Clause"
] | null | null | null | src/anaplan_api/Model.py | jeswils-ap/anaplan_transactional_api | 54ef377df8f885906c911b302d21b8927602cc6c | [
"BSD-2-Clause"
] | null | null | null | src/anaplan_api/Model.py | jeswils-ap/anaplan_transactional_api | 54ef377df8f885906c911b302d21b8927602cc6c | [
"BSD-2-Clause"
] | null | null | null | import logging
from typing import List
from .AnaplanRequest import AnaplanRequest
from .User import User
from .ModelDetails import ModelDetails
logger = logging.getLogger(__name__)
class Model(User):
def get_models_url(self) -> AnaplanRequest:
"""Get list of all Anaplan model for the specified user.
:return: Object containing API request details
:rtype: AnaplanRequest
"""
url = ''.join([super().get_url(), super().get_id(), "/models"])
get_header = {
"Content-Type": "application/json"
}
return AnaplanRequest(url=url, header=get_header)
@staticmethod
def parse_models(model_list: dict) -> List[ModelDetails]:
"""Get list of all Anaplan model for the specified user.
:param model_list: JSON list of models accessible to the current user
:type model_list: dict
:raises AttributeError: No models available for specified user.
:return: Details for all models the user can access.
:rtype: List[ModelDetails]
"""
model_details_list = [ModelDetails]
logger.info(f"Parsing models...")
if 'models' in model_list:
models = model_list['models']
logger.info("Finished parsing models.")
for item in models:
model_details_list.append(ModelDetails(item))
return model_details_list
else:
raise AttributeError("Models not found in response.")
| 27.166667 | 71 | 0.736196 | 1,119 | 0.858129 | 0 | 0 | 733 | 0.562117 | 0 | 0 | 583 | 0.447086 |
e29aea0adeb87fbcda39b28f2cebe8dcefd85597 | 883 | py | Python | tests/unit/lms/extensions/feature_flags/views/_predicates_test.py | mattdricker/lms | 40b8a04f95e69258c6c0d7ada543f4b527918ecf | [
"BSD-2-Clause"
] | 38 | 2017-12-30T23:49:53.000Z | 2022-02-15T21:07:49.000Z | tests/unit/lms/extensions/feature_flags/views/_predicates_test.py | mattdricker/lms | 40b8a04f95e69258c6c0d7ada543f4b527918ecf | [
"BSD-2-Clause"
] | 1,733 | 2017-11-09T18:46:05.000Z | 2022-03-31T11:05:50.000Z | tests/unit/lms/extensions/feature_flags/views/_predicates_test.py | mattdricker/lms | 40b8a04f95e69258c6c0d7ada543f4b527918ecf | [
"BSD-2-Clause"
] | 10 | 2018-07-11T17:12:46.000Z | 2022-01-07T20:00:23.000Z | from unittest import mock
from lms.extensions.feature_flags.views._predicates import FeatureFlagViewPredicate
class TestFeatureFlagsViewPredicate:
def test_text(self):
assert (
FeatureFlagViewPredicate("test_feature", mock.sentinel.config).text()
== "feature_flag = test_feature"
)
def test_phash(self):
assert (
FeatureFlagViewPredicate("test_feature", mock.sentinel.config).phash()
== "feature_flag = test_feature"
)
def test_it_delegates_to_request_dot_feature(self, pyramid_request):
view_predicate = FeatureFlagViewPredicate("test_feature", mock.sentinel.config)
matches = view_predicate(mock.sentinel.context, pyramid_request)
pyramid_request.feature.assert_called_once_with("test_feature")
assert matches == pyramid_request.feature.return_value
| 33.961538 | 87 | 0.714609 | 769 | 0.870895 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.129105 |
e29bd2810c7f8c926395ee26eadb354bd458bdc4 | 468 | py | Python | 2015/python/01.py | gcp825/advent_of_code | b4ea17572847e1a9044487041b3e12a0da58c94b | [
"MIT"
] | 1 | 2021-12-29T09:32:08.000Z | 2021-12-29T09:32:08.000Z | 2015/python/01.py | gcp825/advent_of_code | b4ea17572847e1a9044487041b3e12a0da58c94b | [
"MIT"
] | null | null | null | 2015/python/01.py | gcp825/advent_of_code | b4ea17572847e1a9044487041b3e12a0da58c94b | [
"MIT"
] | null | null | null | def read_file(filepath):
with open(filepath,'r') as i:
inst = [int(x) for x in i.read().replace(')','-1,').replace('(','1,').strip('\n').strip(',').split(',')]
return inst
def calculate(inst,floor=0):
for i,f in enumerate(inst):
floor += f
if floor < 0: break
return sum(inst), i+1
def main(filepath):
pt1, pt2 = calculate(read_file(filepath))
return pt1, pt2
print(main('1.txt'))
| 22.285714 | 112 | 0.532051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.074786 |
e29c27f26d7b20a1fcc5692f39ceeba53ab303aa | 4,810 | py | Python | Unit3_StructuredTypes/ps3_hangman.py | myzzdeedee/MITx_6001x | 0843ac666e1d58e52bd09c8ce9144fe9d6eb78c8 | [
"MIT"
] | null | null | null | Unit3_StructuredTypes/ps3_hangman.py | myzzdeedee/MITx_6001x | 0843ac666e1d58e52bd09c8ce9144fe9d6eb78c8 | [
"MIT"
] | null | null | null | Unit3_StructuredTypes/ps3_hangman.py | myzzdeedee/MITx_6001x | 0843ac666e1d58e52bd09c8ce9144fe9d6eb78c8 | [
"MIT"
] | null | null | null | # Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
import string
WORDLIST_FILENAME = "/Users/deedeebanh/Documents/MITx_6.00.1.x/ProblemSet3/words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def chooseWord(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
# FILL IN YOUR CODE HERE...
isAinB = [item in lettersGuessed for item in secretWord]
return (all(isAinB))
def getGuessedWord(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
# FILL IN YOUR CODE HERE...
store = list('_'*len(secretWord)) #first set up ___ = length of secretWord
for i in range(len(secretWord)):
for j in range(len(lettersGuessed)):
if lettersGuessed[j] == secretWord[i]:
store[i] = lettersGuessed[j] #replace _ with the letter
return (''.join(store))
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
# FILL IN YOUR CODE HERE...
diff = [item for item in (list(string.ascii_lowercase)) if item not in lettersGuessed]
return (''.join(diff))
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
# FILL IN YOUR CODE HERE...
print('Welcome to the game, Hangman!')
print('I am thinking of a word that is ' + str(len(secretWord)) + ' letters long.')
print('-------------')
numOfGuesses = 8
lettersGuessed = list()
while numOfGuesses > 0:
print("You have " + str(numOfGuesses) + " guesses left.")
print("Available letters: " + getAvailableLetters(lettersGuessed))
var = input("Please guess a letter: ")
var = var.lower()
if var in lettersGuessed:
print("Oops! You've already guessed that letter: " + getGuessedWord(secretWord, lettersGuessed))
elif var not in secretWord:
print("Oops! That letter is not in my word: " + getGuessedWord(secretWord, lettersGuessed))
lettersGuessed.append(var)
numOfGuesses -= 1
else:
lettersGuessed.append(var)
print("Good Guess: " + getGuessedWord(secretWord, lettersGuessed))
print("------------")
if (isWordGuessed(secretWord, lettersGuessed) == True):
print("Congratulations, you won!")
return 1
if (numOfGuesses == 0):
print("Sorry, you ran out of guesses. The word was " + secretWord)
return 1
return 0
# When you've completed your hangman function, uncomment these two lines
# and run this file to test! (hint: you might want to pick your own
# secretWord while you're testing)
secretWord = chooseWord(wordlist).lower()
#secretWord = 'c'
hangman(secretWord)
| 33.636364 | 108 | 0.65447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,853 | 0.593139 |
e29cf657d8c556834680fb3f3c80b7dad515e1b9 | 39,909 | py | Python | tesa/database_creation/annotation_task.py | clementjumel/master_thesis | 5a39657a212f794690e7c426f60e10ba70d50da9 | [
"Apache-2.0"
] | 2 | 2020-07-08T19:33:52.000Z | 2020-07-18T16:52:59.000Z | tesa/database_creation/annotation_task.py | clementjumel/master_thesis | 5a39657a212f794690e7c426f60e10ba70d50da9 | [
"Apache-2.0"
] | null | null | null | tesa/database_creation/annotation_task.py | clementjumel/master_thesis | 5a39657a212f794690e7c426f60e10ba70d50da9 | [
"Apache-2.0"
] | null | null | null | from database_creation.nyt_article import Article
from database_creation.utils import Tuple, Wikipedia, Query, Annotation
from numpy import split as np_split
from numpy.random import seed, choice
from time import time
from glob import glob
from collections import defaultdict
from pickle import dump, load, PicklingError
from pandas import DataFrame, read_csv
from unidecode import unidecode
from wikipedia import search, page, WikipediaException, DisambiguationError
from xml.etree.ElementTree import ParseError
from itertools import chain, combinations
from re import findall
class AnnotationTask:
def __init__(self, years, max_tuple_size, short, short_size, random, debug, random_seed, save, silent, corpus_path,
results_path):
"""
Initializes an instance of AnnotationTask, which creates the queries asked to the annotation workers and gathers
their answers.
Args:
years: it, years (int) of the database to analyse.
max_tuple_size: int, maximum size of the entities tuple to compute.
short: bool, if True, limit the dataset to [short_size] initial articles.
short_size: int, number of initial articles to keep in shorten corpus.
random: bool, if short, whether to pick the articles at random or take the first ones.
debug: bool, whether to, for each step, write its effect in a text file.
random_seed: int, the seed to use for the random processes of numpy.
save: bool, saving option.
silent: bool, silence option.
corpus_path: str, path to the NYT annotated corpus.
results_path: str, path to the results folder
"""
self.years = years
self.max_tuple_size = max_tuple_size
self.short = short
self.short_size = short_size
self.random = random if short else None
self.debug = debug
self.save = save
self.silent = silent
self.corpus_path = corpus_path
self.results_path = results_path
self.articles = None
self.entities = None
self.tuples = None
self.wikipedia = None
self.queries = None
self.task = None
self.annotations = None
self.modulo_articles = 500
self.modulo_tuples = 1000
self.modulo_entities = 100
seed(random_seed)
# region Decorators
class Verbose:
""" Decorator for the display of a simple message. """
def __init__(self, message):
""" Initializes the Verbose decorator message. """
self.message = message
def __call__(self, func):
""" Performs the call to the decorated function. """
def f(*args, **kwargs):
""" Decorated function. """
slf = args[0]
t0 = time()
getattr(slf, "print")(self.message)
res = func(*args, **kwargs)
getattr(slf, "print")("Done. Elapsed time: %is.\n" % round(time() - t0))
return res
return f
class Attribute:
""" Decorator for monitoring the length of an attribute. """
def __init__(self, attribute):
""" Initializes the Attribute decorator attribute. """
self.attribute = attribute
def __call__(self, func):
""" Performs the call to the decorated function. """
def f(*args, **kwargs):
""" Decorated function. """
slf = args[0]
attribute = getattr(slf, self.attribute)
length = len(attribute) if attribute is not None else 0
getattr(slf, "print")("Initial length of %s: %i." % (self.attribute, length))
res = func(*args, **kwargs)
attribute = getattr(slf, self.attribute)
length = len(attribute) if attribute is not None else 0
getattr(slf, "print")("Final length of %s: %i." % (self.attribute, length))
return res
return f
# endregion
# region Main methods
@Verbose("Preprocessing the database...")
def preprocess_database(self):
""" Performs the preprocessing of the database. """
self.compute_articles()
self.clean_articles(criterion=Article.criterion_content, to_keep=None)
self.compute_metadata()
self.compute_entities()
self.compute_tuples()
self.filter_no_tuple()
@Verbose("Preprocessing the articles...")
def process_articles(self):
""" Performs the preprocessing of the articles. """
self.compute_corpus_annotations()
self.compute_contexts()
self.filter_no_query()
@Verbose("Processing the wikipedia information...")
def process_wikipedia(self, load, file_name):
"""
Performs the processing of the wikipedia information of the database, or load it.
Args:
load: bool, if True, load an existing file, else, computes it.
file_name: str, name of the wikipedia file to save or load; if None, deal with the standard files name.
"""
if load:
self.load_attr_pkl(attribute_name='wikipedia', file_name=file_name, folder_name='wikipedia')
self.compute_wikipedia(load=load)
self.save_attr_pkl(attribute_name='wikipedia', file_name=file_name, folder_name='wikipedia')
@Verbose("Processing the aggregation queries...")
def process_queries(self, load):
"""
Performs the processing of the annotation queries.
Args:
load: bool, if True, load an existing file.
"""
if load:
self.load_attr_pkl(attribute_name='queries', file_name=None, folder_name='queries')
else:
self.compute_queries()
self.save_attr_pkl(attribute_name='queries', file_name=None, folder_name='queries')
@Verbose("Processing the annotations batches...")
def process_annotation_batches(self, batches, batch_size, exclude_pilot):
"""
Saves a csv file for each batch of annotation task in queries/. If some csv files are already in queries/,
generate different queries than those already in there.
Args:
batches: int, number of batches to create.
batch_size: int, size of the batches.
exclude_pilot: bool, whether or not to take into account the pilot annotations.
"""
existing_ids, existing_batches = self.read_existing_batches(exclude_pilot=exclude_pilot)
self.save_annotation_batches(batches=batches,
batch_size=batch_size,
existing_ids=existing_ids,
existing_batches=existing_batches)
@Verbose("Processing the modeling task...")
def process_task(self, exclude_pilot):
"""
Process the annotations and the corresponding queries.
Args:
exclude_pilot: bool, whether or not to exclude the data from the pilot.
"""
self.compute_annotated_queries(exclude_pilot=exclude_pilot)
self.compute_annotations(exclude_pilot=exclude_pilot)
@Verbose("Combining the wikipedia files...")
def combine_wiki(self, current, in_names, out_name):
"""
Combines current wikipedia information and some other wikipedia files into a single file. Note that the
most up to date information should come from the last file form in_names.
Args:
current: bool, whether to use the current wikipedia information.
in_names: list, names of the file to combine.
out_name: str, name of the file to write in.
"""
out_wikipedia = {'found': dict(), 'not_found': set()}
if current:
self.print("Current wikipedia information: %i found/%i not_found..." % (len(self.wikipedia['found']),
len(self.wikipedia['not_found'])))
for type_ in ['found', 'not_found']:
out_wikipedia[type_].update(self.wikipedia[type_])
self.print("Global file updated: %i found/%i not_found.\n" % (len(out_wikipedia['found']),
len(out_wikipedia['not_found'])))
for in_name in in_names:
in_wikipedia = self.load_obj_pkl(file_name=in_name, folder_name='wikipedia')
self.print("File %s: %i found/%i not_found..." % (in_name, len(in_wikipedia['found']),
len(in_wikipedia['not_found'])))
for type_ in ['found', 'not_found']:
out_wikipedia[type_].update(in_wikipedia[type_])
self.print("Global file updated: %i found/%i not_found.\n" % (len(out_wikipedia['found']),
len(out_wikipedia['not_found'])))
self.save_obj_pkl(obj=out_wikipedia, file_name=out_name, folder_name='wikipedia')
self.wikipedia = out_wikipedia
@Verbose("Solving manually the wikipedia issues...")
def correct_wiki(self, step, out_name):
"""
Run the manual correction of the wikipedia tricky cases.
Args:
step: int, step of the correction to perform, between 1 and 4.
out_name: str, name of the wikipedia file to save; if None, deal with the standard files name.
"""
self.correction(step=step)
self.save_attr_pkl(attribute_name='wikipedia', file_name=out_name, folder_name='wikipedia')
# endregion
# region Methods compute_
@Verbose("Computing the database' article...")
@Attribute('articles')
def compute_articles(self):
    """ Computes and initializes the articles in the database. """
    # Collect every data .xml file for the selected years.
    patterns = [self.corpus_path + 'data/' + str(year) + '/*/*/*.xml' for year in self.years]
    paths = []
    for pattern in patterns:
        paths.extend(glob(pattern))
    if self.short and self.random:
        # Random subsample of the corpus, sorted afterwards for a deterministic ordering.
        paths = choice(a=paths, size=self.short_size, replace=False)
        paths.sort()
    elif self.short:
        paths.sort()
        paths = paths[:self.short_size]
    else:
        paths.sort()
    articles = dict()
    for data_path in paths:
        article_id = data_path.split('/')[-1].split('.')[0]
        year = data_path.split('/')[-4]
        content_path = self.corpus_path + 'content_annotated/' + str(year) + 'content_annotated/' \
            + article_id + '.txt.xml'
        summary_path = self.corpus_path + 'summary_annotated/' + str(year) + 'summary_annotated/' \
            + article_id + '.txt.xml'
        articles[article_id] = Article(data_path=data_path, content_path=content_path, summary_path=summary_path)
    self.articles = articles
    self.write_debug(field='articles', method='articles')
@Verbose("Computing the articles' metadata...")
def compute_metadata(self):
    """ Computes the metadata of the articles. """
    processed, total = 0, len(self.articles)
    for article_id in self.articles:
        processed = self.progression(count=processed, modulo=self.modulo_articles, size=total, text='article')
        self.articles[article_id].compute_metadata()
    self.write_debug(field='articles', method='metadata')
@Verbose("Computing the database' entities...")
@Attribute('entities')
def compute_entities(self):
    """ Compute the entities of the database.

    Builds self.entities, a mapping from entity name (str(entity)) to a single canonical
    entity object, merging duplicate mentions across articles, and rewrites each
    article.entities so every article points at those canonical objects.
    """
    self.entities = dict()
    count, size = 0, len(self.articles)
    for _, article in self.articles.items():
        count = self.progression(count=count, modulo=self.modulo_articles, size=size, text='article')
        try:
            entities = article.get_entities()
        except AssertionError:
            # get_entities() asserts name uniqueness within the article; on failure the
            # whole article's entity list is skipped.
            entities = []
            self.print("Several entities have the same name (%s); ignoring them..." %
                       '; '.join(article.get_vanilla_entities()))
        for entity in entities:
            if str(entity) in self.entities:
                try:
                    # Merge the new mention into the already-known entity.
                    self.entities[str(entity)].update_info(entity)
                except AssertionError:
                    # Same name but conflicting types: keep the first-seen type.
                    self.print("%s corresponds to both %s and %s, ignoring the later..." %
                               (str(entity), entity.type_, self.entities[str(entity)].type_))
            else:
                self.entities[str(entity)] = entity
        # Replace the article's entities with the canonical shared objects.
        article.entities = [self.entities[name] for name in [str(entity) for entity in entities]]
    self.write_debug(field='articles', method='article_entities')
    self.write_debug(field='entities', method='entities')
@Verbose("Computing the entity tuples...")
@Attribute('tuples')
def compute_tuples(self):
    """ Compute the Tuples of the database as a sorted list of Tuples (by number of articles). """
    def subtuples(s, max_size):
        """
        Compute all the possible sorted subtuples of len between 2 and max_size from a set s.
        Args:
            s: set, original set.
            max_size: int, maximal size of the tuples.
        Returns:
            set, all the possible sorted subtuples.
        """
        s = sorted(s)
        min_len, max_len = 2, min(len(s), max_size)
        return set(chain.from_iterable(combinations(s, r) for r in range(min_len, max_len + 1)))
    # Map each candidate entity-name tuple to the set of article ids it appears in.
    ids = defaultdict(set)
    count, size = 0, len(self.articles)
    for id_ in self.articles:
        count = self.progression(count=count, modulo=self.modulo_articles, size=size, text='article')
        # Group the article's entity names by entity type; tuples never mix types.
        entities = defaultdict(set)
        for entity in self.articles[id_].entities:
            entities[entity.type_].add(entity.name)
        for type_ in entities:
            for tuple_ in subtuples(s=entities[type_], max_size=self.max_tuple_size):
                ids[tuple_].add(id_)
    # Rank tuples by decreasing article count; ties broken deterministically by the
    # (descending) string form since reverse=True applies to the whole key.
    ranking = sorted(ids, key=lambda k: (len(ids[k]), str(k)), reverse=True)
    # Tuple ids are 1-based ranks.
    self.tuples = [Tuple(id_=str(rank + 1),
                         entities=tuple([self.entities[name] for name in tuple_]),
                         article_ids=ids[tuple_])
                   for rank, tuple_ in enumerate(ranking)]
    self.write_debug(field='tuples', method='tuples')
@Verbose("Computing the articles' annotations from the corpus...")
def compute_corpus_annotations(self):
    """ Computes the corpus annotations of the articles.

    Raises:
        Exception: when an article's annotation XML cannot be parsed; the message names
            the offending article id.
    """
    count, size = 0, len(self.articles)
    for id_ in self.articles:
        count = self.progression(count=count, modulo=self.modulo_articles, size=size, text='article')
        try:
            self.articles[id_].compute_corpus_annotations()
        except ParseError as err:
            # Chain the original ParseError so the underlying XML failure stays
            # visible in the traceback instead of being silently replaced.
            raise Exception("Data is not clean, remove data %s and start again." % id_) from err
    self.write_debug(field='articles', method='annotations')
@Verbose("Computing the contexts...")
def compute_contexts(self):
    """ Compute the contexts of the articles for each Tuple. """
    done, total = 0, len(self.tuples)
    for tuple_ in self.tuples:
        done = self.progression(count=done, modulo=self.modulo_tuples, size=total, text='tuple')
        collected = set()
        for article_id_ in tuple_.article_ids:
            article = self.articles[article_id_]
            article.compute_contexts(tuple_=tuple_)
            # One query id per (tuple, article, context) triple.
            for context_id_ in article.contexts[str(tuple_)]:
                collected.add(tuple_.id_ + '_' + article_id_ + '_' + context_id_)
        tuple_.query_ids = collected
    self.write_debug(field='articles', method='contexts')
@Verbose("Computing the Wikipedia information...")
def compute_wikipedia(self, load):
    """
    Compute the wikipedia information about the entities from self.tuples.
    Args:
        load: bool, if True, load an existing file.
    """
    # Start from the already-loaded information when load is True, from scratch otherwise.
    wikipedia = self.wikipedia if load else {'found': dict(), 'not_found': set()}
    self.print("Initial entries: %i found/%i not found." % (len(wikipedia['found']), len(wikipedia['not_found'])))
    try:
        count, size = 0, len(self.entities)
        for name, entity in self.entities.items():
            count = self.progression(count=count, modulo=self.modulo_entities, size=size, text='entity')
            if not load:
                # Always query Wikipedia; a missing summary means the page was not found.
                wiki = entity.get_wiki()
                if wiki.summary is not None:
                    wikipedia['found'][name] = wiki
                else:
                    wikipedia['not_found'].add(name)
            else:
                # Reuse cached results; only query Wikipedia for never-seen entities.
                if name in wikipedia['found']:
                    wiki = wikipedia['found'][name]
                elif name in wikipedia['not_found']:
                    wiki = Wikipedia()
                else:
                    wiki = entity.get_wiki()
                    if wiki.summary is not None:
                        wikipedia['found'][name] = wiki
                    else:
                        wikipedia['not_found'].add(name)
            entity.wiki = wiki
    except (KeyboardInterrupt, WikipediaException) as err:
        # Stop collecting but keep whatever was gathered so far; it is stored below.
        self.print("An error occurred, saving the loaded information and leaving... (%s)" % str(err))
    self.print("Final entries: %i found/%i not found." % (len(wikipedia['found']), len(wikipedia['not_found'])))
    self.wikipedia = wikipedia
    self.write_debug(field='wikipedia', method='wikipedia')
@Verbose("Computing the Queries...")
@Attribute('queries')
def compute_queries(self):
    """ Compute the Queries of the database. """
    queries = dict()
    count, size = 0, len(self.tuples)
    for tuple_ in self.tuples:
        count = self.progression(count=count, modulo=self.modulo_tuples, size=size, text='tuple')
        # Articles are visited in sorted order so query ids are generated deterministically.
        for article_id_ in sorted(tuple_.article_ids):
            article_contexts = self.articles[article_id_].contexts[str(tuple_)]
            for context_id_, context in article_contexts.items():
                # NOTE(review): the id here is built as article_tuple_context, whereas
                # compute_contexts fills tuple_.query_ids as tuple_article_context --
                # the two orderings do not match; confirm this is intentional.
                query_id_ = '_'.join([article_id_, tuple_.id_, context_id_])
                queries[query_id_] = Query(id_=query_id_,
                                           tuple_=tuple_,
                                           article=self.articles[article_id_],
                                           context=context)
    self.queries = queries
    self.write_debug(field='queries', method='queries')
@Verbose("Computing the annotated queries...")
@Attribute('queries')
def compute_annotated_queries(self, exclude_pilot):
    """
    Compute the queries corresponding to the annotations.
    Args:
        exclude_pilot: bool, whether or not to exclude the data from the pilot.
    """
    queries = dict()
    for full_path in sorted(glob(self.results_path + 'annotations/*/task/*.pkl')):
        relative = full_path.split(self.results_path)[1]
        parts = relative.split('/')
        version = parts[1]
        if exclude_pilot and 'pilot' in version:
            continue
        folder_name = '/'.join(parts[:-1])
        file_name = parts[-1].split('.pkl')[0]
        queries.update(self.load_obj_pkl(file_name=file_name, folder_name=folder_name))
    self.queries = queries
@Verbose("Computing the annotations...")
@Attribute('annotations')
def compute_annotations(self, exclude_pilot):
    """
    Compute the annotations of the Mechanical Turks.
    Args:
        exclude_pilot: bool, whether or not to exclude the data from the pilot.
    """
    annotations = defaultdict(list)
    for full_path in sorted(glob(self.results_path + 'annotations/*/results/*.csv')):
        relative = full_path.split(self.results_path)[1]
        version = relative.split('/')[1]
        batch = relative.split('/')[-1].replace('_complete.csv', '')
        if exclude_pilot and 'pilot' in version:
            continue
        df = read_csv(self.results_path + relative)
        self.print("%s loaded from %s" % (batch, relative))
        # One Annotation per worker answer, grouped by query id.
        for _, row in df.iterrows():
            id_ = row.get('Input.id_')
            annotations[id_].append(Annotation(id_=id_,
                                               version=version,
                                               batch=batch,
                                               row=row,
                                               silent=self.silent))
    self.annotations = annotations
# endregion
# region Cleaning methods
@Verbose("Cleaning the database's articles...")
@Attribute('articles')
def clean_articles(self, criterion, to_keep):
    """
    Removes from the database the articles which meet the Article's criterion or whose ids are not in to_keep.
    Args:
        criterion: function, criterion that an article must meet to be removed.
        to_keep: set, ids of the articles that must be kept.
    """
    if criterion is not None and to_keep is None:
        # Announce the criterion using the first non-empty line of its docstring
        # (the first 8 characters are the docstring indentation).
        first_doc_line = [line for line in criterion.__doc__.splitlines() if line][0]
        self.print("Criterion: %s" % first_doc_line[8:])
        doomed = {id_ for id_ in self.articles if criterion(self.articles[id_])}
    elif criterion is None and to_keep is not None:
        self.print("Criterion: keep only the designated articles.")
        doomed = {id_ for id_ in self.articles if id_ not in to_keep}
    else:
        raise Exception("Either a criterion or to_keep must be specified.")
    for id_ in doomed:
        del self.articles[id_]
@Verbose("Cleaning the database's tuples...")
@Attribute('tuples')
def clean_tuples(self, to_keep):
    """
    Removes from the database the tuples whose names are not in to_keep.
    Args:
        to_keep: set, names of the tuples that must be kept.
    """
    self.print("Criterion: keep only the designated tuples.")
    # Preserve the original ordering of the surviving tuples.
    self.tuples = [tuple_ for tuple_ in self.tuples if str(tuple_) in to_keep]
@Verbose("Cleaning the database's entities...")
@Attribute('entities')
def clean_entities(self, to_keep):
    """
    Removes from the database the entities whose names are not in to_keep.
    Args:
        to_keep: set, names of the entities that must be kept.
    """
    self.print("Criterion: keep only the designated entities.")
    # Materialize the doomed names first: deleting while iterating a dict is illegal.
    for name in [name for name in self.entities if name not in to_keep]:
        del self.entities[name]
@Verbose("Filtering the articles and entities that correspond to no tuple...")
def filter_no_tuple(self):
    """ Filter out the articles and entities that correspond to no tuple. """
    article_ids, entity_names = set(), set()
    for tuple_ in self.tuples:
        if tuple_.article_ids:
            article_ids.update(tuple_.article_ids)
            entity_names.update(str(entity) for entity in tuple_.entities)
    self.clean_articles(criterion=None, to_keep=article_ids)
    self.clean_entities(to_keep=entity_names)
@Verbose("Filtering the articles, tuples and entities that correspond to no query...")
def filter_no_query(self):
    """ Filter out the articles, tuples and entities that correspond to no query. """
    article_ids, tuple_names, entity_names = set(), set(), set()
    for tuple_ in self.tuples:
        if tuple_.query_ids:
            tuple_names.add(str(tuple_))
            article_ids.update(tuple_.article_ids)
            entity_names.update(str(entity) for entity in tuple_.entities)
    self.clean_tuples(to_keep=tuple_names)
    self.clean_articles(criterion=None, to_keep=article_ids)
    self.clean_entities(to_keep=entity_names)
# endregion
# region File methods
def file_name_suffix(self):
    """
    Returns a standardized ending for a file name.
    Returns:
        str, ending of the name of the file (after the basic name of the file):
        '_short' when the database runs in short mode, '' otherwise.
    """
    if self.short:
        return '_short'
    return ''
def save_attr_pkl(self, attribute_name, file_name, folder_name):
    """
    Save an attribute designated by its name using pickle.
    Args:
        attribute_name: str, name of the attribute to save.
        file_name: str, name of the file; if None, save an attribute with the attribute_name.
        folder_name: str, name of the folder to save in.
    """
    # Fall back to "<attribute><suffix>" when no explicit file name is given.
    target_name = file_name if file_name else attribute_name + self.file_name_suffix()
    self.save_obj_pkl(obj=getattr(self, attribute_name), file_name=target_name, folder_name=folder_name)
def save_obj_pkl(self, obj, file_name, folder_name):
    """
    Save an object using pickle.
    Args:
        obj: unknown type, object to save.
        file_name: str, name of the file.
        folder_name: str, name of the folder to save in.
    """
    full_path = self.results_path + folder_name + "/" + file_name + ".pkl"
    if not self.save:
        self.print("Not saving %s (not in save mode)." % full_path)
        return
    try:
        # protocol=-1 selects the highest pickle protocol available.
        with open(full_path, "wb") as file:
            dump(obj=obj, file=file, protocol=-1)
        self.print("Object saved at %s." % full_path)
    except PicklingError as err:
        self.print("Could not save (PicklingError), moving on: %s" % str(err))
def load_attr_pkl(self, attribute_name, file_name, folder_name):
    """
    Load an attribute designated by its name using pickle.
    Args:
        attribute_name: str, name of the attribute to load.
        file_name: str, name of the file to load; if None, load the file with the corresponding attribute_name.
        folder_name: str, name of the folder to load from.
    """
    # Fall back to "<attribute><suffix>" when no explicit file name is given.
    target_name = file_name if file_name else attribute_name + self.file_name_suffix()
    setattr(self, attribute_name, self.load_obj_pkl(file_name=target_name, folder_name=folder_name))
def load_obj_pkl(self, file_name, folder_name):
    """
    Load an object using pickle.
    Args:
        file_name: str, name of the file to load.
        folder_name: str, name of the folder to load from.
    Returns:
        unknown type, the unpickled object.
    """
    full_path = self.results_path + folder_name + "/" + file_name + ".pkl"
    with open(full_path, 'rb') as file:
        obj = load(file)
    self.print("Object loaded from %s." % full_path)
    return obj
@Verbose("Reading the existing annotation batches...")
def read_existing_batches(self, exclude_pilot):
    """
    Read in the folder queries and annotations the query ids and the batch indexes of the existing annotation
    batches (in .csv files).
    Args:
        exclude_pilot: bool, whether or not to take into account the pilot annotations.
    Returns:
        existing_ids: set, ids in the existing annotation batches.
        existing_batches: set, indexes of the existing annotation batches.
    """
    ids, idxs = set(), set()
    # Batches saved locally but possibly not annotated yet.
    for path in glob(self.results_path + "queries/*.csv"):
        batch = path.split("/")[-1].split(".")[0]
        if not exclude_pilot or "pilot" not in batch:
            df_ids = self._register_batch_csv(path, batch, ids, idxs)
            self.print("Reading %s from results/queries/ folder (%i queries)." % (batch, len(df_ids)))
    # Batches already sent out as annotation tasks.
    for path in glob(self.results_path + "annotations/*/task/*.csv"):
        version = path.split("/")[-3]
        batch = path.split("/")[-1].split(".")[0]
        if not exclude_pilot or 'pilot' not in version:
            df_ids = self._register_batch_csv(path, batch, ids, idxs)
            self.print("Reading existing batch %s from %s (%i queries)." % (batch, version, len(df_ids)))
    return ids, idxs

def _register_batch_csv(self, path, batch, ids, idxs):
    """
    Read one batch .csv file: accumulate its query ids into ids, and, when the file is a
    numbered batch ("batch_<idx>"), its index into idxs.
    Args:
        path: str, full path of the .csv file to read.
        batch: str, base name of the file (without extension).
        ids: set, accumulator of query ids (updated in place).
        idxs: set, accumulator of batch indexes (updated in place).
    Returns:
        set, query ids found in the file.
    """
    df = read_csv(path)
    df_ids = set([row.get('id_') for _, row in df.iterrows()])
    ids.update(df_ids)
    if batch.split("_")[0] == "batch":
        idxs.add(int(batch.split("_")[-1]))
    return df_ids
@Verbose("Saving new annotation batches...")
def save_annotation_batches(self, batches, batch_size, existing_ids, existing_batches):
    """
    Save annotation batches in .csv files. Don't save queries that have been already saved.
    Args:
        batches: int, number of batches to create.
        batch_size: int, size of the batches.
        existing_ids: set, ids in the existing annotation batches.
        existing_batches: set, indexes of the existing annotation batches.
    """
    all_ids = set(self.queries.keys())
    remaining_ids = all_ids.difference(existing_ids)
    self.print("Removing %i existing queries from the %i total queries; %i remaining queries." %
               (len(existing_ids), len(all_ids), len(remaining_ids)))
    # Draw batches * batch_size distinct ids at random (sorted first so the draw is
    # reproducible for a fixed seed), then cut the draw into equally sized batches.
    drawn = choice(a=sorted(remaining_ids), size=batches * batch_size, replace=False)
    id_chunks = np_split(drawn, batches)
    first_idx = max(existing_batches) + 1 if existing_batches else 0
    for offset, chunk in enumerate(id_chunks):
        idx = first_idx + offset
        # Zero-pad single-digit indexes so file names sort naturally.
        label = "0" + str(idx) if 0 <= idx < 10 else str(idx)
        df = DataFrame.from_records(data=[self.queries[id_].to_html() for id_ in chunk])
        file_name = self.results_path + "queries/batch_" + label + ".csv"
        if self.save:
            df.to_csv(file_name, index=False)
            self.print("batch_%s saved at %s." % (label, file_name))
        else:
            self.print("Not saving %s (not in save mode)." % file_name)
def write_debug(self, field, method):
    """
    Write the debugging of a method into a text file.
    Args:
        field: str, field of the database we want to debug.
        method: str, name of the method to debug.
    Raises:
        Exception: when field is not one of the known database fields.
    """
    if not self.debug:
        return
    # Gather [key, debug text] pairs for the requested field.
    if field == "articles":
        pairs = [[id_, getattr(article, "debug_" + method)()] for id_, article in self.articles.items()]
    elif field == "entities":
        pairs = [[name, entity.debug_entities()] for name, entity in self.entities.items()]
    elif field == "tuples":
        pairs = [[str(tuple_), tuple_.debug_tuples()] for tuple_ in self.tuples]
    elif field == "wikipedia":
        pairs = [[name, wiki.debug_wikipedia()] for name, wiki in self.wikipedia['found'].items()]
        pairs += [[name, ": not found"] for name in self.wikipedia['not_found']]
    elif field == "queries":
        pairs = [[id_, query.debug_queries()] for id_, query in self.queries.items()]
    else:
        raise Exception("Wrong field/method specified: %s/%s." % (field, method))
    # Keep only the entries with non-empty debug text.
    lines = [key + text + '\n' for key, text in pairs if text]
    if not lines:
        return
    file_name = self.results_path + "debug/" + method + ".txt"
    if self.save:
        with open(file_name, "w") as f:
            f.writelines(lines)
        self.print("Debugging Written in %s..." % file_name)
# region Other methods
def print(self, *args):
    """ Prints only if not in silent mode. """
    if self.silent:
        return
    print(*args)
def progression(self, count, modulo, size, text):
    """
    Prints progression's updates and update the count.
    Args:
        count: int, current count.
        modulo: int, how often to print updates.
        size: int, size of the element to count.
        text: str, what to print at the beginning of the updates.
    Returns:
        int, incremented count of articles.
    """
    count += 1
    # Only report every `modulo` items to keep the output readable.
    if not count % modulo:
        self.print(" %s %i/%i..." % (text, count, size))
    return count
def correction(self, step):
    """
    Performs the manual correction of the wikipedia information.

    The correction runs in 4 successive steps over the non-exact entries of
    self.wikipedia['found']:
        1: automatic matching of the entity name against the page title;
        2: interactive yes/no/open/discard confirmation of each title;
        3: interactive choice among a fresh wikipedia search;
        4: discard everything still unresolved into not_found.
    Args:
        step: int, step of the correction to perform, between 1 and 4.
    """
    to_correct = set([name for name, wiki in self.wikipedia['found'].items() if not wiki.exact])
    corrected = set()
    if not to_correct:
        self.print("All the %i entities are exact, no correction to be made." % len(self.wikipedia['found']))
        return
    if self.silent:
        raise Exception("Remove silent mode to correct the wikipedia information.")
    self.print("Entities to correct: %i/%i." % (len(to_correct), len(self.wikipedia['found'])))
    if step is None:
        raise Exception("There are entities to correct, precise a step.")
    try:
        if step == 1:
            # Step 1: mark as exact the entries whose normalized name matches the
            # normalized page title (title truncated before any parenthesis).
            count, size = 0, len(to_correct)
            for name in sorted(to_correct):
                count = self.progression(count=count, modulo=self.modulo_entities, size=size,
                                         text="to correct entity")
                preprocessed_name_1 = unidecode(name).lower().replace(".", "")
                # Second normalization also drops single-character words (initials).
                preprocessed_name_2 = " ".join([word for word in preprocessed_name_1.split() if len(word) > 1])
                title = self.wikipedia['found'][name].title
                before_parenthesis = findall(r'(.*?)\s*\(', title)
                before_parenthesis = before_parenthesis[0] if before_parenthesis and before_parenthesis[0] \
                    else title
                preprocessed_title_1 = unidecode(before_parenthesis).lower().replace(".", "")
                preprocessed_title_2 = " ".join([word for word in preprocessed_title_1.split() if len(word) > 1])
                if preprocessed_name_1 == preprocessed_title_1 or preprocessed_name_2 == preprocessed_title_2:
                    self.wikipedia['found'][name].exact = True
                    corrected.add(name)
            to_correct, corrected = to_correct.difference(corrected), set()
            self.print("First step over, remaining: %i/%i." % (len(to_correct), len(self.wikipedia['found'])))
        elif step == 2:
            # Step 2: ask the user to confirm each title; "o" shows the full page info
            # first, "d" discards the entry into not_found, "n" leaves it for later steps.
            count, size = 0, len(to_correct)
            for name in sorted(to_correct):
                count = self.progression(count=count, modulo=self.modulo_entities, size=size,
                                         text="to correct entity")
                while True:
                    answer = input(name + "/" + self.wikipedia['found'][name].title + ": is this good? [y/n/o/d]")
                    if answer in ["y", "n", "o", "d"]:
                        break
                    else:
                        self.print('Answer should be "y" (yes), "n" (no), "o" (open) or "d" (discard), try again.')
                if answer == "o":
                    while True:
                        answer = input(self.wikipedia['found'][name].get_info() + ": is this good? [y/n/d]")
                        if answer in ["y", "n", "d"]:
                            break
                        else:
                            self.print('Answer should be "y" (yes), "n" (no) or "d" (discard), try again.')
                if answer == "y":
                    self.wikipedia['found'][name].exact = True
                    corrected.add(name)
                elif answer == "d":
                    del self.wikipedia['found'][name]
                    self.wikipedia['not_found'].add(name)
                    corrected.add(name)
            to_correct, corrected = to_correct.difference(corrected), set()
            self.print("Second step over, remaining: %i/%i." % (len(to_correct), len(self.wikipedia['found'])))
        elif step == 3:
            # Step 3: run a fresh wikipedia search and let the user pick the right page
            # (0 discards the entry into not_found).
            count, size = 0, len(to_correct)
            for name in sorted(to_correct):
                count = self.progression(count=count, modulo=self.modulo_entities, size=size,
                                         text='to correct entity')
                wiki_search = search(name)
                self.print("Wikipedia search for %s:" % name)
                for cmpt, title in enumerate(wiki_search):
                    # Fixed: the original read `(str(cmpt + 1), + title)`; the unary "+"
                    # on a str raised a TypeError as soon as a result was printed.
                    self.print("%s: %s" % (str(cmpt + 1), title))
                while True:
                    try:
                        answer = int(input("Which number is the good one? (0 for giving up this example)"))
                        if answer in range(len(wiki_search) + 1):
                            break
                        else:
                            self.print("Answer should be between 0 and the length of the search, try again.")
                    except ValueError:
                        self.print("Answer should be an int, try again.")
                if answer == 0:
                    del self.wikipedia['found'][name]
                    self.wikipedia['not_found'].add(name)
                    corrected.add(name)
                    self.print("Considered not found.")
                else:
                    try:
                        p = page(wiki_search[answer - 1])
                        self.wikipedia['found'][name] = Wikipedia(p)
                    except DisambiguationError:
                        self.print("Search is still ambiguous, moving on to the next one...")
            to_correct, corrected = to_correct.difference(corrected), set()
            self.print("Third step over, remaining: %i/%i." % (len(to_correct), len(self.wikipedia['found'])))
        elif step == 4:
            # Step 4: give up on everything still unresolved and move it to not_found.
            count, size = 0, len(to_correct)
            for name in sorted(to_correct):
                count = self.progression(count=count, modulo=self.modulo_entities, size=size,
                                         text='to correct entity')
                del self.wikipedia['found'][name]
                self.wikipedia['not_found'].add(name)
                corrected.add(name)
            to_correct, corrected = to_correct.difference(corrected), set()
            # Fixed: the message used to say "Fifth step" although this is step 4.
            self.print("Fourth step over, remaining: %i/%i." % (len(to_correct), len(self.wikipedia['found'])))
        else:
            raise Exception("Wrong step specified.")
    except KeyboardInterrupt:
        # Leave gracefully; the caller (correct_wiki) saves whatever was corrected so far.
        self.print("Keyboard interruption, saving the results...")
# endregion
| 38.008571 | 120 | 0.564033 | 39,327 | 0.985417 | 0 | 0 | 23,741 | 0.594878 | 0 | 0 | 12,876 | 0.322634 |
e29e8433c1fea0696a62aa72a90be6729781a080 | 3,623 | py | Python | main.py | jeffkub/forecast-display | 74cccd459ee195115f4ae79a9e65ede1eed08145 | [
"MIT"
] | 2 | 2018-09-24T01:39:55.000Z | 2019-02-14T10:01:45.000Z | main.py | jeffkub/forecast-display | 74cccd459ee195115f4ae79a9e65ede1eed08145 | [
"MIT"
] | null | null | null | main.py | jeffkub/forecast-display | 74cccd459ee195115f4ae79a9e65ede1eed08145 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import argparse
from datetime import datetime
import json
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import uic
import os
import sys
from weather import Weather
# Directory containing this script; used to resolve bundled resources (fonts, layouts, config).
BASE_PATH = os.path.dirname(os.path.abspath(__file__))

# Display resolution in pixels (width, height) -- not referenced below; presumably
# matches the layout .ui file. TODO confirm.
DISP_SIZE = (640, 384)

# 0xAARRGGBB color values passed to the e-paper driver.
WHITE = 0xffffffff
BLACK = 0xff000000
RED = 0xffff0000
def get_config(argv):
    """Build the configuration dict: command-line arguments merged with the JSON config file.

    Keys present in the config file override the parsed command-line values.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', '-c', default=BASE_PATH + '/config.json')
    parser.add_argument('--outfile', '-o', default=None)
    parser.add_argument('--skip_weather', action='store_true')
    config = vars(parser.parse_args(argv))

    with open(config['config_file']) as file:
        config.update(json.load(file))

    return config
def main():
    """Build the forecast screen, render it offscreen, and write it to a file or the e-paper display."""
    # Initialize QT in offscreen mode (no window needed)
    app = QApplication(sys.argv + '-platform offscreen'.split())
    # Disable font anti-aliasing
    font = app.font()
    font.setStyleStrategy(QFont.NoAntialias)
    app.setFont(font)
    # Get configuration from command line and config file
    config = get_config(app.arguments()[1:])
    # Load fonts
    QFontDatabase.addApplicationFont(BASE_PATH + '/fonts/freefont/FreeSans.ttf')
    QFontDatabase.addApplicationFont(BASE_PATH + '/fonts/freefont/FreeSansBold.ttf')
    QFontDatabase.addApplicationFont(BASE_PATH + '/fonts/weather-icons/weathericons-regular-webfont.ttf')
    # Load weather icon map file
    with open(BASE_PATH + '/icon-mapping.json') as file:
        icon_map = json.load(file)
    # Get weather forecast and conditions
    weather = Weather(api_key=config['api_key'], city=config['city'], state=config['state'])
    conditions = weather.get_conditions()
    forecast = weather.get_forecast()
    # Get current time
    now = datetime.now()
    # Load display layout
    display = uic.loadUi(BASE_PATH + '/layout.ui')
    # Update the display with weather data
    # NOTE(review): the widget-population block (including the 4-day loop below) is
    # assumed to sit inside this skip_weather guard -- confirm against the original file.
    if not config['skip_weather']:
        display.high.setText('{}\N{DEGREE SIGN}'.format(forecast[0]['high']['fahrenheit']))
        display.low.setText('{}\N{DEGREE SIGN}'.format(forecast[0]['low']['fahrenheit']))
        display.temp.setText('{:.0f}\N{DEGREE SIGN}'.format(conditions['temp_f']))
        display.feels_like.setText('Feels like {:.0f}\N{DEGREE SIGN}'.format(float(conditions['feelslike_f'])))
        display.cond.setText(icon_map[conditions['icon']])
        display.percip.setText('{}%'.format(forecast[0]['pop']))
        display.weekday.setText(now.strftime('%A'))
        display.date.setText(now.strftime('%B %d'))
        # One row per upcoming day (the next four days of the forecast).
        for i in range(1, 5):
            day = uic.loadUi(BASE_PATH + '/day.ui')
            day.date.setText('{} {}'.format(forecast[i]['date']['weekday_short'].upper(), forecast[i]['date']['day']))
            day.cond.setText(icon_map[forecast[i]['icon']])
            day.high.setText('{}\N{DEGREE SIGN}'.format(forecast[i]['high']['fahrenheit']))
            day.low.setText('{}\N{DEGREE SIGN}'.format(forecast[i]['low']['fahrenheit']))
            day.percip.setText('{}%'.format(forecast[i]['pop']))
            display.forecast.addWidget(day)
    # Render to image
    img = QImage(display.size(), QImage.Format_RGB888)
    display.render(QPainter(img))
    if config['outfile']:
        # Save image to file
        img.save(config['outfile'])
    else:
        # Send to e-paper display
        from epd7in5 import EPD
        epd = EPD()
        epd.init()
        epd.display_qimage(img, BLACK, RED)
        epd.sleep()
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
| 33.546296 | 118 | 0.656362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,061 | 0.292851 |
e29f7d09efd784db1167faf711962043e5a84615 | 677 | py | Python | commit_grtrans.py | HerculesJack/grtrans | bc005307d81dac1bdb9520e776e7627126dd690a | [
"MIT"
] | 25 | 2016-02-11T01:52:14.000Z | 2021-06-16T02:15:42.000Z | commit_grtrans.py | RAnantua/grtrans | a0353a8516335412b27fe4866eabafcfc0fe498f | [
"MIT"
] | 6 | 2016-11-10T15:25:20.000Z | 2018-01-18T15:15:57.000Z | commit_grtrans.py | RAnantua/grtrans | a0353a8516335412b27fe4866eabafcfc0fe498f | [
"MIT"
] | 6 | 2016-02-11T14:13:01.000Z | 2022-03-10T01:56:02.000Z | import os
from run_grtrans_test_problems import run_test_problems
from unit_tests import run_unit_tests
# Pre-commit helper: only commit when the grtrans regression problems and unit
# tests all pass.  Written for Python 2 (print statements).
passed, max_passed, failed = run_test_problems(save=0)
nfailed, ufailed = run_unit_tests()
if passed < max_passed or nfailed > 0: print 'ERROR -- grtrans tests failed!'
else:
    # Legacy CVS commit commands kept for reference:
    # os.chdir('..')
    # os.system('cvs -d :ext:jdexter@grad16.phys.washington.edu:/phys/users/jdexter/cvs commit grtrans')
    os.system('git commit -a')
# Earlier file-marker based approach, kept for reference:
#try:
#    with open('tests_failed.p','rb') as f: print 'ERROR -- grtrans tests failed!'
#except IOError as e:
#    os.chdir('..')
#    os.system('cvs -d :ext:jdexter@grad16.phys.washington.edu:/phys/users/jdexter/cvs commit grtrans')
| 35.631579 | 103 | 0.716396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.589365 |
e29fa79eca53178795185c5b18107d7e3a5c1a43 | 349 | py | Python | luftdaten/exceptions.py | lrubaszewski/python-luftdaten | 69c29f4ee0ac6e8e8749eee44102d1b513cf5cd5 | [
"MIT"
] | 5 | 2018-02-08T12:39:19.000Z | 2019-08-16T11:23:23.000Z | luftdaten/exceptions.py | lrubaszewski/python-luftdaten | 69c29f4ee0ac6e8e8749eee44102d1b513cf5cd5 | [
"MIT"
] | 6 | 2017-12-26T16:58:20.000Z | 2020-03-31T06:54:18.000Z | luftdaten/exceptions.py | lrubaszewski/python-luftdaten | 69c29f4ee0ac6e8e8749eee44102d1b513cf5cd5 | [
"MIT"
] | 9 | 2017-12-26T15:29:13.000Z | 2020-03-30T18:12:08.000Z | """Exceptions for the Luftdaten Wrapper."""
class LuftdatenError(Exception):
    """Base exception for all errors raised by the Luftdaten wrapper."""
class LuftdatenConnectionError(LuftdatenError):
    """Raised when a connection error is encountered while talking to the API."""
class LuftdatenNoDataAvailable(LuftdatenError):
    """Raised when no data is available for the requested sensor."""
| 17.45 | 52 | 0.716332 | 296 | 0.848138 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.481375 |
e29fe3df8dc5516581790a80c10fc304c7ec7af2 | 215 | py | Python | training/191210/123.py | SOOIN-KIM/lab-python | 4b85dc11c76e2d4f89be0d01864f9f61f3c6e2cc | [
"MIT"
] | null | null | null | training/191210/123.py | SOOIN-KIM/lab-python | 4b85dc11c76e2d4f89be0d01864f9f61f3c6e2cc | [
"MIT"
] | null | null | null | training/191210/123.py | SOOIN-KIM/lab-python | 4b85dc11c76e2d4f89be0d01864f9f61f3c6e2cc | [
"MIT"
] | null | null | null | from sklearn.metrics import r2_score
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
r2=r2_score(y_true, y_pred)
print(r2)
y_true = [5,6,7,8]
y_pred = [-100,524,-1,3]
r2=r2_score(y_true, y_pred)
print(r2)
r2_ | 15.357143 | 36 | 0.651163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e2a06902ee9718981ff25abb371fb5e304cb3890 | 178 | py | Python | tests/sedes/test_bitvector_instantiation.py | booleanfunction/py-ssz | 9d2e34c2257d7b69da824f8a8361dbc71b03b257 | [
"MIT"
] | 22 | 2018-12-01T21:16:06.000Z | 2021-11-04T14:12:31.000Z | tests/sedes/test_bitvector_instantiation.py | booleanfunction/py-ssz | 9d2e34c2257d7b69da824f8a8361dbc71b03b257 | [
"MIT"
] | 75 | 2018-11-27T10:09:19.000Z | 2021-12-09T03:52:27.000Z | tests/sedes/test_bitvector_instantiation.py | booleanfunction/py-ssz | 9d2e34c2257d7b69da824f8a8361dbc71b03b257 | [
"MIT"
] | 19 | 2018-12-01T11:36:01.000Z | 2022-03-28T18:53:59.000Z | import pytest
from ssz.sedes import Bitvector
def test_bitvector_instantiation_bound():
    # A Bitvector needs a positive bit count; zero must be rejected.
    with pytest.raises(ValueError):
        Bitvector(0)
| 17.8 | 41 | 0.730337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e2a1706b79dfe59b8505a0173c3194887a5e11ef | 613 | py | Python | decrypt.py | angelodpadron/asymmetric-encryption-exercise | f204c3cc293db170e79be41a1125c329b07e9c3b | [
"Unlicense"
] | null | null | null | decrypt.py | angelodpadron/asymmetric-encryption-exercise | f204c3cc293db170e79be41a1125c329b07e9c3b | [
"Unlicense"
] | null | null | null | decrypt.py | angelodpadron/asymmetric-encryption-exercise | f204c3cc293db170e79be41a1125c329b07e9c3b | [
"Unlicense"
] | null | null | null | # Seguridad Informatica
# encryption exercise
# Angelo Padron (42487)
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes  # NOTE(review): imported but unused in this script
# Read back the AES key and initialization vector produced during encryption.
with open('key.bin', 'rb') as k:
    key = k.read()
with open('vector.bin', 'rb') as v:
    init_vector = v.read()
# AES in CBC mode, as used by the matching encryption script.
cipher = AES.new(key, AES.MODE_CBC, init_vector)
with open('encrypted_file', 'rb') as encrypted:
    e_file = encrypted.read()
# strip() is used to remove the padding added during encryption.
# NOTE(review): strip() removes *any* leading/trailing whitespace bytes, so plaintext
# that legitimately starts or ends with whitespace would be corrupted -- confirm the
# padding scheme used on the encryption side.
with open('decrypted_file.txt', 'wb') as decrypted:
    decrypted.write(cipher.decrypt(e_file).strip())
| 26.652174 | 88 | 0.698206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.386623 |
e2a24d1c616e8da3e37ec79b0f5a7095f04657c1 | 1,597 | py | Python | course/src/service/student_service.py | Cuiqingyao/course-exercise | 55c0ac3ba35c99d10f66d18a4401a3fa6b68db5a | [
"Apache-2.0"
] | null | null | null | course/src/service/student_service.py | Cuiqingyao/course-exercise | 55c0ac3ba35c99d10f66d18a4401a3fa6b68db5a | [
"Apache-2.0"
] | null | null | null | course/src/service/student_service.py | Cuiqingyao/course-exercise | 55c0ac3ba35c99d10f66d18a4401a3fa6b68db5a | [
"Apache-2.0"
] | null | null | null | """
@Time: 2018/5/11 10:57
@Author: qingyaocui
"""
from course.src.service import admin_service
from course.src.models import Student
# Module-level session state: the Student returned by the last successful login(),
# or None when nobody is logged in.
login_stu = None
def show_choice():
    """Print the main menu of the student service (options 1-4, Q/q to quit)."""
    # NOTE(review): the original indentation inside this literal may have been lost
    # during extraction -- confirm the menu renders as intended.
    show = '''
1.菜单
2.登录
3.注册
4.查看成绩
Q|q.退出系统
'''
    print(show)
def login():
    """Prompt for a student name and, when it exists, store it as the logged-in student."""
    s_name = input('请输入姓名:')
    stu = admin_service.find_student_by_name(s_name)
    if not stu:
        print("%s 不存在" % s_name)
        return
    global login_stu
    login_stu = stu
    print("登陆成功!")
    print("当前用户: %s" % s_name)
    print(login_stu)
def register():
    """Interactively register a new student under an existing school and class.

    Aborts with an error message if the school or the class does not exist.
    """
    school = admin_service.find_school_by_name(input('请输入学校名称:'))
    if not school:
        print("学校不存在,注册失败!")
        return
    klass = admin_service.find_class_by_name(input('请输入班级名称:'))
    if not klass:
        print("班级不存在,注册失败!")
        return
    name = input('请输入姓名:')
    age = input('请输入年龄')
    # Persist the new student linked to the found school and class ids.
    Student(name, age, school.nid, klass.nid).save()
    print("%s 学生注册成功!" % name)
def show_score():
    """Show the logged-in student's scores (menu option 4). Not implemented yet."""
    pass
def quit_system():
    """Print a farewell message and terminate the program with status 0.

    Raises SystemExit directly instead of calling the site-injected ``exit()``
    helper, which is intended for interactive sessions and is unavailable when
    Python runs with ``-S``.
    """
    print('Bye!')
    raise SystemExit(0)
def main():
    """Run the interactive menu loop, dispatching each choice to its handler."""
    handlers = {
        '1': show_choice,
        '2': login,
        '3': register,
        '4': show_score,
        'Q': quit_system,
        'q': quit_system,
    }
    show_choice()
    while True:
        selection = input("请输入选项:")
        action = handlers.get(selection)
        if action is None:
            print('请输入正确的选项~')
            continue
        action()
| 21.876712 | 70 | 0.54665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 485 | 0.270496 |
e2a8050776c6316b74d8dea582a00176c3ee0231 | 2,328 | py | Python | src/pickleData.py | pdedumast/CondylesClassification | 59094d46f7c554750ea8bc68ef28c131b32ba0e9 | [
"Apache-2.0"
] | null | null | null | src/pickleData.py | pdedumast/CondylesClassification | 59094d46f7c554750ea8bc68ef28c131b32ba0e9 | [
"Apache-2.0"
] | null | null | null | src/pickleData.py | pdedumast/CondylesClassification | 59094d46f7c554750ea8bc68ef28c131b32ba0e9 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os
from six.moves import cPickle as pickle
import neuralnetwork as nn
import inputdata
# ----------------------------------------------------------------------------- #
arser = argparse.ArgumentParser()
parser.add_argument('-valid_train', action='store', dest='valid_train', help='Valid/Train dataset',
default = "/Users/prisgdd/Documents/Projects/CNN/DataPriscille/surfSPHARM/5Groups-Feat/")
parser.add_argument('-tests', action='store', dest='tests', help='Test dataset',
default="/Users/prisgdd/Documents/Projects/CNN/DataPriscille/surfSPHARM/5Groups-Feat/")
args = parser.parse_args()
valid_train= args.valid_train
tests = args.tests
train_size = 256
valid_size = 32
test_size = 209
train_folders = inputdata.get_folder_classes_list(valid_train) # Folder class liste
test_folders = inputdata.get_folder_classes_list(tests)
train_datasets = inputdata.maybe_pickle(train_folders, 9)
test_datasets = inputdata.maybe_pickle(test_folders, 5)
valid_dataset, valid_labels, train_dataset, train_labels = inputdata.merge_datasets(train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = inputdata.merge_all_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
train_dataset, train_labels = inputdata.randomize(train_dataset, train_labels)
test_dataset, test_labels = inputdata.randomize(test_dataset, test_labels)
valid_dataset, valid_labels = inputdata.randomize(valid_dataset, valid_labels)
# ----------------------------------------------------------------------------- #
# Save the data for later reuse
pickle_file = 'condyles.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
| 34.235294 | 123 | 0.698024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 652 | 0.280069 |
e2a950f68a022f3db7e828101242b896367f18d0 | 184 | py | Python | tests/test_07_left_panel.py | skostya64/Selenium_tasks | d7a01b0149aee543fa6278fd532a43afe571cf70 | [
"Apache-2.0"
] | null | null | null | tests/test_07_left_panel.py | skostya64/Selenium_tasks | d7a01b0149aee543fa6278fd532a43afe571cf70 | [
"Apache-2.0"
] | null | null | null | tests/test_07_left_panel.py | skostya64/Selenium_tasks | d7a01b0149aee543fa6278fd532a43afe571cf70 | [
"Apache-2.0"
] | null | null | null |
def test_check_left_panel(app):
    """Log in as admin and verify all items of the admin left-hand panel."""
    # NOTE(review): credentials are hard-coded — presumably a test-fixture account.
    app.login(username="admin", password="admin")
    # Return value is discarded — presumably called for its side effect of
    # populating the page object's menu state; confirm against MainPage.
    app.main_page.get_menu_items_list()
    app.main_page.check_all_admin_panel_items()
| 9.684211 | 49 | 0.711957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.076087 |