Dataset columns (⌀ = nullable):

- hexsha: string, length 40
- size: int64, 5 to 2.06M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 3 to 248
- max_stars_repo_name: string, length 5 to 125
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 3 to 248
- max_issues_repo_name: string, length 5 to 125
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 3 to 248
- max_forks_repo_name: string, length 5 to 125
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 5 to 2.06M
- avg_line_length: float64, 1 to 1.02M
- max_line_length: int64, 3 to 1.03M
- alphanum_fraction: float64, 0 to 1
- count_classes: int64, 0 to 1.6M
- score_classes: float64, 0 to 1
- count_generators: int64, 0 to 651k
- score_generators: float64, 0 to 1
- count_decorators: int64, 0 to 990k
- score_decorators: float64, 0 to 1
- count_async_functions: int64, 0 to 235k
- score_async_functions: float64, 0 to 1
- count_documentation: int64, 0 to 1.04M
- score_documentation: float64, 0 to 1

---
hexsha: 5a2a33ed323999913f0d3da3c440981176e3bcd7 | size: 159 | ext: py | lang: Python
max_stars: path: Dashboard with Django/updates/forms.py | repo: reddyprasade/Data-Analysis-with-Python- | head: 2440e23486856eea5556c8262467b3a618032bc2 | licenses: ["MIT"] | count: 1 | events: 2021-06-29T23:15:05.000Z to 2021-06-29T23:15:05.000Z
max_issues: path: Dashboard with Django/updates/forms.py | repo: reddyprasade/Data-Analysis-with-Python- | head: 2440e23486856eea5556c8262467b3a618032bc2 | licenses: ["MIT"] | count: null | events: null
max_forks: path: Dashboard with Django/updates/forms.py | repo: reddyprasade/Data-Analysis-with-Python- | head: 2440e23486856eea5556c8262467b3a618032bc2 | licenses: ["MIT"] | count: 1 | events: 2021-12-20T10:04:53.000Z to 2021-12-20T10:04:53.000Z
content:
from django.forms import ModelForm
from updates.models import Post
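# Simple ModelForm exposing the title, body and date fields of the Post model.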
class Postform(ModelForm):
class Meta:
model = Post
fields = ['title','body','date']
stats: avg_line_length: 17.666667 | max_line_length: 34 | alphanum_fraction: 0.72956
count/score: classes 89/0.559748 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 19/0.119497

---
hexsha: 5a2a74b028d05464645069f119b32c24c0d83261 | size: 1,965 | ext: py | lang: Python
max_stars: path: main.py | repo: neuroidss/eeglstm | head: 693d39347afb3c7fa8272e813ce1f841b892a212 | licenses: ["MIT"] | count: 21 | events: 2018-11-17T11:46:46.000Z to 2021-12-15T01:54:31.000Z
max_issues: path: main.py | repo: neuroidss/eeglstm | head: 693d39347afb3c7fa8272e813ce1f841b892a212 | licenses: ["MIT"] | count: 1 | events: 2018-05-15T14:04:49.000Z to 2018-05-15T14:04:49.000Z
max_forks: path: main.py | repo: neuroidss/eeglstm | head: 693d39347afb3c7fa8272e813ce1f841b892a212 | licenses: ["MIT"] | count: 4 | events: 2018-12-21T03:16:20.000Z to 2020-05-02T09:37:39.000Z
content:
#%% [markdown]
#
# We will load EEG data from the lab and attempt to build a classifier that distinguishes between learners and non-learners
#%%
import mne
import numpy as np
import os.path
import glob
import re
import pandas as pd
# try to enable cuda support to speed up filtering, make sure the MNE_USE_CUDA environment variable is set to true
mne.cuda.init_cuda()
DATA_DIR = "../../EEGdata/Fish_5Block"
event_dict = {
"cat":{
"1": 20,
"2": 21
}
}
data_path = os.path.join(DATA_DIR, "Tail/Learner/126670_EXP_FISH.bdf")
test_data = mne.io.read_raw_edf(data_path, preload=True)
# find the related behavioral data
participant_number = re.search(r"^(\d+)_EXP_FISH", os.path.basename(data_path))[1]
behav_path = [filename for filename in glob.glob(os.path.join(DATA_DIR, "EXP_fish2_Tomy/Cat_data/*.csv")) if participant_number in filename][0]
behav_df = pd.read_csv(behav_path)
learning_curve = behav_df["Resultat"].rolling(20).mean() # our in house definition of current learning performance
learning_time = (learning_curve >= 0.8).idxmax() # using a 80% correct categorization threshold
#%% [markdown]
# We now need to find the event times and give the same code to all stimulus presentation events since we don't want to differentiate among category 1 or 2
#%%
events = mne.find_events(test_data)
events = np.array(events)
events[events[:,2]==event_dict["cat"]["2"],2] = 20
events = events.tolist()
#%% [markdown]
# visualize data
#%%
#test_data.plot()
#%%
test_data.set_eeg_reference("average", projection=False)
test_data.filter(0.1, 50.0, n_jobs="cuda")
stim_epochs = mne.Epochs(test_data, events=events, event_id={"stimulus presentation":20}, tmin=-0.2, tmax=0.8, reject={"eeg": 200e-6})
# do basic cleaning by bandpass filtering, we will need to load the data
stim_epochs.load_data()
stim_epochs.resample(256)
#%% building the pytorch model
pass
stats: avg_line_length: 31.190476 | max_line_length: 156 | alphanum_fraction: 0.707379
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 919/0.467684

---
hexsha: 5a2a96f1206233db3ee9862dbb3187153e48e3d9 | size: 241 | ext: py | lang: Python
max_stars: path: ex066.py | repo: dsjocimar/python | head: 5716f46a9fa7f64aa78a39df9c262c5392571340 | licenses: ["MIT"] | count: null | events: null
max_issues: path: ex066.py | repo: dsjocimar/python | head: 5716f46a9fa7f64aa78a39df9c262c5392571340 | licenses: ["MIT"] | count: null | events: null
max_forks: path: ex066.py | repo: dsjocimar/python | head: 5716f46a9fa7f64aa78a39df9c262c5392571340 | licenses: ["MIT"] | count: null | events: null
content:
# Exercise 066
soma = total = 0
while True:
n = int(input('Digite um valor [999 para parar]: '))
if n == 999:
break
soma += n
total += 1
print(f'O total de números digitados foi {total} e a soma deles vale {soma}')
stats: avg_line_length: 24.1 | max_line_length: 78 | alphanum_fraction: 0.59751
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 123/0.506173

---
hexsha: 5a2aef76ad354c4dafd74c644c7cdf56a923d14d | size: 749 | ext: py | lang: Python
max_stars: path: test/test_api_data_utils.py | repo: onap/optf-osdf | head: 2b9e7f4fca3d510a201283a8561f6ff3424f5fd6 | licenses: ["Apache-2.0"] | count: 3 | events: 2019-04-15T13:33:57.000Z to 2019-10-21T17:19:19.000Z
max_issues: path: test/test_api_data_utils.py | repo: onap/optf-osdf | head: 2b9e7f4fca3d510a201283a8561f6ff3424f5fd6 | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: path: test/test_api_data_utils.py | repo: onap/optf-osdf | head: 2b9e7f4fca3d510a201283a8561f6ff3424f5fd6 | licenses: ["Apache-2.0"] | count: null | events: null
content:
import json
import os
from osdf.utils import api_data_utils
from collections import defaultdict
BASE_DIR = os.path.dirname(__file__)
with open(os.path.join(BASE_DIR, "placement-tests/request.json")) as json_data:
req_json = json.load(json_data)
class TestVersioninfo():
#
# Tests for api_data_utils.py
#
def test_retrieve_version_info(self):
request_id = 'test12345'
test_dict = {'placementVersioningEnabled': False, 'placementMajorVersion': '1', 'placementPatchVersion': '0', 'placementMinorVersion': '0'}
test_verison_info_dict = defaultdict(dict ,test_dict )
#verison_info_dict = api_data_utils.retrieve_version_info(req_json, request_id)
#assert verison_info_dict == test_verison_info_dict
stats: avg_line_length: 34.045455 | max_line_length: 147 | alphanum_fraction: 0.750334
count/score: classes 495/0.660881 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 311/0.41522

---
hexsha: 5a2b70864ff65608d3a0ed95eba0ce2781b1326a | size: 10,396 | ext: py | lang: Python
max_stars: path: Model_SIR/no.py | repo: AP-2020-1S/covid-19-guaya-kilera | head: f307d17b6540e881a93596ecd4b7857f5d7d9a18 | licenses: ["CC-BY-3.0", "MIT"] | count: null | events: null
max_issues: path: Model_SIR/no.py | repo: AP-2020-1S/covid-19-guaya-kilera | head: f307d17b6540e881a93596ecd4b7857f5d7d9a18 | licenses: ["CC-BY-3.0", "MIT"] | count: null | events: null
max_forks: path: Model_SIR/no.py | repo: AP-2020-1S/covid-19-guaya-kilera | head: f307d17b6540e881a93596ecd4b7857f5d7d9a18 | licenses: ["CC-BY-3.0", "MIT"] | count: null | events: null
content:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import integrate, optimize
from scipy.signal import savgol_filter
from dane import population as popu
dias_restar = 4  # Most recent days of data that are not taken into account
dias_pred = 31  # Days over which the short-term prediction will be made
media_movil = 4  # Days averaged in the series to mitigate errors in the data
Ciudades_dicc = {'Bog': 'Bogotá D.C.', 'Mde': 'Medellín', 'Cal': 'Cali', 'Brr': 'Barranquilla',
'Ctg': 'Cartagena de Indias'}
Ciudades = ['Bog','Mde','Cal', 'Brr', 'Ctg']
Covid_Col = pd.read_csv("https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD", sep=',',
encoding='utf-8', low_memory=False)
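# Clean the downloaded case data and build per-city daily series (new, confirmed,
# recovered, death and active cases) as module-level globals.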
def limpieza_datos():
# Covid_Col=pd.read_csv("C:\Users\danie\DS\vagrant4docker-master\laboratorios\covid-19-guaya-kilera\Casos_positivos_de_COVID-19_en_Colombia.csv", sep=',', encoding='utf-8', low_memory=False)
Covid_Col.drop(['ID de caso', 'Código DIVIPOLA', 'Departamento o Distrito ', 'País de procedencia', 'Tipo',
'Codigo departamento',
'Codigo pais', 'Tipo recuperación', 'Pertenencia etnica', 'Nombre grupo etnico', 'atención'],
axis=1, inplace=True)
Covid_Col['FIS'] = Covid_Col['FIS'].replace('Asintomático', np.nan)
Covid_Col['FIS'] = pd.to_datetime(Covid_Col['FIS'].str[:10])
Covid_Col['fecha reporte web'] = pd.to_datetime(Covid_Col['fecha reporte web'].str[:10])
Covid_Col['Fecha de notificación'] = pd.to_datetime(Covid_Col['Fecha de notificación'].str[:10])
Covid_Col['Fecha de muerte'] = pd.to_datetime(Covid_Col['Fecha de muerte'].str[:10])
Covid_Col['Fecha diagnostico'] = pd.to_datetime(Covid_Col['Fecha diagnostico'].str[:10])
Covid_Col['Fecha recuperado'] = pd.to_datetime(Covid_Col['Fecha recuperado'].str[:10])
# Covid_Col[(Covid_Col['Fecha diagnostico']<Covid_Col['Fecha de notificación']) & Covid_Col['FIS'].isnull()]
Covid_Col['Fecha contagio'] = Covid_Col['FIS']
Covid_Col.loc[Covid_Col['Fecha contagio'].isnull(), 'Fecha contagio'] = Covid_Col['Fecha de notificación']
Covid_Col.drop(['Fecha de notificación', 'FIS', 'Fecha diagnostico', 'fecha reporte web'], axis=1, inplace=True)
Covid_Col['Cantidad de personas'] = 1
Fecha_Inicio = Covid_Col['Fecha contagio'][0]
Fecha_Fin = max(Covid_Col['Fecha contagio']) - pd.to_timedelta(dias_restar, unit='d')
Fecha_Fin_pred = Fecha_Fin + pd.to_timedelta(dias_pred - 1, unit='d')
globals()['Fechas_pred_i'] = pd.date_range(start=Fecha_Inicio, end=Fecha_Fin_pred)
Fechas_evaluar_i = pd.date_range(start=Fecha_Inicio, end=Fecha_Fin)
Fechas_evaluar = pd.DataFrame(index=Fechas_evaluar_i)
for ciudad in Ciudades:
globals()["Covid_" + str(ciudad)] = Covid_Col[Covid_Col['Ciudad de ubicación'] == Ciudades_dicc[ciudad]]
globals()["nuevos_" + str(ciudad)] = globals()["Covid_" + str(ciudad)].groupby('Fecha contagio').sum()
globals()["nuevos_" + str(ciudad)].drop(['Edad'], axis=1, inplace=True)
globals()["nuevos_" + str(ciudad)] = pd.merge(Fechas_evaluar, globals()["nuevos_" + str(ciudad)], \
how='left', left_index=True, right_index=True)
globals()["nuevos_" + str(ciudad)] = globals()["nuevos_" + str(ciudad)].replace(np.nan, 0)
globals()["confirmados_" + str(ciudad)] = globals()["nuevos_" + str(ciudad)].cumsum()
globals()["nuevos_" + str(ciudad)].rename(columns={'Cantidad de personas': "Casos_nuevos_"}, inplace=True)
globals()["confirmados_" + str(ciudad)].rename(columns={'Cantidad de personas': "Casos_confirmados_"},
inplace=True)
globals()["recuperados_" + str(ciudad)] = globals()["Covid_" + str(ciudad)].groupby('Fecha recuperado').sum()
globals()["recuperados_" + str(ciudad)].drop(['Edad'], axis=1, inplace=True)
globals()["recuperados_" + str(ciudad)] = pd.merge(Fechas_evaluar, globals()["recuperados_" + str(ciudad)], \
how='left', left_index=True, right_index=True)
globals()["recuperados_" + str(ciudad)] = globals()["recuperados_" + str(ciudad)].replace(np.nan, 0)
# globals()["recuperados_" + str(ciudad)]=globals()["recuperados_" + str(ciudad)].cumsum()
globals()["recuperados_" + str(ciudad)].rename(columns={'Cantidad de personas': "Casos_recuperados_"},
inplace=True)
globals()["muertes_" + str(ciudad)] = globals()["Covid_" + str(ciudad)].groupby('Fecha de muerte').sum()
globals()["muertes_" + str(ciudad)].drop(['Edad'], axis=1, inplace=True)
globals()["muertes_" + str(ciudad)] = pd.merge(Fechas_evaluar, globals()["muertes_" + str(ciudad)], how='left', \
left_index=True, right_index=True)
globals()["muertes_" + str(ciudad)] = globals()["muertes_" + str(ciudad)].replace(np.nan, 0)
# globals()["muertes_" + str(ciudad)]=globals()["muertes_" + str(ciudad)].cumsum()
globals()["muertes_" + str(ciudad)].rename(columns={'Cantidad de personas': "muertes_"}, inplace=True)
globals()["activos_" + str(ciudad)] = pd.concat([globals()["confirmados_" + str(ciudad)], \
globals()["recuperados_" + str(ciudad)],
globals()["muertes_" + str(ciudad)],
globals()["nuevos_" + str(ciudad)]], axis=1)
globals()["activos_" + str(ciudad)]['Casos_activos_'] = globals()["activos_" + str(ciudad)][
"Casos_confirmados_"] - \
globals()["activos_" + str(ciudad)][
"Casos_recuperados_"].cumsum() - \
globals()["activos_" + str(ciudad)]["muertes_"].cumsum()
globals()["Casos_" + str(ciudad)] = globals()["activos_" + str(ciudad)].copy()
globals()["activos_" + str(ciudad)].drop(
["Casos_confirmados_", "Casos_recuperados_", "muertes_", "Casos_nuevos_"], axis=1, inplace=True)
globals()["Casos_" + str(ciudad)]["Total_recuperados_"] = globals()["Casos_" + str(ciudad)][
"Casos_recuperados_"].cumsum()
globals()["Casos_" + str(ciudad)]["Total_muertes_"] = globals()["Casos_" + str(ciudad)]["muertes_"].cumsum()
#%%
limpieza_datos()
#%%
def casos():
for ciudad in Ciudades:
globals()['N'+str(ciudad)] = popu(ciudad)
globals()['real_'+str(ciudad)] = [i for i in globals()["confirmados_" + str(ciudad)]['Casos_confirmados_']]
        globals()['poly_pred_'+str(ciudad)] = savgol_filter(globals()['real_'+str(ciudad)], 51,3)  # window size 51, polynomial order 3
        globals()['df_pred_'+str(ciudad)] = pd.DataFrame(globals()['poly_pred_'+str(ciudad)])
        globals()['df_real_'+str(ciudad)] = pd.DataFrame(globals()['real_'+str(ciudad)])  # Confirmed cases per day since case 0
# return N,df_poly,df_vec_real,poly,vec_real_140,ciudad
# plt.figure(figsize=(12,6))
# plt.plot(globals()['poly_pred_'+str(ciudad)])
# plt.plot(globals()['real_'+str(ciudad)])
# plt.legend(["Predicción","Real"], loc='upper left')
# plt.title("Infecciones por COVID-19 desde el primer caso"+" "+ str(Ciudades_dicc.get(ciudad)), size=15)
# plt.xlabel("Days", size=13)
# plt.ylabel("Infecciones", size=13)
# plt.ylim(0, max(globals()['real_'+str(ciudad)])+1000)
# plt.show()
N = globals()['N'+str(ciudad)]
depart_df = pd.DataFrame()
depart_df['ConfirmedCases'] = globals()['real_'+str(ciudad)]
depart_df = depart_df[10:]
depart_df['day_count'] = list(range(1,len(depart_df)+1))
ydata = [i for i in depart_df.ConfirmedCases]
xdata = depart_df.day_count
ydata = np.array(ydata, dtype=float)
xdata = np.array(xdata, dtype=float)
inf0 = ydata[0]
sus0 = N - inf0
rec0 = 0.0
def sir_model(y, x, beta, gamma):
sus = -beta * y[0] * y[1] / N
rec = gamma * y[1]
inf = -(sus + rec)
return sus, inf, rec
def fit_odeint(x, beta, gamma):
return integrate.odeint(sir_model, (sus0, inf0, rec0), x, args=(beta, gamma))[:,1]
if ciudad == 'Bog':
popt = np.array([0.2783922953043075, 0.2165019796859231])
else:
popt, pcov = optimize.curve_fit(fit_odeint, xdata, ydata, maxfev=5000)
fitted = fit_odeint(xdata, *popt)
plt.plot(xdata, ydata, 'o')
plt.plot(xdata, fitted)
plt.title("Modelo SIR"+" "+ str(Ciudades_dicc.get(ciudad)), size=15)
plt.ylabel("Population infected")
plt.xlabel("Days")
plt.show()
print("Optimal parameters: beta =", popt[0], " and gamma = ", popt[1])
#%%
casos()
#%%
# t = np.linspace(0,400,400)
# import plotly.offline as py
#
# for ciudad in Ciudades:
# py.iplot([{
# 'x': t,
# 'y': globals()['real_' + str(ciudad)]
# }], filename='cufflinks/multiple-lines-on-same-chart')
#
# max(globals()['real_' + str(ciudad)])
#%%
valores = [(popt[0],popt[1])]
def modelo(beta,gamma):
# Initial values
I0, R0 = ydata[0], 0
###
S0 = N - I0 - R0
def deriv(y,t,N,beta, gamma):
S,I,R = y
dSdt = -beta * S * I /N
dIdt = beta * S * I /N - gamma * I
dRdt = gamma * I
return dSdt, dIdt, dRdt
    # Initial conditions vector
    y0 = S0, I0, R0
    # Solve the equation system
ret = integrate.odeint(deriv, y0, t, args=(N, beta, gamma))
S, I, R =ret.T
return I
import cufflinks as cf
import plotly.offline as py
py.iplot([{
'x':t,
'y': modelo(*valor),
'name': str(valor),
} for valor in valores], filename = 'cufflinks/multiple-lines-on-same-chart')
# plt.figure(figsize=(12, 8))
# #plt.plot(modelo(0.42715777117416, 0.36645292847392247)[0])
# plt.plot(modelo(0.42715777117416, 0.36645292847392247)[1])
# # plt.plot(modelo(0.42715777117416, 0.36645292847392247)[2])
# plt.ylabel('Población')
# plt.legend(['Susceptible', 'Infectados', 'Recuperados'])
# plt.xlabel('Días')
# plt.show()
stats: avg_line_length: 47.907834 | max_line_length: 194 | alphanum_fraction: 0.60379
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 3,940/0.378083

---
hexsha: 5a2c7e2ea60e80d086779df6d65e7f9d20374ff7 | size: 733 | ext: py | lang: Python
max_stars: path: backend/cw_backend/views/admin_courses.py | repo: veronikks/pyladies-courseware | head: e1151a704159141e0b1cb649c17cfdd5ca5f689b | licenses: ["MIT"] | count: null | events: null
max_issues: path: backend/cw_backend/views/admin_courses.py | repo: veronikks/pyladies-courseware | head: e1151a704159141e0b1cb649c17cfdd5ca5f689b | licenses: ["MIT"] | count: null | events: null
max_forks: path: backend/cw_backend/views/admin_courses.py | repo: veronikks/pyladies-courseware | head: e1151a704159141e0b1cb649c17cfdd5ca5f689b | licenses: ["MIT"] | count: null | events: null
content:
import aiohttp
from aiohttp import web
from aiohttp_session import get_session
import asyncio
import logging
from pathlib import Path
logger = logging.getLogger(__name__)
routes = web.RouteTableDef()
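# Admin-only endpoint: reloads the course data and returns the refreshed course export.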
@routes.get('/api/admin/course/{course_id}/reload_course')
async def reload_course(req):
session = await get_session(req)
model = req.app['model']
if not session.get('user'):
raise web.HTTPForbidden()
user = await model.users.get_by_id(session['user']['id'])
if not user or not user.is_admin:
raise web.HTTPForbidden()
course = req.app['courses'].get().get_by_id(req.match_info['course_id'])
course.load_course()
return web.json_response({'course': course.export(sessions=True)})
stats: avg_line_length: 28.192308 | max_line_length: 76 | alphanum_fraction: 0.721692
count/score: classes 0/0 | generators 0/0 | decorators 527/0.718963 | async_functions 468/0.638472 | documentation 96/0.130969

---
hexsha: 5a2e4a10cc2ee782907da20e988df75437125628 | size: 751 | ext: py | lang: Python
max_stars: path: duplicate_csv.py | repo: AronFreyr/de1-project | head: 9e95346db9a6955ee017d59c73c83251d529d8ff | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: path: duplicate_csv.py | repo: AronFreyr/de1-project | head: 9e95346db9a6955ee017d59c73c83251d529d8ff | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: path: duplicate_csv.py | repo: AronFreyr/de1-project | head: 9e95346db9a6955ee017d59c73c83251d529d8ff | licenses: ["Apache-2.0"] | count: null | events: null
content:
#!/usr/bin/env python
# coding: utf-8
# In[7]:
import os
write_to_csv_file = 'million_song_subset.csv'
csv_file_read = open(write_to_csv_file,'r')
csv_file_write = open(write_to_csv_file,'a')
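# Read the file while appending its own data rows back onto it (header lines are
# skipped), so it keeps growing until it reaches roughly 5 GB.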
while True:
next_line = csv_file_read.readline()
if not next_line:
break
csv_file_size = os.path.getsize(write_to_csv_file)
print("file size: {}".format(str(csv_file_size/1048576)))
    # if the csv file is larger than or equal to 5 GB, exit the loop
if csv_file_size >= 5368709120:
break
if next_line.startswith("song_id"):
continue
csv_file_write.write(next_line)
print("appended: {}".format(next_line))
csv_file_read.close()
csv_file_write.close()
# In[ ]:
stats: avg_line_length: 17.465116 | max_line_length: 64 | alphanum_fraction: 0.660453
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 181/0.241012

---
hexsha: 5a2e5a469bcfb11fd51f01901cb6f4cfecb26b08 | size: 4,444 | ext: py | lang: Python
max_stars: path: src/main/python/graphing-scripts/utils.py | repo: DistributedSystemsGroup/cluster-scheduler-simulator | head: 9733dc644736dd0f8c2e1baff09efeb680d6a4d8 | licenses: ["BSD-3-Clause"] | count: 2 | events: 2018-06-28T04:31:55.000Z to 2019-06-24T02:18:24.000Z
max_issues: path: src/main/python/graphing-scripts/utils.py | repo: DistributedSystemsGroup/cluster-scheduler-simulator | head: 9733dc644736dd0f8c2e1baff09efeb680d6a4d8 | licenses: ["BSD-3-Clause"] | count: null | events: null
max_forks: path: src/main/python/graphing-scripts/utils.py | repo: DistributedSystemsGroup/cluster-scheduler-simulator | head: 9733dc644736dd0f8c2e1baff09efeb680d6a4d8 | licenses: ["BSD-3-Clause"] | count: 3 | events: 2017-06-22T11:32:41.000Z to 2019-10-28T01:22:26.000Z
content:
# Copyright (c) 2013, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution. Neither the name of the University of California, Berkeley
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission. THIS
# SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import os
from matplotlib import use, rc
use('Agg')
import matplotlib.pyplot as plt
def mkdir_p(path):
path = path.replace(" ", "_")
dir_path = os.path.dirname(path)
try:
os.makedirs(dir_path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(dir_path):
pass
else:
raise
return path
# plot saving utility function
def writeout(filename_base, formats=['pdf']):
mkdir_p(os.path.dirname(filename_base))
for fmt in formats:
plt.savefig("%s.%s" % (filename_base, fmt), format=fmt, bbox_inches='tight')
# plt.savefig("%s.%s" % (filename_base, fmt), format=fmt)
def set_leg_fontsize(size):
rc('legend', fontsize=size)
def set_paper_rcs():
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica'],
'serif': ['Helvetica'], 'size': 22})
# rc('text', usetex=True)
# rc('legend', fontsize=7)
# rc('figure', figsize=(3.33, 2.22))
# # rc('figure.subplot', left=0.10, top=0.90, bottom=0.12, right=0.95)
# rc('axes', linewidth=0.5)
rc('lines', linewidth=4)
# rc('figure', figsize=[20, 6])
def set_rcs():
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica'],
'serif': ['Times'], 'size': 12})
rc('text', usetex=True)
rc('legend', fontsize=7)
rc('figure', figsize=(6, 4))
rc('figure.subplot', left=0.10, top=0.90, bottom=0.12, right=0.95)
rc('axes', linewidth=0.5)
rc('lines', linewidth=0.5, color='y')
def append_or_create(d, i, e):
if i not in d:
d[i] = [e]
else:
d[i].append(e)
# Append e to the array at position (i,k).
# d - a dictionary of dictionaries of arrays, essentially a 2d dictionary.
# i, k - essentially a 2 element tuple to use as the key into this 2d dict.
# e - the value to add to the array indexed by key (i,k).
def append_or_create_2d(d, i, k, e):
if i not in d:
d[i] = {k: [e]}
elif k not in d[i]:
d[i][k] = [e]
else:
d[i][k].append(e)
# Append e to the array at position (i,k).
# d - a dictionary of dictionaries of arrays, essentially a 2d dictionary.
# i, k - essentially a 2 element tuple to use as the key into this 2d dict.
# e - the value to add to the array indexed by key (i,k).
def append_or_create_3d(d, i, k, e, v):
if i not in d:
d[i] = {k: {e: [v]}}
elif k not in d[i]:
d[i][k] = {e: [v]}
elif e not in d[i][k]:
d[i][k][e] = [v]
else:
d[i][k][e].append(v)
def cell_to_anon(cell):
# if cell == 'A':
# return 'A'
# elif cell == 'B':
# return 'B'
# elif cell == 'C':
# return 'C'
# elif cell == 'Eurecom':
# return 'Eurecom'
# elif cell == 'example':
# return 'example'
# else:
# return 'SYNTH'
return cell
stats: avg_line_length: 33.666667 | max_line_length: 84 | alphanum_fraction: 0.645815
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 2,815/0.633438

---
hexsha: 5a2f26092de22be2a78e6f158531b00a44283d31 | size: 4,351 | ext: py | lang: Python
max_stars: path: ror/CopelandVoter.py | repo: jakub-tomczak/ror | head: cf9ab38a2d66f4816a1289b9726911960059fce7 | licenses: ["MIT"] | count: null | events: null
max_issues: path: ror/CopelandVoter.py | repo: jakub-tomczak/ror | head: cf9ab38a2d66f4816a1289b9726911960059fce7 | licenses: ["MIT"] | count: null | events: null
max_forks: path: ror/CopelandVoter.py | repo: jakub-tomczak/ror | head: cf9ab38a2d66f4816a1289b9726911960059fce7 | licenses: ["MIT"] | count: null | events: null
content:
from typing import List, Tuple
import numpy as np
import pandas as pd
import os
import logging
class CopelandVoter():
def __init__(self) -> None:
self.__voting_matrix: np.ndarray = None
self.__voting_sum: List[Tuple[str, float]] = []
@property
def voting_matrix(self) -> np.ndarray:
return self.__voting_matrix
@property
def voting_sum(self) -> List[Tuple[str, float]]:
return self.__voting_sum
def save_voting_data(self, directory: str) -> List[str]:
if self.__voting_matrix is None or self.__voting_sum is None:
logging.warn('Copeland Voter was not used yet, skipping saving voting data')
return []
indices = [alternative_name for alternative_name, _ in self.voting_sum]
voting_matrix_file = os.path.join(directory, 'voting_matrix.csv')
logging.info(f'Saved voting matrix from Copeland voting to "{voting_matrix_file}"')
matrix = pd.DataFrame(data=self.voting_matrix, index=indices, columns=indices)
matrix.to_csv(voting_matrix_file, sep=';')
voting_sum_file = os.path.join(directory, 'voting_sum.csv')
data = [value for _, value in self.voting_sum]
headers = ['voting sum']
data = pd.DataFrame(
data=data,
index=indices,
columns=headers)
data.to_csv(voting_sum_file, sep=';')
logging.info(f'Saved voting sum from Copeland voting to "{voting_sum_file}"')
return [
voting_matrix_file,
voting_sum_file
]
def vote(self, data: pd.DataFrame, columns_with_ranks: List[str], eps: float) -> np.array:
numpy_alternatives: np.ndarray = np.array(list(data.index))
number_of_alternatives = len(numpy_alternatives)
votes = np.zeros(shape=(number_of_alternatives, number_of_alternatives))
# reset results
self.__voting_sum = []
for column_name in columns_with_ranks:
for row_idx, row_alternative_name in enumerate(numpy_alternatives):
# run only over columns that index is greater than row index - less calculations
for col_idx, column_alternative_name in zip(range(row_idx+1,number_of_alternatives), numpy_alternatives[row_idx+1:]):
row_alternative_value = data.loc[row_alternative_name, column_name]
column_alternative_value = data.loc[column_alternative_name, column_name]
                # If, in this rank, the row alternative is preferred over the column
                # alternative, the row alternative gets one vote; if the column
                # alternative is preferred over the row alternative, the column
                # alternative gets one point. Otherwise (the values are equal within
                # eps precision) both alternatives get 0.5.
if row_alternative_value + eps < column_alternative_value:
logging.debug(f'Alternative in row {row_alternative_name} has greater value than alternative in column {column_alternative_name}')
votes[row_idx, col_idx] += 1
elif row_alternative_value > column_alternative_value + eps:
logging.debug(f'Alternative in row {row_alternative_name} has lower value than alternative in column {column_alternative_name}')
votes[col_idx, row_idx] += 1
else:
logging.debug(f'Alternative in row {row_alternative_name} has same value as alternative in column {column_alternative_name}')
votes[row_idx, col_idx] += 0.5
votes[col_idx, row_idx] += 0.5
self.__voting_matrix = votes
# aggregate votes - calculate
per_alternative_votes_mean = np.zeros(shape=(number_of_alternatives))
for alternative_idx in range(len(numpy_alternatives)):
per_alternative_votes_mean[alternative_idx] = np.sum(votes[alternative_idx, :]) / (len(columns_with_ranks) * (number_of_alternatives-1))
for alternative, mean_votes in zip(numpy_alternatives, per_alternative_votes_mean):
self.__voting_sum.append((alternative, mean_votes))
return per_alternative_votes_mean
stats: avg_line_length: 54.3875 | max_line_length: 154 | alphanum_fraction: 0.652264
count/score: classes 4,255/0.977936 | generators 0/0 | decorators 183/0.042059 | async_functions 0/0 | documentation 1,053/0.242013

---
hexsha: 5a2fba5afd104e89bb7c06d80b25ac575e16cde2 | size: 2,528 | ext: py | lang: Python
max_stars: path: app/auth/forms/__init__.py | repo: jg-725/IS219-FlaskAppProject | head: 316aa298eda1bcda766ed085bb6f26ca7da7dfee | licenses: ["BSD-3-Clause"] | count: null | events: null
max_issues: path: app/auth/forms/__init__.py | repo: jg-725/IS219-FlaskAppProject | head: 316aa298eda1bcda766ed085bb6f26ca7da7dfee | licenses: ["BSD-3-Clause"] | count: null | events: null
max_forks: path: app/auth/forms/__init__.py | repo: jg-725/IS219-FlaskAppProject | head: 316aa298eda1bcda766ed085bb6f26ca7da7dfee | licenses: ["BSD-3-Clause"] | count: null | events: null
content:
from flask_wtf import FlaskForm
from wtforms import validators
from wtforms.fields import *
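# WTForms classes used by the app's auth package: login, registration, user
# creation and editing, profile, security settings and CSV upload.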
class login_form(FlaskForm):
email = EmailField('Email Address', [
validators.DataRequired(),
])
password = PasswordField('Password', [
validators.DataRequired(),
validators.length(min=6, max=35)
])
submit = SubmitField()
class register_form(FlaskForm):
email = EmailField('Email Address', [
validators.DataRequired(),
], description="You need to signup with an email")
password = PasswordField('Create Password', [
validators.DataRequired(),
validators.EqualTo('confirm', message='Passwords must match'),
], description="Create a password ")
confirm = PasswordField('Repeat Password', description="Please retype your password to confirm it is correct")
submit = SubmitField()
class create_user_form(FlaskForm):
email = EmailField('Email Address', [
validators.DataRequired(),
], description="You need to signup with an email")
password = PasswordField('Create Password', [
validators.DataRequired(),
validators.EqualTo('confirm', message='Passwords must match'),
], description="Create a password ")
confirm = PasswordField('Repeat Password', description="Please retype your password to confirm it is correct")
is_admin = BooleanField('Admin', render_kw={'value':'1'})
submit = SubmitField()
class profile_form(FlaskForm):
about = TextAreaField('About', [validators.length(min=6, max=300)],
description="Please add information about yourself")
submit = SubmitField()
class user_edit_form(FlaskForm):
about = TextAreaField('About', [validators.length(min=6, max=300)],
description="Please add information about yourself")
is_admin = BooleanField('Admin', render_kw={'value':'1'})
submit = SubmitField()
class security_form(FlaskForm):
email = EmailField('Email Address', [
validators.DataRequired(),
], description="You can change your email address")
password = PasswordField('Create A New Password', [
validators.DataRequired(),
validators.EqualTo('confirm', message='Passwords must match'),
], description="Create a password ")
confirm = PasswordField('Re-Enter New Password', description="Please retype your password to confirm it is correct")
submit = SubmitField()
class csv_upload(FlaskForm):
file = FileField()
submit = SubmitField()
stats: avg_line_length: 32.410256 | max_line_length: 120 | alphanum_fraction: 0.679589
count/score: classes 2,419/0.956883 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 728/0.287975

---
hexsha: 5a31ca41c47a23fa18c352e7e70fee2a9750f1a1 | size: 11,220 | ext: py | lang: Python
max_stars: path: tern/analyze/default/dockerfile/lock.py | repo: mzachar/tern | head: ac9dea4c907f27c9a3b7d85d79806c8fdab1d7e7 | licenses: ["BSD-2-Clause"] | count: 2 | events: 2020-05-21T00:00:36.000Z to 2020-12-28T20:43:25.000Z
max_issues: path: tern/analyze/default/dockerfile/lock.py | repo: mzachar/tern | head: ac9dea4c907f27c9a3b7d85d79806c8fdab1d7e7 | licenses: ["BSD-2-Clause"] | count: null | events: null
max_forks: path: tern/analyze/default/dockerfile/lock.py | repo: mzachar/tern | head: ac9dea4c907f27c9a3b7d85d79806c8fdab1d7e7 | licenses: ["BSD-2-Clause"] | count: null | events: null
content:
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
Docker specific functions - used when trying to retrieve packages when
given a Dockerfile
"""
import logging
import os
import re
import sys
from tern.classes.docker_image import DockerImage
from tern.classes.notice import Notice
from tern.utils import constants
from tern.utils import general
from tern.report import errors
from tern.report import formats
from tern.analyze.default import filter as fltr
from tern.analyze.default.command_lib import command_lib
from tern.analyze.default.dockerfile import parse
from tern.utils.general import check_image_string
# dockerfile
dockerfile_global = ''
# dockerfile commands
docker_commands = []
# global logger
logger = logging.getLogger(constants.logger_name)
def load_docker_commands(dfobj):
'''Given a dockerfile object get a persistent list of docker commands'''
if not os.path.isfile(dfobj.filepath):
raise IOError('{} does not exist'.format(dfobj.filepath))
global docker_commands
docker_commands = dfobj.structure
global dockerfile_global
dockerfile_global = dfobj.filepath
def get_dockerfile_base():
'''Get the base image object from the dockerfile base instructions
1. get the instructions around FROM
2. get the base image and tag
3. Make notes based on what the image and tag rules are
4. Return an image object and the base instructions string
NOTE: Potential ARG values in the Dockerfile object have already been
expanded at this point. However, Dockerfile rules say that if no
--build-arg is passed during docker build and ARG has no default, the
build will fail. We assume for now that we will not be passing build
arguments in which case if there is no default ARG, we will raise an
exception indicating that since the build arguments are determined by
the user we will not be able to determine what the user wanted'''
try:
# Get the base image tag.
# NOTE: ARG values have already been expanded.
base_image_string, from_line = get_base_image_tag(docker_commands)
# check for scratch
if base_image_string == 'scratch':
# there is no base image to pull
raise ValueError("Cannot pull 'scratch' base image.")
# there should be some image object here
base_image = DockerImage(base_image_string)
base_image.origins.add_notice_origin(from_line)
base_image.name = base_image_string.split(':')[0]
# check if there is a tag
if not check_image_string(base_image_string):
message_string = errors.dockerfile_no_tag.format(
dockerfile_line=from_line)
base_image.origins.add_notice_to_origins(
docker_commands, Notice(message_string, 'warning'))
base_image.tag = 'latest'
else:
base_image.tag = base_image_string.split(':')[1]
# check if the tag is 'latest'
if base_image.tag == 'latest':
message_string = errors.dockerfile_using_latest.format(
dockerfile_line=from_line)
base_image.origins.add_notice_to_origins(
docker_commands, Notice(message_string, 'warning'))
return base_image, from_line
except ValueError as e:
logger.fatal("%s", errors.cannot_parse_base_image.format(
dockerfile=dockerfile_global, error_msg=e))
sys.exit(1)
def get_base_image_tag(dockerfile_lines):
'''Get the instructions around FROM, return the base image string
and the line containing FROM command'''
base_image_string = ''
from_line = ''
for i, cmd_dict in enumerate(dockerfile_lines):
if cmd_dict['instruction'] == 'FROM':
# Account for "as" keyword in FROM line
base_image_string = re.split(" as", cmd_dict['value'],
flags=re.IGNORECASE)[0]
from_line = 'FROM' + base_image_string
# Check that potential ARG values has default
if i != 0 and dockerfile_lines[i-1]['instruction'] == 'ARG':
if len(dockerfile_lines[i-1]['value'].split('=')) == 1:
raise ValueError('No ARG default value to pass to '
'FROM command in Dockerfile.')
break
return base_image_string, from_line
def get_dockerfile_image_tag():
'''Return the image and tag used to build an image from the dockerfile'''
image_tag_string = constants.image + parse.tag_separator + \
constants.tag
return image_tag_string
def created_to_instruction(created_by):
'''The 'created_by' key in a Docker image config gives the shell
command that was executed unless it is a #(nop) instruction which is
for the other Docker directives. Convert this line into a Dockerfile
instruction'''
instruction = re.sub('/bin/sh -c ', '', created_by).strip()
instruction = re.sub(re.escape('#(nop) '), '', instruction).strip()
first = instruction.split(' ').pop(0)
if first and first not in parse.directives and \
'RUN' not in instruction:
instruction = 'RUN ' + instruction
return instruction
def get_commands_from_history(image_layer):
'''Given the image layer object and the shell, get the list of command
objects that created the layer'''
# set up notice origin for the layer
origin_layer = 'Layer {}'.format(image_layer.layer_index)
if image_layer.created_by:
instruction = created_to_instruction(image_layer.created_by)
image_layer.origins.add_notice_to_origins(origin_layer, Notice(
formats.dockerfile_line.format(dockerfile_instruction=instruction),
'info'))
command_line = instruction.split(' ', 1)[1]
else:
instruction = ''
image_layer.origins.add_notice_to_origins(origin_layer, Notice(
formats.no_created_by, 'warning'))
command_line = instruction
# Image layers are created with the directives RUN, ADD and COPY
# For ADD and COPY instructions, there is no information about the
# packages added
if 'ADD' in instruction or 'COPY' in instruction:
image_layer.origins.add_notice_to_origins(origin_layer, Notice(
errors.unknown_content.format(files=command_line), 'warning'))
# return an empty list as we cannot find any commands
return []
# for RUN instructions we can return a list of commands
command_list, msg = fltr.filter_install_commands(command_line)
if msg:
image_layer.origins.add_notice_to_origins(origin_layer, Notice(
msg, 'warning'))
return command_list
def set_imported_layers(docker_image):
'''Given a Docker image object that was built from a Dockerfile, set the
layers that were imported using the Dockerfile's FROM command or the ones
that came before it'''
index = -1
from_line = ''
dockerfile_lines = docker_commands
for cmd in dockerfile_lines:
if cmd['instruction'] == 'FROM':
from_line = cmd['content'].rstrip()
break
command_list = parse.get_command_list(dockerfile_lines)
for layer in docker_image.layers:
instr = created_to_instruction(layer.created_by)
if instr in command_list:
index = docker_image.layers.index(layer)
break
if index != -1:
# index was set so all layers before this index has been imported
for i in range(0, index-1):
docker_image.layers[i].import_str = from_line
def get_env_vars(image_obj):
'''Given a docker image object, return the list of environment variables,
if any, based on their values in the config.'''
config = image_obj.get_image_config(image_obj.get_image_manifest())
# replace '\t' with '\\t' in the ENV
for idx, env_str in enumerate(config['config']['Env']):
config['config']['Env'][idx] = env_str.replace('\t', '\\t')
return config['config']['Env']
def lock_layer_instruction(dfobj, line_index, commands, image_layer):
"""Given the Dockerfile object, the line index that we are replacing,
the list command objects that installed packages, and the image layer,
rewrite the corresponding line in the Dockerfile with the package and
the version installed"""
for command in commands:
# get the version separator
vsep = command_lib.check_pinning_separator(command.name)
# replace the packages with package separators for each of the words
for word in command.words:
for pkg in image_layer.packages:
if pkg.name == word:
parse.expand_package(
dfobj.structure[line_index], pkg.name, pkg.version,
vsep)
return dfobj
def lock_dockerfile(dfobj, image_obj):
"""Given a Dockerfile object and the corresponding Image object, rewrite
the content to pin packages to their versions"""
# get all the RUN commands in the dockerfile
run_list = parse.get_run_layers(dfobj)
# go through the image layers to find the ones corresponding to the
# run commands
for layer in image_obj.layers:
if not layer.import_str:
# this layer is not from a FROM line
# we get the layer instruction
cmd, instr = fltr.get_run_command(layer.created_by)
if instr == 'RUN':
# find the line in the Dockerfile that matches this command
for run_dict in run_list:
if run_dict['value'] == cmd:
# get the list of install commands
command_list, _ = fltr.filter_install_commands(
general.clean_command(run_dict['value']))
# pin packages installed by each command
run_index = dfobj.structure.index(run_dict)
dfobj = lock_layer_instruction(
dfobj, run_index, command_list, layer)
return dfobj
def create_locked_dockerfile(dfobj):
'''Given a dockerfile object, the information in a new Dockerfile object
Copy the dfobj info to the destination output Dockerfile location'''
# packages in RUN lines, ENV, and ARG values are already expanded
parse.expand_from_images(dfobj)
parse.expand_add_command(dfobj)
# create the output file
dfile = ''
prev_endline = 0
for command_dict in dfobj.structure:
endline = command_dict["endline"]
diff = endline - prev_endline
# calculate number of new line characters to
# add before each line of content
delimeter = "\n" * (diff - 1) if diff > 1 else ""
dfile = dfile + delimeter + command_dict['content']
prev_endline = endline
return dfile
def write_locked_dockerfile(dfile, destination=None):
'''Write the pinned Dockerfile to a file'''
if destination is not None:
file_name = destination
else:
file_name = constants.locked_dockerfile
with open(file_name, 'w') as f:
f.write(dfile)
stats: avg_line_length: 41.555556 | max_line_length: 79 | alphanum_fraction: 0.668717
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 4,234/0.377362

---
hexsha: 5a3209a99cbad4e38fb7649cdcdb53c050ccbf17 | size: 2,003 | ext: py | lang: Python
max_stars: path: utils/firebase.py | repo: YangWanjun/sales-encrypt | head: dcf0975164f60dd53385661029c4a270abdfd30e | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: path: utils/firebase.py | repo: YangWanjun/sales-encrypt | head: dcf0975164f60dd53385661029c4a270abdfd30e | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: path: utils/firebase.py | repo: YangWanjun/sales-encrypt | head: dcf0975164f60dd53385661029c4a270abdfd30e | licenses: ["Apache-2.0"] | count: null | events: null
content:
import os
import firebase_admin
from firebase_admin import credentials, messaging
from django.conf import settings
from utils import common, constants
logger = common.get_system_logger()
cred = credentials.Certificate(os.path.join(
settings.BASE_DIR,
'data',
'sales-yang-firebase-adminsdk-2ga7e-17745491f0.json'
))
firebase_admin.initialize_app(credential=cred)
# def subscribe_to_topic(registration_tokens, topic):
#     """Subscribe devices to a topic.
#
#     :param registration_tokens: list of Instance IDs
#     :param topic: topic name
#     :return:
#     """
#     res = messaging.subscribe_to_topic(registration_tokens, topic)
#     return res.success_count, res.failure_count, res.errors
#
#
# def unsubscribe_from_topic(registration_tokens, topic):
#     """Unsubscribe devices from a topic.
#
#     :param registration_tokens: list of Instance IDs
#     :param topic: topic name
#     :return:
#     """
#     res = messaging.unsubscribe_from_topic(registration_tokens, topic)
#     return res.success_count, res.failure_count, res.errors
def send_message_to_topic(topic, title, body, forward=None):
    """Send a notification message to users.
    The message is saved to the DB first and then sent;
    otherwise it may not show up in the on-screen notification list.
    :param topic: a topic registered in the master data (not a topic registered in Firebase)
    :param title: title
    :param body: message body
    :param forward: destination to navigate to after the message is tapped
    :return:
    """
from account.models import Notification
from master.models import FirebaseDevice
Notification.add_by_topic(topic.name, title, body, forward=forward)
devices = FirebaseDevice.objects.filter(user__in=topic.users.all())
if devices.count() == 0:
        # No devices are registered for this topic
logger.info(constants.INFO_FIREBASE_NO_DEVICE.format(topic=topic.name))
return
    # Notify the users
message = messaging.MulticastMessage(data={
'title': title,
'body': body
}, tokens=[item.token for item in devices])
res = messaging.send_multicast(message)
logger.info(constants.INFO_FIREBASE_SEND_MESSAGE.format(topic=topic.name))
stats: avg_line_length: 29.028986 | max_line_length: 79 | alphanum_fraction: 0.724413
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 1,377/0.571132

---
hexsha: 5a3220a6933b741f74449b702618162293bca339 | size: 1,944 | ext: py | lang: Python
max_stars: path: tests/settings.py | repo: matrixorz/firefly | head: fb8082ccc525bf7b266960ae49fc0b15e522fd92 | licenses: ["MIT"] | count: 247 | events: 2015-04-13T05:58:10.000Z to 2021-01-21T07:31:58.000Z
max_issues: path: tests/settings.py | repo: qiluosheng/firefly | head: fb8082ccc525bf7b266960ae49fc0b15e522fd92 | licenses: ["MIT"] | count: 57 | events: 2015-04-13T15:10:50.000Z to 2016-04-08T09:15:27.000Z
max_forks: path: tests/settings.py | repo: qiluosheng/firefly | head: fb8082ccc525bf7b266960ae49fc0b15e522fd92 | licenses: ["MIT"] | count: 94 | events: 2015-04-12T06:03:30.000Z to 2020-05-11T14:26:56.000Z
content:
# coding=utf-8
DEBUG = True
TESTING = True
SECRET_KEY = 'secret_key for test'
# mongodb
MONGODB_SETTINGS = {
'db': 'firefly_test',
'username': '',
'password': '',
'host': '127.0.0.1',
'port': 27017
}
# redis cache
CACHE_TYPE = 'redis'
CACHE_REDIS_HOST = '127.0.0.1'
CACHE_REDIS_PORT = 6379
CACHE_REDIS_DB = 9
CACHE_REDIS_PASSWORD = ''
# mail sender
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = 'MAIL_USERNAME'
MAIL_PASSWORD = 'MAIL_PASSWORD'
MAIL_DEFAULT_SENDER = 'admin@python-cn.org'
SECURITY_PASSWORD_SALT = "abc"
SECURITY_PASSWORD_HASH = "bcrypt"
# SECURITY_PASSWORD_HASH = "pbkdf2_sha512"
SECURITY_EMAIL_SENDER = "support@python-cn.org"
SECURITY_CONFIRM_SALT = "570be5f24e690ce5af208244f3e539a93b6e4f05"
SECURITY_REMEMBER_SALT = "de154140385c591ea771dcb3b33f374383e6ea47"
# Set secret keys for CSRF protection
CSRF_ENABLED = False
WTF_CSRF_ENABLED = False
SERVER_EMAIL = 'Python-China <support@python-cn.org>'
# Flask-SocialBlueprint
SOCIAL_BLUEPRINT = {
# https://developers.facebook.com/apps/
"flask_social_blueprint.providers.Facebook": {
# App ID
'consumer_key': '197…',
# App Secret
'consumer_secret': 'c956c1…'
},
# https://apps.twitter.com/app/new
"flask_social_blueprint.providers.Twitter": {
# Your access token from API Keys tab
'consumer_key': 'bkp…',
# access token secret
'consumer_secret': 'pHUx…'
},
# https://console.developers.google.com/project
"flask_social_blueprint.providers.Google": {
# Client ID
'consumer_key': '797….apps.googleusercontent.com',
# Client secret
'consumer_secret': 'bDG…'
},
# https://github.com/settings/applications/new
"flask_social_blueprint.providers.Github": {
# Client ID
'consumer_key': '6f6…',
# Client Secret
'consumer_secret': '1a9…'
},
}
stats: avg_line_length: 25.578947 | max_line_length: 67 | alphanum_fraction: 0.679012
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 1,169/0.596429

---
hexsha: 5a3481b2ed60e03ed802eb9ef17136804b5ee7a0 | size: 981 | ext: py | lang: Python
max_stars: path: pyhack/boris_stag.py | repo: Krissmedt/runko | head: 073306de9284f1502d0538d33545bc14c80e8b93 | licenses: ["MIT"] | count: null | events: null
max_issues: path: pyhack/boris_stag.py | repo: Krissmedt/runko | head: 073306de9284f1502d0538d33545bc14c80e8b93 | licenses: ["MIT"] | count: null | events: null
max_forks: path: pyhack/boris_stag.py | repo: Krissmedt/runko | head: 073306de9284f1502d0538d33545bc14c80e8b93 | licenses: ["MIT"] | count: null | events: null
content:
import numpy as np
from pyhack.py_runko_aux import *
from pyhack.boris import *
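# Staggered Boris pushers: boris_rp updates the velocities in the E and B fields,
# positions are then advanced by dtf*c*vel scaled by g = ginv(c, vel*c), and the
# tile's particle container is rebuilt. boris_staggered_first takes only a half
# step in velocity to set up the staggering.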
def boris_staggered(tile,dtf=1):
c = tile.cfl
cont = tile.get_container(0)
pos = py_pos(cont)
vel = py_vel(cont)
E,B = py_em(cont)
nq = pos.shape[0]
dims = pos.shape[1]
vel = boris_rp(vel,E,B,c,cont.q,dtf=dtf)
g = ginv(c,vel*c)
for i in range(0,dims):
pos[:,i] += dtf*c*vel[:,i]*g
tile.delete_all_particles()
for i in range(0,nq):
cont.add_particle(pos[i,:],vel[i,:],1.0)
def boris_staggered_first(tile,dtf=1):
c = tile.cfl
cont = tile.get_container(0)
pos = py_pos(cont)
vel = py_vel(cont)
E,B = py_em(cont)
nq = pos.shape[0]
dims = pos.shape[1]
vel = boris_rp(vel,E,B,c,cont.q,dtf=0.5*dtf)
g = ginv(c,vel*c)
for i in range(0,dims):
pos[:,i] += dtf*c*vel[:,i]*g
tile.delete_all_particles()
for i in range(0,nq):
cont.add_particle(pos[i,:],vel[i,:],1.0)
stats: avg_line_length: 18.509434 | max_line_length: 48 | alphanum_fraction: 0.579001
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 0/0

---
hexsha: 5a36eda2f990b0b613ca5b9070e7a670400461bc | size: 1,806 | ext: py | lang: Python
max_stars: path: mbed_connector_api/tests/mock_data.py | repo: ARMmbed/mbed-connector-python | head: a5024a01dc67cc192c8bf7a70b251fcf0a3f279b | licenses: ["Apache-2.0"] | count: 2 | events: 2017-01-05T07:16:03.000Z to 2018-09-04T02:26:19.000Z
max_issues: path: mbed_connector_api/tests/mock_data.py | repo: ARMmbed/mbed-connector-python | head: a5024a01dc67cc192c8bf7a70b251fcf0a3f279b | licenses: ["Apache-2.0"] | count: 13 | events: 2016-02-29T17:31:56.000Z to 2017-02-07T22:46:17.000Z
max_forks: path: mbed_connector_api/tests/mock_data.py | repo: ARMmbed/mbed-connector-python | head: a5024a01dc67cc192c8bf7a70b251fcf0a3f279b | licenses: ["Apache-2.0"] | count: 2 | events: 2017-02-07T22:10:41.000Z to 2017-03-06T06:38:58.000Z
content:
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
class mockData:
"""dictionary of mocking data for the mocking tests"""
# dictionary to hold the mock data
_data={}
# function to add mock data to the _mock_data dictionary
def _add(self,uri,status,payload):
self._data[uri] = {"status":status,
"payload":payload
}
return
def getPayload(self,input):
return self._data[input]['payload']
def getStatusCode(self,input):
return self._data[input]['status']
# initialize the _mock_data dictionary with all the appropriate mocking data
def __init__(self):
self._add( uri="limits", status=200,
payload='{"transaction-quota":10000,"transaction-count":259,"endpoint-quota":100,"endpoint-count":1}')
self._add( uri="connectorVersion", status=200,
payload='DeviceServer v3.0.0-520\nREST version = v2')
self._add( uri="apiVersion", status=200,
payload='["v1","v2"]')
self._add( uri="endpoints", status=200,
payload='[{"name":"51f540a2-3113-46e2-aef4-96e94a637b31","type":"test","status":"ACTIVE"}]')
self._add( uri="resources", status=200,
payload='[{"uri":"/Test/0/S","rt":"Static","obs":false,"type":""},{"uri":"/Test/0/D","rt":"Dynamic","obs":true,"type":""},{"uri":"/3/0/2","obs":false,"type":""},{"uri":"/3/0/1","obs":false,"type":""},{"uri":"/3/0/17","obs":false,"type":""},{"uri":"/3/0/0","obs":false,"type":""},{"uri":"/3/0/16","obs":false,"type":""},{"uri":"/3/0/11","obs":false,"type":""},{"uri":"/3/0/11/0","obs":false,"type":""},{"uri":"/3/0/4","obs":false,"type":""}]')
#self._add( uri="", status=200,
# payload="")
#self._add( uri="", status=200,
# payload="")
#self._add( uri="", status=200,
# payload="")
stats: avg_line_length: 42 | max_line_length: 448 | alphanum_fraction: 0.613511
count/score: classes 1,682/0.93134 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 1,244/0.688815

---
hexsha: 5a37802b395a4a964c1285e03e992f8b1712b575 | size: 2,134 | ext: py | lang: Python
max_stars: path: examples/demo/eager_demo/src/demo_1_pybullet.py | repo: eager-dev/eager | head: f10ccbd7452acb3a29881ecd95c759f632c91da9 | licenses: ["Apache-2.0"] | count: 16 | events: 2021-07-02T14:48:53.000Z to 2022-02-23T02:53:01.000Z
max_issues: path: examples/demo/eager_demo/src/demo_1_pybullet.py | repo: eager-dev/eager | head: f10ccbd7452acb3a29881ecd95c759f632c91da9 | licenses: ["Apache-2.0"] | count: 37 | events: 2021-06-30T12:10:29.000Z to 2022-02-02T09:46:34.000Z
max_forks: path: examples/demo/eager_demo/src/demo_1_pybullet.py | repo: eager-dev/eager | head: f10ccbd7452acb3a29881ecd95c759f632c91da9 | licenses: ["Apache-2.0"] | count: null | events: null
content:
#!/usr/bin/env python3
import rospy
# Import eager packages
from eager_core.utils.file_utils import launch_roscore, load_yaml
from eager_core.eager_env import EagerEnv
from eager_core.objects import Object
from eager_core.wrappers.flatten import Flatten
from eager_bridge_pybullet.pybullet_engine import PyBulletEngine # noqa: F401
# Required for action processor
from eager_process_safe_actions.safe_actions_processor import SafeActionsProcessor
if __name__ == '__main__':
roscore = launch_roscore() # First launch roscore
rospy.init_node('eager_demo', anonymous=True, log_level=rospy.WARN)
rate = rospy.Rate(1/0.08)
# Define the engine
engine = PyBulletEngine(gui=True)
# Create robot
robot = Object.create('robot', 'eager_robot_vx300s', 'vx300s')
# Add action preprocessing
processor = SafeActionsProcessor(robot_type='vx300s',
vel_limit=0.25,
collision_height=0.15,
)
robot.actuators['joints'].add_preprocess(
processor=processor,
observations_from_objects=[robot],
)
# Add a camera for rendering
calibration = load_yaml('eager_demo', 'calibration')
cam = Object.create('cam', 'eager_sensor_realsense', 'd435',
position=calibration['position'],
orientation=calibration['orientation'],
)
# Create environment
env = EagerEnv(name='demo_env',
engine=engine,
objects=[robot, cam],
render_sensor=cam.sensors['camera_rgb'],
)
env = Flatten(env)
env.render()
obs = env.reset() # TODO: if code does not close properly, render seems to keep a thread open....
for i in range(200):
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
if done:
obs = env.reset()
rate.sleep()
# todo: create a env.close(): close render screen, and env.shutdown() to shutdown the environment cleanly.
env.close()
stats: avg_line_length: 33.873016 | max_line_length: 110 | alphanum_fraction: 0.627929
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 580/0.27179

---
hexsha: 5a3924093bca8ec08e3a6779656c4151c0bb55bf | size: 3,811 | ext: py | lang: Python
max_stars: path: kerastuner/engine/tuner_utils.py | repo: DL-2020-Shakespeare/keras-tuner | head: 5f35f101883a7884e9521de7db4eb632ab659775 | licenses: ["Apache-2.0"] | count: 1 | events: 2021-06-08T01:19:58.000Z to 2021-06-08T01:19:58.000Z
max_issues: path: kerastuner/engine/tuner_utils.py | repo: DL-2020-Shakespeare/keras-tuner | head: 5f35f101883a7884e9521de7db4eb632ab659775 | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: path: kerastuner/engine/tuner_utils.py | repo: DL-2020-Shakespeare/keras-tuner | head: 5f35f101883a7884e9521de7db4eb632ab659775 | licenses: ["Apache-2.0"] | count: null | events: null
content:
# Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Tuner class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import six
import tensorflow as tf
from tensorflow import keras
from ..abstractions import display
class TunerStats(object):
"""Track tuner statistics."""
def __init__(self):
self.num_generated_models = 0 # overall number of instances generated
self.num_invalid_models = 0 # how many models didn't work
self.num_oversized_models = 0 # num models with params> max_params
def summary(self, extended=False):
display.subsection('Tuning stats')
display.display_settings(self.get_config())
def get_config(self):
return {
'num_generated_models': self.num_generated_models,
'num_invalid_models': self.num_invalid_models,
'num_oversized_models': self.num_oversized_models
}
@classmethod
def from_config(cls, config):
stats = cls()
stats.num_generated_models = config['num_generated_models']
stats.num_invalid_models = config['num_invalid_models']
stats.num_oversized_models = config['num_oversized_models']
return stats
def get_max_epochs_and_steps(fit_args, fit_kwargs):
if fit_args:
x = tf.nest.flatten(fit_args)[0]
else:
x = tf.nest.flatten(fit_kwargs.get('x'))[0]
batch_size = fit_kwargs.get('batch_size', 32)
if hasattr(x, '__len__'):
max_steps = math.ceil(float(len(x)) / batch_size)
else:
max_steps = fit_kwargs.get('steps')
max_epochs = fit_kwargs.get('epochs', 1)
return max_epochs, max_steps
class TunerCallback(keras.callbacks.Callback):
def __init__(self, tuner, trial):
super(TunerCallback, self).__init__()
self.tuner = tuner
self.trial = trial
def on_epoch_begin(self, epoch, logs=None):
self.tuner.on_epoch_begin(
self.trial, self.model, epoch, logs=logs)
def on_batch_begin(self, batch, logs=None):
self.tuner.on_batch_begin(self.trial, self.model, batch, logs)
def on_batch_end(self, batch, logs=None):
self.tuner.on_batch_end(self.trial, self.model, batch, logs)
def on_epoch_end(self, epoch, logs=None):
self.tuner.on_epoch_end(
self.trial, self.model, epoch, logs=logs)
# TODO: Add more extensive display.
class Display(object):
def on_trial_begin(self, trial):
display.section('New model')
trial.summary()
def on_trial_end(self, trial):
display.section('Trial complete')
trial.summary()
def average_histories(histories):
"""Averages the per-epoch metrics from multiple executions."""
averaged = {}
metrics = histories[0].keys()
for metric in metrics:
values = []
for epoch_values in six.moves.zip_longest(
*[h[metric] for h in histories],
fillvalue=np.nan):
values.append(np.nanmean(epoch_values))
averaged[metric] = values
# Convert {str: [float]} to [{str: float}]
averaged = [dict(zip(metrics, vals)) for vals in zip(*averaged.values())]
return averaged
stats: avg_line_length: 31.758333 | max_line_length: 78 | alphanum_fraction: 0.680399
count/score: classes 1,890/0.495933 | generators 0/0 | decorators 289/0.075833 | async_functions 0/0 | documentation 1,087/0.285227

---
hexsha: 5a395024f625042332e48560226cfb73aaa1b4a7 | size: 14,129 | ext: py | lang: Python
max_stars: path: angr/procedures/definitions/win32_d3dcompiler_47.py | repo: r4b3rt/angr | head: c133cfd4f83ffea2a1d9e064241e9459eaabc55f | licenses: ["BSD-2-Clause"] | count: null | events: null
max_issues: path: angr/procedures/definitions/win32_d3dcompiler_47.py | repo: r4b3rt/angr | head: c133cfd4f83ffea2a1d9e064241e9459eaabc55f | licenses: ["BSD-2-Clause"] | count: null | events: null
max_forks: path: angr/procedures/definitions/win32_d3dcompiler_47.py | repo: r4b3rt/angr | head: c133cfd4f83ffea2a1d9e064241e9459eaabc55f | licenses: ["BSD-2-Clause"] | count: null | events: null
content:
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("d3dcompiler_47.dll")
prototypes = \
{
#
'D3DDisassemble11Trace': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeBottom(label="ID3D11ShaderTrace"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "pTrace", "StartStep", "NumSteps", "Flags", "ppDisassembly"]),
#
'D3DReadFileToBlob': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pFileName", "ppContents"]),
#
'D3DWriteBlobToFile': SimTypeFunction([SimTypeBottom(label="ID3DBlob"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=True, label="Int32"), arg_names=["pBlob", "pFileName", "bOverwrite"]),
#
'D3DCompile': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Name": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Definition": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="D3D_SHADER_MACRO", pack=False, align=None), offset=0), SimTypeBottom(label="ID3DInclude"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "pSourceName", "pDefines", "pInclude", "pEntrypoint", "pTarget", "Flags1", "Flags2", "ppCode", "ppErrorMsgs"]),
#
'D3DCompile2': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Name": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Definition": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="D3D_SHADER_MACRO", pack=False, align=None), offset=0), SimTypeBottom(label="ID3DInclude"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "pSourceName", "pDefines", "pInclude", "pEntrypoint", "pTarget", "Flags1", "Flags2", "SecondaryDataFlags", "pSecondaryData", "SecondaryDataSize", "ppCode", "ppErrorMsgs"]),
#
'D3DCompileFromFile': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"Name": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Definition": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="D3D_SHADER_MACRO", pack=False, align=None), offset=0), SimTypeBottom(label="ID3DInclude"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pFileName", "pDefines", "pInclude", "pEntrypoint", "pTarget", "Flags1", "Flags2", "ppCode", "ppErrorMsgs"]),
#
'D3DPreprocess': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Name": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Definition": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="D3D_SHADER_MACRO", pack=False, align=None), offset=0), SimTypeBottom(label="ID3DInclude"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "pSourceName", "pDefines", "pInclude", "ppCodeText", "ppErrorMsgs"]),
#
'D3DGetDebugInfo': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "ppDebugInfo"]),
#
'D3DReflect': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "pInterface", "ppReflector"]),
#
'D3DReflectLibrary': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "riid", "ppReflector"]),
#
'D3DDisassemble': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "Flags", "szComments", "ppDisassembly"]),
#
'D3DDisassembleRegion': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "Flags", "szComments", "StartByteOffset", "NumInsts", "pFinishByteOffset", "ppDisassembly"]),
#
'D3DCreateLinker': SimTypeFunction([SimTypePointer(SimTypeBottom(label="ID3D11Linker"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["ppLinker"]),
#
'D3DLoadModule': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3D11Module"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "cbSrcDataSize", "ppModule"]),
#
'D3DCreateFunctionLinkingGraph': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3D11FunctionLinkingGraph"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["uFlags", "ppFunctionLinkingGraph"]),
#
'D3DGetTraceInstructionOffsets': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), label="LPArray", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "Flags", "StartInstIndex", "NumInsts", "pOffsets", "pTotalInsts"]),
#
'D3DGetInputSignatureBlob': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "ppSignatureBlob"]),
#
'D3DGetOutputSignatureBlob': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "ppSignatureBlob"]),
#
'D3DGetInputAndOutputSignatureBlob': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "ppSignatureBlob"]),
#
'D3DStripShader': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pShaderBytecode", "BytecodeLength", "uStripFlags", "ppStrippedBlob"]),
#
'D3DGetBlobPart': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="D3D_BLOB_PART"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "Part", "Flags", "ppPart"]),
#
'D3DSetBlobPart': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="D3D_BLOB_PART"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "Part", "Flags", "pPart", "PartSize", "ppNewShader"]),
#
'D3DCreateBlob': SimTypeFunction([SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Size", "ppBlob"]),
#
'D3DCompressShaders': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"pBytecode": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "BytecodeLength": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="D3D_SHADER_DATA", pack=False, align=None), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["uNumShaders", "pShaderData", "uFlags", "ppCompressedData"]),
#
'D3DDecompressShaders': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pSrcData", "SrcDataSize", "uNumShaders", "uStartIndex", "pIndices", "uFlags", "ppShaders", "pTotalShaders"]),
#
'D3DDisassemble10Effect': SimTypeFunction([SimTypeBottom(label="ID3D10Effect"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="ID3DBlob"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pEffect", "Flags", "ppDisassembly"]),
}
lib.set_prototypes(prototypes)
| 190.932432
| 1,222
| 0.737561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,881
| 0.274683
|
5a3bb304d53c998d16ff4c3d532be4b3380720b2
| 16,392
|
py
|
Python
|
explorer/explorer.py
|
holarchy/Holon
|
2a557b300bce10fb2c2ab85a1db4bdfd5df470aa
|
[
"MIT"
] | null | null | null |
explorer/explorer.py
|
holarchy/Holon
|
2a557b300bce10fb2c2ab85a1db4bdfd5df470aa
|
[
"MIT"
] | null | null | null |
explorer/explorer.py
|
holarchy/Holon
|
2a557b300bce10fb2c2ab85a1db4bdfd5df470aa
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, flash, abort, redirect, url_for, request
import os
import common
import json
import numbers
import urllib.parse
import pandas as pd
from datetime import datetime
from math import log10, floor
base_dir = '/home/nick/Data/_ensembles'
app = Flask(__name__)
app.config['ENV'] = 'development'
app.config['DEBUG'] = True
app.config['TESTING'] = True
app.config.from_mapping(
SECRET_KEY='dev'
)
# predictions_home_dir = os.path.join(base_dir, 'outlier-predictions-2019_11_13-15_38_28')
predictions_home_dir = os.path.join(base_dir, 'outlier-predictions-2020_01_03-11_15_41')
file_config = common.load_file_config(predictions_home_dir)
labels_dir = os.path.join(predictions_home_dir, 'labels')
# priors_parent_dir = os.path.join(base_dir, 'priors-2019_11_12-19_33_13')
priors_parent_dir = os.path.join(base_dir, 'priors-2019_12_30-18_30_22')
predictions_dir = os.path.join(predictions_home_dir, 'predictions')
priors_dir = os.path.join(priors_parent_dir, 'priors')
prediction_summary = pd.read_csv(os.path.join(predictions_home_dir, 'summary.csv'))
prediction_summary = prediction_summary.sort_values('prediction', ascending=False)
prediction_summary = prediction_summary.reset_index()
def get_flow(flow):
file = os.path.join(predictions_dir, flow + '.json')
if not os.path.isfile(file):
flash(f'{flow} was not found.')
abort(404)
with open(file) as f:
flow = json.load(f)
return flow
def make_label(flow, username, threat_level, classifier, description):
if not os.path.isdir(labels_dir): # make label directory if it doesn't exist.
os.mkdir(labels_dir)
flow_data = get_flow(flow)
prediction_values = list()
for obj in flow_data['objects']:
prediction_values.append((obj['id'], obj['value'], obj['prediction']))
label_file = os.path.join(labels_dir, flow + '.json') # get filename based on flow name
if os.path.isfile(label_file):
# jsn = []
with open(label_file, 'r') as f:
jsn = json.load(f) # if file already exists, get json.
else:
jsn = []
    label_entry = {'userName': username,  # renamed from `dict` to avoid shadowing the builtin
                   'threatLevel': threat_level,
                   'classifier': classifier,
                   'description': description,
                   'timestamp': str(datetime.now()),
                   'version': common.__version__,
                   'data': prediction_values}
    jsn.append(label_entry)
with open(label_file, 'w') as f:
json.dump(jsn, f)
def remove_label(flow, index):
label_file = os.path.join(labels_dir, flow + '.json') # get filename based on flow name
with open(label_file, 'r') as f:
jsn = json.load(f) # if file already exists, get json.
del jsn[index]
with open(label_file, 'w') as f:
json.dump(jsn, f)
def get_labels(flow):
label_file = os.path.join(labels_dir, flow + '.json') # get filename based on flow name
if os.path.isfile(label_file):
with open(label_file, 'r') as f:
jsn = json.load(f) # if file already exists, get json.
else:
jsn = []
return jsn
def round_structure(x, sig=2):
if isinstance(x, numbers.Number):
        if x == 0 or x != x:  # also check for NaN
return 0
return round(x, sig - int(floor(log10(abs(x)))) - 1)
elif isinstance(x, dict):
dct = dict()
for k, v in x.items():
dct[k] = round_structure(v, sig)
return dct
elif isinstance(x, list):
lst = list()
for itm in x:
lst.append(round_structure(itm, sig))
return lst
elif type(x) in (str, bool):
return x
else:
raise TypeError
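# Illustrative example (editor's addition, not in the original source): round_structure
# rounds every number in a nested structure to the given number of significant figures:
#   round_structure({'p': 0.123456, 'vals': [1234.5, 0]})
#   -> {'p': 0.12, 'vals': [1200.0, 0]}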
class PredictionTrace(object):
levels = ['Flow', 'Object', 'Subject']
def __init__(self, flow, obj=None, subject=None):
if flow is None:
raise ValueError(f'Flow parameter cannot be None')
field_predictions = None
flow = urllib.parse.unquote(flow)
jsn = get_flow(flow)
jsn = round_structure(jsn)
raw_data = jsn.get('raw_data')
self.flow = flow
self.biflow_object = obj
self.subject = subject
self.raw_data = raw_data
level = self.levels[0]
prediction_trace = [(level, 'Outlier Score', '', jsn['prediction'])]
prediction_list = 'objects'
prediction_field = 'id'
if obj is not None:
obj = urllib.parse.unquote(obj)
jsn = self.get_level_json(jsn, obj, prediction_list, prediction_field)
level = self.levels[1]
prediction_trace.append((level, obj, jsn['value'], jsn['prediction']))
prediction_list = 'subjects'
prediction_field = 'id'
if subject is not None:
subject = urllib.parse.unquote(subject)
jsn = self.get_level_json(jsn, subject, prediction_list, prediction_field)
level = self.levels[2]
prediction_trace.append((level, subject, jsn['value'], jsn['prediction']))
prediction_list = None
prediction_field = None
field_predictions = jsn
predictions = []
if prediction_field is not None:
for identifier in jsn[prediction_list]:
predictions.append({'id': identifier[prediction_field],
'pred': identifier['prediction'],
'val': identifier.get('value')})
self.level = level
self.prediction_trace = prediction_trace
self.predictions = sorted(predictions, key=lambda i: i['pred'], reverse=True)
self.field_predictions = field_predictions
@property
def my_direction(self):
return file_config.my_direction(self.subject)
@property
def their_direction(self):
return file_config.their_direction(self.subject)
@property
def field_value(self):
if self.biflow_object == file_config.uniflow_indicator: # special case
return str(self.my_direction == file_config.biflow_src_prfx).lower()
else:
return self.raw_data[self.biflow_object]
@property
def field_prior(self):
if self.biflow_object is None:
raise ValueError(f'Can only pull prior based on a field.')
if self.subject.endswith(file_config.hierarchy[0]): # subnet
path = os.path.join(priors_dir,
self.raw_data[self.my_direction + file_config.hierarchy[0]])
elif self.subject.endswith(file_config.hierarchy[1]): # ip
path = os.path.join(priors_dir,
self.raw_data[self.my_direction + file_config.hierarchy[0]],
self.raw_data[self.my_direction + file_config.hierarchy[1]])
else:
raise ValueError(f'Did not recognize level "{self.subject}"')
file = os.path.join(path, '.json')
if not os.path.isfile(file):
raise ValueError(f'Priors file {file} was not found.')
with open(file) as f:
prior = json.load(f)
field_prior = prior[self.uniflow_object]
return field_prior
@property
def uniflow_object(self):
if self.subject is None:
raise ValueError(f'Cannot call uniflow_object without both a _subject_ (ex. dst.ip) and an _object_ (ex. '
f'src.bytes).')
if self.biflow_object.startswith(self.my_direction):
return self.biflow_object.replace(self.my_direction, file_config.uniflow_this_prfx)
elif self.biflow_object.startswith(self.their_direction):
return self.biflow_object.replace(self.their_direction, file_config.uniflow_that_prfx)
else:
return self.biflow_object
@property
def child_level(self):
this = self.level
if this == self.levels[2]:
raise ValueError(f'"Subject" level has no child.')
return self.levels[self.levels.index(this) + 1]
def build_url(self, lvl):
if lvl not in self.levels:
raise ValueError(f'build_url requires one of the 4 defined levels')
segments = ['/prediction', urllib.parse.quote(self.flow)]
if lvl != self.levels[0]:
segments.append(urllib.parse.quote(self.biflow_object))
if lvl != self.levels[1]:
segments.append(urllib.parse.quote(self.subject))
return '/'.join(segments)
@staticmethod
def get_level_json(jsn, value, prediction_list, prediction_field):
level_json = [p for p in jsn.get(prediction_list) if p[prediction_field] == value]
if len(level_json) == 0:
flash(f'{level_json} was not found.')
abort(404)
return level_json.pop()
@property
def chart_data(self):
primary_color = '#007bff'
secondary_color = '#6c757d'
max_columns = 15
cdf = self.field_prior['cdf']
if self.uniflow_object in common.numeric_vars():
typ = 'scatter'
data = [{'x': float(k), 'y': v} for k, v in cdf.items()]
full_data = {'datasets': [{'label': self.uniflow_object,
'backgroundColor': secondary_color,
'data': data},
{'label': self.field_value,
'backgroundColor': primary_color,
'showLine': 'true',
'borderColor': primary_color,
'data': [{'x': 0, 'y': self.field_value},
{'x': 1, 'y': self.field_value}]},
]}
elif self.uniflow_object in common.binary_vars() or self.uniflow_object in common.categorical_vars():
typ = 'bar'
ln = len(cdf)
ix = None
if self.field_value in cdf.keys():
ix = list(cdf.keys()).index(self.field_value)
if ln < max_columns:
indexes = list(range(0, ln))
else:
if ix is None or ix < 10 or ix > ln - 4:
indexes = list(range(0, 10)) + [f'MANY\n({ln - 14})'] + list(range(ln - 4, ln))
else:
indexes = list(range(0, 10)) + [f'MANY\n({ix - 10})'] + [ix] + [f'MANY\n({ln - ix - 3})'] + list(range(ln - 3, ln))
labels = [list(cdf.keys())[idx] if type(idx) == int else idx for idx in indexes]
data = [list(cdf.values())[idx] if type(idx) == int else 0 for idx in indexes]
colors = [primary_color if itm == self.field_value else secondary_color for itm in labels]
full_data = {'labels': labels,
'datasets': [{'label': self.uniflow_object,
'backgroundColor': colors,
'data': data}]}
else:
raise ValueError(f'Field does not seem to be valid, has value {self.uniflow_object}')
chart_data = {'type': typ,
'data': full_data,
'options': {
'legend': {'display': 'false'},
'scales': {'yAxes': [{'ticks': {'min': 0}}]}}}
return chart_data
@app.route('/')
@app.route('/summary/')
@app.route('/prediction/')
def index():
return redirect(url_for('summary', page_num=1))
@app.route('/summary/<int:page_num>')
def summary(page_num=1):
results_per_page = 10
i = (page_num - 1) * results_per_page
if i > len(prediction_summary):
abort(404)
predictions = []
n = 0
while n < results_per_page and i < len(prediction_summary):
p = prediction_summary.loc[i]
id = p['filename'].replace('.json','')
data = id.split('_')
ts = datetime.fromtimestamp(int(data[0])/1000)
pred = round_structure(p['prediction'])
labels = get_labels(id)
if len(labels):
classification = labels[0]['threatLevel']
else:
classification = ''
predictions.append({'id': id, 'timestamp': ts, 'src_ip': data[1], 'src_port': p['src.port'], 'dst_ip': data[2],
'dst_port': p['dst.port'], 'classification': classification, 'pred': pred, 'index': i})
i += 1
n += 1
last_page = floor(len(prediction_summary) / results_per_page) + 1
nav_display = dict()
nav_display.update({1: '«', last_page: '»'})
if page_num not in (1, last_page):
nav_display.update({n: str(n) for n in list(range(page_num - 1, page_num + 2))})
if page_num <= 3:
nav_display.update({n: str(n) for n in list(range(1,4))})
if page_num >= last_page - 3:
nav_display.update({n: str(n) for n in list(range(last_page-2, last_page+1))})
nav_display = dict(sorted(nav_display.items()))
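    # Editor's note (illustrative, not in the original): for page_num=5 and last_page=20
    # the updates above yield nav_display == {1: '«', 4: '4', 5: '5', 6: '6', 20: '»'},
    # i.e. first/last arrows plus a small window around the current page.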
return render_template('summary.html', predictions=predictions, page_num=page_num, nav_display=nav_display)
def resolve_user_label(flow, request):
if request.method == "POST":
if request.form.get('threatLevel') is not None: # if user added new label
make_label(flow, username=request.form.get('userName'), threat_level=request.form.get('threatLevel'),
classifier=request.form.get('classifier'), description=request.form.get('description'))
else: # if user trying to delete label
i = 1
while i <= len(get_labels(flow)):
if request.form.get(str(i)) is not None:
print(i)
remove_label(flow, i-1)
i += 1
@app.route('/prediction/<flow>', methods=['GET', 'POST'])
@app.route('/prediction/<flow>/<object>', methods=['GET', 'POST'])
def flow_prediction(flow, object=None):
resolve_user_label(flow, request)
trace = PredictionTrace(flow, object)
return render_template('level_explorer.html', trace=trace, labels=get_labels(flow))
@app.route('/prediction/<flow>/<object>/<subject>', methods=['GET', 'POST'])
def field_prediction(flow, object, subject):
resolve_user_label(flow, request)
trace = PredictionTrace(flow, object, subject)
return render_template('field_explorer.html', trace=trace, labels=get_labels(flow))
@app.route('/refs')
def refs():
return render_template('references.html')
@app.route('/admin/')
def admin():
return redirect(url_for('admin_data'))
@app.route('/admin/data', methods=['GET', 'POST'])
def admin_data():
def get_metadata(dir, pattern):
metadata = list()
for subdir in os.listdir(dir):
path = os.path.join(dir, subdir)
if subdir.startswith(pattern) and os.path.isdir(path):
filepath = os.path.join(path, 'metadata.json')
if os.path.isfile(filepath):
with open(filepath) as f:
jsn = json.load(f)
md = {'directory': os.path.basename(dir),
'md5': jsn.get('md5'),
'filename': jsn.get('filename'),
'size (GB)': jsn.get('size (GB)'),
'number of rows': jsn.get('number of rows'),
'start date': jsn.get('start date'),
'end date': jsn.get('end date'),
'package version': jsn.get('package version'),
}
metadata.append(md)
return metadata
prior_metadata = get_metadata(base_dir, 'priors')
pred_metadata = get_metadata(base_dir, 'outlier-predictions')
raw_metadata = get_metadata(base_dir, 'raw-data')
return render_template('admin_data.html', pred_metadata=pred_metadata, prior_metadata=prior_metadata, raw_metadata=raw_metadata)
@app.route('/admin/labels', methods=['GET', 'POST'])
def admin_labels():
return render_template('admin_labels.html')
@app.route('/admin/file-config', methods=['GET', 'POST'])
def admin_data_config():
return render_template('admin_file_config.html')
@app.errorhandler(404)
def page_not_found(e):
flash(f'404: Page not found.')
return render_template('base.html')
| 37.944444
| 135
| 0.586872
| 7,791
| 0.475293
| 0
| 0
| 9,512
| 0.580283
| 0
| 0
| 2,766
| 0.168741
|
5a3c1f4058904f112a823d0ce1fa4d2ba743c174
| 6,151
|
py
|
Python
|
models/grammateus.py
|
monotasker/Online-Critical-Pseudepigrapha
|
456ef828834aeaedda8204a6107729f277063b9f
|
[
"W3C"
] | 1
|
2017-09-03T12:59:19.000Z
|
2017-09-03T12:59:19.000Z
|
models/grammateus.py
|
OnlineCriticalPseudepigrapha/Online-Critical-Pseudepigrapha
|
456ef828834aeaedda8204a6107729f277063b9f
|
[
"W3C"
] | 18
|
2018-05-11T17:08:48.000Z
|
2018-06-29T20:15:37.000Z
|
models/grammateus.py
|
monotasker/Online-Critical-Pseudepigrapha
|
456ef828834aeaedda8204a6107729f277063b9f
|
[
"W3C"
] | 1
|
2017-09-17T16:13:45.000Z
|
2017-09-17T16:13:45.000Z
|
#! /usr/bin/python2.7
# -*- coding: utf8 -*-
import datetime
# from plugin_ajaxselect import AjaxSelect
if 0:
from gluon import db, Field, auth, IS_EMPTY_OR, IS_IN_DB, current, URL
response = current.response
response.files.insert(5, URL('static',
'plugin_ajaxselect/plugin_ajaxselect.js'))
#response.files.append(URL('static', 'plugin_ajaxselect/plugin_ajaxselect.css'))
response.files.append(URL('static', 'plugin_listandedit/plugin_listandedit.css'))
db.define_table('genres',
Field('genre', 'string'),
format='%(genre)s')
db.define_table('biblical_figures',
Field('figure', 'string'),
format='%(figure)s')
db.define_table('draftdocs',
Field('name'),
Field('filename'),
Field('editor', db.auth_user),
Field('editor2', db.auth_user),
Field('editor3', db.auth_user),
Field('editor4', db.auth_user),
Field('assistant_editor', db.auth_user),
Field('assistant_editor2', db.auth_user),
Field('assistant_editor3', db.auth_user),
Field('proofreader', db.auth_user),
Field('proofreader2', db.auth_user),
Field('proofreader3', db.auth_user),
Field('version', 'double'),
Field('introduction', 'text'),
Field('provenance', 'text'),
Field('themes', 'text'),
Field('status', 'text'),
Field('manuscripts', 'text'),
Field('bibliography', 'text'),
Field('corrections', 'text'),
Field('sigla', 'text'),
Field('copyright', 'text'),
Field('citation_format', 'text'),
Field('genres', 'list:reference genres'),
Field('figures', 'list:reference biblical_figures'),
format='%(name)s')
db.draftdocs.editor.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.editor2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.editor3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.editor4.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.assistant_editor.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.assistant_editor2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.assistant_editor3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.proofreader.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.proofreader2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.proofreader3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.genres.requires = IS_EMPTY_OR(IS_IN_DB(db, 'genres.id',
db.genres._format,
multiple=True))
db.draftdocs.figures.requires = IS_EMPTY_OR(IS_IN_DB(db, 'biblical_figures.id',
db.biblical_figures._format,
multiple=True))
db.define_table('docs',
Field('name'),
Field('filename'),
Field('editor', db.auth_user),
Field('editor2', db.auth_user),
Field('editor3', db.auth_user),
Field('editor4', db.auth_user),
Field('assistant_editor', db.auth_user),
Field('assistant_editor2', db.auth_user),
Field('assistant_editor3', db.auth_user),
Field('proofreader', db.auth_user),
Field('proofreader2', db.auth_user),
Field('proofreader3', db.auth_user),
Field('version', 'double'),
Field('introduction', 'text'),
Field('provenance', 'text'),
Field('themes', 'text'),
Field('status', 'text'),
Field('manuscripts', 'text'),
Field('bibliography', 'text'),
Field('corrections', 'text'),
Field('sigla', 'text'),
Field('copyright', 'text'),
Field('citation_format', 'text'),
Field('genres', 'list:reference genres'),
Field('figures', 'list:reference biblical_figures'),
format='%(name)s')
db.docs.editor.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.editor2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.editor3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.editor4.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.assistant_editor.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.assistant_editor2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.assistant_editor3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.proofreader.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.proofreader2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.proofreader3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.genres.requires = IS_EMPTY_OR(IS_IN_DB(db, 'genres.id',
db.genres._format,
multiple=True))
db.docs.figures.requires = IS_EMPTY_OR(IS_IN_DB(db, 'biblical_figures.id',
db.biblical_figures._format,
multiple=True))
db.define_table('biblio',
Field('record'),
format='%(record)s')
db.define_table('pages',
Field('page_label', 'string'),
Field('title', 'string'),
Field('body', 'text'),
Field('poster', db.auth_user, default=auth.user_id),
Field('post_date', 'datetime', default=datetime.datetime.utcnow()),
format='%(title)s')
db.define_table('news',
Field('news_token', 'string'),
Field('title', 'string'),
Field('body', 'text'),
Field('poster', db.auth_user, default=auth.user_id),
Field('post_date', 'datetime', default=datetime.datetime.utcnow()),
format='%(title)s')
db.define_table('bugs',
Field('title'),
Field('body', 'text'),
Field('poster', db.auth_user, default=auth.user_id),
Field('post_date', 'datetime'),
format='%(title)s')
| 44.572464
| 105
| 0.662656
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,835
| 0.298325
|
5a3ccdb8281af1ea0b8a669045afc2025efc659b
| 12,559
|
py
|
Python
|
interface.py
|
Kryptagora/pysum
|
5281d47b7fa4d5500230b6b30797ab1a3adabcc2
|
[
"MIT"
] | 3
|
2021-01-08T21:07:37.000Z
|
2021-11-29T19:26:56.000Z
|
interface.py
|
Kryptagora/pysum
|
5281d47b7fa4d5500230b6b30797ab1a3adabcc2
|
[
"MIT"
] | null | null | null |
interface.py
|
Kryptagora/pysum
|
5281d47b7fa4d5500230b6b30797ab1a3adabcc2
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import filedialog
from urllib.request import urlopen
from pathlib import Path
from tkinter import ttk
import numpy as np
import base64
import io
import re
from src.theme import theme
from src.algorithm import blosum
from src.utils import RichText
def qopen(path:str):
'''Opens and returns file content'''
with open(path, 'r') as fh:
content = fh.read()
return content
class Pysum(tk.Frame):
def __init__(self, title):
self.root = tk.Tk()
self.root.title(title)
self.root.configure(background='#ecffde')
self.root.columnconfigure(0, weight=1)
self.root.rowconfigure(1, weight=1)
self.style = ttk.Style()
self.style.theme_create('bio', settings=theme())
self.style.theme_use('bio')
self.font_1 = ('Helvetica', 10, 'bold')
self.font_2 = ('Helvetica', 10)
self.main_text = qopen('src/main_texts.txt').split('\n\n')
self.tabs = ttk.Notebook(self.root, padding=10)
self.result_frame = None
self.matrix_labels = None
self.matrix_result = None
self.add_tabs()
self.add_content_tool()
self.add_content_result()
self.add_content_about()
self.root.mainloop()
def add_tabs(self):
self.tool = ttk.Frame(self.tabs)
self.tabs.add(self.tool, text=' Tool ')
self.results = ttk.Frame(self.tabs)
self.tabs.add(self.results, text=' Results ')
self.about = ttk.Frame(self.tabs)
self.tabs.add(self.about, text=' About ')
self.tabs.grid(row=0, column=0)
def add_content_tool(self):
'''Adds all content to the tool tab'''
tool_frame = ttk.LabelFrame(self.tool, text="File Structure", padding=50, relief=tk.RIDGE)
tool_frame.grid(row=0, column=0, sticky=tk.E + tk.W + tk.N + tk.S)
tf_l1 = ttk.Label(tool_frame, text=self.main_text[0], font=self.font_1)
tf_l1.grid(row=0, column=0, pady=3, columnspan=3, sticky="w")
tf_l2 = ttk.Label(tool_frame, text=self.main_text[1], font=self.font_2)
tf_l2.grid(row=1, column=0, pady=3, columnspan=3, sticky="w")
tf_l3 = ttk.Label(tool_frame, text=self.main_text[2], font=self.font_1)
tf_l3.grid(row=2, column=0, pady=3, columnspan=3, sticky="w")
tf_l3 = ttk.Label(tool_frame, text=self.main_text[3], font=self.font_2)
tf_l3.grid(row=3, column=0, pady=3, columnspan=3, sticky="w")
# ---
in_frame = ttk.LabelFrame(self.tool, text="Input", padding=20, relief=tk.RIDGE)
in_frame.grid(row=1, column=0, sticky=tk.E + tk.W + tk.N + tk.S)
self.tf_textin = tk.Text(in_frame, height=6, width=50)
self.tf_textin.grid(row=1, column=0, columnspan=1, sticky="w")
self.tf_textin.insert(tk.END, self.main_text[4])
tf_open_text = ttk.Button(in_frame, text="Open File", command=self.tf_open_file)
tf_open_text.grid(row=1, column=1, sticky="news")
tf_clear_text = ttk.Button(in_frame, text="Clear Input", command=lambda: self.tf_textin.delete(1.0, tk.END))
tf_clear_text.grid(row=1, column=2, sticky="news")
tf_l4 = ttk.Label(in_frame, text=self.main_text[5], font=self.font_1)
tf_l4.grid(row=2, column=0, pady=5, columnspan=1, sticky="w")
self.xx_textin = tk.Text(in_frame, height=1, width=9)
self.xx_textin.grid(row=2, column=1, columnspan=1, sticky="w")
self.xx_textin.insert(tk.END, '')
tf_start_calc = ttk.Button(in_frame, text="CALCULATE!", command=self.check_input_and_pass)
tf_start_calc.grid(row=2, column=2, sticky="news")
def add_content_result(self):
'''Adds all content to the result tab, when calculate is called'''
if self.result_frame is not None:
            # dynamically resize window
self.result_frame.destroy()
self.result_frame = ttk.LabelFrame(self.results, text="Matrix Representation", padding=50, relief=tk.RIDGE)
self.result_frame.grid(row=0, column=0, sticky=tk.E + tk.W + tk.N + tk.S)
if self.matrix_result is None:
ttk.Label(self.result_frame, text="No result available.", font=self.font_2).grid(row=0, column=0, sticky="w")
return
for (row, col), value in np.ndenumerate(self.matrix_result):
if row == 0:
ttk.Label(self.result_frame, text=str(self.matrix_labels[col]), font=self.font_1).grid(row=row, column=col+1)
if col == 0:
ttk.Label(self.result_frame, width=2, text=str(self.matrix_labels[row]), font=self.font_1).grid(row=row+1, column=col)
_ = ttk.Entry(self.result_frame, width=8, font=self.font_2, justify='center')
_.insert(tk.END, str(value))
_.grid(row=row+1, column=col+1)
_.configure(state="readonly")
# ---
degree_frame = ttk.LabelFrame(self.results, text="BLOSUM Degree", padding=50, relief=tk.RIDGE)
degree_frame.grid(row=0, column=1, sticky=tk.E + tk.W + tk.N + tk.S)
ttk.Label(degree_frame, text=str(self.xx_textin.get("1.0", "end-1c").rstrip()), font=('consolas', 30, 'bold')).grid(row=0, column=0, sticky="news")
# ---
out_res_frame = ttk.LabelFrame(self.results, text="Output Settings", padding=50, relief=tk.RIDGE)
out_res_frame.grid(row=1, column=0, sticky=tk.E + tk.W + tk.N + tk.S)
out_res_printtoconsole = ttk.Button(out_res_frame, text="Print to console", command=self.print_res_console_save)
out_res_printtoconsole.grid(row=0, column=0, sticky="w")
out_res_printtoconsole = ttk.Button(out_res_frame, text="Save to file", command=lambda: self.print_res_console_save(save_file=True))
out_res_printtoconsole.grid(row=0, column=2, sticky="w")
def add_content_about(self, renderimg=False):
if renderimg and self.ab_frame is not None:
self.ab_frame.destroy()
self.render_about.destroy()
if not renderimg:
self.render_about = ttk.Button(self.about, text="RENDER IMAGES", command=lambda: self.add_content_about(True))
self.render_about.grid(row=0, column=0, sticky="e")
        # This functions as a README.md parser in combination with the class RichText
self.ab_frame = ttk.LabelFrame(self.about, text='About this program', relief=tk.RIDGE)
self.ab_frame.grid(row=(0 if renderimg else 1), column=0, sticky=tk.E + tk.W + tk.N + tk.S)
self.images = [] # need to store reference because of tkinter
with open('README.md', 'r') as fh:
about = fh.readlines()
ab_text = RichText(self.ab_frame, width=73, wrap=tk.WORD)
ab_text.grid(row=0, column=0)
for line in about:
line = line.replace('\\', '')
line = line.replace('**', '')
line = line.replace('```', '')
# title of the readme
if line.startswith('##'):
ab_text.insert("end", line[3:], "h1")
elif line.startswith('#'):
ab_text.insert("end", 'PYSUM\n', "h1")
            # extract the url in parentheses and insert image
elif line.startswith('!'):
if renderimg:
image_url = line.split('(')[1].split(')')[0]
image_url = image_url.replace('svg', 'gif').replace('dpi%7B300', 'dpi%7B200')
try:
image_byt = urlopen(image_url).read()
                        image_b64 = base64.encodebytes(image_byt)  # encodebytes replaces the removed encodestring alias
photo = tk.PhotoImage(data=image_b64)
ab_text.image_create(tk.END, image = photo)
ab_text.insert('end', '\n')
self.images.append(photo)
except:
self.warn(mode='badinternet', label_loc=self.about, row=2, col=0)
else:
ab_text.insert('end', '\n[NOT RENDERED YET, click on above button!]\n\n')
# draw bulletpoints
elif re.match(r'^[1-9]',line) or line.startswith('*'):
ab_text.insert_bullet('end', line.split(' ', 1)[1])
else:
ab_text.insert("end", line)
ab_text.configure(state='disabled')
return True
def print_res_console_save(self, save_file=False):
label_matrix = self.matrix_result.astype('str')
label2 = self.matrix_labels
label2 = np.asarray(['-'] + label2).reshape((len(label2)+1, 1))
label_matrix = np.vstack((self.matrix_labels, label_matrix))
label_matrix = np.hstack((label2, label_matrix))
header_str = f'BLOSUM{self.xx_textin.get("1.0", "end-1c").rstrip()} Matrix:'
result_str = '\n'.join([''.join(['{:8}'.format(item) for item in row]) for row in label_matrix])
if save_file:
file = filedialog.asksaveasfile(initialdir=str(Path.home()), mode='w', defaultextension=".txt")
if file is None:
return False
file.write(header_str + "\n" + result_str)
file.close()
else:
print(header_str + "\n" + result_str)
def tf_open_file(self):
tf_filename = filedialog.askopenfilename(initialdir=str(Path.home()), title="Select Text File", filetypes=
(("txt files", "*.txt"), ("all files", "*.*")))
if len(tf_filename) == 0:
return False
with open(tf_filename, 'r') as fh:
tf_text = fh.read()
self.tf_textin.delete("1.0", tk.END)
#self.tf_textin.insert(tk.END, tf_text)
        self.tf_textin.insert(tk.END, f'--File successfully loaded: {len(tf_text.splitlines())} sequences found.--\n'+tf_text.replace(' ', ''))
def check_input_and_pass(self):
dna_sequences = []
initial_len = None
xx_number = self.xx_textin.get("1.0", "end-1c").rstrip().replace(' ', '')
# first check xx_blosum value
try:
xx_number = int(xx_number)
if not xx_number in range(1, 101):
self.warn(mode='xnumrange', label_loc=self.tool)
return False
except:
self.warn(mode='xnuminvalid', label_loc=self.tool)
return False
seq_string = self.tf_textin.get("1.0", tk.END).rstrip().replace(' ', '')
if len(seq_string.splitlines()) < 2:
self.warn(mode='empty', label_loc=self.tool)
return False
for i, line in enumerate(seq_string.upper().splitlines()):
if line.startswith('-'):
continue
if initial_len is None:
initial_len = len(line)
if initial_len != len(line):
self.warn(mode='len', line=i, label_loc=self.tool)
return False
else:
dna_sequences.append(line)
try:
            matrix, labels = blosum(dna_sequences, xx_number)
            if (matrix is None) and (labels is None):
                return self.warn(mode='elimination', label_loc=self.tool)
            else:
                self.matrix_result, self.matrix_labels = matrix, labels
except:
self.warn(mode='something', line=i, label_loc=self.tool)
return False
self.add_content_result()
self.tabs.select([1])
def warn(self, mode:str, line:int=0, label_loc=None, row=2, col=0):
warn_msg = tk.StringVar()
if mode == 'len':
            warn_msg.set(f'[WARNING] Sequence nr.{line+1} differs in length!')
elif mode == 'empty':
warn_msg.set(f'[WARNING] At least 2 Sequences must be given!')
elif mode == 'xnumrange':
warn_msg.set(f'[WARNING] BLOSUM Degree must be between 1-100!')
elif mode == 'xnuminvalid':
warn_msg.set(f'[WARNING] BLOSUM Degree must be a number!')
        elif mode == 'elimination':
            warn_msg.set(f'[WARNING] Only one Sequence left after elimination!')
        elif mode == 'something':
            warn_msg.set(f'[WARNING] BLOSUM cannot be computed with these sequences!')
        elif mode == 'badinternet':
            warn_msg.set(f'[WARNING] Internet connection is required!')
else:
warn_msg.set(f'[WARNING] This will never happen.')
warning_label = tk.Label(label_loc, textvariable=warn_msg, font=self.font_1, fg="red", bg='#ecffde')
warning_label.grid(row=row, column=col, pady=5, sticky="w")
self.root.after(4000, lambda: warn_msg.set(""))
| 40.124601
| 155
| 0.597022
| 12,132
| 0.966
| 0
| 0
| 0
| 0
| 0
| 0
| 1,897
| 0.151047
|
5a3d662e5f34dbe67eeb69437b64718da7a2b8ce
| 4,050
|
py
|
Python
|
view/python_core/movies/colorizer/aux_funcs.py
|
galizia-lab/pyview
|
07bef637b0c60fae8830c1b3947e4a7bcd14bb2c
|
[
"BSD-3-Clause"
] | 2
|
2021-11-07T10:17:16.000Z
|
2021-11-07T10:17:19.000Z
|
view/python_core/movies/colorizer/aux_funcs.py
|
galizia-lab/pyview
|
07bef637b0c60fae8830c1b3947e4a7bcd14bb2c
|
[
"BSD-3-Clause"
] | 5
|
2021-11-03T12:43:03.000Z
|
2021-12-16T10:34:52.000Z
|
view/python_core/movies/colorizer/aux_funcs.py
|
galizia-lab/pyview
|
07bef637b0c60fae8830c1b3947e4a7bcd14bb2c
|
[
"BSD-3-Clause"
] | 1
|
2021-09-23T15:46:26.000Z
|
2021-09-23T15:46:26.000Z
|
import numpy as np
import re
def apply_colormaps_based_on_mask(mask, data_for_inside_mask, data_for_outside_mask,
colormap_inside_mask, colormap_outside_mask):
"""
Returns the combination of applying two colormaps to two datasets on two mutually exclusive sets of pixels
    as follows. Applies <colormap_inside_mask> to <data_for_inside_mask> for pixels where <mask> is True and applies
    <colormap_outside_mask> to <data_for_outside_mask> for pixels where <mask> is False.
    :param mask: boolean numpy.ndarray
    :param data_for_inside_mask: float numpy.ndarray, having the same shape as mask
    :param data_for_outside_mask: float numpy.ndarray, having the same shape as mask
    :param colormap_inside_mask: matplotlib colormap
    :param colormap_outside_mask: matplotlib colormap
    :return: float numpy.ndarray of RGBA values, of shape mask.shape + (4,)
"""
assert data_for_inside_mask.shape == data_for_outside_mask.shape, f"data_within_mask and data_outside_mask " \
f"must have " \
f"the same shape. Given: {data_for_inside_mask.shape} " \
f"and {data_for_outside_mask.shape}"
assert mask.shape == data_for_inside_mask.shape, f"The shape of given thresh_mask ({mask.shape}) " \
f"does not match shape of data given " \
f"({data_for_inside_mask.shape})"
data_colorized = np.empty(list(data_for_inside_mask.shape) + [4])
data_colorized[mask, :] = colormap_inside_mask(data_for_inside_mask[mask])
data_colorized[~mask, :] = colormap_outside_mask(data_for_outside_mask[~mask])
return data_colorized
#
# data_masked_inside = np.ma.MaskedArray(data_for_outside_mask, mask, fill_value=0)
# data_masked_outside = np.ma.MaskedArray(data_for_inside_mask, ~mask, fill_value=0)
#
# data_colorized_outside = colormap_outside_mask(data_masked_inside)
# data_colorized_inside = colormap_inside_mask(data_masked_outside)
#
# return data_colorized_inside + data_colorized_outside
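# Editor's usage sketch (not part of the original file), assuming matplotlib colormaps:
#   import matplotlib.cm as cm
#   data = np.random.rand(4, 4)
#   mask = data > 0.5
#   rgba = apply_colormaps_based_on_mask(mask, data, data, cm.jet, cm.gray)
#   # rgba.shape == (4, 4, 4): jet colours where mask is True, gray elsewhere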
def stack_duplicate_frames(frame, depth):
"""
    Returns a numpy.ndarray formed by stacking <frame> along the third axis
:param frame: numpy.ndarray, of 2 dimensions
:param depth: int
:return: numpy.ndarray of shape (frame.shape[0], frame.shape[1], depth)
"""
return np.stack([frame] * depth, axis=2)
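# Editor's example (not in the original): stack_duplicate_frames(np.zeros((5, 7)), 3).shape == (5, 7, 3)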
def resolve_thresholdOnValue(data, mv_thresholdOnValue):
"""
Interprets <mv_thresholdOnValue> in the context of <data>, calculates the threshold and returns it
:param data: numpy.ndarray
:param mv_thresholdOnValue: str
:return: float
"""
assert re.fullmatch(r"[ra][\-\.0-9]+", mv_thresholdOnValue) is not None, f"{mv_thresholdOnValue} is not a valid" \
f"threshold indicator. Valid formats are " \
f"'rxxx' for relative threshold and 'ayyy' " \
f" for absolute threshold where 'xxx' and" \
f"'yyy' represent numbers. " \
f"E.g.: a123.123, r0.4 and r-0.12533"
threshold_value = float(mv_thresholdOnValue[1:])
if mv_thresholdOnValue.startswith("r"):
thres_pc = np.clip(threshold_value, 0, 100)
data_min, data_max = data.min(), data.max()
threshold = data_min + 0.01 * thres_pc * (data_max - data_min)
elif mv_thresholdOnValue.startswith("a"):
threshold = threshold_value
else:
# Should not come here
raise ValueError()
return threshold
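# Editor's examples (not part of the original file): 'rXX' is a percentage of the data
# range, 'aYY' an absolute value, e.g.
#   resolve_thresholdOnValue(np.array([0.0, 10.0]), "r50")  -> 5.0
#   resolve_thresholdOnValue(np.array([0.0, 10.0]), "a3.5") -> 3.5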
| 46.551724
| 123
| 0.604691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,072
| 0.511605
|
5a3e53b2797ea32423806b35230113ec63c34d58
| 4,242
|
py
|
Python
|
bigml/tests/create_cluster_steps.py
|
javinp/python
|
bdec1e206ed028990503ed4bebcbc7023d3ff606
|
[
"Apache-2.0"
] | 1
|
2021-06-20T11:51:22.000Z
|
2021-06-20T11:51:22.000Z
|
bigml/tests/create_cluster_steps.py
|
javinp/python
|
bdec1e206ed028990503ed4bebcbc7023d3ff606
|
[
"Apache-2.0"
] | null | null | null |
bigml/tests/create_cluster_steps.py
|
javinp/python
|
bdec1e206ed028990503ed4bebcbc7023d3ff606
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import os
from datetime import datetime, timedelta
from world import world
from read_cluster_steps import i_get_the_cluster
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
#@step(r'I create a cluster$')
def i_create_a_cluster(step):
dataset = world.dataset.get('resource')
resource = world.api.create_cluster(
dataset, {'seed': 'BigML',
'cluster_seed': 'BigML',
'k': 8})
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.cluster = resource['object']
world.clusters.append(resource['resource'])
#@step(r'I create a cluster from a dataset list$')
def i_create_a_cluster_from_dataset_list(step):
resource = world.api.create_cluster(world.dataset_ids)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.cluster = resource['object']
world.clusters.append(resource['resource'])
#@step(r'I create a cluster with options "(.*)"$')
def i_create_a_cluster_with_options(step, options):
dataset = world.dataset.get('resource')
options = json.loads(options)
options.update({'seed': 'BigML',
'cluster_seed': 'BigML',
'k': 8})
resource = world.api.create_cluster(
dataset, options)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.cluster = resource['object']
world.clusters.append(resource['resource'])
#@step(r'I wait until the cluster status code is either (\d) or (-\d) less than (\d+)')
def wait_until_cluster_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
i_get_the_cluster(step, world.cluster['resource'])
status = get_status(world.cluster)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert datetime.utcnow() - start < timedelta(seconds=int(secs))
i_get_the_cluster(step, world.cluster['resource'])
status = get_status(world.cluster)
assert status['code'] == int(code1)
#@step(r'I wait until the cluster is ready less than (\d+)')
def the_cluster_is_finished_in_less_than(step, secs):
wait_until_cluster_status_code_is(step, FINISHED, FAULTY, secs)
#@step(r'I make the cluster shared')
def make_the_cluster_shared(step):
resource = world.api.update_cluster(world.cluster['resource'],
{'shared': True})
world.status = resource['code']
assert world.status == HTTP_ACCEPTED
world.location = resource['location']
world.cluster = resource['object']
#@step(r'I get the cluster sharing info')
def get_sharing_info(step):
world.shared_hash = world.cluster['shared_hash']
world.sharing_key = world.cluster['sharing_key']
#@step(r'I check the cluster status using the model\'s shared url')
def cluster_from_shared_url(step):
world.cluster = world.api.get_cluster("shared/cluster/%s" % world.shared_hash)
assert get_status(world.cluster)['code'] == FINISHED
#@step(r'I check the cluster status using the model\'s shared key')
def cluster_from_shared_key(step):
username = os.environ.get("BIGML_USERNAME")
world.cluster = world.api.get_cluster(world.cluster['resource'],
shared_username=username, shared_api_key=world.sharing_key)
assert get_status(world.cluster)['code'] == FINISHED
| 37.539823
| 87
| 0.698963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,453
| 0.342527
|
5a3f02391584923bfc3115e774e687008ccfb69b
| 3,649
|
py
|
Python
|
tests/ptp_clock_sim_time/test_ptp_clock_sim_time.py
|
psumesh/cocotbext-eth
|
39c585a8dd8dcdcfd56822a4f879ef059653757b
|
[
"MIT"
] | 15
|
2020-11-26T14:40:54.000Z
|
2022-03-25T06:42:30.000Z
|
tests/ptp_clock_sim_time/test_ptp_clock_sim_time.py
|
psumesh/cocotbext-eth
|
39c585a8dd8dcdcfd56822a4f879ef059653757b
|
[
"MIT"
] | 1
|
2021-03-24T06:28:20.000Z
|
2021-03-25T06:10:02.000Z
|
tests/ptp_clock_sim_time/test_ptp_clock_sim_time.py
|
psumesh/cocotbext-eth
|
39c585a8dd8dcdcfd56822a4f879ef059653757b
|
[
"MIT"
] | 7
|
2020-12-06T09:59:39.000Z
|
2021-08-25T04:15:37.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.utils import get_sim_time
from cocotbext.eth import PtpClockSimTime
class TB:
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.fork(Clock(dut.clk, 6.4, units="ns").start())
self.ptp_clock = PtpClockSimTime(
ts_96=dut.ts_96,
ts_64=dut.ts_64,
pps=dut.pps,
clock=dut.clk
)
@cocotb.test()
async def run_test(dut):
tb = TB(dut)
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
start_time = get_sim_time('sec')
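    # Editor's note (assumed layout, not stated in the original test): ts_96 is decoded
    # below as upper 48 bits = seconds and lower 48 bits = nanoseconds with 16 fractional
    # bits; ts_64 is read as a nanosecond count with 16 fractional bits.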
start_ts_96 = (dut.ts_96.value.integer >> 48) + ((dut.ts_96.value.integer & 0xffffffffffff)/2**16*1e-9)
start_ts_64 = dut.ts_64.value.integer/2**16*1e-9
for k in range(10000):
await RisingEdge(dut.clk)
stop_time = get_sim_time('sec')
stop_ts_96 = (dut.ts_96.value.integer >> 48) + ((dut.ts_96.value.integer & 0xffffffffffff)/2**16*1e-9)
stop_ts_64 = dut.ts_64.value.integer/2**16*1e-9
time_delta = stop_time-start_time
ts_96_delta = stop_ts_96-start_ts_96
ts_64_delta = stop_ts_64-start_ts_64
ts_96_diff = time_delta - ts_96_delta
ts_64_diff = time_delta - ts_64_delta
tb.log.info("sim time delta : %g s", time_delta)
tb.log.info("96 bit ts delta : %g s", ts_96_delta)
tb.log.info("64 bit ts delta : %g s", ts_64_delta)
tb.log.info("96 bit ts diff : %g s", ts_96_diff)
tb.log.info("64 bit ts diff : %g s", ts_64_diff)
assert abs(ts_96_diff) < 1e-12
assert abs(ts_64_diff) < 1e-12
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
# cocotb-test
tests_dir = os.path.dirname(__file__)
def test_ptp_clock(request):
dut = "test_ptp_clock_sim_time"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(tests_dir, f"{dut}.v"),
]
parameters = {}
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
| 28.960317
| 107
| 0.693615
| 377
| 0.103316
| 0
| 0
| 1,216
| 0.333242
| 1,201
| 0.329131
| 1,314
| 0.360099
|
5a3f1fd52edcbc6a770d3bea9dab8192d49a92e5
| 1,838
|
py
|
Python
|
dex/section/section.py
|
callmejacob/dexfactory
|
2de996927ee9f036b2c7fc6cb04f43ac790f35af
|
[
"BSD-2-Clause"
] | 7
|
2018-06-14T10:40:47.000Z
|
2021-05-18T08:55:34.000Z
|
dex/section/section.py
|
callmejacob/dexfactory
|
2de996927ee9f036b2c7fc6cb04f43ac790f35af
|
[
"BSD-2-Clause"
] | 1
|
2020-05-28T08:59:50.000Z
|
2020-05-28T08:59:50.000Z
|
dex/section/section.py
|
callmejacob/dexfactory
|
2de996927ee9f036b2c7fc6cb04f43ac790f35af
|
[
"BSD-2-Clause"
] | 3
|
2018-02-28T02:08:06.000Z
|
2018-09-12T03:09:18.000Z
|
# -- coding: utf-8 --
from section_base import *
from section_map_item import *
from section_header import *
from section_string_id import *
from section_type_id import *
from section_proto_id import *
from section_field_id import *
from section_method_id import *
from section_class_def import *
from section_type_list import *
from section_class_data import *
from section_annotation_set_ref_list import *
from section_annotation_set_item import *
from section_annotation_item import *
from section_string_list import *
from section_encoded_array import *
from section_annotations_directory import *
from section_code import *
from section_debug_info import *
'''
Mapping table of sections: (type constant, Section class)
'''
section_class_map = {
TYPE_HEADER_ITEM : HeaderSection,
TYPE_STRING_ID_ITEM : StringIdListSection,
TYPE_TYPE_ID_ITEM : TypeIdListSection,
TYPE_PROTO_ID_ITEM : ProtoIdListSection,
TYPE_FIELD_ID_ITEM : FieldIdListSection,
TYPE_METHOD_ID_ITEM : MethodIdListSection,
TYPE_CLASS_DEF_ITEM : ClassDefListSection,
TYPE_MAP_LIST : MapItemListSection,
TYPE_TYPE_LIST : TypeListSection,
TYPE_ANNOTATION_SET_REF_LIST : AnnotationSetRefListSection,
TYPE_ANNOTATION_SET_ITEM : AnnotationSetItemSection,
TYPE_CLASS_DATA_ITEM : ClassDataListSection,
TYPE_CODE_ITEM : CodeSection,
TYPE_STRING_DATA_ITEM : StringListSection,
TYPE_DEBUG_INFO_ITEM : DebugInfoSection,
TYPE_ANNOTATION_ITEM : AnnotationItemSection,
TYPE_ENCODED_ARRAY_ITEM : EncodedArraySection,
TYPE_ANNOTATIONS_DIRECTORY_ITEM : AnnotationsDirectorySection,
}
| 39.106383
| 69
| 0.699674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 74
| 0.039871
|
5a408ec9d28877bdb362b94265d0d74be34141c1
| 91
|
py
|
Python
|
Code coach problems/Easy/Python/Skee-Ball.py
|
Djivs/sololearn-code-solutions
|
7727dd97f79863a88841548770481f6f2abdc7bf
|
[
"MIT"
] | 1
|
2020-07-27T07:32:57.000Z
|
2020-07-27T07:32:57.000Z
|
Code coach problems/Easy/Python/Skee-Ball.py
|
Djivs/sololearn-code-solutions
|
7727dd97f79863a88841548770481f6f2abdc7bf
|
[
"MIT"
] | null | null | null |
Code coach problems/Easy/Python/Skee-Ball.py
|
Djivs/sololearn-code-solutions
|
7727dd97f79863a88841548770481f6f2abdc7bf
|
[
"MIT"
] | 1
|
2020-11-07T12:45:21.000Z
|
2020-11-07T12:45:21.000Z
|
a = int(input())
b = int(input())
if a >= b * 12:
print("Buy it!")
else:
print("Try again")
| 13
| 19
| 0.56044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.21978
|
5a41217fc99d7ef188d90f55041a7803b426c258
| 22
|
py
|
Python
|
gsb/rest/__init__.py
|
pfrancois/grisbi_django
|
4e27149522847c78ab9c0f0a06f0b1d371f7c205
|
[
"BSD-3-Clause"
] | null | null | null |
gsb/rest/__init__.py
|
pfrancois/grisbi_django
|
4e27149522847c78ab9c0f0a06f0b1d371f7c205
|
[
"BSD-3-Clause"
] | null | null | null |
gsb/rest/__init__.py
|
pfrancois/grisbi_django
|
4e27149522847c78ab9c0f0a06f0b1d371f7c205
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
# init
| 7.333333
| 14
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.909091
|
5a4164758499f35ed2ad174d38480235b72e03a1
| 4,416
|
py
|
Python
|
chris_turtlebot_dashboard/src/chris_turtlebot_dashboard/dashboard.py
|
xabigarde/chris_ros_turtlebot
|
ca26db3eafcb8aba7a322cca8fd44443f015e125
|
[
"BSD-3-Clause"
] | null | null | null |
chris_turtlebot_dashboard/src/chris_turtlebot_dashboard/dashboard.py
|
xabigarde/chris_ros_turtlebot
|
ca26db3eafcb8aba7a322cca8fd44443f015e125
|
[
"BSD-3-Clause"
] | null | null | null |
chris_turtlebot_dashboard/src/chris_turtlebot_dashboard/dashboard.py
|
xabigarde/chris_ros_turtlebot
|
ca26db3eafcb8aba7a322cca8fd44443f015e125
|
[
"BSD-3-Clause"
] | 1
|
2021-07-23T14:09:18.000Z
|
2021-07-23T14:09:18.000Z
|
import roslib;roslib.load_manifest('kobuki_dashboard')
import rospy
import diagnostic_msgs
from rqt_robot_dashboard.dashboard import Dashboard
from rqt_robot_dashboard.widgets import ConsoleDashWidget, MenuDashWidget, IconToolButton
from python_qt_binding.QtWidgets import QMessageBox, QAction
from python_qt_binding.QtCore import QSize,QTimer
from .battery_widget import BatteryWidget
from .led_widget import LedWidget
from .motor_widget import MotorWidget
from .monitor_dash_widget import MonitorDashWidget
class KobukiDashboard(Dashboard):
def setup(self, context):
self.message = None
self._dashboard_message = None
self._last_dashboard_message_time = 0.0
self._last_laptop_battery_update = 0.0
self._last_kobuki_battery_update = 0.0
self._stale_timer = QTimer()
self._stale_timer.timeout.connect(self.update_stale)
self._stale_timer.start(2500) # Set timeout for 2.5 seconds
self._motor_widget = MotorWidget('mobile_base/commands/motor_power')
self._laptop_bat = BatteryWidget("Laptop")
self._kobuki_bat = BatteryWidget("Kobuki")
self._dashboard_agg_sub = rospy.Subscriber('diagnostics_agg', diagnostic_msgs.msg.DiagnosticArray, self.dashboard_callback)
def get_widgets(self):
leds = [LedWidget('mobile_base/commands/led1'), LedWidget('mobile_base/commands/led2')]
return [[MonitorDashWidget(self.context), ConsoleDashWidget(self.context), self._motor_widget], leds, [self._laptop_bat, self._kobuki_bat]]
def update_stale(self):
current_time = rospy.get_time()
if ((current_time - self._last_kobuki_battery_update) > 15.0):
rospy.logwarn("Kobuki battery update is stale! last update=%f",self._last_dashboard_message_time);
self._kobuki_bat.set_stale()
self._last_kobuki_battery_update = self._last_dashboard_message_time # no need to keep calling
if ((current_time - self._last_laptop_battery_update) > 15.0):
rospy.logwarn("Laptop battery update is stale! last update=%f",self._last_dashboard_message_time);
self._laptop_bat.set_stale()
self._last_laptop_battery_update = self._last_dashboard_message_time # no need to keep calling
def dashboard_callback(self, msg):
self._dashboard_message = msg
self._last_dashboard_message_time = rospy.get_time()
laptop_battery_status = {}
for status in msg.status:
if status.name == "/Kobuki/Motor State":
motor_state = int(status.values[0].value)
self._motor_widget.update_state(motor_state)
elif status.name == "/Power System/Battery":
for value in status.values:
if value.key == 'Percent':
self._kobuki_bat.update_perc(float(value.value))
# This should be self._last_dashboard_message_time?
# Is it even used graphically by the widget
self._kobuki_bat.update_time(float(value.value))
self._kobuki_bat.unset_stale()
self._last_kobuki_battery_update = self._last_dashboard_message_time
elif value.key == "Charging State":
if value.value == "Trickle Charging" or value.value == "Full Charging":
self._kobuki_bat.set_charging(True)
else:
self._kobuki_bat.set_charging(False)
elif status.name == "/Power System/Laptop Battery":
for value in status.values:
laptop_battery_status[value.key]=value.value
if (laptop_battery_status):
percentage = float(laptop_battery_status['Charge (Ah)'])/float(laptop_battery_status['Capacity (Ah)'])
self._laptop_bat.update_perc(percentage*100)
self._laptop_bat.update_time(percentage*100)
charging_state = True if float(laptop_battery_status['Current (A)']) > 0.0 else False
self._laptop_bat.set_charging(charging_state)
self._laptop_bat.unset_stale()
self._last_laptop_battery_update = self._last_dashboard_message_time
def shutdown_dashboard(self):
self._dashboard_agg_sub.unregister()
self._stale_timer.stop()
del self._stale_timer
| 47.483871
| 147
| 0.673234
| 3,902
| 0.883605
| 0
| 0
| 0
| 0
| 0
| 0
| 581
| 0.131567
|
5a421a3520f2cd9636eea2d36b206d6735096aca
| 3,339
|
py
|
Python
|
msgpack_lz4block/__init__.py
|
AlsidOfficial/python-msgpack-lz4block
|
4cfa6fc69799530c72b73c660d0beabb4ebd5a81
|
[
"MIT"
] | 1
|
2021-07-01T12:41:41.000Z
|
2021-07-01T12:41:41.000Z
|
msgpack_lz4block/__init__.py
|
AlsidOfficial/python-msgpack-lz4block
|
4cfa6fc69799530c72b73c660d0beabb4ebd5a81
|
[
"MIT"
] | null | null | null |
msgpack_lz4block/__init__.py
|
AlsidOfficial/python-msgpack-lz4block
|
4cfa6fc69799530c72b73c660d0beabb4ebd5a81
|
[
"MIT"
] | null | null | null |
import msgpack
import lz4.block
from msgpack.ext import Timestamp, ExtType
import re
def __map_obj(obj, key_map):
if not isinstance(key_map, list):
raise Exception('The key_map should be a list')
elif len(obj) != len(key_map):
raise Exception(
'The key_map list has length {} whereas the object has length {}'.format(len(key_map), len(obj)))
else:
dict_obj = {}
for index in range(0, len(key_map)):
key = key_map[index]
if isinstance(key, str):
dict_obj[key] = obj[index]
else:
dict_obj[key[0]] = __map_obj(obj[index], key[1])
return dict_obj
PATTERN_1 = re.compile(
rb'\xd9jSystem.Object\[\], System.Private.CoreLib, Version=[0-9][0-9.]*, Culture=neutral, PublicKeyToken=7cec85d7bea7798e.*?\xd9.(?P<payload>.*)')
def ext_hook(code, data):
if code == 100:
for k in [
b'\xd9jSystem.Object[], System.Private.CoreLib, Version=5.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e\x91\xa6',
b'\xd9jSystem.Object[], System.Private.CoreLib, Version=5.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e\x91\xa4']:
if data.startswith(k):
decoded = data[len(k):]
return [decoded.decode()]
for k in [
b'\xd9jSystem.Object[], System.Private.CoreLib, Version=5.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e\x92\xa3',
b'\xd9jSystem.Object[], System.Private.CoreLib, Version=5.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e\x94\xa3',
b'\xd9jSystem.Object[], System.Private.CoreLib, Version=5.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e\x93\xa3']:
if data.startswith(k):
decoded = [d.decode() for d in re.split(b'\xa5|\xa6|\xb4|\xa4|\xa9', data[len(k):]) if d != b'']
return decoded
match = PATTERN_1.search(data)
if match is not None:
payload = match.group('payload')
decoded = [d.decode() for d in re.split(b'\xd9.', payload) if d != b'']
return decoded
return ExtType(code, data)
def jsonify(data):
if isinstance(data, Timestamp):
return data.to_datetime().strftime('%Y-%m-%dT%H:%M:%fZ')
elif isinstance(data, list):
for i in range(0, len(data)):
data[i] = jsonify(data[i])
return data
def deserialize(bytes_data, key_map=None, buffer_size=100 * 1024 * 1024):
"""
Deserialize the bytes array data outputted by the MessagePack-CSharp lib using using lz4block compression
:param bytes_data: Serialized bytes array data that has been generated by the MessagePack-CSharp lib using using
lz4block compression.
:param key_map: A key list to produce a key value dict.
:param buffer_size: Buffer size to be used when decompressing.
:return: deserialized data
"""
deserialized = msgpack.unpackb(bytes_data)
decompressed = b''
for data in deserialized:
if isinstance(data, bytes):
decompressed += lz4.block.decompress(data, uncompressed_size=buffer_size)
obj = msgpack.unpackb(decompressed, ext_hook=ext_hook, raw=False)
obj = jsonify(obj)
if key_map is not None:
return __map_obj(obj, key_map)
return obj
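# Minimal round-trip sketch: builds a payload in the shape the library expects
# (an outer msgpack list of lz4block-compressed msgpack chunks) and maps the
# positional values onto named keys. Assumes python-lz4 treats uncompressed_size
# as an upper bound, which the 100 MiB buffer_size default above relies on.
if __name__ == '__main__':
    inner = msgpack.packb([1, "two", 3.0])
    payload = msgpack.packb([lz4.block.compress(inner, store_size=False)])
    print(deserialize(payload, key_map=["a", "b", "c"]))  # {'a': 1, 'b': 'two', 'c': 3.0}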
| 42.265823
| 150
| 0.634921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,362
| 0.407907
|
5a42367cb5c3c6ae30a847d5d4575149e7bc2d38
| 2,169
|
py
|
Python
|
scilpy/version.py
|
fullbat/scilpy
|
8f5b95a0b298ac95268c94d04a162b14fe2773ad
|
[
"MIT"
] | null | null | null |
scilpy/version.py
|
fullbat/scilpy
|
8f5b95a0b298ac95268c94d04a162b14fe2773ad
|
[
"MIT"
] | null | null | null |
scilpy/version.py
|
fullbat/scilpy
|
8f5b95a0b298ac95268c94d04a162b14fe2773ad
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import glob
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 1
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
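# With the values above this evaluates to "0.1.dev"; clearing _version_extra
# (and setting _version_micro) yields a plain release string such as "0.1.1".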
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "Scilpy: diffusion MRI tools and utilities"
# Long description will go up on the pypi page
long_description = """
Scilpy
========
Scilpy is a small library containing tools and utilities to work quickly
with diffusion MRI data. Most of the tools are based on, or are wrappers
of, the Dipy_ library.
.. _Dipy: http://dipy.org
License
=======
``scilpy`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2012--, Sherbrooke Connectivity Imaging Lab [SCIL],
Université de Sherbrooke.
"""
NAME = "scilpy"
MAINTAINER = "Jean-Christophe Houde"
MAINTAINER_EMAIL = "jean.christophe.houde@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "https://github.com/scilus/scilpy"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "The SCIL developers"
AUTHOR_EMAIL = ""
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
REQUIRES = ["numpy"]
SCRIPTS = glob.glob("scripts/*.py")
| 30.985714
| 77
| 0.720609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,370
| 0.631336
|
5a448e7214b3790abd510a4b2f97d52ddcfd5d87
| 3,765
|
py
|
Python
|
fireflies.py
|
dvsd/Firefly-Synchronization
|
89aec8513a386cf274f333ba8b4fa64555766619
|
[
"MIT"
] | 1
|
2021-04-22T14:04:19.000Z
|
2021-04-22T14:04:19.000Z
|
fireflies.py
|
dvsd/Firefly-Synchronization
|
89aec8513a386cf274f333ba8b4fa64555766619
|
[
"MIT"
] | null | null | null |
fireflies.py
|
dvsd/Firefly-Synchronization
|
89aec8513a386cf274f333ba8b4fa64555766619
|
[
"MIT"
] | null | null | null |
from graphics import *
import math
import random
windowWidth = 400
windowHeight = 400
fireflyRadius = 3
win = GraphWin("Fireflies",windowWidth,windowHeight,autoflush=False)
win.setBackground('black')
closeWindow = False
fireflies = []
flashedFliesOpenSet = [] # flies that need to reset urge of neighbors
flashedFliesClosedSet = [] # flies that have already flashed and reset its urge
colorTraits = [
[255,0,0], #red
[0,255,0], # green
[0,0,255], # blue
[255,255,0], # yellow
[255,0,255], # purple
[0,255,255], # cyan
[232, 30, 99], # pink
[255, 152, 0], # orange
[96, 125, 139], # blue gray
[255,87,51] # blood orange
]
def distbetween(start,end):
return math.sqrt((start.x-end.x)**2+(start.y-end.y)**2)
class Firefly():
def __init__(self,i,j):
self.x = i
self.y = j
self.radius = fireflyRadius
self.currentUrge = random.randint(0,100)
self.threshold = 100
self.circle = Circle(Point(self.x,self.y),self.radius)
self.flashed = False
self.colorTrait = colorTraits[random.randint(0,9)]
self.hue = [0,0,0]
def draw(self):
self.circle.setFill('black')
self.circle.setOutline('black')
self.circle.draw(win)
def compute_hue(self,colorTraits):
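        # Brightness stays black until the urge is within 30 units of the threshold,
        # ramps up to the firefly's colour trait over the next 15 units, then fades
        # back to black over the final 15, producing a pulse around each flash.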
if self.currentUrge < (self.threshold-30):
self.hue = [0,0,0]
elif self.currentUrge < (self.threshold-15):
self.hue[0] = min(colorTraits[0],0+colorTraits[0]*(self.currentUrge - (self.threshold-30))/(30/2))
self.hue[1] = min(colorTraits[1],0+colorTraits[1]*(self.currentUrge - (self.threshold-30))/(30/2))
self.hue[2] = min(colorTraits[2],0+colorTraits[2]*(self.currentUrge - (self.threshold-30))/(30/2))
else:
self.hue[0] = max(0,colorTraits[0]-colorTraits[0]*(self.currentUrge - (self.threshold-15))/(30/2))
self.hue[1] = max(0,colorTraits[1]-colorTraits[1]*(self.currentUrge - (self.threshold-15))/(30/2))
self.hue[2] = max(0,colorTraits[2]-colorTraits[2]*(self.currentUrge - (self.threshold-15))/(30/2))
# As time progresses, increase urge every second
for i in range(random.randint(40,85)): # randomly generate Firefly instances at random coordinates within frame
fireflies.append(Firefly(random.randint(fireflyRadius,windowWidth-fireflyRadius),random.randint(fireflyRadius,windowHeight-fireflyRadius)))
for fly in fireflies:
fly.draw()
previousTime = time.time()
while not closeWindow:
currentTime = time.time() # get currentTime in seconds
    if (currentTime-previousTime) > .1: # if 0.1 seconds have elapsed
previousTime = currentTime # previous time becomes the old current time
for fly in fireflies: # for all fireflies
if fly.flashed:
fly.flashed = False
fly.compute_hue(fly.colorTrait)
fly.circle.setFill(color_rgb(fly.hue[0],fly.hue[1],fly.hue[2]))
fly.circle.setOutline(color_rgb(fly.hue[0],fly.hue[1],fly.hue[2]))
            fly.currentUrge += 1 # increase urge by one every 0.1 seconds
win.flush()
if fly.currentUrge >= fly.threshold: # if current urge exceeds the fireflies' threshold
fly.flashed = True
flashedFliesOpenSet.append(fly)
fly.currentUrge = 0 # reset phase/currentUrge
for flashedFly in flashedFliesOpenSet:
# TODO: alter this loop to eliminate every visited fly to reduce iterations.
        # Would need to reset the list of flies on the outside of the loop to ensure every fly is visited.
for fly in fireflies:
if fly not in flashedFliesOpenSet and fly not in flashedFliesClosedSet:
if distbetween(flashedFly,fly) <= 50 and (flashedFly!= fly) and fly.currentUrge < fly.threshold and fly.currentUrge != 0:
fly.currentUrge = 0
fly.colorTrait = flashedFly.colorTrait
flashedFliesOpenSet.append(fly)
flashedFliesOpenSet.remove(flashedFly)
flashedFliesClosedSet.append(flashedFly)
if win.checkKey():
closeWindow = True
win.getMouse()
| 34.227273
| 140
| 0.712882
| 1,195
| 0.317397
| 0
| 0
| 0
| 0
| 0
| 0
| 739
| 0.196282
|
5a44e929a11797422604acb7129e5a00747b908f
| 2,350
|
py
|
Python
|
gb/tests/test_gibbs_sampler.py
|
myozka/granger-busca
|
e6922f85aa58ab0809951ec4d60b5df43d6c74e8
|
[
"BSD-3-Clause"
] | 5
|
2018-09-06T13:37:04.000Z
|
2019-12-16T13:53:26.000Z
|
gb/tests/test_gibbs_sampler.py
|
myozka/granger-busca
|
e6922f85aa58ab0809951ec4d60b5df43d6c74e8
|
[
"BSD-3-Clause"
] | 1
|
2021-06-09T06:08:25.000Z
|
2021-07-13T18:10:09.000Z
|
gb/tests/test_gibbs_sampler.py
|
myozka/granger-busca
|
e6922f85aa58ab0809951ec4d60b5df43d6c74e8
|
[
"BSD-3-Clause"
] | 4
|
2020-03-30T14:54:27.000Z
|
2021-09-23T18:48:14.000Z
|
# -*- coding: utf8
from gb.randomkit.random import RNG
from gb.samplers import BaseSampler
from gb.samplers import CollapsedGibbsSampler
from gb.stamps import Timestamps
from gb.sloppy import SloppyCounter
from numpy.testing import assert_equal
import numpy as np
def test_get_probability():
d = {}
d[0] = [1, 2, 3, 4, 5, 6, 7]
d[1] = [11, 12, 13]
stamps = Timestamps(d)
causes = stamps._get_causes(0)
causes[0] = 0
causes[1] = 0
causes[2] = 0
causes[3] = 1
causes[4] = 1
causes[5] = 1
causes[6] = 1
causes = stamps._get_causes(1)
causes[0] = 0
causes[1] = 0
causes[2] = 1
nb = np.array([5, 5], dtype='uint64')
init_state = np.array([[5, 5]], dtype='uint64')
id_ = 0
sloppy = SloppyCounter(1, 9999, nb, init_state)
sampler = CollapsedGibbsSampler(BaseSampler(stamps, sloppy, id_, 0.1,
RNG()), 2)
sampler._set_current_process(0)
assert_equal(0.5961538461538461, sampler._get_probability(0))
assert_equal(0.7884615384615383, sampler._get_probability(1))
sampler._set_current_process(1)
assert_equal(0.40384615384615385, sampler._get_probability(0))
assert_equal(0.21153846153846154, sampler._get_probability(1))
def test_inc_dec():
d = {}
d[0] = [1, 2, 3, 4, 5, 6, 7]
d[1] = [11, 12, 13]
stamps = Timestamps(d)
causes = stamps._get_causes(0)
causes[0] = 0
causes[1] = 0
causes[2] = 0
causes[3] = 1
causes[4] = 1
causes[5] = 1
causes[6] = 1
causes = stamps._get_causes(1)
causes[0] = 0
causes[1] = 0
causes[2] = 1
nb = np.array([5, 5], dtype='uint64')
init_state = np.array([[5, 5]], dtype='uint64')
id_ = 0
sloppy = SloppyCounter(1, 9999, nb, init_state)
sampler = CollapsedGibbsSampler(BaseSampler(stamps, sloppy, id_, 0.1,
RNG()), 2)
sampler._set_current_process(0)
assert_equal(0.5961538461538461, sampler._get_probability(0))
assert_equal(0.7884615384615383, sampler._get_probability(1))
sampler._inc_one(0)
assert_equal(0.6612903225806451, sampler._get_probability(0))
assert_equal(0.7884615384615383, sampler._get_probability(1))
sampler._dec_one(0)
assert_equal(0.5961538461538461, sampler._get_probability(0))
| 28.313253
| 73
| 0.631064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.021277
|
5a44f541b7846b979545c92ddcc2e62d26b600d3
| 9,163
|
py
|
Python
|
python/tHome/sma/Link.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 18
|
2016-04-17T19:39:28.000Z
|
2020-11-19T06:55:20.000Z
|
python/tHome/sma/Link.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 6
|
2016-10-31T13:53:45.000Z
|
2019-03-20T20:47:03.000Z
|
python/tHome/sma/Link.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 12
|
2016-10-31T12:29:08.000Z
|
2021-12-28T12:18:28.000Z
|
#===========================================================================
#
# Primary SMA API.
#
#===========================================================================
import socket
from .. import util
from . import Auth
from . import Reply
from . import Request
#==============================================================================
class Link:
"""SMA WebConnection link
Units: Watt, Watt-hours, C, seconds
l = Link( '192.168.1.14' )
print l.acTotalEnergy()
See also: report for common requests.
"""
def __init__( self, ip, port=9522, group="USER", password="0000",
connect=True, timeout=120, decode=True, raw=False ):
if group != "USER" and group != "INSTALLER":
raise util.Error( "Invalid group '%s'. Valid groups are 'USER' "
"'INSTALLER'." % group )
self.ip = ip
self.port = port
self.group = group
self.password = password
self.timeout = timeout
self.decode = decode
self.raw = raw
self.socket = None
if connect:
self.open()
#---------------------------------------------------------------------------
def info( self ):
p = Request.Data( command=0x58000200, first=0x00821E00, last=0x008220FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.StringItem( "name", 40, timeVar="timeWake" ),
Reply.AttrItem( "type", 40 ),
Reply.AttrItem( "model", 40 ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def status( self ):
p = Request.Data( command=0x51800200, first=0x00214800, last=0x002148FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.AttrItem( "status", 32, timeVar="time" ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def gridRelayStatus( self ):
p = Request.Data( command=0x51800200, first=0x00416400, last=0x004164FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.AttrItem( "gridStatus", 32, timeVar="timeOff" ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def temperature( self ):
"""Return the inverter temp in deg C (or 0 if unavailable)."""
p = Request.Data( command=0x52000200, first=0x00237700, last=0x002377FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.I32Item( "temperature", 16, mult=0.01 ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def version( self ):
"""Return the inverter software version string."""
p = Request.Data( command=0x58000200, first=0x00823400, last=0x008234FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.VersionItem( "version" ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def acTotalEnergy( self ):
p = Request.Data( command=0x54000200, first=0x00260100, last=0x002622FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.I64Item( "totalEnergy", 16, mult=1.0, timeVar="timeLast" ),
Reply.I64Item( "dailyEnergy", 16, mult=1.0 ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def acTotalPower( self ):
p = Request.Data( command=0x51000200, first=0x00263F00, last=0x00263FFF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.I32Item( "acPower", 28, mult=1.0, timeVar="timeOff" ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def acPower( self ):
p = Request.Data( command=0x51000200, first=0x00464000, last=0x004642FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.I32Item( "acPower1", 28, mult=1.0, timeVar="timeOff" ),
Reply.I32Item( "acPower2", 28, mult=1.0 ),
Reply.I32Item( "acPower3", 28, mult=1.0 ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def acMaxPower( self ):
p = Request.Data( command=0x51000200, first=0x00411E00, last=0x004120FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.U32Item( "acMaxPower1", 28, mult=1.0, timeVar="time" ),
Reply.U32Item( "acMaxPower2", 28, mult=1.0 ),
Reply.U32Item( "acMaxPower3", 28, mult=1.0 ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def operationTime( self ):
p = Request.Data( command=0x54000200, first=0x00462E00, last=0x00462FFF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.I64Item( "operationTime", 16, mult=1.0, timeVar="timeLast" ),
Reply.I64Item( "feedTime", 16, mult=1.0 ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def dcPower( self ):
p = Request.Data( command=0x53800200, first=0x00251E00, last=0x00251EFF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.I32Item( "dcPower1", 28, mult=1.0, timeVar="timeOff" ),
Reply.I32Item( "dcPower2", 28, mult=1.0 ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def dcVoltage( self ):
p = Request.Data( command=0x53800200, first=0x00451F00, last=0x004521FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.I32Item( "dcVoltage1", 28, mult=0.01, timeVar="timeOff" ),
Reply.I32Item( "dcVoltage2", 28, mult=0.01 ),
Reply.I32Item( "dcCurrent1", 28, mult=0.001 ),
Reply.I32Item( "dcCurrent2", 28, mult=0.001 ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def acVoltage( self ):
p = Request.Data( command=0x51000200, first=0x00464800, last=0x004652FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.U32Item( "acVoltage1", 28, mult=0.01, timeVar="timeOff" ),
Reply.U32Item( "acVoltage2", 28, mult=0.01 ),
Reply.U32Item( "acVoltage3", 28, mult=0.01 ),
Reply.U32Item( "acGridVoltage", 28, mult=0.01 ),
Reply.U32Item( "unknown1", 28, mult=0.01 ),
Reply.U32Item( "unknown2", 28, mult=0.01 ),
Reply.U32Item( "acCurrent1", 28, mult=0.001 ),
Reply.U32Item( "acCurrent2", 28, mult=0.001 ),
Reply.U32Item( "acCurrent3", 28, mult=0.001 ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def gridFrequency( self ):
p = Request.Data( command=0x51000200, first=0x00465700, last=0x004657FF )
bytes = p.send( self.socket )
decoder = Reply.Value( [
Reply.U32Item( "frequency", 28, mult=0.01, timeVar="timeOff" ),
] )
return self._return( bytes, decoder )
#---------------------------------------------------------------------------
def __del__( self ):
self.close()
#---------------------------------------------------------------------------
def open( self ):
if self.socket:
return
self.socket = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
self.socket.settimeout( self.timeout )
try:
self.socket.connect( ( self.ip, self.port ) )
p = Auth.LogOn( self.group, self.password )
p.send( self.socket )
except:
if self.socket:
self.socket.close()
self.socket = None
raise
#---------------------------------------------------------------------------
def close( self ):
if not self.socket:
return
p = Auth.LogOff()
try:
p.send( self.socket )
finally:
self.socket.close()
self.socket = None
#---------------------------------------------------------------------------
def __enter__( self ):
return self
#---------------------------------------------------------------------------
def __exit__( self, type, value, traceback ):
self.close()
#---------------------------------------------------------------------------
def _return( self, bytes, decoder ):
if self.decode:
return decoder.decode( bytes, self.raw )
else:
return ( bytes, decoder )
#==============================================================================
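# Minimal usage sketch (the inverter address below is illustrative). Because
# Link defines __enter__/__exit__, the log-off packet is sent even if a request
# raises.
if __name__ == "__main__":
    with Link( "192.168.1.14" ) as inverter:
        print( inverter.acTotalEnergy() )
        print( inverter.temperature() )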
| 38.020747
| 79
| 0.460984
| 8,724
| 0.95209
| 0
| 0
| 0
| 0
| 0
| 0
| 2,705
| 0.295209
|
5a453d50864469ccb2ceb29c181778bf81f77b45
| 1,988
|
py
|
Python
|
src/tinerator/visualize/qt_app.py
|
lanl/tinerator
|
b34112f01d64801b6539650af2e40edff33f9f9b
|
[
"BSD-3-Clause"
] | 2
|
2021-09-13T17:10:25.000Z
|
2021-09-17T18:36:21.000Z
|
src/tinerator/visualize/qt_app.py
|
lanl/tinerator
|
b34112f01d64801b6539650af2e40edff33f9f9b
|
[
"BSD-3-Clause"
] | 15
|
2021-08-16T18:23:58.000Z
|
2022-02-03T04:38:24.000Z
|
src/tinerator/visualize/qt_app.py
|
lanl/tinerator
|
b34112f01d64801b6539650af2e40edff33f9f9b
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtCore import QCoreApplication, QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWebEngineWidgets import QWebEngineProfile
class MainWindowWeb(QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tw_title = "TINerator"
self.browser = QWebEngineView()
self.browser.loadFinished.connect(self.onLoadFinished)
self.setCentralWidget(self.browser)
self.setWindowTitle(f"{self.tw_title} (Loading...)")
def setParams(
self, title: str = None, window_size: tuple = None, allow_resize: bool = False
):
if title:
self.tw_title = title
self.setWindowTitle(f"{title} (Loading...)")
if window_size:
self.resize(window_size[0], window_size[1])
if not allow_resize:
self.setFixedSize(self.width(), self.height())
def loadURL(self, url: str):
self.browser.load(QUrl(url))
def onLoadFinished(self):
self.setWindowTitle(self.tw_title)
def closeEvent(self, event):
self.setWindowTitle(f"{self.tw_title} (Closing...)")
self.browser.deleteLater()
self.browser.stop()
self.browser.destroy()
del self.browser
self.close()
QCoreApplication.quit()
def run_web_app(
url: str,
title: str = "TINerator",
width: int = 900,
height: int = 600,
allow_resize: bool = True,
):
qt_app = QtWidgets.QApplication.instance()
if qt_app is None:
qt_app = QApplication(sys.argv)
qt_app.setQuitOnLastWindowClosed(True)
window = MainWindowWeb()
window.setParams(
title=title, window_size=(width, height), allow_resize=allow_resize
)
window.loadURL(url)
window.show()
err = qt_app.exec_()
del window
del qt_app
return err
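# Minimal usage sketch; the URL is illustrative -- point it at whatever local
# server the dashboard actually exposes.
if __name__ == "__main__":
    run_web_app("http://localhost:8080", title="TINerator", width=900, height=600)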
| 26.864865
| 86
| 0.65493
| 1,165
| 0.586016
| 0
| 0
| 0
| 0
| 0
| 0
| 107
| 0.053823
|
5a46d6b1d5ad18765586dcbd1b433a5a6d49394a
| 2,487
|
py
|
Python
|
openstack/tests/unit/clustering/v1/test_receiver.py
|
anton-sidelnikov/openstacksdk
|
98f0c67120b65814c3bd1663415e302551a14536
|
[
"Apache-2.0"
] | null | null | null |
openstack/tests/unit/clustering/v1/test_receiver.py
|
anton-sidelnikov/openstacksdk
|
98f0c67120b65814c3bd1663415e302551a14536
|
[
"Apache-2.0"
] | null | null | null |
openstack/tests/unit/clustering/v1/test_receiver.py
|
anton-sidelnikov/openstacksdk
|
98f0c67120b65814c3bd1663415e302551a14536
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.clustering.v1 import receiver
from openstack.tests.unit import base
FAKE_ID = 'ae63a10b-4a90-452c-aef1-113a0b255ee3'
FAKE_NAME = 'test_receiver'
FAKE = {
'id': FAKE_ID,
'name': FAKE_NAME,
'type': 'webhook',
'cluster_id': 'FAKE_CLUSTER',
'action': 'CLUSTER_RESIZE',
'created_at': '2015-10-10T12:46:36.000000',
'updated_at': '2016-10-10T12:46:36.000000',
'actor': {},
'params': {
'adjustment_type': 'CHANGE_IN_CAPACITY',
'adjustment': 2
},
'channel': {
'alarm_url': 'http://host:port/webhooks/AN_ID/trigger?V=1',
},
'user': 'FAKE_USER',
'project': 'FAKE_PROJECT',
'domain': '',
}
class TestReceiver(base.TestCase):
def setUp(self):
super(TestReceiver, self).setUp()
def test_basic(self):
sot = receiver.Receiver()
self.assertEqual('receiver', sot.resource_key)
self.assertEqual('receivers', sot.resources_key)
self.assertEqual('/receivers', sot.base_path)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_commit)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_instantiate(self):
sot = receiver.Receiver(**FAKE)
self.assertEqual(FAKE['id'], sot.id)
self.assertEqual(FAKE['name'], sot.name)
self.assertEqual(FAKE['type'], sot.type)
self.assertEqual(FAKE['cluster_id'], sot.cluster_id)
self.assertEqual(FAKE['action'], sot.action)
self.assertEqual(FAKE['params'], sot.params)
self.assertEqual(FAKE['created_at'], sot.created_at)
self.assertEqual(FAKE['updated_at'], sot.updated_at)
self.assertEqual(FAKE['user'], sot.user_id)
self.assertEqual(FAKE['project'], sot.project_id)
self.assertEqual(FAKE['domain'], sot.domain_id)
self.assertEqual(FAKE['channel'], sot.channel)
| 34.541667
| 75
| 0.668275
| 1,258
| 0.50583
| 0
| 0
| 0
| 0
| 0
| 0
| 1,054
| 0.423804
|
5a48e8486f10a1984a1d5c43962af125191eae02
| 4,137
|
py
|
Python
|
gan/kdd_utilities.py
|
mesarcik/Efficient-GAN-Anomaly-Detection
|
15568abb57d2965ce70d4fd0dc70f3fe00c68d1b
|
[
"MIT"
] | 408
|
2018-02-27T05:10:49.000Z
|
2022-03-24T10:32:07.000Z
|
gan/kdd_utilities.py
|
phuccuongngo99/Efficient-GAN-Anomaly-Detection
|
849ffd91436f4ab8908e0d0ae9e6eadff5f67110
|
[
"MIT"
] | 21
|
2018-05-21T09:18:02.000Z
|
2021-08-30T21:51:38.000Z
|
gan/kdd_utilities.py
|
phuccuongngo99/Efficient-GAN-Anomaly-Detection
|
849ffd91436f4ab8908e0d0ae9e6eadff5f67110
|
[
"MIT"
] | 139
|
2018-03-05T13:42:11.000Z
|
2022-03-20T09:02:41.000Z
|
import tensorflow as tf
"""Class for KDD10 percent GAN architecture.
Generator and discriminator.
"""
learning_rate = 0.00001
batch_size = 50
layer = 1
latent_dim = 32
dis_inter_layer_dim = 128
init_kernel = tf.contrib.layers.xavier_initializer()
def generator(z_inp, is_training=False, getter=None, reuse=False):
""" Generator architecture in tensorflow
Generates data from the latent space
Args:
z_inp (tensor): variable in the latent space
reuse (bool): sharing variables or not
Returns:
(tensor): last activation layer of the generator
"""
with tf.variable_scope('generator', reuse=reuse, custom_getter=getter):
name_net = 'layer_1'
with tf.variable_scope(name_net):
net = tf.layers.dense(z_inp,
units=64,
kernel_initializer=init_kernel,
name='fc')
net = tf.nn.relu(net, name='relu')
name_net = 'layer_2'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=128,
kernel_initializer=init_kernel,
name='fc')
net = tf.nn.relu(net, name='relu')
name_net = 'layer_4'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=121,
kernel_initializer=init_kernel,
name='fc')
return net
def discriminator(x_inp, is_training=False, getter=None, reuse=False):
""" Discriminator architecture in tensorflow
Discriminates between real data and generated data
Args:
        x_inp (tensor): input data for the discriminator.
reuse (bool): sharing variables or not
Returns:
logits (tensor): last activation layer of the discriminator (shape 1)
intermediate_layer (tensor): intermediate layer for feature matching
"""
with tf.variable_scope('discriminator', reuse=reuse, custom_getter=getter):
name_net = 'layer_1'
with tf.variable_scope(name_net):
net = tf.layers.dense(x_inp,
units=256,
kernel_initializer=init_kernel,
name='fc')
net = leakyReLu(net)
net = tf.layers.dropout(net, rate=0.2, name='dropout',
training=is_training)
name_net = 'layer_2'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=128,
kernel_initializer=init_kernel,
name='fc')
net = leakyReLu(net)
net = tf.layers.dropout(net, rate=0.2, name='dropout',
training=is_training)
name_net = 'layer_3'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=dis_inter_layer_dim,
kernel_initializer=init_kernel,
name='fc')
net = leakyReLu(net)
net = tf.layers.dropout(net,
rate=0.2,
name='dropout',
training=is_training)
intermediate_layer = net
name_net = 'layer_4'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=1,
kernel_initializer=init_kernel,
name='fc')
net = tf.squeeze(net)
return net, intermediate_layer
def leakyReLu(x, alpha=0.1, name=None):
if name:
with tf.variable_scope(name):
return _leakyReLu_impl(x, alpha)
else:
return _leakyReLu_impl(x, alpha)
def _leakyReLu_impl(x, alpha):
return tf.nn.relu(x) - (alpha * tf.nn.relu(-x))
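# Minimal wiring sketch, assuming a TF1.x runtime (tf.placeholder / tf.layers),
# which the functions above already require.
if __name__ == '__main__':
    z_pl = tf.placeholder(tf.float32, shape=[None, latent_dim], name='z')
    x_pl = tf.placeholder(tf.float32, shape=[None, 121], name='x')
    x_fake = generator(z_pl, is_training=True)                       # generated samples
    d_real, _ = discriminator(x_pl, is_training=True)                # logits on real data
    d_fake, _ = discriminator(x_fake, is_training=True, reuse=True)  # shared weights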
| 32.833333
| 79
| 0.513899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 896
| 0.216582
|
5a48f16367b8db551ede0ba75c39ecf9f879f676
| 646
|
py
|
Python
|
setup.py
|
jhakonen/wotdisttools
|
2194761baaf1f6ade5fa740d134553b77300211b
|
[
"MIT"
] | 9
|
2019-08-15T14:59:39.000Z
|
2021-06-24T22:03:31.000Z
|
setup.py
|
jhakonen/wotdisttools
|
2194761baaf1f6ade5fa740d134553b77300211b
|
[
"MIT"
] | 1
|
2019-08-06T19:22:44.000Z
|
2019-08-11T09:23:31.000Z
|
setup.py
|
jhakonen/setuptools-wotmod
|
2194761baaf1f6ade5fa740d134553b77300211b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='setuptools-wotmod',
version='0.2',
packages=find_packages(),
description='setuptools integration for creating World of Tanks mods',
long_description=open('README.md').read(),
author='jhakonen',
url='https://github.com/jhakonen/setuptools-wotmod/',
license='MIT License',
setup_requires=['pytest-runner'],
tests_require=[
'mock',
'nose',
'pytest<5',
],
entry_points={
"distutils.commands": [
"bdist_wotmod = setuptools_wotmod.bdist_wotmod:bdist_wotmod",
],
},
)
| 24.846154
| 74
| 0.630031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 301
| 0.465944
|
5a492602297201d4f7e69fbf52b8fafe45beb71d
| 2,264
|
py
|
Python
|
services/prepare_snps_data.py
|
eliorav/Population-Genotype-Frequency
|
11780b182bf417ac10ae86919ee313e39158267d
|
[
"Apache-2.0"
] | null | null | null |
services/prepare_snps_data.py
|
eliorav/Population-Genotype-Frequency
|
11780b182bf417ac10ae86919ee313e39158267d
|
[
"Apache-2.0"
] | null | null | null |
services/prepare_snps_data.py
|
eliorav/Population-Genotype-Frequency
|
11780b182bf417ac10ae86919ee313e39158267d
|
[
"Apache-2.0"
] | null | null | null |
import os
from glob import glob
import pandas as pd
from tqdm import tqdm
from constants import SNPS_DATA_PATH, SNPS_DATA_FOLDER, SNPS_DATA_FILE_NAME
from services.docker_runner import Hg38dbDockerRunner
def fetch_snps_data(snps_file_path):
"""
Fetch SNPs data from hg38 db
:param snps_file_path: the path of the SNPs list
"""
print("retrieving SNPs data (chrom, position)")
snps_df = pd.read_csv(snps_file_path, sep="\t", names=['snp', 'allele'])
snps = snps_df['snp'].unique()
step_size = 500
steps = int(len(snps) / step_size) + 1
hg38db_docker_runner = Hg38dbDockerRunner()
with tqdm(total=len(snps)) as pbar:
for step in range(steps):
start = step * step_size
            end = len(snps) if step == (steps - 1) else (step + 1) * step_size
            snps_query = '", "'.join(snps[start:end])
            pbar.set_description(f"Processing snps in range {start} - {end}")
hg38db_docker_runner(environment={
'QUERY': f'select chrom, chromEnd, name from snp150 where name in ("{snps_query}")',
'FILE_NAME': f'{SNPS_DATA_FOLDER}/snps_data_{step}'
})
pbar.update(step_size if step != (steps - 1) else len(snps) - step * step_size)
def merge_snps_data():
"""
Merge the multiple files from hg38 db to a single file
"""
print("merge SNPs data to a single file")
snps_files = SNPS_DATA_PATH.glob('*.csv')
snps_df = pd.concat([pd.read_csv(snps_file) for snps_file in snps_files], ignore_index=True)
snps_df = snps_df[~snps_df['chrom'].str.contains('alt')]
snps_df.sort_values(by=['chrom', 'chromEnd'], inplace=True)
snps_df.rename(columns={"chrom": "#chrom", "chromEnd": "position", "name": "rsid"}, inplace=True)
snps_df.to_csv(SNPS_DATA_PATH/SNPS_DATA_FILE_NAME, index=False)
def prepare_snps_data(args):
"""
Prepare SNPs data
:param args: script args - should include snps_file_path - the path of the SNPs list
"""
if not SNPS_DATA_PATH.exists():
SNPS_DATA_PATH.mkdir(exist_ok=True, parents=True)
fetch_snps_data(args.snps_file_path)
merge_snps_data()
else:
print(f"SNPs data: {SNPS_DATA_PATH} already exist")
| 39.034483
| 105
| 0.659452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 719
| 0.31758
|
5a4bcf1b59efc03b155e47a1a800ec05299ddea9
| 258
|
py
|
Python
|
lab1/lab1/views/home.py
|
ZerocksX/Service-Oriented-Computing-2019
|
eac6b0e9a40eed76b452f6524fd899e7107b0f69
|
[
"Apache-2.0"
] | null | null | null |
lab1/lab1/views/home.py
|
ZerocksX/Service-Oriented-Computing-2019
|
eac6b0e9a40eed76b452f6524fd899e7107b0f69
|
[
"Apache-2.0"
] | null | null | null |
lab1/lab1/views/home.py
|
ZerocksX/Service-Oriented-Computing-2019
|
eac6b0e9a40eed76b452f6524fd899e7107b0f69
|
[
"Apache-2.0"
] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render, redirect
from lab1.views import login
def docs(request):
if not request.user.is_authenticated:
return redirect(login.login_view)
return render(request, 'docs.html')
| 23.454545
| 45
| 0.763566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.042636
|
5a4c04b5d165286adafed51f08e73b407e82dac3
| 2,154
|
py
|
Python
|
ssdp/socketserver.py
|
vintozver/ssdp
|
ab3199068e3af93d95b00dcd79fbb444aa4ba13b
|
[
"MIT"
] | null | null | null |
ssdp/socketserver.py
|
vintozver/ssdp
|
ab3199068e3af93d95b00dcd79fbb444aa4ba13b
|
[
"MIT"
] | null | null | null |
ssdp/socketserver.py
|
vintozver/ssdp
|
ab3199068e3af93d95b00dcd79fbb444aa4ba13b
|
[
"MIT"
] | null | null | null |
import logging
import socket
import socketserver
import struct
import typing
from ssdp.entity import *
from ssdp.network import *
logger = logging.getLogger("ssdp.socketserver")
class RequestHandler(socketserver.BaseRequestHandler):
def handle(self):
packet_bytes = self.request[0]
try:
packet_str = packet_bytes.decode("utf-8")
except UnicodeDecodeError:
return
msg = SSDPMessage.parse(packet_str)
if isinstance(msg, SSDPRequest):
logger.debug("request received: %s from %s", str(msg), self.request[1])
self.request_received(msg)
elif isinstance(msg, SSDPResponse):
logger.debug("response received: %s from %s", str(msg), self.request[1])
self.response_received(msg)
else:
logger.debug("unknown received: %s from %s", str(msg), self.request[1])
def request_received(self, request: SSDPRequest):
raise NotImplementedError()
    def response_received(self, response: SSDPResponse):
raise NotImplementedError()
class Server6(socketserver.UDPServer):
address_family = socket.AF_INET6
allow_reuse_address = True
def server_bind(self):
s = self.socket
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(self.server_address)
try:
s.setsockopt(
socket.IPPROTO_IPV6,
20, # IPV6_ADD_MEMBERSHIP
struct.pack(
"16si",
socket.inet_pton(socket.AF_INET6, self.server_address[0]),
self.server_address[3],
), # struct ipv6_mreq
)
except OSError as err:
logging.error(
"Failed to subscribe to IPv6 multicast. Error: %d, %s"
% (err.errno, err.strerror)
)
def __init__(
self, ifindex: int, request_handler: typing.Callable[[], RequestHandler]
):
self.ifindex = ifindex
super(Server6, self).__init__(
(str(MULTICAST_ADDRESS_IPV6_LINK_LOCAL), PORT, 0, ifindex), request_handler
)
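# Minimal usage sketch: concrete callbacks plus a server bound to one interface.
# The interface name "eth0" is illustrative; pick whichever link-local interface
# should receive SSDP traffic.
class LoggingHandler(RequestHandler):
    def request_received(self, request: SSDPRequest):
        logger.info("request: %s", request)

    def response_received(self, response: SSDPResponse):
        logger.info("response: %s", response)


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    with Server6(socket.if_nametoindex("eth0"), LoggingHandler) as server:
        server.serve_forever()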
| 31.217391
| 87
| 0.606778
| 1,968
| 0.913649
| 0
| 0
| 0
| 0
| 0
| 0
| 216
| 0.100279
|
5a4c53204a1b7bd48e50214561ae151641713f7f
| 1,040
|
py
|
Python
|
giggleliu/tba/hgen/multithreading.py
|
Lynn-015/Test_01
|
88be712b2d17603f7a3c38836dabe8dbdee2aba3
|
[
"MIT"
] | 2
|
2015-11-12T01:11:20.000Z
|
2015-11-12T23:32:28.000Z
|
giggleliu/tba/hgen/multithreading.py
|
Lynn-015/Test_01
|
88be712b2d17603f7a3c38836dabe8dbdee2aba3
|
[
"MIT"
] | 3
|
2015-10-28T02:25:48.000Z
|
2015-11-25T18:21:22.000Z
|
giggleliu/tba/hgen/multithreading.py
|
Lynn-015/NJU_DMRG
|
88be712b2d17603f7a3c38836dabe8dbdee2aba3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from numpy import *
from mpi4py import MPI
from matplotlib.pyplot import *
#MPI setting
try:
COMM=MPI.COMM_WORLD
SIZE=COMM.Get_size()
RANK=COMM.Get_rank()
except:
COMM=None
SIZE=1
RANK=0
__all__=['mpido']
def mpido(func,inputlist,bcastouputmesh=True):
'''
MPI for list input.
func:
The function defined on inputlist.
inputlist:
The input list.
bcastouputmesh:
broadcast output mesh if True.
'''
N=len(inputlist)
ntask=(N-1)/SIZE+1
datas=[]
for i in xrange(N):
if i/ntask==RANK:
datas.append(func(inputlist[i]))
datal=COMM.gather(datas,root=0)
if RANK==0:
datas=[]
for datai in datal:
datas+=datai
#broadcast mesh
if bcastouputmesh:
datas=COMM.bcast(datas,root=0)
return datas
def test_mpido():
x=linspace(0,1,100)
y=mpido(func=lambda x:x**2,inputlist=x)
if RANK==0:
plot(x,y)
show()
if __name__=='__main__':
test_mpido()
| 19.622642
| 46
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.238462
|
5a4c57677f4df8cc0dad6ecf21973ff01725bd89
| 1,480
|
py
|
Python
|
manage.py
|
forestmonster/flask-microservices-users
|
84b6edb1d57bd5882a48346bba5ff67a2ce44d9c
|
[
"MIT"
] | null | null | null |
manage.py
|
forestmonster/flask-microservices-users
|
84b6edb1d57bd5882a48346bba5ff67a2ce44d9c
|
[
"MIT"
] | null | null | null |
manage.py
|
forestmonster/flask-microservices-users
|
84b6edb1d57bd5882a48346bba5ff67a2ce44d9c
|
[
"MIT"
] | null | null | null |
import unittest
import coverage
from flask_script import Manager
from project import create_app, db
from project.api.models import User
COV = coverage.coverage(
branch=True,
include='project/*',
omit=[
'project/tests/*',
'project/server/config.py',
'project/server/*/__init__.py'
]
)
COV.start()
app = create_app()
manager = Manager(app)
@manager.command
def test():
"""Run the unit tests without code coverage."""
tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
@manager.command
def cov():
"""Run the unit tests with coverage."""
tests = unittest.TestLoader().discover('project/tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
COV.stop()
COV.save()
print("Coverage summary:")
COV.report()
COV.html_report()
COV.erase()
return 0
return 1
@manager.command
def recreate_db():
"""Recreate a database."""
db.drop_all()
db.create_all()
db.session.commit()
@manager.command
def seed_db():
"""Seed the database."""
db.session.add(User(username='forest', email='forest.monsen@gmail.com'))
db.session.add(User(username='newuser', email='newuser@example.com'))
db.session.commit()
if __name__ == '__main__':
manager.run()
| 21.449275
| 79
| 0.646622
| 0
| 0
| 0
| 0
| 1,038
| 0.701351
| 0
| 0
| 352
| 0.237838
|
5a4d72f7295e946813a914b8b8596cf8a6802ccb
| 2,691
|
py
|
Python
|
cocotb/_py_compat.py
|
lavanyajagan/cocotb
|
2f98612016e68510e264a2b4963303d3588d8404
|
[
"BSD-3-Clause"
] | 350
|
2015-01-09T12:50:13.000Z
|
2019-07-12T09:08:17.000Z
|
cocotb/_py_compat.py
|
lavanyajagan/cocotb
|
2f98612016e68510e264a2b4963303d3588d8404
|
[
"BSD-3-Clause"
] | 710
|
2015-01-05T16:42:29.000Z
|
2019-07-16T13:40:00.000Z
|
cocotb/_py_compat.py
|
lavanyajagan/cocotb
|
2f98612016e68510e264a2b4963303d3588d8404
|
[
"BSD-3-Clause"
] | 182
|
2015-01-08T09:35:20.000Z
|
2019-07-12T18:41:37.000Z
|
# Copyright (c) cocotb contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Backports and compatibility shims for newer python features.
These are for internal use - users should use a third party library like `six`
if they want to use these shims in their own code
"""
import sys
# backport of Python 3.7's contextlib.nullcontext
class nullcontext:
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
block of code is only sometimes used with a normal context manager:
cm = optional_cm if condition else nullcontext()
with cm:
# Perform operation, using optional_cm if condition is True
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
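# Minimal usage sketch mirroring the docstring above: the same `with` block runs
# whether or not a real context manager is supplied.
def _read_or_default(path=None):
    import io
    cm = open(path) if path else nullcontext(io.StringIO("default contents"))
    with cm as f:
        return f.read()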
# On python 3.7 onwards, `dict` is guaranteed to preserve insertion order.
# Since `OrderedDict` is a little slower than `dict`, we prefer the latter
# when possible.
if sys.version_info[:2] >= (3, 7):
insertion_ordered_dict = dict
else:
import collections
insertion_ordered_dict = collections.OrderedDict
| 42.046875
| 81
| 0.751394
| 555
| 0.206243
| 0
| 0
| 0
| 0
| 0
| 0
| 2,279
| 0.846897
|
5a4dfb65c9293913510af1677af7923d5236e918
| 8,259
|
py
|
Python
|
tomopal/crtomopy/demo/pjt_demo.py
|
robinthibaut/TomoPal
|
bb3d1f9d56afc53c641a72b47e4419ee0cfd587b
|
[
"BSD-3-Clause"
] | 2
|
2021-03-01T11:06:17.000Z
|
2021-09-24T11:49:31.000Z
|
tomopal/crtomopy/demo/pjt_demo.py
|
robinthibaut/TomoPal
|
bb3d1f9d56afc53c641a72b47e4419ee0cfd587b
|
[
"BSD-3-Clause"
] | 53
|
2021-03-30T14:05:17.000Z
|
2022-03-31T09:55:14.000Z
|
tomopal/crtomopy/demo/pjt_demo.py
|
robinthibaut/TomoPal
|
bb3d1f9d56afc53c641a72b47e4419ee0cfd587b
|
[
"BSD-3-Clause"
] | 1
|
2020-06-16T11:16:39.000Z
|
2020-06-16T11:16:39.000Z
|
# Copyright (c) 2020. Robin Thibaut, Ghent University
from os.path import join as jp
import numpy as np
from tomopal.crtomopy.crtomo.crc import (
Crtomo,
datread,
import_res,
mesh_geometry,
mtophase,
)
from ..parent import inventory
from ...geoview.diavatly import model_map # To plot results
# %% Directories
# Input here the folders that structure your project. It is not necessary to create
# them beforehand (except the data folder); they will be generated automatically
# once you initialize a Crtomo object.
# Note: the function 'jp' simply joins the arguments to build a path.
main_dir = inventory.hello() # Current working directory of the project
data_dir = jp(main_dir, "data", "demo") # Data files directory
mesh_dir = jp(main_dir, "mesh", "demo") # Mesh files directory
iso_dir = jp(main_dir, "iso", "demo") # ISO file dir
ref_dir = jp(main_dir, "ref", "demo") # Reference model files dir
start_dir = jp(main_dir, "start", "demo") # Start model files dir
results_dir = jp(main_dir, "results", "demo") # Results files directory
# %% Exe names
# Input here the path to your exe files.
mesh_exe_name = jp(main_dir, "mesh.exe")
crtomo_exe_name = jp(main_dir, "crtomo.exe")
# %% Create crtomo object
# Folders will be generated here if they don't exist already.
myinv = Crtomo(
working_dir=main_dir,
data_dir=data_dir,
mesh_dir=mesh_dir,
iso_dir=iso_dir,
ref_dir=ref_dir,
start_dir=start_dir,
crtomo_exe=crtomo_exe_name,
mesh_exe=mesh_exe_name,
)
# %% Generating the mesh
# Data file name A B M N in meters
df = jp(data_dir, "demo_elecs.dat") # Path to electrode configuration file
dat = datread(df) # Use built-in function to extract data (optional)
# Electrode spacing in meters
es = 5
# Electrodes elevation
# Data elevation file name X Z in meters
ef = jp(data_dir, "demo_elevation.dat")
elev = datread(ef) # Use built-in function to extract data (optional)
# %% Build the mesh
# The following command generates the mesh in the folder indicated previously.
# It requires 3 arguments:
# the numpy array of electrodes position of shape (n, 4) (required)
# the electrode spacing (required)
# the elevation data (optional)
myinv.meshmaker(abmn=dat[:, [0, 1, 2, 3]], electrode_spacing=es, elevation_data=elev)
# If you already have generated a mesh, comment the line above and instead
# load the previously generated Mesh.dat file as described below.
# %% Read the mesh data (number of cells, blocks coordinates, x-y coordinates of the center of the blocks) from Mesh.dat
mshf = jp(mesh_dir, "Mesh.dat") # Path to the generated 'Mesh.dat' file.
ncol, nlin, nelem, blocks, centerxy = mesh_geometry(mshf) # Extract mesh properties
# %% Build configuration file
# 0 Mesh.dat file
mesh_file = mshf
# 1 elec.dat file
elec_file = jp(mesh_dir, "elec.dat")
# 2 Data file
data_file = jp(data_dir, "demo_data.dat")
# 3 Results folder file
# Specify the path where the results will be loaded
frname = (
"" # If you want to save the results in a sub-folder in the main results folder
)
result_folder = jp(results_dir, frname)
# 8 Flag for reference model constraint (0/1)
reference_model = 0
#
reference_model_file = None
# %% 12 File for reference model (model weights)
reference_weights_file = None
# You can use the tool ModelMaker from mohinh to interactively create prior models, and automatically save the results
# in a dat file if you provide a file name.
# Otherwise you can access the final results with (ModelMaker object).final_results and export it yourself.
# Example with a background resistivity of 100 ohm.m :
# rfwm = ModelMaker(blocks=blocks, values_log=1, bck=100)
# my_model = rfwm.final_results
# Alternatively, use a simpler approach to produce a reference model file:
# with open(reference_weights_file, 'w') as rw:
# rw.write(str(nelem)+'\n')
# [rw.write('0.1'+'\n') for i in range(nelem)]
# rw.close()
# %% 22 Maximum numbers of iterations
iterations = 20
# 23 Min data RMS
rms = 1.0000
# 24 Flag for DC inversion (0 = with IP / 1 = only DC)
dc = 1
# 25 Flag for robust inversion (0/1)
robust = 1
# 26 Flag for checking polarity (0/1)
check_polarity = 1
# 27 Flag for final phase improvement (0/1)
final_phase_improvement = 1
# 29 Relative magnitude error level (%)
error_level = 2.5
# 30 Minimum absolute magnitude error (ohm)
min_abs_error = 0.00015
# 31 Error in phase (mrad)
phase_error = 0.5
# 36 Flag for MGS inversion (0/1)
mgs = 0
# 37 Beta value
beta = 0.002
# 38 Flag for starting model (0/1)
starting_model = 0
# 39 Starting model file
starting_model_file = None
# %% 19 ISO file 1
iso_file1 = jp(iso_dir, "iso.dat")
# dm = datread(starting_model_file, start=1)[:, 0]
# isom = ModelMaker(blocks=blocks, values=dm, values_log=1, bck=1)
# #
# with open(iso_file1, 'w') as rw:
# rw.write(str(nelem)+'\n')
# [rw.write('{} 1'.format(str(i))+'\n') for i in isom.final_results]
# rw.close()
# %% Generate configuration file
# If erase = 1, every item in the result folder will be deleted. If you don't want that, pick 0 instead.
# Use help(Crtomo.write_config) to see which parameters you can implement.
myinv.write_config(
erase=1,
mesh_file=mesh_file,
elec_file=elec_file,
data_file=data_file,
result_folder=result_folder,
reference_model=reference_model,
reference_model_file=reference_model_file,
reference_weights_file=reference_weights_file,
iso_file1=iso_file1,
iterations=iterations,
rms=rms,
dc=dc,
robust=robust,
check_polarity=check_polarity,
final_phase_improvement=final_phase_improvement,
error_level=error_level,
min_abs_error=min_abs_error,
phase_error=phase_error,
mgs=mgs,
beta=beta,
starting_model=starting_model,
starting_model_file=starting_model_file,
)
# Forward modeling example :
# # Results folder file
# fwname = 'fwd' # If you want to save the results in a sub-folder in the main results folder
#
# result_folder_fwd = jp(results_dir, fwname)
#
# myfwd = Crtomo(working_dir=cwd,
# data_dir=data_dir,
# mesh_dir=mesh_dir,
# crtomo_exe=crtomo_exe_name)
#
# # # res2mod(jp(result_folder, 'rho1.txt'))
# myfwd.write_config(mesh_file=mesh_file,
# elec_file=elec_file,
# fwd_only=1,
# result_folder=result_folder_fwd,
# starting_model_file=jp(cwd, 'rho1.dat'))
# myfwd.run()
# %% Run CRTOMO
# This will make your Crtomo object run the inversion. The configuration files are
# automatically saved in the results folder
myinv.run()
# %% Import results
if dc == 0: # If you have IP results to load
res, ip = import_res(result_folder=result_folder)
m2p = mtophase(ncycles=1, pulse_l=3.5, tmin=0.02, tmax=2.83)
ipt = ip[:] * m2p
else: # if you only have resistivity data to load
res, files = import_res(result_folder=result_folder, return_file=1)
rest = np.copy(res[0])
# If you want to convert a crtomo result file in a prior model for future inversions for example:
# modf = res2mod(files[0])
# Let's plot the results:
# Remove outliers (arbitrary)
cut = np.log10(4500)
rest[rest > cut] = cut
# Define a linear space for the color map
res_levels = 10 ** np.linspace(min(rest), cut, 10)
rtp = 10 ** np.copy(rest)
# Use the model_map function to display the computed resistivity:
# log=1 because we want a logarithmic scale.
# cbpos is for the position of the color bar.
model_map(
polygons=blocks,
vals=rtp,
log=1,
cbpos=0.4,
levels=res_levels,
folder=result_folder,
figname="demo_res_levels",
)
# %% if IP
if dc == 0:
ip = np.copy(res[1])
    # CRTomo works in phase (mrad), so we convert back to chargeability in "mV/V".
m2p = mtophase(ncycles=1, pulse_l=3.5, tmin=0.02, tmax=2.83)
ipt = np.copy(np.abs(ip / m2p))
# Arbitrarily cut outliers
hist = np.histogram(ipt, bins="auto")
cut = 260
ipt[ipt > cut] = cut
# Define levels to be plotted
ip_levels = [0, 10, 20, 30, 40, 50, 60, 70, 260]
model_map(
polygons=blocks,
vals=ipt,
log=0,
levels=ip_levels,
folder=result_folder,
figname="demo_ip_level",
)
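# Optional (sketch, not part of the original demo): persist the inverted resistivity
# values next to the figures so they can be reloaded without re-running the inversion,
# e.g. with plain numpy:
# np.savetxt(jp(result_folder, 'rho_inverted.txt'), rtp)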
| 27.808081
| 120
| 0.698632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,010
| 0.606611
|
5a4e07f2b94ab476e5ae09d4fd2d5f84fb6f63e2
| 72
|
py
|
Python
|
__init__.py
|
VASemenov/Genetica
|
5f51159e182a628c2d33c8a401719924b3611df5
|
[
"MIT"
] | null | null | null |
__init__.py
|
VASemenov/Genetica
|
5f51159e182a628c2d33c8a401719924b3611df5
|
[
"MIT"
] | null | null | null |
__init__.py
|
VASemenov/Genetica
|
5f51159e182a628c2d33c8a401719924b3611df5
|
[
"MIT"
] | null | null | null |
from genetica.dna import DNA, genify
from genetica.model import Genetica
| 36
| 36
| 0.847222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5a4ed98e41bcfbfb4f87bc36a45fc26e1aa68177
| 1,015
|
py
|
Python
|
client_code/utils/__init__.py
|
daviesian/anvil-extras
|
84fd5ca5144808d4ce2b333995e801a4ddff60e6
|
[
"MIT"
] | null | null | null |
client_code/utils/__init__.py
|
daviesian/anvil-extras
|
84fd5ca5144808d4ce2b333995e801a4ddff60e6
|
[
"MIT"
] | null | null | null |
client_code/utils/__init__.py
|
daviesian/anvil-extras
|
84fd5ca5144808d4ce2b333995e801a4ddff60e6
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# This software is published at https://github.com/anvilistas/anvil-extras
from functools import cache
__version__ = "1.4.0"
def __dir__():
return ["auto_refreshing", "wait_for_writeback", "timed", "BindingRefreshDict"]
@cache
def __getattr__(name):
# todo use dynamic imports but __import__ is not yet supported in skulpt
if name == "auto_refreshing":
from ._auto_refreshing import auto_refreshing
return auto_refreshing
elif name == "timed":
from ._timed import timed
return timed
elif name == "wait_for_writeback":
from ._writeback_waiter import wait_for_writeback
return wait_for_writeback
elif name == "BindingRefreshDict":
from ._auto_refreshing import BindingRefreshDict
return BindingRefreshDict
else:
raise AttributeError(name)
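# Usage sketch: with this module-level __getattr__ (PEP 562), the sub-modules above are
# only imported on first attribute access. The import path below assumes the package is
# consumed as an anvil-extras dependency; adjust to your project layout:
#
#   from anvil_extras import utils
#
#   @utils.timed            # first access triggers the lazy import of ._timed
#   def slow_function():
#       ...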
| 26.710526
| 83
| 0.715271
| 0
| 0
| 0
| 0
| 615
| 0.605911
| 0
| 0
| 444
| 0.437438
|
5a50502deca1083175f893a1ac12f341ff7d78ec
| 13,984
|
py
|
Python
|
evaluate/evaluate_debug.py
|
goodgodgd/vode-2020
|
98e34120d642780576ac51d57c2f0597e7e1e524
|
[
"BSD-2-Clause"
] | 4
|
2020-08-15T02:14:03.000Z
|
2021-01-30T08:18:18.000Z
|
evaluate/evaluate_debug.py
|
goodgodgd/vode-2020
|
98e34120d642780576ac51d57c2f0597e7e1e524
|
[
"BSD-2-Clause"
] | 23
|
2020-01-24T07:25:40.000Z
|
2021-06-02T00:50:32.000Z
|
evaluate/evaluate_debug.py
|
goodgodgd/vode-2020
|
98e34120d642780576ac51d57c2f0597e7e1e524
|
[
"BSD-2-Clause"
] | 1
|
2020-07-02T12:26:45.000Z
|
2020-07-02T12:26:45.000Z
|
import os
import os.path as op
import numpy as np
import pandas as pd
import cv2
import tensorflow as tf
import settings
from config import opts
from tfrecords.tfrecord_reader import TfrecordReader
import utils.util_funcs as uf
import utils.convert_pose as cp
from model.synthesize.synthesize_base import SynthesizeMultiScale
from model.train_val import ModelValidater, merge_results
from model.model_main import set_configs, get_dataset, create_training_parts
from model.model_util.logger import stack_reconstruction_images
import model.loss_and_metric.losses as lm
def inspect_results():
set_configs()
dataset_val, tfr_config, val_steps = get_dataset("kitti_raw", "val", False)
model, augmenter, loss_object, optimizer = \
create_training_parts(initial_epoch=1, tfr_config=tfr_config, learning_rate=0.001,
loss_weights=opts.LOSS_RIGID_T1, net_names=opts.RIGID_NET, weight_suffix='ep15')
validater = ModelValidaterInspect(model, loss_object, val_steps, True)
validater.run_an_epoch(dataset_val)
class ModelValidaterInspect(ModelValidater):
def run_an_epoch(self, dataset):
results = []
for step, features in enumerate(dataset):
preds, loss, loss_by_type = self.run_a_batch(features)
batch_result, log_msg = merge_results(features, preds, loss, loss_by_type, self.stereo)
self.print_result(batch_result, step, log_msg, features, preds)
results.append(batch_result)
self.show_images(features, preds)
results = pd.DataFrame(results)
return results
def print_result(self, batch_result, step, log_msg, features, predictions):
print(f"{step}/{self.steps_per_epoch} steps, {log_msg}")
msg = " "
for i, (key, val) in enumerate(batch_result.items()):
msg += f"{key}={val:1.5f}, "
print(msg)
if "pose_gt" in features:
pose_gt_vec = cp.pose_matr2rvec_batch(features["pose_gt"]).numpy()
pose_pr_vec = predictions["pose"].numpy()
xyz_true = pose_gt_vec[:, :, :3]
xyz_pred = pose_pr_vec[:, :, :3]
scale = np.sum(xyz_true * xyz_pred, axis=2) / np.sum(xyz_pred ** 2, axis=2)
print(" pose gt:", pose_gt_vec[0, 0])
print(" pose pr:", pose_pr_vec[0, 0])
print(f" pose scale, diff: {scale[0, 0]:1.4f}", np.abs(pose_gt_vec[0, 0] - pose_pr_vec[0, 0]))
if "depth_gt" in features:
print(f" depth scale, gt depth, pr depth: {batch_result['gtdepth']/batch_result['prdepth']:1.4f}",
batch_result["gtdepth"], batch_result["prdepth"])
def show_images(self, features, predictions):
total_loss = lm.TotalLoss()
scaleidx, batchidx, srcidx = 0, 0, 0
view1 = stack_reconstruction_images(total_loss, features, predictions, (scaleidx, batchidx, srcidx))
cv2.imshow("recon", view1)
if "pose_gt" in features:
pose_gt_vec = cp.pose_matr2rvec_batch(features["pose_gt"])
predictions["pose"] = pose_gt_vec
view2 = stack_reconstruction_images(total_loss, features, predictions, (scaleidx, batchidx, srcidx))
cv2.imshow("recon_by_gtpose", view2)
cv2.waitKey()
def evaluate_for_debug(data_dir_name, model_name):
"""
Function to check whether the learning process is going right:
it evaluates the current model, saves losses and error metrics to csv files, and saves debugging images
- debug_depth.csv: per target frame, saves the predicted depth error and smoothness loss
- debug_pose.csv: per source frame, saves the photometric loss, trajectory error and rotation error
- trajectory.csv: per source frame, saves the gt trajectory and predicted trajectory
- debug_imgs (directory): for each loss and metric, collects the worst-performing frames and saves them as inspection view images
1) target image
2) reconstructed target from gt
3) reconstructed target from pred
4) source image
5) predicted target depth
"""
if not uf.check_tfrecord_including(op.join(opts.DATAPATH_TFR, data_dir_name), ["pose_gt", "depth_gt"]):
print("Evaluation is NOT possible without pose_gt and depth_gt")
return
set_configs(model_name)
model = create_model()
model = try_load_weights(model, model_name)
model.compile(optimizer="sgd", loss="mean_absolute_error")
dataset = TfrecordReader(op.join(opts.DATAPATH_TFR, data_dir_name), batch_size=1).get_dataset()
depth_result = []
pose_result = []
trajectory = []
steps_per_epoch = uf.count_steps(data_dir_name, 1)
for i, x in enumerate(dataset):
uf.print_numeric_progress(i, steps_per_epoch)
depth_res, pose_res, traj = evaluate_batch(i, x, model)
depth_result.append(depth_res)
pose_result.append(pose_res)
trajectory.append(traj)
print("")
depth_result = save_depth_result_and_get_df(depth_result, model_name)
pose_result = save_pose_result_and_get_df(pose_result, model_name)
save_trajectories(trajectory, model_name)
depth_sample_inds = find_worst_depth_samples(depth_result, 5)
print("worst depth sample indices\n", depth_sample_inds[0])
pose_sample_inds = find_worst_pose_samples(pose_result, 5)
print("worst pose sample indices\n", pose_sample_inds[0])
worst_sample_inds = depth_sample_inds + pose_sample_inds
pathname = op.join(opts.DATAPATH_EVL, model_name, 'debug_imgs')
os.makedirs(pathname, exist_ok=True)
for i, x in enumerate(dataset):
uf.print_numeric_progress(i, steps_per_epoch)
for sample_inds in worst_sample_inds:
# sample_inds: df['frame', 'srcidx', metric or loss]
save_worst_views(i, x, model, sample_inds, pathname)
def evaluate_batch(index, x, model):
numsrc = opts.SNIPPET_LEN - 1
stacked_image = x['image']
intrinsic = x['intrinsic']
depth_true = x['depth_gt']
pose_true_mat = x['pose_gt']
source_image, target_image = uf.split_into_source_and_target(stacked_image)
predictions = model(x['image'])
disp_pred_ms = predictions['disp_ms']
pose_pred = predictions['pose']
depth_pred_ms = uf.safe_reciprocal_number_ms(disp_pred_ms)
# evaluate depth from numpy arrays and take only 'abs_rel' metric
depth_err, scale = compute_depth_error(depth_pred_ms[0].numpy()[0], depth_true.numpy()[0])
smooth_loss = compute_smooth_loss(disp_pred_ms[0], target_image)
pose_pred_mat = cp.pose_rvec2matr_batch_tf(pose_pred)
# pose error output: [batch, numsrc]
trj_err, trj_len = compute_trajectory_error(pose_pred_mat, pose_true_mat, scale)
rot_err = ef.calc_rotational_error_tensor(pose_pred_mat, pose_true_mat)
# compute photometric loss: [batch, numsrc]
photo_loss = compute_photo_loss(target_image, source_image, intrinsic, depth_pred_ms, pose_pred)
depth_res = [index, smooth_loss, depth_err]
# pose_res: [numsrc, -1]
pose_res = np.stack([np.array([index] * 4), np.arange(numsrc), photo_loss.numpy().reshape(-1),
trj_err.numpy().reshape(-1), trj_len.numpy().reshape(-1),
rot_err.numpy().reshape(-1)], axis=1)
# to collect trajectory
trajectory = np.concatenate([np.array([index] * 4)[:, np.newaxis], np.arange(numsrc)[:, np.newaxis],
pose_true_mat.numpy()[:, :, :3, 3].reshape((-1, 3)),
pose_pred_mat.numpy()[:, :, :3, 3].reshape((-1, 3))*scale], axis=1)
return depth_res, pose_res, trajectory
def compute_photo_loss(target_true, source_image, intrinsic, depth_pred_ms, pose_pred):
# synthesize target image
synth_target_ms = SynthesizeMultiScale()(source_image, intrinsic, depth_pred_ms, pose_pred)
losses = []
target_pred = synth_target_ms[0]
# photometric loss: [batch, numsrc]
loss = photometric_loss(target_pred, target_true)
return loss
def compute_smooth_loss(disparity, target_image):
# [batch]
loss = smootheness_loss(disparity, target_image)
# return scalar
return loss.numpy()[0]
def compute_trajectory_error(pose_pred_mat, pose_true_mat, scale):
"""
:param pose_pred_mat: predicted snippet pose matrices, [batch, numsrc, 4, 4]
:param pose_true_mat: ground truth snippet pose matrices, [batch, numsrc, 4, 4]
:param scale: scale for pose_pred to have real scale
:return: trajectory error in meter [batch, numsrc]
"""
xyz_pred = pose_pred_mat[:, :, :3, 3]
xyz_true = pose_true_mat[:, :, :3, 3]
    # adjust the trajectory scaling because the absolute scale is unknown
# scale = tf.reduce_sum(xyz_true * xyz_pred, axis=2) / tf.reduce_sum(xyz_pred ** 2, axis=2)
# scale = tf.expand_dims(scale, -1)
traj_error = xyz_true - xyz_pred * tf.constant([[[scale]]])
traj_error = tf.sqrt(tf.reduce_sum(traj_error ** 2, axis=2))
traj_len = tf.sqrt(tf.reduce_sum(xyz_true ** 2, axis=2))
return traj_error, traj_len
def compute_depth_error(depth_pred, depth_true):
mask = np.logical_and(depth_true > opts.MIN_DEPTH, depth_true < opts.MAX_DEPTH)
    # crop used by Garg ECCV16 to reproduce Eigen NIPS14 results
# if used on gt_size 370x1224 produces a crop of [-218, -3, 44, 1180]
gt_height, gt_width, _ = depth_true.shape
crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,
0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)
crop_mask = np.zeros(mask.shape)
crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1
mask = np.logical_and(mask, crop_mask)
# scale matching
scaler = np.median(depth_true[mask]) / np.median(depth_pred[mask])
depth_pred[mask] *= scaler
# clip prediction and compute error metrics
depth_pred = np.clip(depth_pred, opts.MIN_DEPTH, opts.MAX_DEPTH)
metrics = ef.compute_depth_metrics(depth_pred[mask], depth_true[mask])
# return only abs rel
return metrics[0], scaler
def save_depth_result_and_get_df(depth_result, model_name):
depth_result = np.array(depth_result)
depth_result = pd.DataFrame(data=depth_result, columns=['frame', 'smooth_loss', 'depth_err'])
depth_result['frame'] = depth_result['frame'].astype(int)
filename = op.join(opts.DATAPATH_EVL, model_name, 'debug_depth.csv')
depth_result.to_csv(filename, encoding='utf-8', index=False, float_format='%.4f')
return depth_result
def save_pose_result_and_get_df(pose_result, model_name):
pose_result = np.concatenate(pose_result, axis=0)
columns = ['frame', 'srcidx', 'photo_loss', 'trj_err', 'distance', 'rot_err']
pose_result = pd.DataFrame(data=pose_result, columns=columns)
pose_result['frame'] = pose_result['frame'].astype(int)
pose_result['srcidx'] = pose_result['srcidx'].astype(int)
filename = op.join(opts.DATAPATH_EVL, model_name, 'debug_pose.csv')
pose_result.to_csv(filename, encoding='utf-8', index=False, float_format='%.4f')
return pose_result
def save_trajectories(trajectory, model_name):
trajectory = np.concatenate(trajectory, axis=0)
trajectory = pd.DataFrame(data=trajectory, columns=['frame', 'srcidx', 'tx', 'ty', 'tz', 'px', 'py', 'pz'])
trajectory['frame'] = trajectory['frame'].astype(int)
trajectory['srcidx'] = trajectory['srcidx'].astype(int)
filename = op.join(opts.DATAPATH_EVL, model_name, 'trajectory.csv')
trajectory.to_csv(filename, encoding='utf-8', index=False, float_format='%.4f')
def find_worst_depth_samples(depth_result, num_samples):
dfcols = list(depth_result)
sample_inds = []
for colname in ['depth_err']:
sorted_result = depth_result[dfcols[:1] + [colname]].sort_values(by=[colname], ascending=False)
sorted_result = sorted_result.reset_index(drop=True).head(num_samples)
sorted_result['srcidx'] = 0
sorted_result = sorted_result[['frame', 'srcidx', colname]]
sample_inds.append(sorted_result)
return sample_inds
def find_worst_pose_samples(pose_result, num_samples):
dfcols = list(pose_result)
sample_inds = []
for colname in ['photo_loss', 'trj_err']:
sorted_result = pose_result[dfcols[:2] + [colname]].sort_values(by=[colname], ascending=False)
sorted_result = sorted_result.reset_index(drop=True).head(num_samples)
sample_inds.append(sorted_result)
return sample_inds
def save_worst_views(frame, x, model, sample_inds, save_path, scale=1):
if frame not in sample_inds['frame'].tolist():
return
colname = list(sample_inds)[-1]
indices = sample_inds.loc[sample_inds['frame'] == frame, :].index.tolist()
stacked_image = x['image']
intrinsic = x['intrinsic']
depth_gt = x['depth_gt']
pose_gt = x['pose_gt']
pose_gt = cp.pose_matr2rvec_batch(pose_gt)
depth_gt_ms = uf.multi_scale_depths(depth_gt, [1, 2, 4, 8])
source_image, target_image = uf.split_into_source_and_target(stacked_image)
predictions = model(x['image'])
disp_pred_ms = predictions['disp_ms']
pose_pred = predictions['pose']
depth_pred_ms = uf.safe_reciprocal_number_ms(disp_pred_ms)
depth_pred_ms = [depth*scale for depth in depth_pred_ms]
synthesizer = SynthesizeMultiScale()
synth_target_pred_ms = synthesizer(source_image, intrinsic, depth_pred_ms, pose_pred)
synth_target_gt_ms = synthesizer(source_image, intrinsic, depth_gt_ms, pose_gt)
for ind in indices:
srcidx = sample_inds.loc[ind, 'srcidx']
view_imgs = {"target": target_image, "synthesized": synth_target_pred_ms[0][0, srcidx],
"depth": depth_pred_ms[0][0, srcidx], "synth_by_gt": synth_target_gt_ms[0][0, srcidx]}
view = uf.stack_titled_images(view_imgs)
filename = op.join(save_path, f"{colname[:3]}_{frame:04d}_{srcidx}.png")
print("save file:", filename)
cv2.imwrite(filename, view)
if __name__ == "__main__":
np.set_printoptions(precision=3, suppress=True, linewidth=100)
inspect_results()
# evaluate_for_debug('kitti_raw_test', 'vode1')
| 43.974843
| 112
| 0.693292
| 2,214
| 0.15711
| 0
| 0
| 0
| 0
| 0
| 0
| 2,932
| 0.208061
|
5a50e3662524ec61048e74d97bc09d7305717136
| 7,018
|
py
|
Python
|
tests/test_utils.py
|
h4ck3rm1k3/requests
|
46184236dc177fb68c7863445609149d0ac243ea
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
h4ck3rm1k3/requests
|
46184236dc177fb68c7863445609149d0ac243ea
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
h4ck3rm1k3/requests
|
46184236dc177fb68c7863445609149d0ac243ea
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import os
from io import BytesIO
import pytest
from requests import compat
from requests.utils import (
address_in_network, dotted_netmask,
get_auth_from_url, get_encodings_from_content,
get_environ_proxies, guess_filename,
is_ipv4_address, is_valid_cidr, requote_uri,
select_proxy, super_len)
from .compat import StringIO, cStringIO
class TestSuperLen:
@pytest.mark.parametrize(
'stream, value', (
(StringIO.StringIO, 'Test'),
(BytesIO, b'Test'),
pytest.mark.skipif('cStringIO is None')((cStringIO, 'Test')),
))
def test_io_streams(self, stream, value):
"""Ensures that we properly deal with different kinds of IO streams."""
assert super_len(stream()) == 0
assert super_len(stream(value)) == 4
def test_super_len_correctly_calculates_len_of_partially_read_file(self):
"""Ensure that we handle partially consumed file like objects."""
s = StringIO.StringIO()
s.write('foobarbogus')
assert super_len(s) == 0
class TestGetEnvironProxies:
"""Ensures that IP addresses are correctly matches with ranges
in no_proxy variable."""
@pytest.yield_fixture(scope='class', autouse=True, params=['no_proxy', 'NO_PROXY'])
def no_proxy(self, request):
os.environ[request.param] = '192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1'
yield
del os.environ[request.param]
@pytest.mark.parametrize(
'url', (
'http://192.168.0.1:5000/',
'http://192.168.0.1/',
'http://172.16.1.1/',
'http://172.16.1.1:5000/',
'http://localhost.localdomain:5000/v1.0/',
))
def test_bypass(self, url):
assert get_environ_proxies(url) == {}
@pytest.mark.parametrize(
'url', (
'http://192.168.1.1:5000/',
'http://192.168.1.1/',
'http://www.requests.com/',
))
def test_not_bypass(self, url):
assert get_environ_proxies(url) != {}
class TestIsIPv4Address:
def test_valid(self):
assert is_ipv4_address('8.8.8.8')
@pytest.mark.parametrize('value', ('8.8.8.8.8', 'localhost.localdomain'))
def test_invalid(self, value):
assert not is_ipv4_address(value)
class TestIsValidCIDR:
def test_valid(self):
assert is_valid_cidr('192.168.1.0/24')
@pytest.mark.parametrize(
'value', (
'8.8.8.8',
'192.168.1.0/a',
'192.168.1.0/128',
'192.168.1.0/-1',
'192.168.1.999/24',
))
def test_invalid(self, value):
assert not is_valid_cidr(value)
class TestAddressInNetwork:
def test_valid(self):
assert address_in_network('192.168.1.1', '192.168.1.0/24')
def test_invalid(self):
assert not address_in_network('172.16.0.1', '192.168.1.0/24')
class TestGuessFilename:
@pytest.mark.parametrize(
'value', (1, type('Fake', (object,), {'name': 1})()),
)
def test_guess_filename_invalid(self, value):
assert guess_filename(value) is None
@pytest.mark.parametrize(
'value, expected_type', (
(b'value', compat.bytes),
(b'value'.decode('utf-8'), compat.str)
))
def test_guess_filename_valid(self, value, expected_type):
obj = type('Fake', (object,), {'name': value})()
result = guess_filename(obj)
assert result == value
assert isinstance(result, expected_type)
class TestContentEncodingDetection:
def test_none(self):
encodings = get_encodings_from_content('')
assert not len(encodings)
@pytest.mark.parametrize(
'content', (
# HTML5 meta charset attribute
'<meta charset="UTF-8">',
# HTML4 pragma directive
'<meta http-equiv="Content-type" content="text/html;charset=UTF-8">',
# XHTML 1.x served with text/html MIME type
'<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />',
# XHTML 1.x served as XML
'<?xml version="1.0" encoding="UTF-8"?>',
))
def test_pragmas(self, content):
encodings = get_encodings_from_content(content)
assert len(encodings) == 1
assert encodings[0] == 'UTF-8'
def test_precedence(self):
content = '''
<?xml version="1.0" encoding="XML"?>
<meta charset="HTML5">
<meta http-equiv="Content-type" content="text/html;charset=HTML4" />
'''.strip()
assert get_encodings_from_content(content) == ['HTML5', 'HTML4', 'XML']
USER = PASSWORD = "%!*'();:@&=+$,/?#[] "
ENCODED_USER = compat.quote(USER, '')
ENCODED_PASSWORD = compat.quote(PASSWORD, '')
@pytest.mark.parametrize(
'url, auth', (
(
'http://' + ENCODED_USER + ':' + ENCODED_PASSWORD + '@' +
'request.com/url.html#test',
(USER, PASSWORD)
),
(
'http://user:pass@complex.url.com/path?query=yes',
('user', 'pass')
),
(
'http://user:pass%20pass@complex.url.com/path?query=yes',
('user', 'pass pass')
),
(
'http://user:pass pass@complex.url.com/path?query=yes',
('user', 'pass pass')
),
(
'http://user%25user:pass@complex.url.com/path?query=yes',
('user%user', 'pass')
),
(
'http://user:pass%23pass@complex.url.com/path?query=yes',
('user', 'pass#pass')
),
))
def test_get_auth_from_url(url, auth):
assert get_auth_from_url(url) == auth
@pytest.mark.parametrize(
'uri, expected', (
(
# Ensure requoting doesn't break expectations
'http://example.com/fiz?buz=%25ppicture',
'http://example.com/fiz?buz=%25ppicture',
),
(
# Ensure we handle unquoted percent signs in redirects
'http://example.com/fiz?buz=%ppicture',
'http://example.com/fiz?buz=%25ppicture',
),
))
def test_requote_uri_with_unquoted_percents(uri, expected):
"""See: https://github.com/kennethreitz/requests/issues/2356
"""
assert requote_uri(uri) == expected
@pytest.mark.parametrize(
'mask, expected', (
(8, '255.0.0.0'),
(24, '255.255.255.0'),
(25, '255.255.255.128'),
))
def test_dotted_netmask(mask, expected):
assert dotted_netmask(mask) == expected
@pytest.mark.parametrize(
'url, expected', (
('hTTp://u:p@Some.Host/path', 'http://some.host.proxy'),
('hTTp://u:p@Other.Host/path', 'http://http.proxy'),
('hTTps://Other.Host', None),
))
def test_select_proxies(url, expected):
"""Make sure we can select per-host proxies correctly."""
proxies = {'http': 'http://http.proxy',
'http://some.host': 'http://some.host.proxy'}
assert select_proxy(url, proxies) == expected
| 30.25
| 95
| 0.58008
| 4,266
| 0.607865
| 176
| 0.025078
| 5,154
| 0.734397
| 0
| 0
| 2,506
| 0.357082
|
5a5102204d83caa3f795bc8eb2cf30cd51108dd9
| 37,008
|
py
|
Python
|
clorm/orm/factbase.py
|
florianfischer91/clorm
|
3569a91daa1d691f0a7f5a9534db925e027cdbf9
|
[
"MIT"
] | 10
|
2019-01-11T03:31:17.000Z
|
2019-12-18T08:18:44.000Z
|
clorm/orm/factbase.py
|
florianfischer91/clorm
|
3569a91daa1d691f0a7f5a9534db925e027cdbf9
|
[
"MIT"
] | 21
|
2018-12-06T04:06:53.000Z
|
2019-12-17T00:04:56.000Z
|
clorm/orm/factbase.py
|
florianfischer91/clorm
|
3569a91daa1d691f0a7f5a9534db925e027cdbf9
|
[
"MIT"
] | null | null | null |
# -----------------------------------------------------------------------------
# Clorm ORM FactBase implementation. FactBase provides a set-like container
# specifically for storing facts (Predicate instances).
# ------------------------------------------------------------------------------
import abc
import io
import itertools
import sys
from typing import (Any, Callable, Iterable, Iterator, List, Optional, TextIO,
Tuple, Type, Union, cast, overload)
from ._typing import _T0, _T1, _T2, _T3, _T4
from ._queryimpl import UnGroupedQuery
from .core import (Predicate, PredicateDefn, PredicatePath, and_,
validate_root_paths)
from .factcontainers import FactMap, factset_equality
from .query import (QueryExecutor, QuerySpec, make_query_plan, process_orderby,
process_where)
__all__ = [
'FactBase',
'Select',
'Delete',
]
#------------------------------------------------------------------------------
# Global
#------------------------------------------------------------------------------
_Facts = Union[Iterable[Predicate], Callable[[], Iterable[Predicate]]]
#------------------------------------------------------------------------------
# Support function for printing ASP facts: Note: _trim_docstring() is taken from
# PEP 257 (modified for Python 3): https://www.python.org/dev/peps/pep-0257/
# ------------------------------------------------------------------------------
_builtin_sorted=sorted
def _format_asp_facts(iterator,output,width):
tmp1=""
for f in iterator:
fstr="{}.".format(f)
if tmp1 and len(tmp1) + len(fstr) > width:
print(tmp1,file=output)
tmp1 = fstr
else:
tmp1 = tmp1 + " " + fstr if tmp1 else fstr
if tmp1: print(tmp1,file=output)
def _trim_docstring(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def _endstrip(string):
if not string: return
nl=string[-1]=='\n'
tmp=string.rstrip()
return tmp + '\n' if nl else tmp
def _format_docstring(docstring,output):
if not docstring: return
tmp=_trim_docstring(docstring)
tmpstr = "".join(_endstrip("% " + l) for l in tmp.splitlines(True))
if tmpstr:
print("% Description:",file=output)
print(tmpstr,file=output)
def _maxwidth(lines):
return max([len(l) for l in lines])
def _format_commented(fm: FactMap, out: TextIO) -> None:
pm: PredicateDefn = fm.predicate.meta
docstring = _trim_docstring(fm.predicate.__doc__) \
if fm.predicate.__doc__ else ""
indent = " "
if pm.arity == 0:
lines = [ "Unary predicate signature:", indent + pm.name ]
else:
def build_signature(p: Type[Predicate]) -> str:
args = []
for pp in p:
complex = pp.meta.field.complex
args.append(cast(str, pp._pathseq[1]) if not complex else build_signature(complex))
return f"{p.meta.name}({','.join(args)})"
lines = [ "Predicate signature:",
indent + build_signature(fm.predicate) ]
if docstring:
lines.append("Description:")
for l in docstring.splitlines():lines.append(indent + l)
bar = "-" * _maxwidth(lines)
lines.insert(0,bar)
lines.append(bar)
for l in lines:
tmp = l.rstrip()
if tmp: print("% {}".format(tmp),file=out)
else: print("%",file=out)
return
#------------------------------------------------------------------------------
# A FactBase consisting of facts of different types
#------------------------------------------------------------------------------
class FactBase(object):
"""A fact base is a container for facts (i.e., Predicate sub-class instances)
``FactBase`` can behave like a specialised ``set`` object, but can also
behave like a minimalist database. It stores facts for ``Predicate`` types
(where a predicate type loosely corresponds to a *table* in a database)
and allows for certain fields to be indexed in order to perform more
efficient queries.
The initialiser can be given a collection of predicates. If it is passed
another FactBase then it simply makes a copy (including the indexed fields).
FactBase also has a special mode when it is passed a functor instead of a
collection. In this case it performs a delayed initialisation. This means
that the internal data structures are only populated when the FactBase is
actually used. This mode is particularly useful when extracting facts from
models. Often a program will only want to keep the data from the final model
(for example, with optimisation we often want the best model before a
timeout). Delayed initialisation saves computation because only the
last model will be properly initialised.
Args:
facts([Predicate]|FactBase|callable): a list of facts (predicate
instances), a fact base, or a functor that generates a list of
facts. If a functor is passed then the fact base performs a delayed
initialisation. If a fact base is passed and no index is specified then
an index will be created matching in input fact base.
indexes(Field): a list of fields that are to be indexed.
"""
#--------------------------------------------------------------------------
# Internal member functions
#--------------------------------------------------------------------------
# A special purpose initialiser so that we can delayed initialisation
def _init(self, facts=None, indexes=None):
# flag that initialisation has taken place
self._delayed_init: Optional[Callable[[], None]] = None
# If it is delayed initialisation then get the facts
if facts and callable(facts):
facts = facts()
elif facts and isinstance(facts, FactBase) and indexes is None:
indexes = facts.indexes
if indexes is None: indexes=[]
# Create FactMaps for the predicate types with indexed fields
grouped = {}
self._indexes = tuple(indexes)
for path in self._indexes:
if path.meta.predicate not in grouped: grouped[path.meta.predicate] = []
grouped[path.meta.predicate].append(path)
self._factmaps = { pt : FactMap(pt, idxs) for pt, idxs in grouped.items() }
if facts is None: return
self._add(facts)
# Make sure the FactBase has been initialised
def _check_init(self):
if self._delayed_init: self._delayed_init() # Check for delayed init
#--------------------------------------------------------------------------
#
#--------------------------------------------------------------------------
def _add(self, arg: Union[Predicate, Iterable[Predicate]]) -> None:
if isinstance(arg, Predicate):
ptype = arg.__class__
if not ptype in self._factmaps:
self._factmaps[ptype] = FactMap(ptype)
return self._factmaps[ptype].add_fact(arg)
if isinstance(arg, str) or not isinstance(arg, Iterable):
raise TypeError(f"'{arg}' is not a Predicate instance")
sorted_facts = sorted(arg, key=lambda x: x.__class__.__name__)
for type_, grouped_facts in itertools.groupby(sorted_facts, lambda x: x.__class__):
if not issubclass(type_, Predicate):
raise TypeError(f"{list(grouped_facts)} are not Predicate instances")
if not type_ in self._factmaps:
self._factmaps[type_] = FactMap(type_)
self._factmaps[type_].add_facts(grouped_facts)
return
def _remove(self, fact, raise_on_missing):
ptype = type(fact)
if not isinstance(fact, Predicate) or ptype not in self._factmaps:
raise KeyError(fact)
return self._factmaps[ptype].remove(fact,raise_on_missing)
#--------------------------------------------------------------------------
    # Initialiser
#--------------------------------------------------------------------------
def __init__(self, facts: Optional[_Facts] = None, indexes: Optional[Iterable[PredicatePath]] = None) -> None:
self._delayed_init=None
if callable(facts):
def delayed_init():
self._init(facts, indexes)
self._delayed_init=delayed_init
else:
self._init(facts, indexes)
#--------------------------------------------------------------------------
# An internal API for the query mechanism. Not to be called by users.
#--------------------------------------------------------------------------
@property
def factmaps(self):
self._check_init() # Check for delayed init
return self._factmaps
#--------------------------------------------------------------------------
# Set member functions
#--------------------------------------------------------------------------
def add(self, arg: Union[Predicate, Iterable[Predicate]]) -> None:
"""Add a single fact or a collection of facts.
Because a ``FactBase`` can only hold :class:`~Predicate` sub-class
instances this member function has been overloaded to take either a
single :class:`~Predicate` sub-class instance or a collection of
:class:`~Predicate` sub-class instances.
Args:
arg: a single fact or a collection of facts.
"""
self._check_init() # Check for delayed init
return self._add(arg)
def remove(self, arg: Predicate) -> None:
"""Remove a fact from the fact base (raises an exception if no fact). """
self._check_init() # Check for delayed init
return self._remove(arg, raise_on_missing=True)
def discard(self, arg: Predicate) -> None:
"""Remove a fact from the fact base. """
self._check_init() # Check for delayed init
return self._remove(arg, raise_on_missing=False)
def pop(self) -> Predicate:
"""Pop an element from the FactBase. """
self._check_init() # Check for delayed init
for pt, fm in self._factmaps.items():
if fm: return fm.pop()
raise KeyError("pop from an empty FactBase")
def clear(self):
"""Clear the fact base of all facts."""
self._check_init() # Check for delayed init
for pt, fm in self._factmaps.items(): fm.clear()
#--------------------------------------------------------------------------
# Special FactBase member functions
#--------------------------------------------------------------------------
def select(self, root):
"""Define a select query using the old Query API.
.. note::
This interface will eventually be deprecated when the new
:class:`Query API<Query>` is finalised. The entry point to this Query
API is through the :meth:`FactBase.query` method.
Args:
predicate: The predicate to query.
Returns:
Returns a Select query object for specifying a query.
"""
self._check_init() # Check for delayed init
roots = validate_root_paths([root])
ptypes = set([ root.meta.predicate for root in roots])
# Make sure there are factmaps for each referenced predicate type
for ptype in ptypes: self._factmaps.setdefault(ptype, FactMap(ptype))
return SelectImpl(self, QuerySpec(roots=roots))
def delete(self, root):
self._check_init() # Check for delayed init
roots = validate_root_paths([root])
ptypes = set([ root.meta.predicate for root in roots])
# Make sure there are factmaps for each referenced predicate type
for ptype in ptypes: self._factmaps.setdefault(ptype, FactMap(ptype))
return _Delete(self, QuerySpec(roots=roots))
# START OVERLOADED FUNCTIONS self.query;UnGroupedQuery[{0}];1;5;Type;
# code within this block is **programmatically,
# statically generated** by generate_overloads.py
@overload
def query(
self, __ent0: Type[_T0]
) -> 'UnGroupedQuery[_T0]':
...
@overload
def query(
self, __ent0: Type[_T0], __ent1: Type[_T1]
) -> 'UnGroupedQuery[Tuple[_T0, _T1]]':
...
@overload
def query(
self, __ent0: Type[_T0], __ent1: Type[_T1], __ent2: Type[_T2]
) -> 'UnGroupedQuery[Tuple[_T0, _T1, _T2]]':
...
@overload
def query(
self, __ent0: Type[_T0], __ent1: Type[_T1], __ent2: Type[_T2], __ent3: Type[_T3]
) -> 'UnGroupedQuery[Tuple[_T0, _T1, _T2, _T3]]':
...
@overload
def query(
self, __ent0: Type[_T0], __ent1: Type[_T1], __ent2: Type[_T2], __ent3: Type[_T3], __ent4: Type[_T4]
) -> 'UnGroupedQuery[Tuple[_T0, _T1, _T2, _T3, _T4]]':
...
# END OVERLOADED FUNCTIONS self.query
@overload
def query(self, *roots: Any) -> 'UnGroupedQuery[Any]': ...
def query(self, *roots):
"""Define a query using the new Query API :class:`Query`.
The parameters consist of a predicates (or aliases) to query (like an
SQL FROM clause).
Args:
*predicates: predicate or predicate aliases
Returns:
Returns a Query object for specifying a query.
"""
self._check_init() # Check for delayed init
# Make sure there are factmaps for each referenced predicate type
ptypes = set([r.meta.predicate for r in validate_root_paths(roots)])
for ptype in ptypes: self._factmaps.setdefault(ptype, FactMap(ptype))
qspec = QuerySpec(roots=roots)
return UnGroupedQuery(self._factmaps, qspec)
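# A minimal usage sketch for the query() API above (the Person predicate and its
# fields are hypothetical, not part of clorm itself):
#
#   from clorm import Predicate, IntegerField, StringField
#
#   class Person(Predicate):
#       name = StringField
#       age = IntegerField
#
#   fb = FactBase([Person(name="alice", age=30), Person(name="bob", age=12)],
#                 indexes=[Person.age])
#   adults = list(fb.query(Person).where(Person.age >= 18).all())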
@property
def predicates(self) -> Tuple[Type[Predicate], ...]:
"""Return the list of predicate types that this fact base contains."""
self._check_init() # Check for delayed init
return tuple([pt for pt, fm in self._factmaps.items() if fm])
@property
def indexes(self) -> Tuple[PredicatePath, ...]:
self._check_init() # Check for delayed init
return self._indexes
def facts(self) -> List[Predicate]:
"""Return all facts."""
self._check_init() # Check for delayed init
tmp = [ fm.factset for fm in self._factmaps.values() if fm]
return list(itertools.chain(*tmp))
def asp_str(self, *, width: int = 0, commented: bool = False, sorted: bool = False) -> str:
"""Return a ASP string representation of the fact base.
The generated ASP string representation is syntactically correct ASP
code, so it is suitable for adding as the input to an ASP program (or
writing to a file for later use in an ASP program).
By default the order of the facts in the string is arbitrary. Because
`FactBase` is built on a `OrderedDict` (which preserves insertion
order) the order of the facts will be deterministic between runs of the
same program. However two FactBases containing the same facts but
constructed in different ways will not produce the same output
string. In order to guarantee the same output the `sorted` flag can be
specified.
Args:
width: tries to fill to a given width by putting more than one
fact on a line if necessary (default: 0).
commented: produces commented ASP code by adding a predicate
signature and turning the Predicate sub-class docstring
into a ASP comments (default: False).
sorted: sort the output facts, first by predicates (name,arity) and
then by the natural order of the instances for that
predicate (default :False).
"""
self._check_init() # Check for delayed init
out = io.StringIO()
first=True
if sorted:
names = _builtin_sorted(self._factmaps.keys(),key=lambda pt:
(pt.meta.name, pt.meta.arity,pt.__name__))
fms = [self._factmaps[n] for n in names]
else:
fms = self._factmaps.values()
for fm in fms:
if commented:
if first: first=False
else: print("",file=out)
_format_commented(fm,out)
if sorted:
_format_asp_facts(_builtin_sorted(fm.factset),out,width)
else:
_format_asp_facts(fm.factset,out,width)
data = out.getvalue()
out.close()
return data
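# Example (sketch): fb.asp_str(width=79, sorted=True) returns deterministic,
# line-wrapped ASP facts (one or more "fact." terms per line) that can be written
# straight to a .lp file for grounding with an ASP solver.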
def __str__(self) -> str:
self._check_init() # Check for delayed init
tmp = ", ".join([str(f) for f in self])
return '{' + tmp + '}'
def __repr__(self):
return self.__str__()
#--------------------------------------------------------------------------
# Special functions to support set and container operations
#--------------------------------------------------------------------------
def __contains__(self, fact):
"""Implemement set 'in' operator."""
self._check_init() # Check for delayed init
if not isinstance(fact,Predicate): return False
ptype = type(fact)
if ptype not in self._factmaps: return False
return fact in self._factmaps[ptype].factset
def __bool__(self):
"""Implemement set bool operator."""
self._check_init() # Check for delayed init
for fm in self._factmaps.values():
if fm: return True
return False
def __len__(self):
self._check_init() # Check for delayed init
return sum([len(fm.factset) for fm in self._factmaps.values()])
def __iter__(self) -> Iterator[Predicate]:
self._check_init() # Check for delayed init
for fm in self._factmaps.values():
for f in fm.factset: yield f
def __eq__(self, other):
"""Overloaded boolean operator."""
# If other is not a FactBase then create one
if not isinstance(other, self.__class__): other=FactBase(other)
self._check_init(); other._check_init() # Check for delayed init
self_fms = { p: fm for p,fm in self._factmaps.items() if fm }
other_fms = { p: fm for p,fm in other._factmaps.items() if fm }
if self_fms.keys() != other_fms.keys(): return False
for p, fm1 in self_fms.items():
fm2 = other_fms[p]
if not factset_equality(fm1.factset,fm2.factset): return False
return True
def __lt__(self,other):
"""Implemement set < operator."""
# If other is not a FactBase then create one
if not isinstance(other, self.__class__): other=FactBase(other)
self._check_init() ; other._check_init() # Check for delayed init
self_fms = { p: fm for p,fm in self._factmaps.items() if fm }
other_fms = { p: fm for p,fm in other._factmaps.items() if fm }
if len(self_fms) > len(other_fms): return False
known_ne=False
for p, spfm in self_fms.items():
if p not in other_fms: return False
opfm = other_fms[p]
if spfm.factset < opfm.factset: known_ne=True
elif spfm.factset > opfm.factset: return False
if known_ne: return True
return False
def __le__(self,other):
"""Implemement set <= operator."""
if not isinstance(other, self.__class__): other=FactBase(other)
self._check_init() ; other._check_init() # Check for delayed init
self_fms = { p: fm for p,fm in self._factmaps.items() if fm }
other_fms = { p: fm for p,fm in other._factmaps.items() if fm }
if len(self_fms) > len(other_fms): return False
for p, spfm in self_fms.items():
if p not in other_fms: return False
opfm = other_fms[p]
if spfm.factset > opfm.factset: return False
return True
def __gt__(self,other):
"""Implemement set > operator."""
if not isinstance(other, self.__class__): other=FactBase(other)
return other.__lt__(self)
def __ge__(self,other):
"""Implemement set >= operator."""
if not isinstance(other, self.__class__): other=FactBase(other)
return other.__le__(self)
def __or__(self,other):
"""Implemement set | operator."""
return self.union(other)
def __and__(self,other):
"""Implemement set & operator."""
return self.intersection(other)
def __sub__(self,other):
"""Implemement set - operator."""
return self.difference(other)
def __xor__(self,other):
"""Implemement set ^ operator."""
return self.symmetric_difference(other)
def __ior__(self,other):
"""Implemement set |= operator."""
self.update(other)
return self
def __iand__(self,other):
"""Implemement set &= operator."""
self.intersection_update(other)
return self
def __isub__(self,other):
"""Implemement set -= operator."""
self.difference_update(other)
return self
def __ixor__(self,other):
"""Implemement set ^= operator."""
self.symmetric_difference_update(other)
return self
def __getstate__(self):
self._check_init()
return self.__dict__
#--------------------------------------------------------------------------
# Set functions
#--------------------------------------------------------------------------
def union(self, *others: _Facts) -> 'FactBase':
"""Implements the set union() function"""
factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
self._check_init() # Check for delayed init
for fb in factbases: fb._check_init()
fb = FactBase()
predicates = set(self._factmaps.keys())
for o in factbases: predicates.update(o._factmaps.keys())
for p in predicates:
pothers = [o._factmaps[p] for o in factbases if p in o._factmaps]
if p in self._factmaps:
fb._factmaps[p] = self._factmaps[p].union(*pothers)
else:
fb._factmaps[p] = FactMap(p).union(*pothers)
return fb
def intersection(self, *others: _Facts) -> 'FactBase':
"""Implements the set intersection() function"""
factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
self._check_init() # Check for delayed init
for fb in factbases: fb._check_init()
fb = FactBase()
predicates = set(self._factmaps.keys())
for fb_ in factbases: predicates.intersection_update(fb_._factmaps.keys())
for p in predicates:
pothers = [o._factmaps[p] for o in factbases if p in o._factmaps]
fb._factmaps[p] = self._factmaps[p].intersection(*pothers)
return fb
def difference(self, *others: _Facts) -> 'FactBase':
"""Implements the set difference() function"""
factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
self._check_init() # Check for delayed init
for fb in factbases: fb._check_init()
fb = FactBase()
predicates = set(self._factmaps.keys())
for p in predicates:
pothers = [o._factmaps[p] for o in factbases if p in o._factmaps]
fb._factmaps[p] = self._factmaps[p].difference(*pothers)
return fb
def symmetric_difference(self, other: _Facts) -> 'FactBase':
"""Implements the set symmetric_difference() function"""
if not isinstance(other, self.__class__): other=FactBase(other)
self._check_init() # Check for delayed init
other._check_init()
fb = FactBase()
predicates = set(self._factmaps.keys())
predicates.update(other._factmaps.keys())
for p in predicates:
in_self = p in self._factmaps ; in_other = p in other._factmaps
if in_self and in_other:
fb._factmaps[p] = self._factmaps[p].symmetric_difference(other._factmaps[p])
elif in_self:
fb._factmaps[p] = self._factmaps[p].copy()
elif in_other:
fb._factmaps[p] = other._factmaps[p].copy()
return fb
def update(self, *others: _Facts) -> None:
"""Implements the set update() function"""
factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
self._check_init() # Check for delayed init
for fb in factbases: fb._check_init()
for fb in factbases:
for p,fm in fb._factmaps.items():
if p in self._factmaps: self._factmaps[p].update(fm)
else: self._factmaps[p] = fm.copy()
def intersection_update(self, *others: _Facts) -> None:
"""Implements the set intersection_update() function"""
factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
self._check_init() # Check for delayed init
for fb in factbases: fb._check_init()
predicates = set(self._factmaps.keys())
for fb in factbases: predicates.intersection_update(fb._factmaps.keys())
pred_to_delete = set(self._factmaps.keys()) - predicates
for p in pred_to_delete: self._factmaps[p].clear()
for p in predicates:
pothers = [o._factmaps[p] for o in factbases if p in o._factmaps]
self._factmaps[p].intersection_update(*pothers)
def difference_update(self, *others: _Facts) -> None:
"""Implements the set difference_update() function"""
factbases = [o if isinstance(o, self.__class__) else FactBase(o) for o in others]
self._check_init() # Check for delayed init
for fb in factbases: fb._check_init()
for p in self._factmaps.keys():
pothers = [o._factmaps[p] for o in factbases if p in o._factmaps]
self._factmaps[p].difference_update(*pothers)
def symmetric_difference_update(self, other: _Facts) -> None:
"""Implements the set symmetric_difference_update() function"""
if not isinstance(other, self.__class__): other=FactBase(other)
self._check_init() # Check for delayed init
other._check_init()
predicates = set(self._factmaps.keys())
predicates.update(other._factmaps.keys())
for p in predicates:
if p in self._factmaps and p in other._factmaps:
self._factmaps[p].symmetric_difference_update(other._factmaps[p])
else:
if p in other._factmaps: self._factmaps[p] = other._factmaps[p].copy()
def copy(self) -> 'FactBase':
"""Implements the set copy() function"""
self._check_init() # Check for delayed init
fb = FactBase()
for p, _ in self._factmaps.items():
fb._factmaps[p] = self._factmaps[p].copy()
return fb
#------------------------------------------------------------------------------
# Select is an interface query over a FactBase.
# ------------------------------------------------------------------------------
class Select(abc.ABC):
"""An abstract class that defines the interface to original Query API.
.. note::
This interface will eventually be deprecated when the new :class:`Query
API<Query>` is finalised.
``Select`` query objects cannot be constructed directly. Instead a
``Select`` object is returned by the :meth:`FactBase.select` function. Given
a ``FactBase`` object ``fb``, a specification is of the form:
``query = fb.select(<predicate>).where(<expression>).order_by(<ordering>)``
where ``<predicate>`` specifies the predicate type to search for,
``<expression>`` specifies the search criteria and ``<ordering>`` specifies
a sort order when returning the results. The ``where()`` and ``order_by()``
clauses are omitted when not required.
"""
@abc.abstractmethod
def where(self, *expressions):
"""Set the select statement's where clause.
The where clause consists of a set of boolean and comparison
expressions. This expression specifies a search criteria for matching
facts within the corresponding ``FactBase``.
Boolean expressions are built from other boolean expressions or a
comparison expression. Comparison expressions are of the form:
``<PredicatePath> <compop> <value>``
where ``<compop>`` is a comparison operator such as ``==``, ``!=``, or
``<=`` and ``<value>`` is either a Python value or another predicate path
object refering to a field of the same predicate or a placeholder.
A placeholder is a special value that issubstituted when the query is
actually executed. These placeholders are named ``ph1_``, ``ph2_``,
``ph3_``, and ``ph4_`` and correspond to the 1st to 4th arguments of the
``get``, ``get_unique`` or ``count`` function call.
Args:
expressions: one or more comparison expressions.
Returns:
Returns a reference to itself.
"""
@abc.abstractmethod
def order_by(self, *fieldorder):
"""Provide an ordering over the results.
Args:
fieldorder: an ordering over fields
Returns:
Returns a reference to itself.
"""
pass
@abc.abstractmethod
def get(self, *args, **kwargs):
"""Return all matching entries."""
pass
def get_unique(self, *args, **kwargs):
"""Return the unique matching entry (or raise an exception)"""
pass
def count(self, *args, **kwargs):
"""Return the number of matches."""
pass
#------------------------------------------------------------------------------
# Delete is an interface to perform a query delete from a FactBase.
# ------------------------------------------------------------------------------
class Delete(abc.ABC):
"""An abstract class that defines the interface to a original delete query API.
.. note::
This interface will eventually be deprecated when the new :class:`Query
API<Query>` is finalised.
``Delete`` query objects cannot be constructed directly. Instead a
``Delete`` object is returned by the ``FactBase.delete()`` function. Given a
``FactBase`` object ``fb``, a specification is of the form:
``query = fb.delete(<predicate>).where(<expression>)``
where ``<predicate>`` specifies the predicate type to search for,
``<expression>`` specifies the search criteria. The ``where()`` clause can
be omitted in which case all predicates of that type will be deleted.
"""
@abc.abstractmethod
def where(self, *expressions):
"""Set the select statement's where clause.
See the documentation for ``Select.where()`` for further details.
"""
pass
@abc.abstractmethod
def execute(self, *args, **kwargs):
"""Function to execute the delete query"""
pass
#------------------------------------------------------------------------------
# Query API version 1 with new query engine
#------------------------------------------------------------------------------
class SelectImpl(Select):
def __init__(self, factbase, qspec):
self._factbase = factbase
self._qspec = qspec
#--------------------------------------------------------------------------
# Add an order_by expression
#--------------------------------------------------------------------------
def where(self, *expressions):
if self._qspec.where:
raise TypeError("Cannot specify 'where' multiple times")
if not expressions:
raise TypeError("Empty 'where' expression")
try:
if len(expressions) == 1:
where = process_where(expressions[0],self._qspec.roots)
else:
where = process_where(and_(*expressions),self._qspec.roots)
nqspec = self._qspec.newp(where=where)
except ValueError as e:
raise TypeError(str(e)) from None
return SelectImpl(self._factbase,nqspec)
#--------------------------------------------------------------------------
# Add an order_by expression
#--------------------------------------------------------------------------
def order_by(self, *expressions):
if self._qspec.order_by:
raise TypeError("Cannot specify 'order_by' multiple times")
if not expressions:
raise TypeError("Empty 'order_by' expression")
try:
order_by=process_orderby(expressions,self._qspec.roots)
nqspec = self._qspec.newp(order_by=order_by)
except ValueError as e:
raise TypeError(str(e)) from None
return SelectImpl(self._factbase,nqspec)
#--------------------------------------------------------------------------
#
#--------------------------------------------------------------------------
def query_plan(self,*args,**kwargs):
qspec = self._qspec.fill_defaults()
(factsets,factindexes) = \
QueryExecutor.get_factmap_data(self._factbase.factmaps, qspec)
qplan = make_query_plan(factindexes.keys(), qspec)
return qplan.ground(*args,**kwargs)
#--------------------------------------------------------------------------
# Functions currently mirroring the old interface
# --------------------------------------------------------------------------
def get(self, *args, **kwargs):
qspec = self._qspec
if args or kwargs:
if self._qspec.where is None:
raise ValueError(("No where clause to ground"))
qspec = self._qspec.bindp(*args, **kwargs)
qe = QueryExecutor(self._factbase.factmaps, qspec)
return list(qe.all())
def get_unique(self, *args, **kwargs):
qspec = self._qspec
if args or kwargs:
if self._qspec.where is None:
raise ValueError(("No where clause to ground"))
qspec = self._qspec.bindp(*args, **kwargs)
qe = QueryExecutor(self._factbase.factmaps, qspec)
found = None
for out in qe.all():
if found: raise ValueError("Query returned more than a single element")
found = out
return found
def count(self, *args, **kwargs):
qspec = self._qspec
if args or kwargs:
if self._qspec.where is None:
raise ValueError(("No where clause to ground"))
qspec = self._qspec.bindp(*args, **kwargs)
qe = QueryExecutor(self._factbase.factmaps, qspec)
count = 0
for _ in qe.all(): count += 1
return count
#------------------------------------------------------------------------------
# The Delete class
#------------------------------------------------------------------------------
class _Delete(Delete):
def __init__(self, factbase, qspec):
self._factbase = factbase
self._root = qspec.roots[0]
self._select = SelectImpl(factbase,qspec)
self._has_where = False
def where(self, *expressions):
self._has_where = True
self._select = self._select.where(*expressions)
return self
def execute(self, *args, **kwargs):
factmap = self._factbase.factmaps[self._root.meta.predicate]
# If there is no where clause then delete everything
if not self._has_where:
num_deleted = len(factmap.facts())
factmap.clear()
return num_deleted
# Gather all the facts to delete and remove them
to_delete = [ f for f in self._select.get(*args, **kwargs) ]
for fact in to_delete: factmap.remove(fact)
return len(to_delete)
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
if __name__ == "__main__":
raise RuntimeError('Cannot run modules')
| 37.879222
| 114
| 0.568769
| 31,491
| 0.850924
| 179
| 0.004837
| 3,238
| 0.087495
| 0
| 0
| 15,264
| 0.412451
|
5a53a6326b7c2b2399d98404ebe43ef902465e91
| 13,470
|
py
|
Python
|
blender/2.79/scripts/addons/modules/extensions_framework/__init__.py
|
uzairakbar/bpy2.79
|
3a3e0004ac6783c4e4b89d939e4432de99026a85
|
[
"MIT"
] | 2
|
2019-11-27T09:05:42.000Z
|
2020-02-20T01:25:23.000Z
|
blender/2.79/scripts/addons/modules/extensions_framework/__init__.py
|
uzairakbar/bpy2.79
|
3a3e0004ac6783c4e4b89d939e4432de99026a85
|
[
"MIT"
] | null | null | null |
blender/2.79/scripts/addons/modules/extensions_framework/__init__.py
|
uzairakbar/bpy2.79
|
3a3e0004ac6783c4e4b89d939e4432de99026a85
|
[
"MIT"
] | 4
|
2020-02-19T20:02:26.000Z
|
2022-02-11T18:47:56.000Z
|
# -*- coding: utf-8 -*-
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
# --------------------------------------------------------------------------
# Blender 2.5 Extensions Framework
# --------------------------------------------------------------------------
#
# Authors:
# Doug Hammond
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# ***** END GPL LICENCE BLOCK *****
#
import time
import bpy
from extensions_framework.ui import EF_OT_msg
bpy.utils.register_class(EF_OT_msg)
del EF_OT_msg
def log(str, popup=False, module_name='EF'):
"""Print a message to the console, prefixed with the module_name
and the current time. If the popup flag is True, the message will
be raised in the UI as a warning using the operator bpy.ops.ef.msg.
"""
print("[%s %s] %s" %
(module_name, time.strftime('%Y-%b-%d %H:%M:%S'), str))
if popup:
bpy.ops.ef.msg(
msg_type='WARNING',
msg_text=str
)
added_property_cache = {}
def init_properties(obj, props, cache=True):
"""Initialise custom properties in the given object or type.
The props list is described in the declarative_property_group
class definition. If the cache flag is False, this function
will attempt to redefine properties even if they have already been
added.
"""
if not obj in added_property_cache.keys():
added_property_cache[obj] = []
for prop in props:
try:
if cache and prop['attr'] in added_property_cache[obj]:
continue
if prop['type'] == 'bool':
t = bpy.props.BoolProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","options","subtype","update"]}
elif prop['type'] == 'bool_vector':
t = bpy.props.BoolVectorProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","options","subtype","size",
"update"]}
elif prop['type'] == 'collection':
t = bpy.props.CollectionProperty
a = {k: v for k,v in prop.items() if k in ["ptype","name",
"description","default","options"]}
a['type'] = a['ptype']
del a['ptype']
elif prop['type'] == 'enum':
t = bpy.props.EnumProperty
a = {k: v for k,v in prop.items() if k in ["items","name",
"description","default","options","update"]}
elif prop['type'] == 'float':
t = bpy.props.FloatProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","min","max","soft_min","soft_max",
"step","precision","options","subtype","unit","update"]}
elif prop['type'] == 'float_vector':
t = bpy.props.FloatVectorProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","min","max","soft_min","soft_max",
"step","precision","options","subtype","size","update"]}
elif prop['type'] == 'int':
t = bpy.props.IntProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","min","max","soft_min","soft_max",
"step","options","subtype","update"]}
elif prop['type'] == 'int_vector':
t = bpy.props.IntVectorProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","min","max","soft_min","soft_max",
"options","subtype","size","update"]}
elif prop['type'] == 'pointer':
t = bpy.props.PointerProperty
a = {k: v for k,v in prop.items() if k in ["ptype", "name",
"description","options","update"]}
a['type'] = a['ptype']
del a['ptype']
elif prop['type'] == 'string':
t = bpy.props.StringProperty
a = {k: v for k,v in prop.items() if k in ["name",
"description","default","maxlen","options","subtype",
"update"]}
else:
continue
setattr(obj, prop['attr'], t(**a))
added_property_cache[obj].append(prop['attr'])
except KeyError:
# Silently skip invalid entries in props
continue
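# A small sketch of the props list format init_properties() consumes. The
# attribute names and values below are hypothetical; only the 'type'/'attr'
# keys and the per-type argument names mirror the dispatch above.
#
#   example_props = [
#       {'type': 'int', 'attr': 'threads', 'name': 'Threads',
#        'description': 'Number of threads', 'default': 1, 'min': 1, 'max': 64},
#       {'type': 'bool', 'attr': 'use_cache', 'name': 'Use cache', 'default': True},
#   ]
#   init_properties(bpy.types.Scene, example_props)
#
# After this call bpy.types.Scene gains 'threads' and 'use_cache' properties,
# and repeated calls are skipped via added_property_cache unless cache=False.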
class declarative_property_group(bpy.types.PropertyGroup):
"""A declarative_property_group describes a set of logically
related properties, using a declarative style to list each
property type, name, values, and other relevant information.
The information provided for each property depends on the
property's type.
The properties list attribute in this class describes the
properties present in this group.
Some additional information about the properties in this group
can be specified, so that a UI can be generated to display them.
To that end, the controls list attribute and the visibility dict
attribute are present here, to be read and interpreted by a
property_group_renderer object.
See extensions_framework.ui.property_group_renderer.
"""
ef_initialised = False
"""This property tells extensions_framework which bpy.type(s)
to attach this PropertyGroup to. If left as an empty list,
it will not be attached to any type, but its properties will
still be initialised. The type(s) given in the list should be
a string, such as 'Scene'.
"""
ef_attach_to = []
@classmethod
def initialise_properties(cls):
"""This is a function that should be called on
sub-classes of declarative_property_group in order
to ensure that they are initialised when the addon
is loaded.
the init_properties is called without caching here,
as it is assumed that any addon calling this function
will also call ef_remove_properties when it is
unregistered.
"""
if not cls.ef_initialised:
for property_group_parent in cls.ef_attach_to:
if property_group_parent is not None:
prototype = getattr(bpy.types, property_group_parent)
if not hasattr(prototype, cls.__name__):
init_properties(prototype, [{
'type': 'pointer',
'attr': cls.__name__,
'ptype': cls,
'name': cls.__name__,
'description': cls.__name__
}], cache=False)
init_properties(cls, cls.properties, cache=False)
cls.ef_initialised = True
return cls
@classmethod
def register_initialise_properties(cls):
"""As ef_initialise_properties, but also registers the
class with RNA. Note that this isn't a great idea
because it's non-trivial to unregister the class, unless
you keep track of it yourself.
"""
bpy.utils.register_class(cls)
cls.initialise_properties()
return cls
@classmethod
def remove_properties(cls):
"""This is a function that should be called on
sub-classes of declarative_property_group in order
to ensure that they are un-initialised when the addon
is unloaded.
"""
if cls.ef_initialised:
prototype = getattr(bpy.types, cls.__name__)
for prop in cls.properties:
if hasattr(prototype, prop['attr']):
delattr(prototype, prop['attr'])
for property_group_parent in cls.ef_attach_to:
if property_group_parent is not None:
prototype = getattr(bpy.types, property_group_parent)
if hasattr(prototype, cls.__name__):
delattr(prototype, cls.__name__)
cls.ef_initialised = False
return cls
"""This list controls the order of property layout when rendered
by a property_group_renderer. This can be a nested list, where each
list becomes a row in the panel layout. Nesting may be to any depth.
"""
controls = []
"""The visibility dict controls the visibility of properties based on
the value of other properties. See extensions_framework.validate
for test syntax.
"""
visibility = {}
"""The enabled dict controls the enabled state of properties based on
the value of other properties. See extensions_framework.validate
for test syntax.
"""
enabled = {}
"""The alert dict controls the alert state of properties based on
the value of other properties. See extensions_framework.validate
for test syntax.
"""
alert = {}
"""The properties list describes each property to be created. Each
item should be a dict of args to pass to a
bpy.props.<?>Property function, with the exception of 'type'
which is used and stripped by extensions_framework in order to
determine which Property creation function to call.
Example item:
{
'type': 'int', # bpy.props.IntProperty
'attr': 'threads', # bpy.types.<type>.threads
'name': 'Render Threads', # Rendered next to the UI
'description': 'Number of threads to use', # Tooltip text in the UI
'default': 1,
'min': 1,
'soft_min': 1,
'max': 64,
'soft_max': 64
}
"""
properties = []
def draw_callback(self, context):
"""Sub-classes can override this to get a callback when
rendering is completed by a property_group_renderer sub-class.
"""
pass
@classmethod
def get_exportable_properties(cls):
"""Return a list of properties which have the 'save_in_preset' key
set to True, and hence should be saved into preset files.
"""
out = []
for prop in cls.properties:
if 'save_in_preset' in prop.keys() and prop['save_in_preset']:
out.append(prop)
return out
def reset(self):
"""Reset all properties in this group to the default value,
if specified"""
for prop in self.properties:
pk = prop.keys()
if 'attr' in pk and 'default' in pk and hasattr(self, prop['attr']):
setattr(self, prop['attr'], prop['default'])
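# A hypothetical subclass sketch showing the declarative style described above.
# The group name, property and control names are made up; ef_attach_to, the
# properties list and initialise_properties() behave as defined in this class.
#
#   class my_settings(declarative_property_group):
#       ef_attach_to = ['Scene']
#       controls = ['threads']
#       properties = [
#           {'type': 'int', 'attr': 'threads', 'name': 'Render Threads',
#            'description': 'Number of threads to use',
#            'default': 1, 'min': 1, 'max': 64},
#       ]
#
#   bpy.utils.register_class(my_settings)
#   my_settings.initialise_properties()   # attaches my_settings to bpy.types.Scene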
class Addon(object):
"""A list of classes registered by this addon"""
static_addon_count = 0
addon_serial = 0
addon_classes = None
bl_info = None
BL_VERSION = None
BL_IDNAME = None
def __init__(self, bl_info=None):
self.addon_classes = []
self.bl_info = bl_info
# Keep a count in case we have to give this addon an anonymous name
self.addon_serial = Addon.static_addon_count
Addon.static_addon_count += 1
if self.bl_info:
self.BL_VERSION = '.'.join(['%s'%v for v in self.bl_info['version']]).lower()
self.BL_IDNAME = self.bl_info['name'].lower() + '-' + self.BL_VERSION
else:
# construct anonymous name
self.BL_VERSION = '0'
self.BL_IDNAME = 'Addon-%03d'%self.addon_serial
def addon_register_class(self, cls):
"""This method is designed to be used as a decorator on RNA-registerable
classes defined by the addon. By using this decorator, this class will
keep track of classes registered by this addon so that they can be
unregistered later in the correct order.
"""
self.addon_classes.append(cls)
return cls
def register(self):
"""This is the register function that should be exposed in the addon's
__init__.
"""
for cls in self.addon_classes:
bpy.utils.register_class(cls)
if hasattr(cls, 'ef_attach_to'): cls.initialise_properties()
def unregister(self):
"""This is the unregister function that should be exposed in the addon's
__init__.
"""
for cls in self.addon_classes[::-1]: # unregister in reverse order
if hasattr(cls, 'ef_attach_to'): cls.remove_properties()
bpy.utils.unregister_class(cls)
def init_functions(self):
"""Returns references to the three functions that this addon needs
for successful class registration management. In the addon's __init__
you would use like this:
addon_register_class, register, unregister = Addon().init_functions()
"""
		return self.addon_register_class, self.register, self.unregister
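# A hedged sketch of how an addon's __init__ might use this class; the bl_info
# contents and the operator are hypothetical, and the unpacking assumes
# init_functions() returns the three helpers as documented above.
#
#   bl_info = {'name': 'My Addon', 'version': (0, 1)}
#   _addon = Addon(bl_info)
#   addon_register_class, register, unregister = _addon.init_functions()
#
#   @addon_register_class
#   class MY_OT_operator(bpy.types.Operator):
#       bl_idname = 'my.operator'
#       bl_label = 'My Operator'
#
#       def execute(self, context):
#           return {'FINISHED'}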
| 36.209677
| 89
| 0.587231
| 8,320
| 0.617669
| 0
| 0
| 2,823
| 0.209577
| 0
| 0
| 7,261
| 0.53905
|
5a54a96d2f3cc1d14a3c5a24eab90fe8dfc58c84
| 16,305
|
py
|
Python
|
tests/test_common.py
|
NOAA-GSL/adb_graphics
|
b9a3d567efa0de5a175be8404f351b901a8f382f
|
[
"MIT"
] | 2
|
2020-11-06T16:30:50.000Z
|
2021-01-15T19:42:13.000Z
|
tests/test_common.py
|
NOAA-GSL/adb_graphics
|
b9a3d567efa0de5a175be8404f351b901a8f382f
|
[
"MIT"
] | 10
|
2020-11-20T16:02:57.000Z
|
2021-03-31T23:35:56.000Z
|
tests/test_common.py
|
NOAA-GSL/adb_graphics
|
b9a3d567efa0de5a175be8404f351b901a8f382f
|
[
"MIT"
] | 1
|
2021-04-09T20:55:06.000Z
|
2021-04-09T20:55:06.000Z
|
# pylint: disable=invalid-name
'''
Pytests for the common utilities included in this package. Includes:
- conversions.py
- specs.py
- utils.py
To run the tests, type the following in the top level repo directory:
python -m pytest --nat-file [path/to/gribfile] --prs-file [path/to/gribfile]
'''
from inspect import getfullargspec
from string import ascii_letters, digits
import warnings
from matplotlib import cm
from matplotlib import colors as mcolors
from metpy.plots import ctables
import numpy as np
import adb_graphics.conversions as conversions
import adb_graphics.specs as specs
import adb_graphics.utils as utils
import adb_graphics.datahandler.gribdata as gribdata
def test_conversion():
''' Test that conversions return at numpy array for input of np.ndarray,
list, or int '''
a = np.ones([3, 2]) * 300
c = a[0, 0]
# Check for the right answer
assert np.array_equal(conversions.k_to_c(a), a - 273.15)
assert np.array_equal(conversions.k_to_f(a), (a - 273.15) * 9/5 + 32)
assert np.array_equal(conversions.kgm2_to_in(a), a * 0.03937)
assert np.array_equal(conversions.m_to_dm(a), a / 10)
assert np.array_equal(conversions.m_to_in(a), a * 39.3701)
assert np.array_equal(conversions.m_to_kft(a), a / 304.8)
assert np.array_equal(conversions.m_to_mi(a), a / 1609.344)
assert np.array_equal(conversions.ms_to_kt(a), a * 1.9438)
assert np.array_equal(conversions.pa_to_hpa(a), a / 100)
assert np.array_equal(conversions.percent(a), a * 100)
assert np.array_equal(conversions.to_micro(a), a * 1E6)
assert np.array_equal(conversions.vvel_scale(a), a * -10)
assert np.array_equal(conversions.vort_scale(a), a / 1E-05)
assert np.array_equal(conversions.weasd_to_1hsnw(a), a * 10)
functions = [
conversions.k_to_c,
conversions.k_to_f,
conversions.kgm2_to_in,
conversions.m_to_dm,
conversions.m_to_in,
conversions.m_to_kft,
conversions.m_to_mi,
conversions.ms_to_kt,
conversions.pa_to_hpa,
conversions.percent,
conversions.to_micro,
conversions.vvel_scale,
conversions.vort_scale,
conversions.weasd_to_1hsnw,
]
# Check that all functions return a np.ndarray given a collection, or single float
for f in functions:
for collection in [a, c]:
assert isinstance(f(collection), type(collection))
class MockSpecs(specs.VarSpec):
''' Mock class for the VarSpec abstract class '''
@property
def clevs(self):
return np.asarray(range(15))
@property
def vspec(self):
return {}
def test_specs():
''' Test VarSpec properties. '''
config = 'adb_graphics/default_specs.yml'
varspec = MockSpecs(config)
# Ensure correct return type
assert isinstance(varspec.t_colors, np.ndarray)
assert isinstance(varspec.ps_colors, np.ndarray)
assert isinstance(varspec.yml, dict)
# Ensure the appropriate number of colors is returned
assert np.shape(varspec.t_colors) == (len(varspec.clevs), 4)
assert np.shape(varspec.ps_colors) == (105, 4)
def test_utils():
''' Test that utils works appropriately. '''
assert callable(utils.get_func('conversions.k_to_c'))
class TestDefaultSpecs():
''' Test contents of default_specs.yml. '''
config = 'adb_graphics/default_specs.yml'
varspec = MockSpecs(config)
cfg = varspec.yml
@property
def allowable(self):
''' Each entry in the dict names a function that tests a key in
default_specs.yml. '''
return {
'accumulate': self.is_bool,
'annotate': self.is_bool,
'annotate_decimal': self.is_int,
'clevs': self.is_a_clev,
'cmap': self.is_a_cmap,
'colors': self.is_a_color,
'contours': self.is_a_contour_dict,
'hatches': self.is_a_contourf_dict,
'labels': self.is_a_contourf_dict,
'ncl_name': True,
'print_units': True,
'split': self.is_bool,
'ticks': self.is_number,
'title': self.is_string,
'transform': self.check_transform,
'unit': self.is_string,
'vertical_index': self.is_int,
'vertical_level_name': self.is_string,
'wind': self.is_wind,
}
def check_kwargs(self, accepted_args, kwargs):
''' Ensure a dictionary entry matches the kwargs accepted by a function.
'''
assert isinstance(kwargs, dict)
for key, args in kwargs.items():
lev = None
if '_' in key:
short_name, lev = key.split('_')
else:
short_name = key
assert self.is_a_key(short_name)
if lev:
assert self.cfg.get(short_name).get(lev) is not None
for arg in args.keys():
assert arg in accepted_args
return True
def check_transform(self, entry):
'''
Check that the transform entry is either a single transformation
function, a list of transformation functions, or a dictionary containing
the functions list and the kwargs list like so:
transform:
funcs: [list, of, functions]
kwargs:
first_arg: value
sec_arg: value
The functions listed under functions MUST be methods, not attributes!
'''
kwargs = dict()
# Check that each item listed is callable
if isinstance(entry, (list, str)):
assert self.is_callable(entry)
# If the transform entry is a dictionary, check that it has the
# appropriate contents
elif isinstance(entry, dict):
funcs = entry.get('funcs')
assert funcs is not None
# Make sure funcs is a list
funcs = funcs if isinstance(funcs, list) else [funcs]
# Key word arguments may not be present.
kwargs = entry.get('kwargs')
transforms = []
for func in funcs:
callables = self.get_callable(func)
callables = callables if isinstance(callables, list) else \
[callables]
transforms.extend(callables)
            # The argspecs bit gives us a list of every argument accepted by
            # the listed transform functions (all_params). If kwargs names an
            # argument that does not appear in all_params, a warning is raised
            # below.
if kwargs:
argspecs = [getfullargspec(func) for func in transforms if
callable(func)]
all_params = []
for argspec in argspecs:
# Make sure all functions accept key word arguments
assert argspec.varkw is not None
parameters = []
for argtype in [argspec.args, argspec.varargs, argspec.varkw]:
if argtype is not None:
parameters.extend(argtype)
all_params.extend(parameters)
for key in kwargs.keys():
if key not in all_params:
                        msg = f'Function key {key} is not an explicit parameter \
in any of the transforms: {funcs}!'
warnings.warn(msg, UserWarning)
return True
# pylint: disable=inconsistent-return-statements
def get_callable(self, func):
''' Return the callable function given a function name. '''
if func in dir(self.varspec):
return self.varspec.__getattribute__(func)
# Check datahandler.gribdata objects if a single word is provided
if len(func.split('.')) == 1:
funcs = []
for attr in dir(gribdata):
# pylint: disable=no-member
if func in dir(gribdata.__getattribute__(attr)):
funcs.append(gribdata.__getattribute__(attr).__dict__.get(func))
return funcs
if callable(utils.get_func(func)):
return utils.get_func(func)
        raise ValueError(f'{func} is not a known callable function!')
@staticmethod
def is_a_clev(clev):
''' Returns true for a clev that is a list, a range, or a callable function. '''
if isinstance(clev, (list, np.ndarray)):
return True
if 'range' in clev.split('[')[0]:
clean = lambda x: x.strip().split('-')[-1].replace('.', '1')
items = clev.split(' ', 1)[1].strip('[').strip(']').split(',')
nums = [clean(i).isnumeric() for i in items]
return all(nums)
return callable(utils.get_func(clev))
@staticmethod
def is_a_cmap(cmap):
''' Returns true for a cmap that is a Colormap object. '''
return cmap in dir(cm) + list(ctables.colortables.keys())
def is_a_contour_dict(self, entry):
''' Set up the accepted arguments for plt.contour, and check the given
arguments. '''
args = ['X', 'Y', 'Z', 'levels',
'corner_mask', 'colors', 'alpha', 'cmap', 'norm', 'vmin',
'vmax', 'origin', 'extent', 'locator', 'extend', 'xunits',
'yunits', 'antialiased', 'nchunk', 'linewidths', 'linestyles']
if entry is None:
return True
return self.check_kwargs(args, entry)
def is_a_contourf_dict(self, entry):
''' Set up the accepted arguments for plt.contourf, and check the given
arguments. '''
args = ['X', 'Y', 'Z', 'levels',
'corner_mask', 'colors', 'alpha', 'cmap', 'labels', 'norm', 'vmin',
'vmax', 'origin', 'extent', 'locator', 'extend', 'xunits',
'yunits', 'antialiased', 'nchunk', 'linewidths',
'hatches']
if entry is None:
return True
return self.check_kwargs(args, entry)
def is_a_color(self, color):
''' Returns true if color is contained in the list of recognized colors. '''
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS,
**ctables.colortables)
if color in colors.keys():
return True
if color in dir(self.varspec):
return True
return False
@staticmethod
def is_a_level(key):
'''
Returns true if the key fits one of the level descriptor formats.
Allowable formats include:
[str_descriptor] e.g. sfc, max, mup
[numeric][lev_type] e.g. 500mb, or 2m
[stat][numeric] e.g. mn02, mx25
'''
allowed_levels = [
'agl', # above ground level
'best', # Best
'bndylay', # boundary layer cld cover
'esbl', # ???
'esblmn', # ???
'high', # high clouds
'int', # vertical integral
'low', # low clouds
'max', # maximum in column
'maxsfc', # max surface value
'mdn', # maximum downward
'mid', # mid-level clouds
'mnsfc', # min surface value
'msl', # mean sea level
'mu', # most unstable
'mul', # most unstable layer
'mup', # maximum upward
'mu', # most unstable
'pw', # wrt precipitable water
'sat', # satellite
'sfc', # surface
'sfclt', # surface (less than)
'top', # nominal top of atmosphere
'total', # total clouds
'ua', # upper air
]
allowed_lev_type = [
'cm', # centimeters
'ds', # difference
'ft', # feet
'km', # kilometers
'm', # meters
'mm', # millimeters
'mb', # milibars
'sr', # storm relative
]
allowed_stat = [
'in', # ???
'ens', # ensemble
'm', # ???
'maxm', # ???
'mn', # minimum
'mx', # maximum
]
# Easy check first -- it is in the allowed_levels list
if key in allowed_levels:
return True
# Check for [numeric][lev_type] or [lev_type][numeric] pattern
# Numbers come at beginning or end, only
numeric = ''.join([c for c in key if c in digits + '.']) in key
# The level is allowed
level_str = [c for c in key if c in ascii_letters]
allowed = ''.join(level_str) in allowed_lev_type + allowed_stat
# Check the other direction - level string contains one of the allowed
# types.
if not allowed:
for lev in allowed_lev_type + allowed_stat:
if lev in level_str:
allowed = True
break
if numeric and allowed:
return True
return False
def is_a_key(self, key):
''' Returns true if key exists as a key in the config file. '''
return self.cfg.get(key) is not None
@staticmethod
def is_bool(k):
''' Returns true if k is a boolean variable. '''
return isinstance(k, bool)
def is_callable(self, funcs):
''' Returns true if func in funcs list is the name of a callable function. '''
funcs = funcs if isinstance(funcs, list) else [funcs]
callables = []
for func in funcs:
callable_ = self.get_callable(func)
callable_ = callable_ if isinstance(callable_, list) else [callable_]
for clbl in callable_:
if isinstance(clbl, np.ndarray):
callables.append(True)
elif callable(clbl):
callables.append(True)
else:
callables.append(False)
return all(callables)
@staticmethod
def is_dict(d):
''' Returns true if d is a dictionary '''
return isinstance(d, dict)
@staticmethod
def is_int(i):
''' Returns true if i is an integer. '''
if isinstance(i, int):
return True
return i.isnumeric() and len(i.split('.')) == 1
@staticmethod
def is_number(i):
''' Returns true if i is a number. '''
if isinstance(i, (int, float)):
return True
return i.isnumeric() and len(i.split('.')) <= 2
@staticmethod
def is_string(s):
''' Returns true if s is a string. '''
return isinstance(s, str)
def is_wind(self, wind):
''' Returns true if wind is a bool or is_a_level. '''
return isinstance(wind, bool) or self.is_a_level(wind)
def check_keys(self, d, depth=0):
''' Helper function that recursively checks the keys in the dictionary by calling the
function defined in allowable. '''
max_depth = 2
# Only proceed if d is a dictionary
if not isinstance(d, dict):
return
# Proceed only up to max depth.
if depth >= max_depth:
return
level = depth+1
for k, v in d.items():
# Check that the key is allowable
assert (k in self.allowable.keys()) or self.is_a_level(k)
# Call a checker if one exists for the key, otherwise descend into
# next level of dict
checker = self.allowable.get(k)
if checker:
if isinstance(checker, bool):
assert checker
else:
assert checker(v)
else:
if isinstance(v, dict):
self.check_keys(v, depth=level)
def test_keys(self):
''' Tests each of top-level variables in the config file by calling the helper function. '''
for short_name, spec in self.cfg.items():
assert '_' not in short_name
self.check_keys(spec)
| 30.138632
| 100
| 0.557068
| 13,231
| 0.811469
| 0
| 0
| 5,261
| 0.322662
| 0
| 0
| 5,369
| 0.329285
|
5a54ab45f8f150e828680b7baff870b193da03be
| 6,448
|
py
|
Python
|
ggpy/cruft/grammar.py
|
hobson/ggpy
|
4e6e6e876c3a4294cd711647051da2d9c1836b60
|
[
"MIT"
] | 1
|
2015-01-26T19:07:45.000Z
|
2015-01-26T19:07:45.000Z
|
ggpy/cruft/grammar.py
|
hobson/ggpy
|
4e6e6e876c3a4294cd711647051da2d9c1836b60
|
[
"MIT"
] | null | null | null |
ggpy/cruft/grammar.py
|
hobson/ggpy
|
4e6e6e876c3a4294cd711647051da2d9c1836b60
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# package: org.ggp.base.util.symbol.grammar
import re
import sys
import threading
class SymbolFormatException(Exception):
source = ''
def __init__(self, message, source):
super(SymbolFormatException, self).__init__(message)
self.source = source
def getSource(self):
return self.source
def __str__(self):
return "Improperly formatted symbolic expression: " + self.source
class Symbol(object):
def __str__(self):
pass
class SymbolAtom(Symbol):
def __init__(self, value=None):
super(SymbolAtom, self).__init__()
        self.value = sys.intern(value) if value is not None else ''
def getValue(self):
return self.value
def __str__(self):
return self.value
class SymbolList(Symbol): # odd that this is a derived class and not a container for Symbol objects
'''List container for Symbol objects (self.contents = [Symbol(), Symbol(), ...])
Java -> Python
size -> __len__
toString -> __str__
'''
def __init__(self, contents):
super(SymbolList, self).__init__()
self.contents = contents
def get(self, index):
""" generated source for method get """
        return self.contents[index]
def __len__(self):
return len(self.contents)
def __str__(self):
if self.contents:
return '( ' + ' '.join([str(sym) for sym in self.contents]) + ' )'
else:
return '( )'
class SymbolPool(object):
'''A pair of dicts/pools with a thread-safe add_key_value_if_absent() operations
Python dicts and lists are already single-operation (atomic) thread-safe
nonatomic operations for lists (L) and dicts (D) include:
i = i+1
L.append(L[-1])
L[i] = L[j]
D[x] = D[x] + 1
SymbolPool uses a lock (GIL?) to perform multiple operations on a dict/list thread-safely
Here's how you'd do the same for the D[x] operation above:
import threading
lock = threading.Lock()
lock.acquire()
D[x] = D[x] + 1
lock.release()
'''
# WARNING: mutable class attributes will be shared across instances!
atomPool = {}
listPool = {}
# `classmethod`s can be overridden by any classes that inherit SymbolPool
# and are shared among instances. otherwise they are the same as instance
# methods. `staticmethod`s are just non-global functions and don't need to access
# the class
@staticmethod
def addToPool(key, value, pool):
""" Add key-value to `dict` `pool` if `pool` does not yet have one for that key
value :: a list of Symbol objects (SymbolList)
pool :: a dictionary of atoms or symbol lists stored in this SymbolPool class
Sam says, "Even if you've checked to make sure that the pool doesn't contain the key,
you still shouldn't assume that this method actually inserts the given value, since
this class is accessed by multiple threads simultaneously."
@return the value associated with the key in the pool
"""
# added by HL to avoid the unthreadsafe behavior described by Sam above
lock = threading.Lock()
        lock.acquire()
prev_value = pool.get(key)
if prev_value is None:
pool[key] = value
lock.release()
return value
lock.release()
return prev_value
@classmethod
def getAtom(cls, value):
'''Add an atom to the atomPool if it isn't already there, return the value if there'''
ret = cls.atomPool.get(value)
if ret is None:
ret = cls.addToPool(value, SymbolAtom(value), cls.atomPool)
return ret
@classmethod
def getList(cls, contents):
"""contents is a SymbolList or list of symbols"""
ret = cls.listPool.get(contents)
if ret == None:
ret = cls.addToPool(contents, SymbolList(contents), cls.listPool)
return ret
# no need to overload in python just treat the Array like a List and it should just work!
# @classmethod
# @getList.register(object, Symbol)
# def getList_0(cls, contents):
# """ generated source for method getList_0 """
# return cls.getList(Arrays.asList(contents))
@classmethod
def drainPool(cls):
'''Drains the contents of the SymbolPool. Useful to control memory usage.
Sam says, "Once you've finished playing a large game, this should be safe to call
any time during gameplay. But my experiments indicate that SymbolPool
has a 97% cache hit rate during a game, so you likely only want to call
        this between games, because symbols from a previous game are unlikely to
        reappear in subsequent, unrelated games."
'''
cls.atomPool = dict()
cls.listPool = dict()
class SymbolFactory(object):
@classmethod
def create(cls, string):
        try:
            tokens = cls.lex(cls.preprocess(string))
            return cls.convert(tokens)
        except Exception as e:
            raise SymbolFormatException(str(e), string)
# Private, implementation-specific methods below here
@classmethod
def convert(cls, tokens):
""" generated source for method convert """
        if tokens[0] == "(":
            return cls.convertList(tokens)
        else:
            return cls.convertAtom(tokens)
@classmethod
def convertAtom(cls, tokens):
""" generated source for method convertAtom """
        return SymbolPool.getAtom(tokens.pop(0))
@classmethod
def convertList(cls, tokens):
""" generated source for method convertList """
        contents = []
        tokens.pop(0)  # drop the opening "("
        while tokens[0] != ")":
            contents.append(cls.convert(tokens))
        tokens.pop(0)  # drop the closing ")"
        return SymbolPool.getList(contents)
@classmethod
def lex(cls, string):
""" generated source for method lex """
        tokens = []
        for token in string.split(" "):
            tokens.append(token)
return tokens
@classmethod
def preprocess(cls, string):
""" generated source for method preprocess """
        string = string.replace("(", " ( ")
        string = string.replace(")", " ) ")
        string = re.sub(r"\s+", " ", string)
        string = string.strip()
return string
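# A rough usage sketch for the factory (the input string is hypothetical):
#
#   sym = SymbolFactory.create("(role robot)")
#
# preprocess() pads the parentheses, lex() splits on spaces, and convert()
# recursively builds SymbolAtom/SymbolList objects through SymbolPool. Note
# that SymbolPool.getList() keys its pool on the contents list itself, which is
# not hashable, so a hashable key (e.g. the string form) would be needed for
# the list pool to work in practice.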
| 32.079602
| 100
| 0.622519
| 6,346
| 0.984181
| 0
| 0
| 3,503
| 0.543269
| 0
| 0
| 3,163
| 0.49054
|
5a57e614d9b55b36163878bad041ba8ed0614d30
| 948
|
py
|
Python
|
cortical/models/context.py
|
npd15393/ResumeMiner
|
9644ae97aaad869c3739b2b7b92e4e5a6f857206
|
[
"BSD-2-Clause"
] | null | null | null |
cortical/models/context.py
|
npd15393/ResumeMiner
|
9644ae97aaad869c3739b2b7b92e4e5a6f857206
|
[
"BSD-2-Clause"
] | null | null | null |
cortical/models/context.py
|
npd15393/ResumeMiner
|
9644ae97aaad869c3739b2b7b92e4e5a6f857206
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
/*******************************************************************************
* Copyright (c) cortical.io GmbH. All rights reserved.
*
* This software is confidential and proprietary information.
* You shall use it only in accordance with the terms of the
* license agreement you entered into with cortical.io GmbH.
******************************************************************************/
"""
from cortical.models.fingerprint import Fingerprint
class Context(object):
def __init__(self, fingerprint=None, context_label=None, context_id=None):
#The semantic fingerprint representation of a context
self.fingerprint = Fingerprint(**fingerprint) if isinstance(fingerprint, dict) else fingerprint # Fingerprint
#The descriptive label of a context.
self.context_label = context_label # str
#The id of a context.
self.context_id = context_id # int
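# A hypothetical construction sketch; the field values are made up, and the
# fingerprint dict just needs whatever keyword arguments Fingerprint accepts.
#
#   ctx = Context(fingerprint={'positions': [1, 2, 3]},
#                 context_label='finance',
#                 context_id=0)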
| 43.090909
| 117
| 0.597046
| 448
| 0.472574
| 0
| 0
| 0
| 0
| 0
| 0
| 568
| 0.599156
|
5a58135dc9e13b466cba75e814598ea999f2751b
| 705
|
py
|
Python
|
COMP-2080/Week-11/knapRecursive.py
|
kbrezinski/Candidacy-Prep
|
f4610fb611e6300a7d657af124728d46a8659ba5
|
[
"BSD-3-Clause"
] | null | null | null |
COMP-2080/Week-11/knapRecursive.py
|
kbrezinski/Candidacy-Prep
|
f4610fb611e6300a7d657af124728d46a8659ba5
|
[
"BSD-3-Clause"
] | null | null | null |
COMP-2080/Week-11/knapRecursive.py
|
kbrezinski/Candidacy-Prep
|
f4610fb611e6300a7d657af124728d46a8659ba5
|
[
"BSD-3-Clause"
] | null | null | null |
# [weight, value]
I = [[4, 8], [4, 7], [6, 14]]
k = 8
def knapRecursive(I, k):
return knapRecursiveAux(I, k, len(I) - 1)
def knapRecursiveAux(I, k, hi):
# final element
if hi == 0:
# too big for sack
if I[hi][0] > k:
return 0
# fits
else:
return I[hi][1]
else:
# too big for sack
if I[hi][0] > k:
return knapRecursiveAux(I, k, hi - 1)
# fits
else:
# don't include it
s1 = knapRecursiveAux(I, k, hi - 1)
# include it
s2 = I[hi][1] + knapRecursiveAux(I, k - I[hi][0], hi - 1)
return max(s1, s2)
print(knapRecursive(I, k))
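# Worked example for the inputs above: with capacity k = 8, taking items 0 and 1
# (weights 4 + 4 = 8, values 8 + 7 = 15) beats taking item 2 alone (value 14),
# so this call prints 15.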
| 22.03125
| 69
| 0.455319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.161702
|
5a59bbf41d09d9b1b99e57b30f3e8db2c9734a9d
| 232
|
py
|
Python
|
digits/inference/__init__.py
|
PhysicsTeacher13/Digits-NVIDIA
|
80c08ed2b84d5d4eb4f1721ab30f3db2ce67690a
|
[
"BSD-3-Clause"
] | 111
|
2017-04-21T06:03:04.000Z
|
2021-04-26T06:36:54.000Z
|
digits/inference/__init__.py
|
PhysicsTeacher13/Digits-NVIDIA
|
80c08ed2b84d5d4eb4f1721ab30f3db2ce67690a
|
[
"BSD-3-Clause"
] | 6
|
2017-05-15T22:02:49.000Z
|
2018-03-16T10:25:26.000Z
|
digits/inference/__init__.py
|
PhysicsTeacher13/Digits-NVIDIA
|
80c08ed2b84d5d4eb4f1721ab30f3db2ce67690a
|
[
"BSD-3-Clause"
] | 40
|
2017-04-21T07:04:16.000Z
|
2019-11-14T14:20:32.000Z
|
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .images import ImageInferenceJob
from .job import InferenceJob
__all__ = [
'InferenceJob',
'ImageInferenceJob',
]
| 21.090909
| 63
| 0.762931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 96
| 0.413793
|
5a5a27414d864ca463175f98377b3d5b7fff1510
| 3,592
|
py
|
Python
|
homework/homework 21.py
|
CoderLoveMath/Jeju-IOSEFTGS-python
|
0efe26e3840817197c1584ac4cf90d35c3699988
|
[
"FSFAP"
] | null | null | null |
homework/homework 21.py
|
CoderLoveMath/Jeju-IOSEFTGS-python
|
0efe26e3840817197c1584ac4cf90d35c3699988
|
[
"FSFAP"
] | null | null | null |
homework/homework 21.py
|
CoderLoveMath/Jeju-IOSEFTGS-python
|
0efe26e3840817197c1584ac4cf90d35c3699988
|
[
"FSFAP"
] | null | null | null |
# Import a library of functions called 'pygame'
import pygame
# Initialize the game engine
pygame.init()
# Define the colors we will use in RGB format
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
# Set the height and width of the screen
size = [491, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Heron's Note")
# Loop until the user clicks the close button.
done = False
clock = pygame.time.Clock()
scene_count = 0
while not done:
scene_count += 0.1
clock.tick(10)
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
screen.fill(WHITE)
screen.blit(pygame.image.load('bg.png'), pygame.image.load('bg.png').get_rect())
font = pygame.font.Font('font.ttf', 80) # font setting
    title = font.render("헤론의 노트", True, (28, 0, 0))  # "Heron's Note"
    font = pygame.font.Font('font.ttf', 20) # font setting
    subtitle = font.render("헤론의 공식을 알아보자!", True, (28, 0, 0))  # "Let's explore Heron's formula!"
screen.blit(title, (120, 50))
screen.blit(subtitle, (170, 150))
pygame.draw.polygon(screen, BLACK, [[120, 400], [245.5, 200], [371, 400]], 5)
if scene_count < 3:
font = pygame.font.Font('font.ttf', 40)
text = font.render("다음과 같은 삼각형이 있습니다.", True, (28, 0, 0))
screen.blit(text, (50, 500))
elif scene_count < 6:
font = pygame.font.Font('font.ttf', 40)
text = font.render("삼각형의 변 길이는 다음과 같습니다.", True, (28, 0, 0))
screen.blit(text, (30, 500))
elif scene_count < 9:
font = pygame.font.Font('font.ttf', 40)
text = font.render("3", True, (28, 0, 0))
screen.blit(text, (250, 500))
elif scene_count < 10:
font = pygame.font.Font('font.ttf', 40)
text = font.render("3, 14", True, (28, 0, 0))
screen.blit(text, (250, 500))
elif scene_count < 13:
font = pygame.font.Font('font.ttf', 40)
text = font.render("3, 14, 15", True, (28, 0, 0))
screen.blit(text, (200, 500))
elif scene_count < 15:
font = pygame.font.Font('font.ttf', 40)
text = font.render("세 변의 합의 절반을 s라 하면", True, (28, 0, 0))
screen.blit(text, (70, 500))
elif scene_count < 18:
font = pygame.font.Font('font.ttf', 30)
        text = font.render("넓이는 s(s-3)(s-14)(s-15)의 제곱근이 됩니다.", True, (28, 0, 0))  # "The area is the square root of s(s-3)(s-14)(s-15)."
screen.blit(text, (70, 500))
elif scene_count < 21:
font = pygame.font.Font('font.ttf', 30)
text = font.render("계산 결과, 20.4가 넓이가 됨을 알 수 있습니다.", True, (28, 0, 0))
screen.blit(text, (40, 500))
elif scene_count < 23:
font = pygame.font.Font('font.ttf', 30)
text = font.render("일반화 시켜보면,", True, (28, 0, 0))
screen.blit(text, (200, 500))
elif scene_count < 26:
font = pygame.font.Font('font.ttf', 30)
text = font.render("변의 길이인 a, b, c로 이루어진 삼각형의 넓이는", True, (28, 0, 0))
screen.blit(text, (40, 500))
else:
font = pygame.font.Font('font.ttf', 30)
prev_text = font.render("변의 길이인 a, b, c로 이루어진 삼각형의 넓이는", True, (28, 0, 0))
screen.blit(prev_text, (40, 450))
font = pygame.font.Font('font.ttf', 40)
text = font.render("s(s-a)(s-b)(s-c)의 제곱근입니다", True, (28, 0, 0))
font = pygame.font.Font('font.ttf', 30)
subtext = font.render("(단, s = (a+b+c) / 2)", True, (28, 0, 0))
screen.blit(text, (40, 500))
screen.blit(subtext, (200, 550))
pygame.display.flip()
# Be IDLE friendly
pygame.quit()
| 35.92
| 84
| 0.578786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,069
| 0.277518
|
5a5bfad53218db468fff1b6bf7d577e4b9d5e32d
| 2,929
|
py
|
Python
|
pyte/ops/for_.py
|
Fuyukai/Pyte
|
7ef04938d80f8b646bd73d976ac9787a5b88edd9
|
[
"MIT"
] | 2
|
2020-01-10T22:08:38.000Z
|
2021-06-21T15:34:47.000Z
|
pyte/ops/for_.py
|
Fuyukai/Pyte
|
7ef04938d80f8b646bd73d976ac9787a5b88edd9
|
[
"MIT"
] | 6
|
2016-04-17T21:28:14.000Z
|
2016-08-24T02:14:01.000Z
|
pyte/ops/for_.py
|
SunDwarf/Pyte
|
7ef04938d80f8b646bd73d976ac9787a5b88edd9
|
[
"MIT"
] | null | null | null |
from pyte import tokens, util
from pyte.superclasses import _PyteAugmentedValidator, _PyteOp
from pyte.util import PY36
class FOR_LOOP(_PyteOp):
"""
Represents a for loop.
"""
def __init__(self, iterator: _PyteAugmentedValidator, body: list):
"""
        Represents a for operator.

        :param iterator: A :class:`.PyteAugmentedValidator` that represents the
            iterable, i.e. a saved value that can be iterated over (such as a
            saved list).
        :param body: A list of instructions to execute on each loop, similarly
            to IF.
"""
self.iterator = iterator
self._body = list(util.flatten(body))
def to_bytes_35(self, previous: bytes):
"""
A to-bytes specific to Python 3.5 and below.
"""
# Calculations ahead.
bc = b""
# Calculate the length of the iterator.
it_bc = util.generate_bytecode_from_obb(self.iterator, previous)
bc += it_bc
# Push a get_iter on.
bc += util.generate_bytecode_from_obb(tokens.GET_ITER, b"")
prev_len = len(previous) + len(bc)
# Calculate the bytecode for the body.
body_bc = b""
for op in self._body:
# Add padding bytes to the bytecode to allow if blocks to work.
padded_bc = previous
# Add padding for SETUP_LOOP
padded_bc += b"\x00\x00\x00"
padded_bc += bc
# Add padding for FOR_ITER
padded_bc += b"\x00\x00\x00"
# Add previous body
padded_bc += body_bc
body_bc += util.generate_bytecode_from_obb(op, padded_bc)
# Add a JUMP_ABSOLUTE
body_bc += util.generate_simple_call(tokens.JUMP_ABSOLUTE, prev_len + 3)
# Add a POP_TOP
body_bc += util.generate_bytecode_from_obb(tokens.POP_BLOCK, b"")
# Calculate the right lengths.
# Add a FOR_ITER, using len(body_bc)
body_bc = util.generate_simple_call(tokens.FOR_ITER, len(body_bc) - 1) + body_bc
# Add the SETUP_LOOP call
bc = util.generate_simple_call(tokens.SETUP_LOOP, prev_len + len(body_bc) - 6) + bc + body_bc
return bc
def to_bytes_36(self, previous: bytes):
"""
A to-bytes specific to Python 3.6 and above.
"""
# Calculations ahead.
bc = b""
# Calculate the length of the iterator.
it_bc = util.generate_bytecode_from_obb(self.iterator, previous)
bc += it_bc
bc += util.ensure_instruction(tokens.GET_ITER)
def to_bytes(self, previous: bytes):
# Python 3.6 has slightly different behaviour
if PY36:
return self.to_bytes_36(previous)
else:
return self.to_bytes_35(previous)
| 31.494624
| 101
| 0.609082
| 2,805
| 0.957665
| 0
| 0
| 0
| 0
| 0
| 0
| 1,170
| 0.399454
|
5a5cd7e8aa4acb388f0ef7bcdc817349add0a810
| 1,212
|
py
|
Python
|
web/hottubapi.py
|
pwschuurman/hottub_controller
|
be9faeabcaf9f5bb7aba3ec03eba60276b27cf80
|
[
"MIT"
] | 1
|
2020-06-03T18:32:50.000Z
|
2020-06-03T18:32:50.000Z
|
web/hottubapi.py
|
pwschuurman/hottub_controller
|
be9faeabcaf9f5bb7aba3ec03eba60276b27cf80
|
[
"MIT"
] | null | null | null |
web/hottubapi.py
|
pwschuurman/hottub_controller
|
be9faeabcaf9f5bb7aba3ec03eba60276b27cf80
|
[
"MIT"
] | null | null | null |
from gpioapi import GpioAPI
import rx
from rx import operators as op
MAX_TEMP = 38
COOL_TEMP = 30
class HotTubAPI:
def __init__(self):
self.gpioapi = GpioAPI(None)
def transmissions(self):
return self.gpioapi.transmission_subject
def heat_up(self):
reached_max_temp = self.gpioapi.transmission_subject.pipe(
op.filter(lambda x: x.set_point() is not None and x.set_point() >= MAX_TEMP)
)
# Press the temp-up button until reached max temp
rx.interval(1.0).pipe(
op.timeout(15.0),
op.take_until(reached_max_temp)
        ).subscribe(lambda _: self.press_temp_up_button())
def cool_down(self):
reached_cool_temp = self.gpioapi.transmission_subject.pipe(
op.filter(lambda x: x.set_point() <= COOL_TEMP)
)
# Press the temp-up button until reached cool temp
rx.interval(1.0).pipe(
op.timeout(15.0),
op.take_until(reached_cool_temp)
        ).subscribe(lambda _: self.press_temp_down_button())
def press_light_button(self):
self.gpioapi.light_button.press()
def press_pump_button(self):
self.gpioapi.pump_button.press()
def press_temp_down_button(self):
self.gpioapi.temp_down_button.press()
def press_temp_up_button(self):
self.gpioapi.temp_up_button.press()
| 24.734694
| 82
| 0.710396
| 1,141
| 0.941419
| 0
| 0
| 0
| 0
| 0
| 0
| 99
| 0.081683
|
5a5e3e187f9834c9b5e31410232316fcaa6ec9f3
| 7,711
|
py
|
Python
|
src/biocluster_pipeline.py
|
zocean/Norma
|
4c45c1540f7d7d13f9b71a6772044d3772a451f8
|
[
"MIT"
] | 1
|
2020-02-17T22:59:46.000Z
|
2020-02-17T22:59:46.000Z
|
src/biocluster_pipeline.py
|
zocean/Norma
|
4c45c1540f7d7d13f9b71a6772044d3772a451f8
|
[
"MIT"
] | null | null | null |
src/biocluster_pipeline.py
|
zocean/Norma
|
4c45c1540f7d7d13f9b71a6772044d3772a451f8
|
[
"MIT"
] | 2
|
2020-02-24T02:54:04.000Z
|
2020-07-07T22:16:35.000Z
|
#!/usr/bin/python
# Programmer : Yang Zhang
# Contact: zocean636@gmail.com
# Last-modified: 24 Jan 2019 15:20:08
import os,sys,argparse
def parse_arg():
''' This Function Parse the Argument '''
p=argparse.ArgumentParser( description = 'Example: %(prog)s -h', epilog='Library dependency :')
p.add_argument('-v','--version',action='version',version='%(prog)s 0.1')
p.add_argument('--conf',type=str,dest="conf",help="configure file")
p.add_argument('--dry_run',dest="dry_run",action="store_true",help="set this parameter if just want to test environment. No real script will be procssed")
if len(sys.argv) < 2:
        p.print_help()
exit(1)
return p.parse_args()
class Run(object):
def __init__(self):
# global parameter
self.genome_size = None
self.bowtie2_index = None
self.wig2bigwig = None
self.norma = None
# tool specific parameter
self.bowtie_opt = None
self.norma_opt = None
# experiment specific parameter
self.exp_name = None
self.fastq_pulldown = None
self.fastq_input = None
self.label_pulldown = None
self.label_input = None
# output file
self.output_folder = None
self.out_bam_pulldown = None
self.out_bam_input = None
self.out_bowtie_log_pulldown = None
self.out_bowtie_log_input = None
self.out_bam_pulldown_rmdup = None
self.out_bam_input_rmdup = None
self.out_norma_output = None
self.out_norma_log = None
def build(self, conf):
# check required parameters
for parameter in ['genome_size', 'bowtie2_index', 'wig2bigwig', 'norma', 'exp_name', 'fastq_pulldown', 'fastq_input', 'label_pulldown', 'label_input', 'output_folder']:
if conf.get(parameter, None) is None:
print >>sys.stderr, "%s parameter not found" % (parameter)
exit(1)
# run initiation
self.genome_size = conf['genome_size']
self.bowtie2_index = conf['bowtie2_index']
self.wig2bigwig = conf['wig2bigwig']
self.norma = conf['norma']
if conf.get('bowtie_opt', None) is not None and conf['bowtie_opt'] != "":
self.bowtie_opt = conf['bowtie_opt']
if conf.get('norma_opt', None) is not None and conf['norma_opt'] != "":
self.norma_opt = conf['norma_opt']
self.exp_name = conf['exp_name']
self.fastq_pulldown = conf['fastq_pulldown'].split(',')
self.fastq_input = conf['fastq_input'].split(',')
self.label_pulldown = conf['label_pulldown']
self.label_input = conf['label_input']
# output
self.output_folder = conf['output_folder']
if not os.path.isdir(self.output_folder):
os.makedirs(self.output_folder)
self.out_bam_pulldown = os.path.join(self.output_folder, '%s.bam' % (self.label_pulldown))
self.out_bam_input = os.path.join(self.output_folder, '%s.bam' % (self.label_input))
self.out_log_bowtie_pulldown = os.path.join(self.output_folder, 'log_bowtie_%s.txt' % (self.label_pulldown))
self.out_log_bowtie_input = os.path.join(self.output_folder, 'log_bowtie_%s.txt' % (self.label_input))
self.out_bam_pulldown_rmdup = os.path.join(self.output_folder, '%s.rmdup.bam' % (self.label_pulldown))
self.out_bam_input_rmdup = os.path.join(self.output_folder, '%s.rmdup.bam' % (self.label_input))
self.out_norma_output = os.path.join(self.output_folder, self.exp_name)
self.out_log_norma = os.path.join(self.output_folder, 'log_norma_%s' % (self.exp_name))
def pipeline(self, dry_run = False):
#
print >>sys.stderr, "# Start Norma pipeline for experiment: %s" % (self.exp_name)
print >>sys.stderr, "# Step 1: Align the pulldown fastq to the reference genome"
cmd = self.__run_bowtie(self.bowtie2_index, self.fastq_pulldown, self.bowtie_opt, self.out_bam_pulldown, self.out_log_bowtie_pulldown)
if dry_run:
print >>sys.stderr, cmd
else:
os.system(cmd)
print >>sys.stderr, "# Step 1: Alignment done: check %s for running log" % (self.out_log_bowtie_pulldown)
print >>sys.stderr, ""
#
print >>sys.stderr, "# Step 2: PCR duplicates removal for pulldown"
cmd = self.__run_rmdup(self.out_bam_pulldown, self.out_bam_pulldown_rmdup)
if dry_run:
print >>sys.stderr, cmd
else:
os.system(cmd)
print >>sys.stderr, ""
#
print >>sys.stderr, "# Step 3: Align the input fastq to the reference genome"
cmd = self.__run_bowtie(self.bowtie2_index, self.fastq_input, self.bowtie_opt, self.out_bam_input, self.out_log_bowtie_input)
if dry_run:
print >>sys.stderr, cmd
else:
os.system(cmd)
        print >>sys.stderr, "# Step 3: Alignment done: check %s for running log" % (self.out_log_bowtie_input)
print >>sys.stderr, ""
#
print >>sys.stderr, "# Step 4: PCR duplicates removal for input"
cmd = self.__run_rmdup(self.out_bam_input, self.out_bam_input_rmdup)
if dry_run:
print >>sys.stderr, cmd
else:
os.system(cmd)
print >>sys.stderr, ""
#
print >>sys.stderr, "# Step 5: Run Norma to get the TSA-seq signal"
cmd = self.__run_norma(self.norma, self.out_bam_pulldown_rmdup, self.out_bam_input_rmdup, self.out_norma_output, self.out_log_norma, self.norma_opt, self.wig2bigwig, self.genome_size)
if dry_run:
print >>sys.stderr, cmd
else:
os.system(cmd)
print >>sys.stderr, "# Step 5: Norma done: check %s for running log" % (self.out_log_norma)
def __run_bowtie(self, genome_index, fastq_list, other_opt, output_file, log_file):
if other_opt is not None:
cmd = "bowtie2 %s -x %s -U %s 2>%s | samtools view -S -bh - | samtools sort -o %s" % (other_opt, genome_index, ' '.join(fastq_list), log_file, output_file)
else:
            cmd = "bowtie2 -x %s -U %s 2>%s | samtools view -S -bh - | samtools sort -o %s" % (genome_index, ' '.join(fastq_list), log_file, output_file)
cmd += '\n' + "samtools index %s" % (output_file)
return cmd
def __run_rmdup(self, input_bam, output_bam):
cmd = "samtools rmdup -s %s %s" % (input_bam, output_bam)
cmd += '\n' + "samtools index %s" % (output_bam)
return cmd
def __run_norma(self, norma_script, pulldown_bam, input_bam, output, log, other_opt, wig2bigiwg, genome_size):
if other_opt is not None:
cmd = "%s %s -g %s -e %s -c %s --wig2bw %s -o %s 2>&1 >%s" % (norma_script, other_opt, genome_size, pulldown_bam, input_bam, wig2bigiwg, output, log)
else:
            cmd = "%s -g %s -e %s -c %s --wig2bw %s -o %s 2>&1 >%s" % (norma_script, genome_size, pulldown_bam, input_bam, wig2bigiwg, output, log)
return cmd
def parse_conf(filename):
fin = open(filename, 'r')
table = {}
for line in fin:
if line.strip().startswith('#') or line.strip() == '':
continue
row = line.strip().split('=')
table[row[0].strip()] = row[1].strip()
fin.close()
return table
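# A hypothetical configure file for parse_conf(): one "key = value" pair per
# line, with '#' comments and blank lines ignored. The paths below are
# placeholders; the required keys mirror the list checked in Run.build().
#
#   genome_size    = /path/to/hg38.chrom.sizes
#   bowtie2_index  = /path/to/bowtie2_index/hg38
#   wig2bigwig     = /path/to/wigToBigWig
#   norma          = /path/to/norma.py
#   exp_name       = TSA_test
#   fastq_pulldown = pulldown_R1.fastq.gz,pulldown_R2.fastq.gz
#   fastq_input    = input_R1.fastq.gz,input_R2.fastq.gz
#   label_pulldown = pulldown
#   label_input    = input
#   output_folder  = ./results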
def main():
global args
args = parse_arg()
# parse the configure table
conf = parse_conf(args.conf)
print >>sys.stderr, "# parse parameters done"
# build Run
TSA_seq_run = Run()
TSA_seq_run.build(conf)
print >>sys.stderr, "# build run done"
# run
print >>sys.stderr, "# run pipeline"
TSA_seq_run.pipeline(args.dry_run)
if __name__=="__main__":
main()
| 46.451807
| 191
| 0.625989
| 6,299
| 0.816885
| 0
| 0
| 0
| 0
| 0
| 0
| 1,865
| 0.241862
|
5a5e3edccfdfe1e9cbd18ca904e258b6b8bd5b04
| 5,404
|
py
|
Python
|
env/lib/python3.5/site-packages/cartopy/tests/test_shapereader.py
|
project-pantheon/pantheon_glob_planner
|
c0d50a53b36c4678192ec75ad7a4cd68c570daef
|
[
"BSD-3-Clause"
] | null | null | null |
env/lib/python3.5/site-packages/cartopy/tests/test_shapereader.py
|
project-pantheon/pantheon_glob_planner
|
c0d50a53b36c4678192ec75ad7a4cd68c570daef
|
[
"BSD-3-Clause"
] | null | null | null |
env/lib/python3.5/site-packages/cartopy/tests/test_shapereader.py
|
project-pantheon/pantheon_glob_planner
|
c0d50a53b36c4678192ec75ad7a4cd68c570daef
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import os.path
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
import cartopy.io.shapereader as shp
@pytest.mark.natural_earth
class TestLakes(object):
def setup_class(self):
LAKES_PATH = os.path.join(os.path.dirname(__file__),
'lakes_shapefile', 'ne_110m_lakes.shp')
self.reader = shp.Reader(LAKES_PATH)
names = [record.attributes['name'] for record in self.reader.records()]
# Choose a nice small lake
print([name for name in names if 'Nicaragua' in name])
self.lake_name = 'Lago de\rNicaragua'
self.lake_index = names.index(self.lake_name)
self.test_lake_geometry = \
list(self.reader.geometries())[self.lake_index]
self.test_lake_record = list(self.reader.records())[self.lake_index]
def test_geometry(self):
lake_geometry = self.test_lake_geometry
assert lake_geometry.type == 'MultiPolygon'
assert len(lake_geometry) == 1
polygon = lake_geometry[0]
expected = np.array([(-84.85548682324658, 11.147898667846633),
(-85.29013729525353, 11.176165676310276),
(-85.79132117383625, 11.509737046754324),
(-85.8851655748783, 11.900100816287136),
(-85.5653401354239, 11.940330918826362),
(-85.03684526237491, 11.5216484643976),
(-84.85548682324658, 11.147898667846633),
(-84.85548682324658, 11.147898667846633)])
assert_array_almost_equal(expected, polygon.exterior.coords)
assert len(polygon.interiors) == 0
def test_record(self):
lake_record = self.test_lake_record
assert lake_record.attributes.get('name') == self.lake_name
expected = sorted(['admin', 'featurecla', 'min_label', 'min_zoom',
'name', 'name_alt', 'scalerank'])
actual = sorted(lake_record.attributes.keys())
assert actual == expected
assert lake_record.geometry == self.test_lake_geometry
def test_bounds(self):
# tests that a file which has a record with a bbox can
# use the bbox without first creating the geometry
record = next(self.reader.records())
assert not record._geometry, \
'The geometry was loaded before it was needed.'
assert len(record._bounds) == 4
assert record._bounds == record.bounds
assert not record._geometry, \
'The geometry was loaded in order to create the bounds.'
@pytest.mark.natural_earth
class TestRivers(object):
def setup_class(self):
RIVERS_PATH = shp.natural_earth(resolution='110m',
category='physical',
name='rivers_lake_centerlines')
self.reader = shp.Reader(RIVERS_PATH)
names = [record.attributes['name'] for record in self.reader.records()]
# Choose a nice small river
self.river_name = 'Peace'
self.river_index = names.index(self.river_name)
self.test_river_geometry = \
list(self.reader.geometries())[self.river_index]
self.test_river_record = list(self.reader.records())[self.river_index]
def test_geometry(self):
geometry = self.test_river_geometry
assert geometry.type == 'MultiLineString'
assert len(geometry) == 1
linestring = geometry[0]
coords = linestring.coords
assert round(abs(coords[0][0] - -124.83563045947423), 7) == 0
assert round(abs(coords[0][1] - 56.75692352968272), 7) == 0
assert round(abs(coords[1][0] - -124.20045039940291), 7) == 0
assert round(abs(coords[1][1] - 56.243492336646824), 7) == 0
def test_record(self):
records = list(self.reader.records())
assert len(records) == len(self.reader)
# Choose a nice small lake
river_record = records[self.river_index]
expected_attributes = {'featurecla': 'River',
'min_label': 3.1,
'min_zoom': 2.1,
'name': self.river_name,
'name_en': self.river_name,
'scalerank': 2}
for key, value in river_record.attributes.items():
if key in expected_attributes:
assert value == expected_attributes[key]
assert river_record.geometry == self.test_river_geometry
| 42.21875
| 79
| 0.622687
| 4,405
| 0.815137
| 0
| 0
| 4,459
| 0.82513
| 0
| 0
| 1,298
| 0.240192
|
5a5f41145e46fd5342cd880863fcd045e36493b6
| 268
|
py
|
Python
|
inmembrane/plugins/__init__.py
|
pansapiens/inmembrane
|
382eee3b2bacc9c567f65d7c48f1ddf9a86c253c
|
[
"BSD-2-Clause"
] | 4
|
2015-03-09T02:08:34.000Z
|
2021-02-06T13:52:21.000Z
|
inmembrane/plugins/__init__.py
|
pansapiens/inmembrane
|
382eee3b2bacc9c567f65d7c48f1ddf9a86c253c
|
[
"BSD-2-Clause"
] | 5
|
2015-01-29T03:36:04.000Z
|
2021-12-08T07:20:42.000Z
|
inmembrane/plugins/__init__.py
|
pansapiens/inmembrane
|
382eee3b2bacc9c567f65d7c48f1ddf9a86c253c
|
[
"BSD-2-Clause"
] | 6
|
2015-03-09T02:08:43.000Z
|
2021-06-07T17:33:16.000Z
|
# This little bit of magic fills the __all__ list
# with every plugin name, and means that calling:
# from plugins import *
# within inmembrane.py will import every plugin
import pkgutil
__all__ = []
for p in pkgutil.iter_modules(__path__):
__all__.append(p[1])
| 26.8
| 50
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 169
| 0.630597
|
5a6488350ce9cd310eada5196eabccb1e9f79524
| 1,984
|
py
|
Python
|
dvc/output/__init__.py
|
amjadsaadeh/dvc
|
f405168619c2bb85430c4ded2585b57ebfd01bd7
|
[
"Apache-2.0"
] | null | null | null |
dvc/output/__init__.py
|
amjadsaadeh/dvc
|
f405168619c2bb85430c4ded2585b57ebfd01bd7
|
[
"Apache-2.0"
] | null | null | null |
dvc/output/__init__.py
|
amjadsaadeh/dvc
|
f405168619c2bb85430c4ded2585b57ebfd01bd7
|
[
"Apache-2.0"
] | null | null | null |
import schema
from dvc.exceptions import DvcException
from dvc.config import Config
from dvc.dependency import SCHEMA, urlparse
from dvc.dependency.base import DependencyBase
from dvc.output.s3 import OutputS3
from dvc.output.gs import OutputGS
from dvc.output.local import OutputLOCAL
from dvc.output.hdfs import OutputHDFS
from dvc.output.ssh import OutputSSH
from dvc.remote import Remote
OUTS = [OutputHDFS, OutputS3, OutputGS, OutputSSH, OutputLOCAL]
OUTS_MAP = {'hdfs': OutputHDFS,
's3': OutputS3,
'gs': OutputGS,
'ssh': OutputSSH,
'': OutputLOCAL}
SCHEMA[schema.Optional(OutputLOCAL.PARAM_CACHE)] = bool
SCHEMA[schema.Optional(OutputLOCAL.PARAM_METRIC)] = OutputLOCAL.METRIC_SCHEMA
def _get(stage, p, info, cache, metric):
parsed = urlparse(p)
if parsed.scheme == 'remote':
name = Config.SECTION_REMOTE_FMT.format(parsed.netloc)
sect = stage.project.config._config[name]
remote = Remote(stage.project, sect)
return OUTS_MAP[remote.scheme](stage,
p,
info,
cache=cache,
remote=remote,
metric=metric)
for o in OUTS:
if o.supported(p):
return o(stage, p, info, cache=cache, remote=None, metric=metric)
raise DvcException('Output \'{}\' is not supported'.format(p))
def loadd_from(stage, d_list):
ret = []
for d in d_list:
p = d.pop(DependencyBase.PARAM_PATH)
cache = d.pop(OutputLOCAL.PARAM_CACHE, True)
metric = d.pop(OutputLOCAL.PARAM_METRIC, False)
ret.append(_get(stage, p, info=d, cache=cache, metric=metric))
return ret
def loads_from(stage, s_list, use_cache=True, metric=False):
ret = []
for s in s_list:
ret.append(_get(stage, s, info={}, cache=use_cache, metric=metric))
return ret
| 31.492063
| 77
| 0.621472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.030746
|
5a65af496e71e8ad9c61c888ed0b5d903da6928e
| 343
|
py
|
Python
|
company_logo.py
|
DomirScire/HackerRank_answers
|
0432185a472aeae7062cf4e406d0e7a5ed2cc979
|
[
"MIT"
] | 1
|
2021-03-19T13:05:16.000Z
|
2021-03-19T13:05:16.000Z
|
company_logo.py
|
DomirScire/HackerRank_answers
|
0432185a472aeae7062cf4e406d0e7a5ed2cc979
|
[
"MIT"
] | null | null | null |
company_logo.py
|
DomirScire/HackerRank_answers
|
0432185a472aeae7062cf4e406d0e7a5ed2cc979
|
[
"MIT"
] | null | null | null |
# DomirScire
import math
import os
import random
import re
import sys
import collections
if __name__ == '__main__':
s = sorted(input().strip())
s_counter = collections.Counter(s).most_common()
s_counter = sorted(s_counter, key=lambda x: (x[1] * -1, x[0]))
for i in range(0, 3):
print(s_counter[i][0], s_counter[i][1])
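# Worked example: for the input "aabbbccde" the counter gives b:3, a:2, c:2,
# d:1, e:1; after sorting by (-count, character) the script prints:
#   b 3
#   a 2
#   c 2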
| 22.866667
| 66
| 0.661808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.06414
|
5a65da8fa8ec5fbb64d2b18d96b4bb40c2a9a8c1
| 2,600
|
py
|
Python
|
ltr/models/loss/kl_regression.py
|
Jee-King/ICCV2021_Event_Frame_Tracking
|
ea86cdd331748864ffaba35f5efbb3f2a02cdb03
|
[
"MIT"
] | 15
|
2021-08-31T13:32:12.000Z
|
2022-03-24T01:55:41.000Z
|
ltr/models/loss/kl_regression.py
|
Jee-King/ICCV2021_Event_Frame_Tracking
|
ea86cdd331748864ffaba35f5efbb3f2a02cdb03
|
[
"MIT"
] | 2
|
2022-01-13T12:53:29.000Z
|
2022-03-31T08:14:42.000Z
|
ltr/models/loss/kl_regression.py
|
Jee-King/ICCV2021_Event_Frame_Tracking
|
ea86cdd331748864ffaba35f5efbb3f2a02cdb03
|
[
"MIT"
] | 2
|
2021-11-08T16:27:16.000Z
|
2021-12-08T14:24:27.000Z
|
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
class KLRegression(nn.Module):
"""KL-divergence loss for probabilistic regression.
It is computed using Monte Carlo (MC) samples from an arbitrary distribution."""
def __init__(self, eps=0.0):
super().__init__()
self.eps = eps
def forward(self, scores, sample_density, gt_density, mc_dim=-1):
"""Args:
scores: predicted score values
sample_density: probability density of the sample distribution
gt_density: probability density of the ground truth distribution
mc_dim: dimension of the MC samples"""
exp_val = scores - torch.log(sample_density + self.eps)
L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim]) - \
torch.mean(scores * (gt_density / (sample_density + self.eps)), dim=mc_dim)
return L.mean()
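# In symbols, the forward pass above evaluates the Monte Carlo estimate
#   L = log( (1/N) * sum_i exp(s_i) / q(y_i) ) - (1/N) * sum_i s_i * p(y_i) / q(y_i)
# where s_i are the predicted scores, q is the sample density, p is the ground
# truth density and N = scores.shape[mc_dim]; the logsumexp term minus log N is
# the first summand and the weighted mean is the second.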
class MLRegression(nn.Module):
"""Maximum likelihood loss for probabilistic regression.
It is computed using Monte Carlo (MC) samples from an arbitrary distribution."""
def __init__(self, eps=0.0):
super().__init__()
self.eps = eps
def forward(self, scores, sample_density, gt_density=None, mc_dim=-1):
"""Args:
scores: predicted score values. First sample must be ground-truth
sample_density: probability density of the sample distribution
gt_density: not used
mc_dim: dimension of the MC samples. Only mc_dim=1 supported"""
assert mc_dim == 1
assert (sample_density[:,0,...] == -1).all()
exp_val = scores[:, 1:, ...] - torch.log(sample_density[:, 1:, ...] + self.eps)
L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim] - 1) - scores[:, 0, ...]
loss = L.mean()
return loss
class KLRegressionGrid(nn.Module):
"""KL-divergence loss for probabilistic regression.
It is computed using the grid integration strategy."""
def forward(self, scores, gt_density, grid_dim=-1, grid_scale=1.0):
"""Args:
scores: predicted score values
gt_density: probability density of the ground truth distribution
grid_dim: dimension(s) of the grid
grid_scale: area of one grid cell"""
score_corr = grid_scale * torch.sum(scores * gt_density, dim=grid_dim)
L = torch.logsumexp(scores, dim=grid_dim) + math.log(grid_scale) - score_corr
return L.mean()
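# --- Editorial smoke-test sketch (shapes and values are assumptions, not taken
# from the repository) ---
# Shows the expected calling convention for KLRegression: mc_dim selects the
# dimension that holds the Monte Carlo samples.
if __name__ == "__main__":
    batch, n_samples = 4, 128
    scores = torch.randn(batch, n_samples)                  # predicted log-scores at the samples
    sample_density = torch.rand(batch, n_samples) + 1e-3    # q(y) evaluated at the samples
    gt_density = torch.rand(batch, n_samples)                # p(y) evaluated at the samples
    loss = KLRegression()(scores, sample_density, gt_density, mc_dim=1)
    print(loss.item())  # single scalar loss value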
| 36.619718
| 106
| 0.627308
| 2,494
| 0.959231
| 0
| 0
| 0
| 0
| 0
| 0
| 1,150
| 0.442308
|
5a6600ba347d74c16e50529d4d48201c7ed9b11e
| 2,478
|
py
|
Python
|
custom/mixins.py
|
luoyangC/django_template
|
e2fec854e2ba028b1d1981053b5398c21b9f9a25
|
[
"Apache-2.0"
] | null | null | null |
custom/mixins.py
|
luoyangC/django_template
|
e2fec854e2ba028b1d1981053b5398c21b9f9a25
|
[
"Apache-2.0"
] | 8
|
2020-06-05T22:21:55.000Z
|
2021-09-22T18:50:27.000Z
|
custom/mixins.py
|
luoyangC/django_template
|
e2fec854e2ba028b1d1981053b5398c21b9f9a25
|
[
"Apache-2.0"
] | null | null | null |
"""
Basic building blocks for generic class based views.
We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from rest_framework import status
from rest_framework import mixins
from custom.response import JsonResponse
class CreateModelMixin(mixins.CreateModelMixin):
"""
Create a model instance.
"""
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return JsonResponse(data=serializer.data, status=status.HTTP_200_OK, headers=headers)
class ListModelMixin(mixins.ListModelMixin):
"""
List a queryset.
"""
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return JsonResponse(data=serializer.data, status=status.HTTP_200_OK)
class RetrieveModelMixin(mixins.RetrieveModelMixin):
"""
Retrieve a model instance.
"""
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return JsonResponse(data=serializer.data, status=status.HTTP_200_OK)
class UpdateModelMixin(mixins.UpdateModelMixin):
"""
Update a model instance.
"""
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
if getattr(instance, '_prefetched_objects_cache', None):
instance._prefetched_objects_cache = {}
return JsonResponse(data=serializer.data, status=status.HTTP_200_OK)
class DestroyModelMixin(mixins.DestroyModelMixin):
"""
Destroy a model instance.
"""
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return JsonResponse(data=None, status=status.HTTP_200_OK)
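# --- Editorial composition sketch (hypothetical; the viewset name, model and
# serializer are assumptions, not part of this module) ---
# The mixins above are meant to be combined with a DRF GenericViewSet so that
# every action returns the project's JsonResponse envelope.
from rest_framework.viewsets import GenericViewSet
class ArticleViewSet(CreateModelMixin, ListModelMixin, RetrieveModelMixin,
                     UpdateModelMixin, DestroyModelMixin, GenericViewSet):
    # A real viewset would also set, for example:
    # queryset = Article.objects.all()
    # serializer_class = ArticleSerializer
    pass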
| 33.04
| 93
| 0.700565
| 2,175
| 0.877724
| 0
| 0
| 0
| 0
| 0
| 0
| 408
| 0.164649
|
5a66649d8c1a6d7c9c60e1d964b3f1eb9d459b10
| 893
|
py
|
Python
|
rl_trainer/algo/network.py
|
jidiai/Competition_Olympics-Curling
|
a3f1e1316a9e9a060bcca623aff2004878c50c78
|
[
"MIT"
] | 7
|
2022-02-01T14:45:03.000Z
|
2022-02-28T08:21:13.000Z
|
rl_trainer/algo/network.py
|
jidiai/Competition_Olympics-Curling
|
a3f1e1316a9e9a060bcca623aff2004878c50c78
|
[
"MIT"
] | 1
|
2022-02-19T15:03:56.000Z
|
2022-02-25T08:59:22.000Z
|
rl_trainer/algo/network.py
|
jidiai/Competition_Olympics-Curling
|
a3f1e1316a9e9a060bcca623aff2004878c50c78
|
[
"MIT"
] | 5
|
2022-02-08T14:16:12.000Z
|
2022-03-08T01:56:37.000Z
|
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class Actor(nn.Module):
def __init__(self, state_space, action_space, hidden_size=64):
super(Actor, self).__init__()
self.linear_in = nn.Linear(state_space, hidden_size)
self.action_head = nn.Linear(hidden_size, action_space)
def forward(self, x):
x = F.relu(self.linear_in(x))
action_prob = F.softmax(self.action_head(x), dim=1)
return action_prob
class Critic(nn.Module):
def __init__(self, state_space, hidden_size=64):
super(Critic, self).__init__()
self.linear_in = nn.Linear(state_space, hidden_size)
self.state_value = nn.Linear(hidden_size, 1)
def forward(self, x):
x = F.relu(self.linear_in(x))
value = self.state_value(x)
return value
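# --- Editorial shape-check sketch (sizes are illustrative assumptions) ---
# Runs a batch of flattened observations through both heads defined above.
if __name__ == "__main__":
    import torch
    obs = torch.randn(8, 40).to(device)   # batch of 8 states with 40 features each
    actor = Actor(state_space=40, action_space=4).to(device)
    critic = Critic(state_space=40).to(device)
    print(actor(obs).shape)    # torch.Size([8, 4]); each row sums to 1
    print(critic(obs).shape)   # torch.Size([8, 1])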
| 28.806452
| 66
| 0.666293
| 757
| 0.847704
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.012318
|
5a6742060ae0c9724845b125a09501149114e4ca
| 7,284
|
py
|
Python
|
digesters/hipchat/hipchat_notification_digester.py
|
paul-hammant/imapdigester
|
7d2d9525d39b1f3f839a219061180971404e4bb8
|
[
"MIT"
] | 25
|
2016-04-04T17:32:47.000Z
|
2022-03-08T02:18:07.000Z
|
digesters/hipchat/hipchat_notification_digester.py
|
paul-hammant/imapslurper
|
7d2d9525d39b1f3f839a219061180971404e4bb8
|
[
"MIT"
] | null | null | null |
digesters/hipchat/hipchat_notification_digester.py
|
paul-hammant/imapslurper
|
7d2d9525d39b1f3f839a219061180971404e4bb8
|
[
"MIT"
] | 4
|
2017-01-02T21:03:28.000Z
|
2022-02-22T18:38:44.000Z
|
# coding=utf-8
import arrow
from bs4 import BeautifulSoup
from digesters.base_digester import BaseDigester
TEMPLATE = """<html>
<head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type"/>
<title>Atlassian HipChat</title>
</head>
<body style="box-sizing: border-box; height: 100%; width: 100%;">
<table bgcolor="#f5f5f5" border="0" cellpadding="0" cellspacing="0" class="container wrapper_shrink"
style="_padding: 20px; padding: 3%;" width="640">
<tr>
<td valign="top">
<table bgcolor="#ffffff" border="0" cellpadding="0" cellspacing="0" class="inner-container table_shrink"
id="email_content"
style="-khtml-border-radius: 6px; -moz-border-radius: 6px; -webkit-border-radius: 6px; border: 1px solid #dadada; border-radius: 6px; width: 100% !important; margin-top: 15px;"
width="600">
<tr>
<td class="td top-spacer"
style="font-size: 15px; line-height: 4px; padding-left: 20px; padding-right: 10px !important;"
valign="top">
</td>
</tr>
<tr>
<td>
<div class="history_container history_email" id="chats" style="padding-right: 0px !important;">
<InsertHere/>
</div>
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>"""
class HipchatNotificationDigester(BaseDigester):
def __init__(self, store_writer):
super(HipchatNotificationDigester, self).__init__()
self.store_writer = store_writer
self.new_message_count = 0
self.new_articles = 0
self.hc_notifications = self.store_writer.get_from_binary("hipchat-notifications")
if self.hc_notifications is None:
self.hc_notifications = {}
self.most_recently_seen = self.store_writer.get_from_binary("most-recently-seen")
if self.most_recently_seen is None:
self.most_recently_seen = 0
self.new_notifications = {}
self.previously_notified_article_count = len(self.hc_notifications)
if self.previously_notified_article_count > 0:
self.previously_notified_article_most_recent = max(self.hc_notifications)
else:
self.previously_notified_article_most_recent = 0
def process_new_notification(self, rfc822content, msg, html_message, text_message):
self.new_message_count += 1
subject = msg['Subject']
if "sent you a 1-1 message" in subject:
room = "Direct Message"
else:
room = "Room: " + subject.split('"')[1]
when = arrow.get(msg['Date'].split(',', 1)[1].strip(), 'D MMM YYYY HH:mm:ss ZZ').timestamp
if html_message:
soup = BeautifulSoup(html_message, 'html.parser')
div = soup.find("div", {"id": "chats"}).find("div")
self.hc_notifications[when] = {
"room": room,
"div": str(div)
}
return True
return False
def rewrite_digest_emails(self, digest_folder_proxy, has_previous_message, previously_seen, sender_to_implicate):
if self.previously_notified_article_count == len(self.hc_notifications):
return
# Deleted email (by the user) means they don't want to see THOSE notifications listed in a Digest again.
if has_previous_message == False:
self.hc_notifications = {}
if has_previous_message == False:
if self.previously_notified_article_count > 0:
self.most_recently_seen = self.previously_notified_article_most_recent
template_end, template_start = self.get_template_start_and_end(TEMPLATE)
past_bookmark = 0
unseen = 0
for when in sorted(iter(self.hc_notifications.keys()), reverse=True):
mostRecentSeen = self.most_recently_seen
if when < mostRecentSeen:
past_bookmark += 1
else:
unseen += 1
if past_bookmark > 30: # only show thirty after the bookmark
self.hc_notifications.pop(when, None)
email_html = self.make_html_payload(template_end, template_start, self.hc_notifications).replace("<br/>","")
# Delete previous email, and write replacement
if has_previous_message:
digest_folder_proxy.delete_previous_message()
digest_folder_proxy.append(self.make_new_raw_so_email(email_html, unseen, sender_to_implicate))
# Save
self.store_writer.store_as_binary("hipchat-notifications", self.hc_notifications)
self.store_writer.store_as_binary("most-recently-seen", self.most_recently_seen)
def matching_incoming_headers(self):
return ["From: HipChat <donotreply@hipchat.com>"]
def matching_digest_subject(self):
return 'Notification Digest'
def matching_digest_sender(self):
return "HipChat"
def print_summary(self):
print("Hipchat: New HipChat notifications: " + str(self.new_message_count))
def get_template_start_and_end(self, template):
template_start = template[:template.find("<InsertHere/>")]
template_end = template[template.find("<InsertHere/>") + len("<InsertHere/>"):]
return template_end, template_start
def make_html_payload(self, template_end, template_start, hc_notifications):
email_html = template_start
ix = 0
for anum in sorted(iter(hc_notifications.keys()), reverse=True):
if anum == self.most_recently_seen and ix > 0:
email_html += '<div style="border-bottom: 1.5pt solid red; border-top: 1.5pt solid red;"><center>^ New Questions Since You Last Checked ^</center></div>\n'
email_html += '<div class="ecxhc-chat-from" style="margin-left: 150px;text-align:left;width:200px;padding:10px 0 10px 10px;">' + hc_notifications[anum]["room"] + '</div>\n'
email_html += "<div>\n" + hc_notifications[anum]["div"] + "</div>\n"
            ix += 1
email_html += template_end
return email_html
def make_new_raw_so_email(self, email_html, count, sender_to_implicate):
new_message = 'Subject: ' + self.matching_digest_subject() + ": " + str(count) + ' new notification(s)\n'
new_message += 'From: \"HipChat\" <' + sender_to_implicate + '>\n'
new_message += 'Content-Transfer-Encoding: 8bit\n'
new_message += 'Content-Type: multipart/alternative; boundary="---NOTIFICATION_BOUNDARY' \
+ self.notification_boundary_rand + '"\n'
new_message += 'MIME-Version: 1.0\n'
new_message += 'This is a multi-part message in MIME format.\n'
new_message += '-----NOTIFICATION_BOUNDARY' + self.notification_boundary_rand \
+ '\nContent-Type: text/html; charset="utf-8"\n'
new_message += 'Content-Transfer-Encoding: 8bit\n\n'
new_message += email_html.replace("\n\n\n", "\n").replace("\n\n", "\n")
new_message += '\n\n-----NOTIFICATION_BOUNDARY' + self.notification_boundary_rand
return new_message
| 42.596491
| 195
| 0.623009
| 5,750
| 0.789401
| 0
| 0
| 0
| 0
| 0
| 0
| 2,706
| 0.371499
|
5a6831d8ec7d93dd05d620a6d41fce88e4531158
| 138
|
py
|
Python
|
FB2/__init__.py
|
Ae-Mc/FB2
|
2c29f774ab08bdad5bd6144b1be71b93146ce8fe
|
[
"MIT"
] | 3
|
2020-11-15T10:55:22.000Z
|
2022-02-09T19:45:52.000Z
|
FB2/__init__.py
|
Ae-Mc/FB2
|
2c29f774ab08bdad5bd6144b1be71b93146ce8fe
|
[
"MIT"
] | 1
|
2020-11-15T11:04:59.000Z
|
2020-11-19T22:12:52.000Z
|
FB2/__init__.py
|
Ae-Mc/FB2
|
2c29f774ab08bdad5bd6144b1be71b93146ce8fe
|
[
"MIT"
] | null | null | null |
from .FictionBook2 import FictionBook2
from .Author import Author
from .TitleInfo import TitleInfo
from .DocumentInfo import DocumentInfo
| 27.6
| 38
| 0.855072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5a683a89ea393148d4edd0bc84134016995c858d
| 374
|
py
|
Python
|
runserver.py
|
chintal/tendril-monitor-vendor
|
af7577bd88b3d35e09a733607555d5d10e1cd9c7
|
[
"MIT"
] | null | null | null |
runserver.py
|
chintal/tendril-monitor-vendor
|
af7577bd88b3d35e09a733607555d5d10e1cd9c7
|
[
"MIT"
] | null | null | null |
runserver.py
|
chintal/tendril-monitor-vendor
|
af7577bd88b3d35e09a733607555d5d10e1cd9c7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# Copyright (C) 2015 Chintalagiri Shashank
# Released under the MIT license.
"""
Simple Deployment Example
-------------------------
"""
from vendor_monitor import worker
from twisted.internet import reactor
import logging
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
worker.start()
reactor.run()
| 16.26087
| 42
| 0.68984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 184
| 0.491979
|
5a697644fbf259cd8f3bc1346fab09736144290b
| 3,746
|
py
|
Python
|
yt/frontends/ytdata/tests/test_unit.py
|
tukss/yt
|
8bf6fce609cad3d4b291ebd94667019ab2e18377
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-09-15T08:17:43.000Z
|
2021-09-15T08:17:43.000Z
|
yt/frontends/ytdata/tests/test_unit.py
|
tukss/yt
|
8bf6fce609cad3d4b291ebd94667019ab2e18377
|
[
"BSD-3-Clause-Clear"
] | 8
|
2020-04-02T16:51:49.000Z
|
2022-01-11T14:12:44.000Z
|
yt/frontends/ytdata/tests/test_unit.py
|
stonnes/yt
|
aad3cfa3b4ebab7838352ab467275a27c26ff363
|
[
"BSD-3-Clause-Clear"
] | 2
|
2020-08-12T15:46:11.000Z
|
2021-02-09T13:09:17.000Z
|
import os
import shutil
import tempfile
import numpy as np
from yt.loaders import load, load_uniform_grid
from yt.testing import (
assert_array_equal,
assert_fname,
fake_random_ds,
requires_file,
requires_module,
)
from yt.utilities.answer_testing.framework import data_dir_load
from yt.visualization.plot_window import ProjectionPlot, SlicePlot
ytdata_dir = "ytdata_test"
@requires_module("h5py")
@requires_file(os.path.join(ytdata_dir, "slice.h5"))
@requires_file(os.path.join(ytdata_dir, "proj.h5"))
@requires_file(os.path.join(ytdata_dir, "oas.h5"))
def test_old_plot_data():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
fn = "slice.h5"
full_fn = os.path.join(ytdata_dir, fn)
ds_slice = data_dir_load(full_fn)
p = SlicePlot(ds_slice, "z", "density")
fn = p.save()
assert_fname(fn[0])
fn = "proj.h5"
full_fn = os.path.join(ytdata_dir, fn)
ds_proj = data_dir_load(full_fn)
p = ProjectionPlot(ds_proj, "z", "density")
fn = p.save()
assert_fname(fn[0])
fn = "oas.h5"
full_fn = os.path.join(ytdata_dir, fn)
ds_oas = data_dir_load(full_fn)
p = SlicePlot(ds_oas, [1, 1, 1], "density")
fn = p.save()
assert_fname(fn[0])
os.chdir(curdir)
shutil.rmtree(tmpdir)
@requires_module("h5py")
def test_plot_data():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
ds = fake_random_ds(16)
plot = SlicePlot(ds, "z", "density")
fn = plot.data_source.save_as_dataset("slice.h5")
ds_slice = load(fn)
p = SlicePlot(ds_slice, "z", "density")
fn = p.save()
assert_fname(fn[0])
plot = ProjectionPlot(ds, "z", "density")
fn = plot.data_source.save_as_dataset("proj.h5")
ds_proj = load(fn)
p = ProjectionPlot(ds_proj, "z", "density")
fn = p.save()
assert_fname(fn[0])
plot = SlicePlot(ds, [1, 1, 1], "density")
fn = plot.data_source.save_as_dataset("oas.h5")
ds_oas = load(fn)
p = SlicePlot(ds_oas, [1, 1, 1], "density")
fn = p.save()
assert_fname(fn[0])
os.chdir(curdir)
if tmpdir != ".":
shutil.rmtree(tmpdir)
@requires_module("h5py")
def test_non_square_frb():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
# construct an arbitrary dataset
arr = np.arange(8.0 * 9.0 * 10.0).reshape((8, 9, 10))
data = dict(density=(arr, "g/cm**3"))
bbox = np.array([[-4, 4.0], [-4.5, 4.5], [-5.0, 5]])
ds = load_uniform_grid(
data, arr.shape, length_unit="Mpc", bbox=bbox, periodicity=(False, False, False)
)
# make a slice
slc = ds.slice(axis="z", coord=ds.quan(0.0, "code_length"))
# make a frb and save it to disk
center = (ds.quan(0.0, "code_length"), ds.quan(0.0, "code_length"))
xax, yax = ds.coordinates.x_axis[slc.axis], ds.coordinates.y_axis[slc.axis]
res = [ds.domain_dimensions[xax], ds.domain_dimensions[yax]] # = [8,9]
width = ds.domain_right_edge[xax] - ds.domain_left_edge[xax] # = 8 code_length
height = ds.domain_right_edge[yax] - ds.domain_left_edge[yax] # = 9 code_length
frb = slc.to_frb(width=width, height=height, resolution=res, center=center)
fname = "test_frb_roundtrip.h5"
frb.save_as_dataset(fname, fields=["density"])
expected_vals = arr[:, :, 5].T
print(
"\nConfirmation that initial frb results are expected:",
(expected_vals == frb["density"].v).all(),
"\n",
)
# yt-reload:
reloaded_ds = load(fname)
assert_array_equal(frb["density"].shape, reloaded_ds.data["density"].shape)
assert_array_equal(frb["density"], reloaded_ds.data["density"])
os.chdir(curdir)
if tmpdir != ".":
shutil.rmtree(tmpdir)
| 28.815385
| 88
| 0.644688
| 0
| 0
| 0
| 0
| 3,341
| 0.891885
| 0
| 0
| 542
| 0.144688
|
5a6985ea52c126cdfc4394e0251917377b3471a6
| 10,580
|
py
|
Python
|
openmdao.lib/src/openmdao/lib/drivers/test/test_opt_genetic.py
|
mjfwest/OpenMDAO-Framework
|
a5521f47ad7686c25b203de74e1c7dff5fd7a52b
|
[
"Apache-2.0"
] | 69
|
2015-01-02T19:10:08.000Z
|
2021-11-14T04:42:28.000Z
|
openmdao.lib/src/openmdao/lib/drivers/test/test_opt_genetic.py
|
jcchin/OpenMDAO-Framework
|
038e89b06da1c74f00918f4c6fbd8bd365e25657
|
[
"Apache-2.0"
] | 3
|
2015-01-15T23:08:18.000Z
|
2015-03-11T16:57:35.000Z
|
openmdao.lib/src/openmdao/lib/drivers/test/test_opt_genetic.py
|
jcchin/OpenMDAO-Framework
|
038e89b06da1c74f00918f4c6fbd8bd365e25657
|
[
"Apache-2.0"
] | 31
|
2015-09-16T00:37:35.000Z
|
2022-01-10T06:27:55.000Z
|
"""
Test the genetic optimizer driver
"""
import unittest
import random
from openmdao.main.datatypes.api import Float, Array, Enum, Int, Str
from pyevolve import Selectors
from openmdao.main.api import Assembly, Component, set_as_top, Driver
from openmdao.lib.drivers.genetic import Genetic
# pylint: disable-msg=E1101
class SphereFunction(Component):
total = Float(0., iotype='out')
x = Float(0, low=-5.12, high=5.13, iotype="in")
y = Enum([-10, 0, 1, 2, 3, 4, 5], iotype="in")
z = Int(0, low=-5, high=5, iotype="in")
def __init__(self):
super(SphereFunction, self).__init__()
def execute(self):
""" calculate the sume of the squares for the list of numbers """
self.total = self.x**2+self.y**2+self.z**2
class Asmb(Assembly):
def configure(self):
self.add('sphere', SphereFunction())
self.driver.workflow.add('sphere')
self.create_passthrough('sphere.x')
self.create_passthrough('sphere.y')
self.create_passthrough('sphere.z')
self.create_passthrough('sphere.total')
class SphereFunctionArray(Component):
total = Float(0., iotype='out')
x = Array([0.0, 0.0, 0.0], iotype="in")
def __init__(self):
super(SphereFunctionArray, self).__init__()
def execute(self):
""" calculate the sume of the squares for the list of numbers """
self.total = self.x[0]**2+self.x[1]**2+self.x[2]**2
class TestCase(unittest.TestCase):
""" test case for the genetic driver"""
def setUp(self):
random.seed(10)
# pyevolve does some caching that causes failures during our
# complete unit tests due to stale values in the cache attributes
# below, so reset them here
Selectors.GRankSelector.cachePopID = None
Selectors.GRankSelector.cacheCount = None
Selectors.GRouletteWheel.cachePopID = None
Selectors.GRouletteWheel.cacheWheel = None
self.top = set_as_top(Assembly())
self.top.add('driver', Genetic())
self.top.driver.seed = 123
def tearDown(self):
self.top = None
def test_optimizeSphere_set_high_low(self):
self.top.add('comp', SphereFunction())
self.top.driver.workflow.add('comp')
self.top.driver.add_objective("comp.total")
self.top.driver.add_parameter('comp.x', high=5.13, low=-5.12)
self.top.driver.add_parameter('comp.y')
self.top.driver.add_parameter('comp.z', high=5, low=-5)
self.top.driver.mutation_rate = .02
self.top.driver.generations = 1
self.top.driver.opt_type = "minimize"
self.top.run()
self.assertAlmostEqual(self.top.driver.best_individual.score,
.020, places=2)
x, y, z = [x for x in self.top.driver.best_individual]
self.assertAlmostEqual(x, 0.135, places=2)
self.assertEqual(y, 0)
self.assertEqual(z, 0)
def test_optimizeSphere(self):
self.top.add('comp', SphereFunction())
self.top.driver.workflow.add('comp')
self.top.driver.add_objective("comp.total")
self.top.driver.add_parameter('comp.x')
self.top.driver.add_parameter('comp.y')
self.top.driver.add_parameter('comp.z')
self.top.driver.mutation_rate = .02
self.top.driver.generations = 1
self.top.driver.opt_type = "minimize"
self.top.run()
self.assertAlmostEqual(self.top.driver.best_individual.score,
.02, places=1)
x, y, z = [x for x in self.top.driver.best_individual]
self.assertAlmostEqual(x, 0.135, places=2)
self.assertEqual(y, 0)
self.assertEqual(z, 0)
def test_optimizeSpherearray_nolowhigh(self):
self.top.add('comp', SphereFunctionArray())
self.top.driver.workflow.add('comp')
self.top.driver.add_objective("comp.total")
try:
self.top.driver.add_parameter('comp.x[0]')
except ValueError as err:
self.assertEqual(str(err),
"driver: Trying to add parameter 'comp.x[0]', "
"but no lower limit was found and no 'low' "
"argument was given. One or the other must be "
"specified.")
else:
self.fail('TypeError expected')
def test_optimizeSphereAssemblyPassthrough(self):
self.top.add('comp', Asmb())
self.top.driver.workflow.add('comp')
self.top.driver.add_objective("comp.total")
self.top.driver.add_parameter('comp.x')
self.top.driver.add_parameter('comp.y')
self.top.driver.add_parameter('comp.z')
self.top.driver.mutation_rate = .02
self.top.driver.generations = 1
self.top.driver.opt_type = "minimize"
self.top.run()
self.assertAlmostEqual(self.top.driver.best_individual.score,
.02, places=1)
x, y, z = [x for x in self.top.driver.best_individual]
self.assertAlmostEqual(x, .135, places=2)
self.assertEqual(y, 0)
self.assertEqual(z, 0)
def test_optimizeSpherearray(self):
self.top.add('comp', SphereFunctionArray())
self.top.driver.workflow.add('comp')
self.top.driver.add_objective("comp.total")
self.top.driver.add_parameter('comp.x[0]', low=-5.12, high=5.13)
self.top.driver.add_parameter('comp.x[1]', low=-5.12, high=5.13)
self.top.driver.add_parameter('comp.x[2]', low=-5.12, high=5.13)
self.top.driver.mutation_rate = .02
self.top.driver.population_size = 100
self.top.driver.generations = 1
self.top.driver.opt_type = "minimize"
self.top.run()
self.assertAlmostEqual(self.top.driver.best_individual.score,
0.22, places=2)
x, y, z = [x for x in self.top.driver.best_individual]
def test_list_remove_clear_params(self):
self.top.add('comp', SphereFunction())
self.top.driver.workflow.add('comp')
self.top.driver.add_parameter('comp.x')
self.top.driver.add_parameter('comp.y')
params = self.top.driver.list_param_targets()
self.assertEqual(set(params), set(['comp.x', 'comp.y']))
self.assertEqual(len(params), 2)
self.top.driver.remove_parameter('comp.x')
params = self.top.driver.list_param_targets()
self.assertEqual(params, ['comp.y'])
try:
self.top.driver.remove_parameter('xyz')
except AttributeError as err:
self.assertEqual(str(err),
"driver: Trying to remove parameter 'xyz' that is "
"not in this driver.")
else:
self.fail('RuntimeError Expected')
self.top.driver.add_parameter('comp.x')
self.top.driver.clear_parameters()
params = self.top.driver.list_param_targets()
self.assertEqual(params, [])
self.top.driver.add_parameter('comp.y')
try:
self.top.driver.add_parameter('comp.y')
except ValueError as err:
self.assertEqual(str(err),
"driver: ['comp.y'] are already Parameter targets")
else:
self.fail('RuntimeError expected')
def test_0_low_high(self):
class SomeComp(Component):
"""Arbitrary component with a few variables, but which does not
really do any calculations"""
w = Float(0.0, low=-10, high=0.0, iotype="in")
x = Float(0.0, low=0.0, high=100.0, iotype="in")
y = Int(10, low=10, high=100, iotype="in")
z = Enum([-10, -5, 0, 7], iotype="in")
class Simulation(Assembly):
"""Top Level Assembly used for simulation"""
def configure(self):
"""Adds the Genetic driver to the assembly"""
opt = self.add('optimizer', Genetic())
self.add('comp', SomeComp())
opt.workflow.add('comp')
self.optimizer.add_parameter('comp.x')
self.optimizer.add_parameter('comp.y')
self.optimizer.add_parameter('comp.z')
s = Simulation()
def test_improper_parameter_type(self):
class SomeComp(Component):
"""Arbitrary component with a few variables, but which does not
really do any calculations"""
z = Str("test", iotype="in")
class Simulation(Assembly):
"""Top Level Assembly used for simulation"""
def configure(self):
"""Adds the Genetic driver to the assembly"""
self.add('driver', Genetic())
self.add('comp', SomeComp())
self.driver.workflow.add('comp')
self.driver.add_parameter('comp.z')
try:
s = set_as_top(Simulation())
except ValueError as err:
self.assertEqual(str(err),
"driver: The value of parameter 'comp.z' must be a real or "
"integral type, but its type is 'str'.")
else:
self.fail("ValueError expected")
def test_initial_run(self):
from openmdao.main.interfaces import IHasParameters, implements
from openmdao.main.hasparameters import HasParameters
from openmdao.util.decorators import add_delegate
class MyComp(Component):
x = Float(0.0, iotype='in', low=-10, high=10)
xx = Float(0.0, iotype='in', low=-10, high=10)
f_x = Float(iotype='out')
y = Float(iotype='out')
def execute(self):
if self.xx != 1.0:
self.raise_exception("Lazy", RuntimeError)
self.f_x = 2.0*self.x
self.y = self.x
#print self.x, self.xx, self.f_x, self.y
@add_delegate(HasParameters)
class SpecialDriver(Driver):
implements(IHasParameters)
def execute(self):
self.set_parameters([1.0])
top = set_as_top(Assembly())
top.add('comp', MyComp())
top.add('driver', Genetic())
top.add('subdriver', SpecialDriver())
top.driver.workflow.add('subdriver')
top.subdriver.workflow.add('comp')
top.subdriver.add_parameter('comp.xx')
top.driver.add_parameter('comp.x')
top.driver.add_objective('comp.f_x')
top.run()
if __name__ == "__main__":
unittest.main()
| 33.587302
| 80
| 0.58913
| 10,195
| 0.963611
| 0
| 0
| 180
| 0.017013
| 0
| 0
| 1,938
| 0.183176
|
5a69dfb1498fd1737edb8cb80ef069c5d681ed1f
| 2,974
|
py
|
Python
|
src/db/ohlc_to_db.py
|
canl/algo-trading
|
288f43a54d6594f79c79dc21f5534ad9aa785b29
|
[
"MIT"
] | 11
|
2020-04-04T08:59:37.000Z
|
2020-12-25T20:21:05.000Z
|
src/db/ohlc_to_db.py
|
canl/algo-trading
|
288f43a54d6594f79c79dc21f5534ad9aa785b29
|
[
"MIT"
] | 1
|
2021-12-13T20:35:20.000Z
|
2021-12-13T20:35:20.000Z
|
src/db/ohlc_to_db.py
|
canl/algo-trading
|
288f43a54d6594f79c79dc21f5534ad9aa785b29
|
[
"MIT"
] | 3
|
2020-06-21T16:29:56.000Z
|
2020-07-18T15:15:01.000Z
|
import sqlite3
from datetime import datetime
from sqlite3 import Error
import pandas as pd
from src.pricer import read_price_df
DB_FILE_PATH = 'db.sqlite'
def connect_to_db(db_file):
"""
    Connect to an SQLite database; the db file will be created if it does not exist
:param db_file: absolute or relative path of db file
:return: sqlite3 connection
"""
sqlite3_conn = None
try:
sqlite3_conn = sqlite3.connect(db_file)
return sqlite3_conn
except Error as err:
print(err)
if sqlite3_conn is not None:
sqlite3_conn.close()
def insert_df_to_table(data: pd.DataFrame, table_name: str):
"""
    Insert a pandas DataFrame into a database table, renaming the DataFrame
    headers to the table column names and creating the table if it does not exist
    :param data: data in DataFrame format to be written to the SQL table
:param table_name: table name in the database to insert the data into
:return: None
"""
conn = connect_to_db(DB_FILE_PATH)
if conn is not None:
c = conn.cursor()
        # Create the table if it does not exist
c.execute('CREATE TABLE IF NOT EXISTS ' + table_name +
'(time VARCHAR NOT NULL PRIMARY KEY,'
'open DECIMAL,'
'high DECIMAL,'
'low DECIMAL,'
'close DECIMAL)')
data.columns = get_column_names_from_db_table(c, table_name)
data.to_sql(name=table_name, con=conn, if_exists='append', index=False)
conn.close()
print('SQL insert process finished')
else:
print('Connection to database failed')
def read_price(start_date: datetime, end_date: datetime, instrument: str = 'GBP_USD') -> pd.DataFrame:
price_df = read_price_df(instrument=instrument, granularity='S5', start=start_date, end=end_date, max_count=4000)
price_df.reset_index(level=0, inplace=True)
price_df['time'] = price_df['time'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
return price_df
def get_column_names_from_db_table(sql_cursor, table_name):
"""
Scrape the column names from a database table to a list
:param sql_cursor: sqlite cursor
:param table_name: table name to get the column names from
:return: a list with table column names
"""
table_column_names = 'PRAGMA table_info(' + table_name + ');'
sql_cursor.execute(table_column_names)
table_column_names = sql_cursor.fetchall()
column_names = list()
for name in table_column_names:
column_names.append(name[1])
return column_names
if __name__ == '__main__':
ccy_pair = 'USD_JPY'
start = datetime(2015, 1, 1, 0, 0, 0)
to = datetime(2020, 7, 31, 23, 59, 59)
df = read_price(start_date=start, end_date=to, instrument=ccy_pair)
# pattern: currency_pair _ ohlc
insert_df_to_table(data=df, table_name=f"{ccy_pair.lower().replace('_', '')}_ohlc")
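# --- Editorial read-back sketch (added by the editor; the table name simply
# follows the same "<pair>_ohlc" convention used above) ---
def read_ohlc_from_db(table_name: str) -> pd.DataFrame:
    """Load previously stored candles back into a DataFrame."""
    conn = connect_to_db(DB_FILE_PATH)
    try:
        return pd.read_sql_query(f'SELECT * FROM {table_name} ORDER BY time', conn)
    finally:
        conn.close()
# Example: read_ohlc_from_db('usdjpy_ohlc')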
| 30.979167
| 117
| 0.66308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,156
| 0.388702
|
5a6ab1cd0cde51b96b0f8b27b7f207dcb0b63462
| 2,793
|
py
|
Python
|
morphs/data/localize.py
|
MarvinT/morphs
|
c8b204debcb23ba79c3112933af9e6ca4b05b7a1
|
[
"MIT"
] | 2
|
2019-01-25T17:36:33.000Z
|
2019-04-03T14:25:05.000Z
|
morphs/data/localize.py
|
MarvinT/morphs
|
c8b204debcb23ba79c3112933af9e6ca4b05b7a1
|
[
"MIT"
] | 17
|
2018-09-21T00:07:10.000Z
|
2019-05-23T17:07:35.000Z
|
morphs/data/localize.py
|
MarvinT/morphs
|
c8b204debcb23ba79c3112933af9e6ca4b05b7a1
|
[
"MIT"
] | 3
|
2018-09-20T18:47:07.000Z
|
2021-09-15T20:43:31.000Z
|
import pandas as pd
import numpy as np
import morphs
from six import exec_
from pathlib2 import Path
from joblib import Parallel, delayed
# adapted from klustakwik
# NEVER POINT THIS AT SOMETHING YOU DONT TRUST
def _read_python(path):
assert path.exists()
with open(path.as_posix(), "r") as f:
contents = f.read()
metadata = {}
exec_(contents, {}, metadata)
metadata = {k.lower(): v for (k, v) in metadata.items()}
return metadata
def calc_loc(block_path, squared=True):
columns = [
"block_path",
"AP",
"ML",
"Z",
"cluster",
"cluster_pos",
"cluster_accuracy",
]
waveform_dict = morphs.load.waveforms()
waveforms, cluster_map = waveform_dict[block_path]
if waveforms is None:
return pd.DataFrame(columns=columns)
amps = (waveforms[:, 0, :] + waveforms[:, -1, :]) / 2 - np.min(waveforms, axis=1)
amps /= np.max(amps, axis=0)
if squared:
amps = amps ** 2
prb_files = list(Path(block_path).glob("*.prb"))
assert len(prb_files) == 1
prb = _read_python(prb_files[0])
assert len(prb["channel_groups"]) == 1
for group in prb["channel_groups"]:
chans = prb["channel_groups"][group]["geometry"].keys()
x, y = zip(*[prb["channel_groups"][group]["geometry"][k] for k in chans])
y_hats = np.sum(amps * np.array(y).reshape((1, -1)), axis=1) / np.sum(
amps, axis=1
)
d = {}
d["block_path"] = block_path
d["AP"], d["ML"], d["Z"] = morphs.data.parse.recording_site(block_path)
i_cluster_map = {v: k for k, v in cluster_map.items()}
d["cluster"] = [i_cluster_map[i] for i in range(len(cluster_map))]
d["cluster_pos"] = y_hats
_, cluster_accuracies = morphs.load.cluster_accuracies()
d["cluster_accuracy"] = (
cluster_accuracies[block_path].loc[d["cluster"]]["accuracy"].values
)
return pd.DataFrame(data=d, columns=columns)
def generate_all_loc(parallel=False, n_jobs=morphs.parallel.N_JOBS):
if parallel and n_jobs > 1:
all_locs = Parallel(n_jobs=n_jobs)(
delayed(calc_loc)(block_path) for block_path in morphs.paths.blocks()
)
else:
all_locs = [calc_loc(block_path) for block_path in morphs.paths.blocks()]
all_locs_df = pd.concat(all_locs, ignore_index=True)
all_locs_df["block_path"] = all_locs_df["block_path"].astype("category")
all_locs_df.to_pickle(morphs.paths.LOCATIONS_PKL)
@morphs.utils.load._load(
morphs.paths.LOCATIONS_PKL,
generate_all_loc,
download_func=morphs.utils.load._download(
morphs.paths.LOCATIONS_PKL, "1wLoMiKJjKPQbNF_qplqrMzHLyFCyFXn3"
),
)
def load_all_loc(prefer_download=True):
return pd.read_pickle(morphs.paths.LOCATIONS_PKL.as_posix())
| 33.25
| 85
| 0.649481
| 0
| 0
| 0
| 0
| 312
| 0.111708
| 0
| 0
| 379
| 0.135696
|
5a6c3376aee63cfa4176eec2e2221796087f1da4
| 55
|
py
|
Python
|
app/cli/plugin/__init__.py
|
lonless0/flask_project
|
f5d6c5c7655e54d95069b469e3d470eda7a05cb7
|
[
"MIT"
] | 786
|
2019-01-15T14:30:37.000Z
|
2022-03-28T08:53:39.000Z
|
app/cli/plugin/__init__.py
|
lonless0/flask_project
|
f5d6c5c7655e54d95069b469e3d470eda7a05cb7
|
[
"MIT"
] | 107
|
2019-01-18T05:15:16.000Z
|
2022-03-16T07:13:05.000Z
|
app/cli/plugin/__init__.py
|
lonless0/flask_project
|
f5d6c5c7655e54d95069b469e3d470eda7a05cb7
|
[
"MIT"
] | 222
|
2019-01-16T14:44:23.000Z
|
2022-03-23T11:33:00.000Z
|
from .generator import generate
from .init import init
| 18.333333
| 31
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5a6c7805cdb06035d72a4db4a8f024fac0e49f51
| 2,512
|
py
|
Python
|
labelocr/verify_ocr_app.py
|
tienthienhd/labelocr
|
65297c12af9fa15f30d1457164d5cda7bebe70c1
|
[
"Apache-2.0"
] | 2
|
2020-10-01T02:39:48.000Z
|
2020-10-01T04:27:13.000Z
|
labelocr/verify_ocr_app.py
|
tienthienhd/labelocr
|
65297c12af9fa15f30d1457164d5cda7bebe70c1
|
[
"Apache-2.0"
] | null | null | null |
labelocr/verify_ocr_app.py
|
tienthienhd/labelocr
|
65297c12af9fa15f30d1457164d5cda7bebe70c1
|
[
"Apache-2.0"
] | null | null | null |
import atexit
import glob
import json
import logging
import os
import shutil
import sys
import tkinter as tk
import threading
from tkinter import filedialog, messagebox
import cv2
import numpy as np
import pandas as pd
import pygubu
from PIL import Image, ImageTk
from deprecated import deprecated
PROJECT_PATH = os.path.dirname(__file__)
PROJECT_UI = os.path.join(PROJECT_PATH, "verify_ocr.ui")
FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger("LabelOcr")
class VerifyOcrApp:
def __init__(self, master):
self.builder = builder = pygubu.Builder()
builder.add_resource_path(PROJECT_PATH)
builder.add_from_file(PROJECT_UI)
self.master = master
self.mainwindow = builder.get_object('master', master)
builder.connect_callbacks(self)
self.config_dir = os.path.join(os.path.expanduser("~"),".ocr_labeling")
self.last_session_path = os.path.join(self.config_dir, 'last_session_verify_ocr')
if not os.path.exists(self.config_dir):
os.makedirs(self.config_dir, exist_ok=True)
self.image_dir = builder.get_variable("var_img_dir")
self.label_path = builder.get_variable("var_label_path")
self.image_name = builder.get_variable("var_image_name")
self.label_ocr = builder.get_variable("var_label")
self.cur_index = builder.get_variable("var_cur_index")
def load_data(self):
if self.image_dir.get() is not None and self.label_path.get() is not None and len(
self.image_dir.get()) > 0 and len(self.label_path.get()) > 0:
if self.label_in_filename:
self.list_file = list(glob.glob(f"{self.image_dir.get()}/*.png"))
self.list_label = [os.path.splitext(os.path.basename(file))[0] for file in self.list_file]
self.list_label = [self._parse_label(x) for x in self.list_label]
else:
df_label = pd.read_csv(self.label_path.get(), header=0, names=['filename', 'label'], dtype={"filename": str, "label": str})
self.list_file = df_label['filename'].tolist()
self.list_label = df_label['label'].tolist()
self._show_image()
else:
messagebox.showerror("Input Error", "Please choose folder image and label file.")
LOGGER.info("Not found label to save.")
def main():
root = tk.Tk()
app = VerifyOcrApp(root)
app.run()
| 35.885714
| 139
| 0.667994
| 1,892
| 0.753185
| 0
| 0
| 0
| 0
| 0
| 0
| 364
| 0.144904
|
5a6ebd896d0065716f83ceee55fedb02e43d2b47
| 17,814
|
py
|
Python
|
cosmic-core/systemvm/patches/centos7/opt/cosmic/router/bin/cs/firewall.py
|
sanderv32/cosmic
|
9a9d86500b67255a1c743a9438a05c0d969fd210
|
[
"Apache-2.0"
] | 64
|
2016-01-30T13:31:00.000Z
|
2022-02-21T02:13:25.000Z
|
cosmic-core/systemvm/patches/centos7/opt/cosmic/router/bin/cs/firewall.py
|
sanderv32/cosmic
|
9a9d86500b67255a1c743a9438a05c0d969fd210
|
[
"Apache-2.0"
] | 525
|
2016-01-22T10:46:31.000Z
|
2022-02-23T11:08:01.000Z
|
cosmic-core/systemvm/patches/centos7/opt/cosmic/router/bin/cs/firewall.py
|
sanderv32/cosmic
|
9a9d86500b67255a1c743a9438a05c0d969fd210
|
[
"Apache-2.0"
] | 25
|
2016-01-13T16:46:46.000Z
|
2021-07-23T15:22:27.000Z
|
import logging
from jinja2 import Environment, FileSystemLoader
import utils
class Firewall:
def __init__(self, config):
self.config = config
self.jinja_env = Environment(
loader=FileSystemLoader('/opt/cosmic/router/bin/cs/templates'),
trim_blocks=True,
lstrip_blocks=True
)
self.fw = self.config.fw
def sync(self):
logging.info("Running firewall sync")
public_device = None
public_ip = None
self.add_default_vpc_rules()
if "interfaces" not in self.config.dbag_network_overview:
logging.info("Skipping firewall sync, as we have no 'interfaces' object in network_overview.")
return
for interface in self.config.dbag_network_overview['interfaces']:
device = utils.get_interface_name_from_mac_address(interface['mac_address'])
if interface['metadata']['type'] == 'sync':
self.add_sync_vpc_rules(device)
elif interface['metadata']['type'] == 'other':
pass
elif interface['metadata']['type'] == 'public':
self.add_public_vpc_rules(device)
public_device = device
public_ip = interface['ipv4_addresses'][0]['cidr']
elif interface['metadata']['type'] == 'guesttier':
self.add_tier_vpc_rules(device, interface['ipv4_addresses'][0]['cidr'])
elif interface['metadata']['type'] == 'private':
self.add_private_vpc_rules(device, interface['ipv4_addresses'][0]['cidr'])
vpn_open = False
if public_device is not None and 'vpn' in self.config.dbag_network_overview:
if 'site2site' in self.config.dbag_network_overview['vpn']:
for site2site in self.config.dbag_network_overview['vpn']['site2site']:
self.add_site2site_vpn_rules(public_device, site2site)
vpn_open = True
if 'remote_access' in self.config.dbag_network_overview['vpn']:
if public_ip is not None:
self.add_remote_access_vpn_rules(
public_device, public_ip, self.config.dbag_network_overview['vpn']['remote_access']
)
vpn_open = True
# default block VPN ports
logging.info("VPN_open is %s" % (vpn_open))
if not vpn_open:
self.block_vpn_rules(public_device)
if public_device is not None and 'loadbalancer' in self.config.dbag_network_overview:
if len(self.config.dbag_network_overview['loadbalancer']) > 0:
self.add_loadbalancer_rules(public_device, public_ip, self.config.dbag_network_overview['loadbalancer'])
def add_default_vpc_rules(self):
logging.info("Configuring default VPC rules")
self.fw.append(["filter", "", "-P INPUT DROP"])
self.fw.append(["filter", "", "-P FORWARD DROP"])
self.fw.append(["filter", "", "-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT"])
self.fw.append(["mangle", "front", "-A POSTROUTING -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill"])
self.fw.append(["filter", "", "-A INPUT -i lo -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -p icmp -j ACCEPT"])
if self.config.get_advert_method() == "MULTICAST":
self.fw.append(["filter", "", "-A INPUT -d 224.0.0.18/32 -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -d 224.0.0.22/32 -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -d 224.0.0.252/32 -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -d 225.0.0.50/32 -j ACCEPT"])
self.fw.append(["filter", "",
"-A INPUT -i eth0 -p tcp -m tcp -s 169.254.0.1/32 --dport 3922 -m "
"state --state NEW,ESTABLISHED -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT"])
self.fw.append(["filter", "", "-A FORWARD -s %s ! -d %s -j ACCEPT" % (
self.config.dbag_cmdline['config']['vpccidr'], self.config.dbag_cmdline['config']['vpccidr']
)])
def add_tier_vpc_rules(self, device, cidr):
logging.info("Configuring VPC tier rules for device %s" % device)
self.fw.append(["filter", "", "-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT" % device])
self.fw.append(["filter", "", "-A FORWARD -m state --state NEW -o %s -j ACL_INBOUND_%s" % (device, device)])
self.fw.append(["filter", "", "-A OUTPUT -m state --state NEW -o %s -j ACL_INBOUND_%s" % (device, device)])
self.fw.append(["filter", "front", "-A ACL_INBOUND_%s -d 224.0.0.18/32 -j ACCEPT" % device])
self.fw.append(["filter", "front", "-A ACL_INBOUND_%s -d 224.0.0.22/32 -j ACCEPT" % device])
self.fw.append(["filter", "front", "-A ACL_INBOUND_%s -d 224.0.0.252/32 -j ACCEPT" % device])
self.fw.append(["filter", "front", "-A ACL_INBOUND_%s -d 225.0.0.50/32 -j ACCEPT" % device])
self.fw.append(["filter", "front", "-A ACL_INBOUND_%s -d %s -p udp -m udp --dport 68 -j ACCEPT" % (
device, cidr
)])
self.fw.append(["filter", "", "-A INPUT -i %s -p udp -m udp --dport 67 -j ACCEPT" % device])
self.fw.append(["filter", "", "-A INPUT -i %s -p udp -m udp --dport 53 -s %s -j ACCEPT" % (device, cidr)])
self.fw.append(["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 53 -s %s -j ACCEPT" % (device, cidr)])
self.fw.append(["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 80 -m state --state NEW -j ACCEPT" %
device
])
self.fw.append(["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 8080 -m state --state NEW -j ACCEPT" %
device])
self.fw.append(["mangle", "", "-A PREROUTING -m state --state NEW -i %s ! -d %s -j ACL_OUTBOUND_%s" % (
device, cidr, device
)])
self.fw.append(["mangle", "front", "-A ACL_OUTBOUND_%s -d 224.0.0.18/32 -j ACCEPT" % device])
self.fw.append(["mangle", "front", "-A ACL_OUTBOUND_%s -d 224.0.0.22/32 -j ACCEPT" % device])
self.fw.append(["mangle", "front", "-A ACL_OUTBOUND_%s -d 224.0.0.252/32 -j ACCEPT" % device])
self.fw.append(["mangle", "front", "-A ACL_OUTBOUND_%s -d 225.0.0.50/32 -j ACCEPT" % device])
self.fw.append(["mangle", "front", "-A ACL_OUTBOUND_%s -d 255.255.255.255/32 -j ACCEPT" % device])
self.fw.append(["nat", "front", "-A POSTROUTING -s %s -o %s -j SNAT --to-source %s" % (
cidr, device, cidr.split('/')[0]
)])
self.fw.append(["", "front", "-A INPUT -i %s -d %s -p tcp -m tcp -m state --state NEW --dport 80 -j ACCEPT" % (
device, cidr
)])
self.fw.append(["", "front", "-A INPUT -i %s -d %s -p tcp -m tcp -m state --state NEW --dport 443 -j ACCEPT" % (
device, cidr
)])
def add_sync_vpc_rules(self, device):
logging.info("Configuring Sync VPC rules")
if self.config.get_advert_method() == "UNICAST":
self.fw.append(["filter", "", "-A INPUT -i %s -p vrrp -j ACCEPT" % device])
self.fw.append(["filter", "", "-A OUTPUT -o %s -p vrrp -j ACCEPT" % device])
self.fw.append(["filter", "", "-A INPUT -i %s -p tcp --dport 3780 -j ACCEPT" % device])
self.fw.append(["filter", "", "-A OUTPUT -o %s -p tcp --dport 3780 -j ACCEPT" % device])
def add_public_vpc_rules(self, device):
logging.info("Configuring Public VPC rules")
# create ingress chain mangle (port forwarding / source nat)
self.fw.append(["mangle", "", "-N ACL_PUBLIC_IP_%s" % device])
self.fw.append(["mangle", "", "-A PREROUTING -m state --state NEW -i %s -j ACL_PUBLIC_IP_%s" % (
device, device
)])
self.fw.append(["filter", "", "-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT" % device])
# create ingress chain filter (load balancing)
self.fw.append(["filter", "", "-N ACL_PUBLIC_IP_%s" % device])
self.fw.append(["filter", "", "-A INPUT -m state --state NEW -j ACL_PUBLIC_IP_%s" % device])
# create egress chain
self.fw.append(["mangle", "front", "-N ACL_OUTBOUND_%s" % device])
# jump to egress chain
self.fw.append(["mangle", "front", "-A PREROUTING -m state --state NEW -i %s -j ACL_OUTBOUND_%s" % (
device, device
)])
# create source nat list chain
self.fw.append(["filter", "", "-N SOURCE_NAT_LIST"])
self.fw.append(["filter", "", "-A FORWARD -j SOURCE_NAT_LIST"])
if 'source_nat' in self.config.dbag_network_overview['services'] and \
self.config.dbag_network_overview['services']['source_nat']:
logging.info("Adding SourceNAT for interface %s to %s" % (
device, self.config.dbag_network_overview['services']['source_nat'][0]['to']
))
self.fw.append(["nat", "", "-A POSTROUTING -o %s -d 10.0.0.0/8 -j RETURN" % device])
self.fw.append(["nat", "", "-A POSTROUTING -o %s -d 172.16.0.0/12 -j RETURN" % device])
self.fw.append(["nat", "", "-A POSTROUTING -o %s -d 192.168.0.0/16 -j RETURN" % device])
self.fw.append(["nat", "", "-A POSTROUTING -j SNAT -o %s --to-source %s" % (
device, self.config.dbag_network_overview['services']['source_nat'][0]['to']
)])
def add_private_vpc_rules(self, device, cidr):
logging.info("Configuring Private VPC rules")
self.fw.append(["filter", "", "-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT" % device])
# create egress chain
self.fw.append(["mangle", "", "-N ACL_OUTBOUND_%s" % device])
# jump to egress chain
self.fw.append(["mangle", "", "-A PREROUTING -m state --state NEW -i %s ! -d %s -j ACL_OUTBOUND_%s" % (
device, cidr, device
)])
# create ingress chain
self.fw.append(["filter", "", "-N ACL_INBOUND_%s" % device])
# jump to ingress chain
self.fw.append(["filter", "", "-A FORWARD -m state --state NEW -o %s -j ACL_INBOUND_%s" % (device, device)])
def add_site2site_vpn_rules(self, device, site2site):
logging.info("Configuring Site2Site VPN rules")
self.config.fw.append(["", "front", "-A INPUT -i %s -p udp -m udp --dport 500 -s %s -d %s -j ACCEPT" % (
device, site2site['right'], site2site['left'])])
self.config.fw.append(["", "front", "-A INPUT -i %s -p udp -m udp --dport 4500 -s %s -d %s -j ACCEPT" % (
device, site2site['right'], site2site['left'])])
self.config.fw.append(["", "front", "-A INPUT -i %s -p esp -s %s -d %s -j ACCEPT" % (
device, site2site['right'], site2site['left'])])
self.config.fw.append(["nat", "front", "-A POSTROUTING -o %s -m mark --mark 0x525 -j ACCEPT" % device])
# Make it possible to tcpdump on ipsec tunnels
# https://wiki.strongswan.org/projects/strongswan/wiki/CorrectTrafficDump
# ingress IPsec and IKE Traffic rule
self.config.fw.append(["filter", "front", "-I INPUT -p esp -j NFLOG --nflog-group 5"])
self.config.fw.append(["filter", "front", "-I INPUT -p ah -j NFLOG --nflog-group 5"])
self.config.fw.append(["filter", "front",
"-I INPUT -p udp -m multiport --dports 500,4500 -j NFLOG --nflog-group 5"])
# egress IPsec and IKE traffic
self.config.fw.append(["filter", "front", "-I OUTPUT -p esp -j NFLOG --nflog-group 5"])
self.config.fw.append(["filter", "front", "-I OUTPUT -p ah -j NFLOG --nflog-group 5"])
self.config.fw.append(["filter", "front",
"-I OUTPUT -p udp -m multiport --dports 500,4500 -j NFLOG --nflog-group 5"])
# decapsulated IPsec traffic
self.config.fw.append(["mangle", "front",
"-I PREROUTING -m policy --pol ipsec --dir in -j NFLOG --nflog-group 5"])
self.config.fw.append(["mangle", "front",
"-I POSTROUTING -m policy --pol ipsec --dir out -j NFLOG --nflog-group 5"])
# IPsec traffic that is destinated for the local host (iptables INPUT chain)
self.config.fw.append(["filter", "front",
"-I INPUT -m addrtype --dst-type LOCAL -m policy --pol ipsec --dir in"
" -j NFLOG --nflog-group 5"])
# IPsec traffic that is destinated for a remote host (iptables FORWARD chain)
self.config.fw.append(["filter", "front",
"-I INPUT -m addrtype ! --dst-type LOCAL -m policy --pol ipsec --dir in"
" -j NFLOG --nflog-group 5"])
# IPsec traffic that is outgoing (iptables OUTPUT chain)
self.config.fw.append(["filter", "front", "-I OUTPUT -m policy --pol ipsec --dir out -j NFLOG --nflog-group 5"])
for net in site2site['peer_list'].lstrip().rstrip().split(','):
self.config.fw.append(["mangle", "front",
"-A FORWARD -s %s -d %s -j MARK --set-xmark 0x525/0xffffffff" % (
site2site['left_subnet'], net)])
self.config.fw.append(["mangle", "",
"-A OUTPUT -s %s -d %s -j MARK --set-xmark 0x525/0xffffffff" % (
site2site['left_subnet'], net)])
self.config.fw.append(["mangle", "front",
"-A FORWARD -s %s -d %s -j MARK --set-xmark 0x524/0xffffffff" % (
net, site2site['left_subnet'])])
self.config.fw.append(["mangle", "",
"-A INPUT -s %s -d %s -j MARK --set-xmark 0x524/0xffffffff" % (
net, site2site['left_subnet'])])
# Block anything else
self.block_vpn_rules(device)
def add_remote_access_vpn_rules(self, device, publicip, remote_access):
logging.info("Configuring RemoteAccess VPN rules")
localcidr = remote_access['local_cidr']
local_ip = remote_access['local_ip']
self.config.fw.append(["", "", "-I INPUT -i %s --dst %s -p udp -m udp --dport 500 -j ACCEPT" % (device, publicip.split("/")[0])])
self.config.fw.append(["", "", "-I INPUT -i %s --dst %s -p udp -m udp --dport 4500 -j ACCEPT" % (device, publicip.split("/")[0])])
self.config.fw.append(["", "", "-I INPUT -i %s --dst %s -p udp -m udp --dport 1701 -j ACCEPT" % (device, publicip.split("/")[0])])
self.config.fw.append(["", "", "-I INPUT -i %s ! --dst %s -p udp -m udp --dport 500 -j REJECT" % (device, publicip.split("/")[0])])
self.config.fw.append(["", "", "-I INPUT -i %s ! --dst %s -p udp -m udp --dport 4500 -j REJECT" % (device, publicip.split("/")[0])])
self.config.fw.append(["", "", "-I INPUT -i %s ! --dst %s -p udp -m udp --dport 1701 -j REJECT" % (device, publicip.split("/")[0])])
self.config.fw.append(["", "", "-I INPUT -i %s -p ah -j ACCEPT" % device])
self.config.fw.append(["", "", "-I INPUT -i %s -p esp -j ACCEPT" % device])
self.config.fw.append(["", "", " -N VPN_FORWARD"])
self.config.fw.append(["", "", "-I FORWARD -i ppp+ -j VPN_FORWARD"])
self.config.fw.append(["", "", "-I FORWARD -o ppp+ -j VPN_FORWARD"])
self.config.fw.append(["", "", "-I FORWARD -o ppp+ -j VPN_FORWARD"])
self.config.fw.append(["", "", "-A VPN_FORWARD -s %s -j RETURN" % localcidr])
self.config.fw.append(["", "", "-A VPN_FORWARD -i ppp+ -d %s -j RETURN" % localcidr])
self.config.fw.append(["", "", "-A VPN_FORWARD -i ppp+ -o ppp+ -j RETURN"])
self.config.fw.append(["", "", "-I INPUT -i ppp+ -m udp -p udp --dport 53 -j ACCEPT"])
self.config.fw.append(["", "", "-I INPUT -i ppp+ -m tcp -p tcp --dport 53 -j ACCEPT"])
self.config.fw.append(["nat", "front", "-A PREROUTING -i ppp+ -m tcp -p tcp --dport 53 -j DNAT --to-destination %s" % local_ip])
def block_vpn_rules(self, device):
logging.info("Dropping VPN rules")
self.config.fw.append(["", "", "-A INPUT -i %s -p udp -m udp --dport 500 -j REJECT" % device])
self.config.fw.append(["", "", "-A INPUT -i %s -p udp -m udp --dport 4500 -j REJECT" % device])
self.config.fw.append(["", "", "-A INPUT -i %s -p udp -m udp --dport 1701 -j REJECT" % device])
self.config.fw.append(["", "", "-A INPUT -i %s -p ah -j REJECT" % device])
self.config.fw.append(["", "", "-A INPUT -i %s -p esp -j REJECT" % device])
self.config.fw.append(["", "", "-A INPUT -i ppp+ -m udp -p udp --dport 53 -j REJECT"])
self.config.fw.append(["", "", "-A INPUT -i ppp+ -m tcp -p tcp --dport 53 -j REJECT"])
def add_loadbalancer_rules(self, device, publicip, loadbalancer):
logging.info("Configuring Loadbalancer rules")
for lb in loadbalancer.get('load_balancers', {}):
self.config.fw.append(["", "", "-I INPUT -i %s --dst %s -p %s -m %s --dport %s -j ACCEPT" % (device,
publicip.split("/")[0],
lb['protocol'],
lb['protocol'],
lb['src_port'])])
| 57.650485
| 140
| 0.544179
| 17,732
| 0.995397
| 0
| 0
| 0
| 0
| 0
| 0
| 8,077
| 0.453407
|
5a6f4d014d86fed26640b0dae06b65517e18a73d
| 2,875
|
py
|
Python
|
MachineLearning/knn/knn.py
|
z8g/pettern
|
abf6b9c09597bb2badec97d51112681e46dde760
|
[
"Apache-2.0"
] | 72
|
2019-09-26T09:12:14.000Z
|
2020-09-05T11:59:25.000Z
|
MachineLearning/knn/knn.py
|
z8g/common
|
abf6b9c09597bb2badec97d51112681e46dde760
|
[
"Apache-2.0"
] | null | null | null |
MachineLearning/knn/knn.py
|
z8g/common
|
abf6b9c09597bb2badec97d51112681e46dde760
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
import numpy
import operator
"""
================================================================================
Steps of the kNN algorithm:
1. Compute the distance between every point in the labelled data set and the current point (Euclidean distance formula)
2. Sort the points by increasing distance
3. Take the k points closest to the current point
4. Count how often each class appears among those k points
5. Return the most frequent class among the k points as the predicted class of the current point
================================================================================
"""
"""
Name: create the data set and labels
Usage: group,lables = kNN.createDataSet()
@return data set, list of labels
"""
def create_dataset():
group = numpy.array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
lables = ['A', 'B', 'B', 'B']
return group, lables
"""
Name: kNN classification
Usage: classify0([0,0],group,lables,3)
@param u input vector to classify
@param dataSet training sample set
@param lables label vector (labels has as many elements as dataSet has rows)
@param k number of nearest neighbours to use
@return class of the input vector (expressed as an element of the labels)
"""
def classify0(u, dataset, labels, k):
dataset_size = dataset.shape[0]
diff_matrix = numpy.tile(u, (dataset_size, 1)) - dataset
distances = (((diff_matrix ** 2).sum(axis=1)) ** 0.5).argsort()
class_count_map = {}
for i in range(k):
key = labels[distances[i]]
class_count_map[key] = class_count_map.get(key, 0) + 1
    value_tree_set = sorted(class_count_map.items(),  # items(): also works under Python 3
key=operator.itemgetter(1),
reverse=True)
return value_tree_set[0][0]
"""
Name: normalise feature values (scale them into [0,1] or [-1,1])
Usage: normDataSet, ranges, minValues = kNN.autoNorm(m)
@param dataset data set
@return normalised data set, value ranges, minimum values
The following formula maps any value into the interval [0,1]:
newValue = (oldValue - min) / (max - min)
"""
def auto_norm(dataset):
min_value = dataset.min(0)
max_value = dataset.max(0)
ranges = max_value - min_value
norm_dataset = numpy.zeros(numpy.shape(dataset))
row = dataset.shape[0]
norm_dataset = dataset - numpy.tile(min_value, (row, 1))
norm_dataset = norm_dataset / numpy.tile(ranges, (row, 1))
return norm_dataset, ranges, min_value
"""
Name: read the file contents into a matrix (dating example)
Usage: dataset_matrix,label_list = read_matrix('knnDataSet.txt')
@param filepath file path
@return data set matrix, list of labels
The first three columns go into the data set matrix, the last column into the label list
"""
def read_matrix(filepath):
file_reader = open(filepath)
lines = file_reader.readlines()
dataset_matrix = numpy.zeros((len(lines), 3))
label_list = []
index = 0
for line in lines:
items = line.strip().split('\t')
dataset_matrix[index, :] = items[0:3]
label_list.append(int(items[-1]))
index += 1
return dataset_matrix, label_list
"""
Name: read the file contents into a vector (handwritten digit recognition example)
Usage: return_vector = read_vector('digits/test/0_1.txt')
@param filepath file path
@return vector
Reads the whole 32x32 file into a single 1x1024 row vector
"""
def read_vector(filepath):
return_vector = numpy.zeros((1, 1024))
file_reader = open(filepath)
for i in range(32):
line = file_reader.readline()
for j in range(32):
return_vector[0, 32 * i + j] = int(line[j])
return return_vector
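# --- Editorial usage sketch (added by the editor, not part of the original file) ---
# Classifies one point against the toy data set defined above with k=3; the
# three nearest neighbours of [0, 0] are all labelled 'B', so 'B' is returned.
if __name__ == '__main__':
    group, labels = create_dataset()
    print(classify0([0.0, 0.0], group, labels, 3))  # -> 'B'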
| 26.136364
| 80
| 0.606957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,731
| 0.493022
|
5a6f7399d0e46958326d190fed0176f8bf1bbfef
| 468
|
py
|
Python
|
core/migrations/0012_alter_preco_categoria.py
|
thiagofreitascarneiro/Projeto_Fusion
|
4bf9d1c69ddf83fbc957e9ccdc41112d71bbffa9
|
[
"MIT"
] | null | null | null |
core/migrations/0012_alter_preco_categoria.py
|
thiagofreitascarneiro/Projeto_Fusion
|
4bf9d1c69ddf83fbc957e9ccdc41112d71bbffa9
|
[
"MIT"
] | null | null | null |
core/migrations/0012_alter_preco_categoria.py
|
thiagofreitascarneiro/Projeto_Fusion
|
4bf9d1c69ddf83fbc957e9ccdc41112d71bbffa9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-09-05 19:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0011_auto_20210905_1619'),
]
operations = [
migrations.AlterField(
model_name='preco',
name='categoria',
field=models.CharField(choices=[('Premium', 'C'), ('Pro', 'B'), ('Plus', 'A')], max_length=15, verbose_name='categoria'),
),
]
| 24.631579
| 133
| 0.587607
| 375
| 0.801282
| 0
| 0
| 0
| 0
| 0
| 0
| 136
| 0.290598
|
5a6fc90d5c1328218d16b60badb1e9edda81f0c8
| 2,394
|
py
|
Python
|
Source/State/Main_Menu.py
|
LesterYHZ/Super-Mario-Bro-Python-Project
|
2cbcb7ba713a81d37bd1ea16311f15e982a00774
|
[
"MIT"
] | null | null | null |
Source/State/Main_Menu.py
|
LesterYHZ/Super-Mario-Bro-Python-Project
|
2cbcb7ba713a81d37bd1ea16311f15e982a00774
|
[
"MIT"
] | null | null | null |
Source/State/Main_Menu.py
|
LesterYHZ/Super-Mario-Bro-Python-Project
|
2cbcb7ba713a81d37bd1ea16311f15e982a00774
|
[
"MIT"
] | null | null | null |
"""
Main menu set up
"""
import pygame
from .. import Setup
from .. import Tools
from .. import Constant as Con
from ..Components import Info
class MainMenu:
def __init__(self):
self.setup_background()
self.setup_player()
self.setup_cursor()
self.info = Info.Info("Main_Menu")
self.finished = False
self.next = "Load_Screen"
def setup_background(self):
self.background = Setup.GRAPHICS['World_1-1']
self.background_rect = self.background.get_rect()
self.background = pygame.transform.scale(self.background,
(int(self.background_rect.width * Con.BG_MULTI),
int(self.background_rect.height * Con.BG_MULTI)))
self.viewport = Setup.SCREEN.get_rect()
self.caption = Tools.get_image(Setup.GRAPHICS['Title_Screen'],
1,60,176,88,(225,0,220),Con.BG_MULTI)
def setup_player(self):
self.player_img = Tools.get_image(Setup.GRAPHICS['Mario_and_Luigi'],
80,34,16,16,(0,0,0),Con.PLAYER_MULTI)
def setup_cursor(self):
self.cursor = pygame.sprite.Sprite()
self.cursor.image = Tools.get_image(Setup.GRAPHICS['Items_Objects_and_NPCs'],
23,160,9,8,(0,0,0),Con.BG_MULTI)
rect = self.cursor.image.get_rect()
rect.x,rect.y = (220,357)
self.cursor.rect = rect
self.cursor.state = "1P"
def update_cursor(self,keys):
if keys[pygame.K_UP]:
self.cursor.state = "1P"
self.cursor.rect.y = 357
elif keys[pygame.K_DOWN]:
self.cursor.state = "2P"
self.cursor.rect.y = 402
elif keys[pygame.K_RETURN]:
if self.cursor.state == "1P":
self.finished = True
elif self.cursor.state == "2P":
self.finished = True
def update(self,surface,keys):
self.update_cursor(keys)
surface.blit(self.background,self.viewport)
surface.blit(self.caption,(165,100))
surface.blit(self.player_img,(110,498))
surface.blit(self.cursor.image,self.cursor.rect)
self.info.update()
self.info.draw(surface)
| 36.830769
| 99
| 0.552632
| 2,238
| 0.934837
| 0
| 0
| 0
| 0
| 0
| 0
| 136
| 0.056809
|
5a7057c32e096dcc96fd46f2913322b29562d86b
| 634
|
py
|
Python
|
user/models.py
|
ThePokerFaCcCe/teamwork
|
e6d3cfa7821ddba7a122b740e7f5dabb2b1eb316
|
[
"MIT"
] | null | null | null |
user/models.py
|
ThePokerFaCcCe/teamwork
|
e6d3cfa7821ddba7a122b740e7f5dabb2b1eb316
|
[
"MIT"
] | null | null | null |
user/models.py
|
ThePokerFaCcCe/teamwork
|
e6d3cfa7821ddba7a122b740e7f5dabb2b1eb316
|
[
"MIT"
] | null | null | null |
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import AbstractUser
from django.db import models
from user.validators import UsernameValidator
class User(AbstractUser):
username = models.CharField(
_("username"),
max_length=36,
unique=True,
help_text=_(
"Required. 36 characters or fewer. "
"English Letters, digits and one underscore _ only."
),
validators=[UsernameValidator()],
error_messages={
"unique": _("A user with that username already exists."),
},
db_index=True,
)
| 27.565217
| 69
| 0.637224
| 448
| 0.706625
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.235016
|
5a71f92e7f88851d5919ffc0e563e6147877d1d6
| 812
|
py
|
Python
|
Advent2016/6.py
|
SSteve/AdventOfCode
|
aed16209381ccd292fc02008f1f2da5d16ff1a05
|
[
"MIT"
] | null | null | null |
Advent2016/6.py
|
SSteve/AdventOfCode
|
aed16209381ccd292fc02008f1f2da5d16ff1a05
|
[
"MIT"
] | null | null | null |
Advent2016/6.py
|
SSteve/AdventOfCode
|
aed16209381ccd292fc02008f1f2da5d16ff1a05
|
[
"MIT"
] | null | null | null |
from collections import Counter
TEST = """eedadn
drvtee
eandsr
raavrd
atevrs
tsrnev
sdttsa
rasrtv
nssdts
ntnada
svetve
tesnvt
vntsnd
vrdear
dvrsen
enarar"""
def decode(lines: list[str], wantLeast=False):
result = ''
for i in range(len(lines[0])):
count = Counter(line[i] for line in lines)
if wantLeast:
result += count.most_common()[-1][0]
else:
result += count.most_common(1)[0][0]
return result
part1 = decode(TEST.splitlines())
assert part1 == 'easter'
part2 = decode(TEST.splitlines(), True)
assert part2 == 'advent'
with open('6.txt', 'r') as infile:
part1 = decode(infile.read().splitlines())
print(f"Part 1: {part1}")
with open('6.txt', 'r') as infile:
part2 = decode(infile.read().splitlines(), True)
print(f"Part 2: {part2}")
| 18.044444
| 52
| 0.64532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 191
| 0.235222
|
5a7517c33209b1b32f8a9e56da76245b5b0b9793
| 6,246
|
py
|
Python
|
profile_api/views.py
|
csalaman/profiles-rest-api
|
936d2a23fb78144c8e50a8d3de2b94051add49b9
|
[
"MIT"
] | null | null | null |
profile_api/views.py
|
csalaman/profiles-rest-api
|
936d2a23fb78144c8e50a8d3de2b94051add49b9
|
[
"MIT"
] | null | null | null |
profile_api/views.py
|
csalaman/profiles-rest-api
|
936d2a23fb78144c8e50a8d3de2b94051add49b9
|
[
"MIT"
] | null | null | null |
# DRF Views types (APIView & ViewSet)
# APIViews allow you to write the standard HTTP methods as functions & give the most control over the logic
# Benefits: Perfect for implementing complex logic, calling other APIs, working with local files
# Viewsets -> use model operations for the functions list, create, retrieve, update, partial_update, destroy
# When to use: simple CRUD interface to database, quick & simple API, little to no customization on the logic, working with standard data structures
# Good to use when: need full control over the logic(complex algo, updating multiple datasources in a single API call),
# processing files and rendering a synchronous response, calling other APIs/services, accessing local files or data
from rest_framework.views import APIView
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
# Import the serializer (app_name/serializers.py)
from profile_api import serializers
from profile_api import models
# Get Auth Token (For user authentication for every request)
from rest_framework.authentication import TokenAuthentication
# Get View Auth Token (for login, etc)
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
# Import permissions
from profile_api import permissions
# Import filters for filtering of data
from rest_framework import filters
class HelloApiView(APIView):
"""Test API View"""
# Config serializer class to use
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a list of APIView features"""
an_apiview = [
'Uses HTTP Methods as functions (get, post, patch, put, delete)',
'Similar to traditional Django View',
'Is mapped manually to URLs',
]
# Send Response with list/dictionary of data to include
return Response({'message': "Hello World!", 'an_apiview':an_apiview})
def post(self, request):
"""Create a hello message with our name"""
# Pass request data using to serializer class (param(data=request.data))
serializer = self.serializer_class(data=request.data)
# Check if the request data is valid
if serializer.is_valid():
# Use the serializer method validated_data to get fields of valid request data
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message': message})
else:
# Return the serializer errors and response code
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST,
)
def put(self, request, pk=None):
"""Handle updating an object"""
return Response({'method':'PUT'})
def patch(self, request, pk=None):
"""Handle a partial update of an object, specify the fields"""
return Response({'method':'PATCH'})
def delete(self, request, pk=None):
"""Delete an object"""
return Response({'method':'DELETE'})
class HelloViewSet(viewsets.ViewSet):
"""Test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Return a message"""
a_viewset = [
'Uses actions (list,create,retrieve, update, partial_update)',
'Automatically maps to URLs using Routers',
'Provides more functionality with less code'
]
return Response({'message':'Hello World', 'a_viewset':a_viewset})
def create(self, request):
"""Create a new message"""
# Pass to serializer & validate
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message':message})
else:
return Response(serializer.errors,status= status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk=None):
"""Retrieve object by ID"""
return Response({'http_method':'GET'})
def update(self, request, pk=None):
"""Update an object"""
return Response({'http_method':'PUT'})
def partial_update(self,request, pk=None):
"""Partial update on object"""
return Response({'http_method':'PATCH'})
def destroy(self, request, pk=None):
"""Destroy an object"""
return Response({'http_method':'DELETE'})
# Viewset to manage user profiles API
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and updating user profiles"""
serializer_class = serializers.UserProfileSerializer
# ModelViewSet- provide possible functions for model
queryset = models.UserProfile.objects.all()
# Define authentication(authentication_classes) classes (more types can be added for particular viewset)
authentication_classes = (TokenAuthentication,)
# Define permission(permission_classes), how users will authenticate & can do
permission_classes = (permissions.UpdateOwnProfile,)
# Define filters & searchable fields
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserLoginAPIView(ObtainAuthToken):
"""Handle creating user authentication token"""
# Enable browsable API for testing
renderer_classes = (api_settings.DEFAULT_RENDERER_CLASSES)
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handles creating, reading, and updating profile feed items"""
# Define AUTH
authentication_classes = (TokenAuthentication,)
# Define serializer
serializer_class = serializers.ProfileFeedItemSerializer
# Define possible model functions to manage
queryset = models.ProfileFeedItem.objects.all()
# Define permission for user
permission_classes = (
permissions.UpdateOwnStatus,
IsAuthenticated,
)
# DRF override perform_create
def perform_create(self, serializer):
"""Sets the user profile to the logged in user"""
serializer.save(user_profile=self.request.user)
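
# Illustrative sketch (not part of the original file): how the ViewSets above are
# typically exposed through a DRF router, which generates the URL patterns
# automatically as the header comments describe. In a real project this block would
# live in the app's urls.py; the prefixes, basenames, and paths below are assumptions.
from django.urls import path, include
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register('hello-viewset', HelloViewSet, basename='hello-viewset')
router.register('profile', UserProfileViewSet)
router.register('feed', UserProfileFeedViewSet)

urlpatterns = [
    path('hello-view/', HelloApiView.as_view()),
    path('login/', UserLoginAPIView.as_view()),
    path('', include(router.urls)),
]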
| 39.0375
| 148
| 0.693724
| 4,731
| 0.757445
| 0
| 0
| 0
| 0
| 0
| 0
| 2,882
| 0.461415
|
5a75c828e876ed3a1b7b9389dd4545aaaf2d9462
| 466
|
py
|
Python
|
examples/panflute/myemph.py
|
jacobwhall/panflute
|
281ddeaebd2c2c94f457f3da785037cadf69389e
|
[
"BSD-3-Clause"
] | 361
|
2016-04-26T18:23:30.000Z
|
2022-03-24T20:58:18.000Z
|
examples/panflute/myemph.py
|
jacobwhall/panflute
|
281ddeaebd2c2c94f457f3da785037cadf69389e
|
[
"BSD-3-Clause"
] | 164
|
2016-04-27T18:42:55.000Z
|
2022-02-13T23:34:17.000Z
|
examples/panflute/myemph.py
|
jacobwhall/panflute
|
281ddeaebd2c2c94f457f3da785037cadf69389e
|
[
"BSD-3-Clause"
] | 62
|
2016-06-15T13:33:54.000Z
|
2021-11-20T07:33:07.000Z
|
#!/usr/bin/env python
import panflute as pf
"""
Pandoc filter that causes emphasis to be rendered using
the custom macro '\myemph{...}' rather than '\emph{...}'
in latex. Other output formats are unaffected.
"""
def latex(s):
return pf.RawInline(s, format='latex')
def myemph(e, doc):
if type(e)==pf.Emph and doc.format=='latex':
return pf.Span(latex('\\myemph{'), *e.items, latex('}'))
if __name__ == "__main__":
pf.toJSONFilter(myemph)
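
# Illustrative usage (not part of the original file): the filter is normally invoked
# through pandoc; the file names below are placeholders.
#
#     pandoc input.md --filter ./myemph.py -o output.tex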
| 21.181818
| 64
| 0.654506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 227
| 0.487124
|
5a78040379a605d417a65ff4123fa8c2e73e5ad9
| 3,393
|
py
|
Python
|
src/financial_statements/old/balance_sheet.py
|
LeanderLXZ/intelligent-analysis-of-financial-statements
|
38bab5bea3c2f22f71020020c8325f6b6b014853
|
[
"Apache-2.0"
] | null | null | null |
src/financial_statements/old/balance_sheet.py
|
LeanderLXZ/intelligent-analysis-of-financial-statements
|
38bab5bea3c2f22f71020020c8325f6b6b014853
|
[
"Apache-2.0"
] | null | null | null |
src/financial_statements/old/balance_sheet.py
|
LeanderLXZ/intelligent-analysis-of-financial-statements
|
38bab5bea3c2f22f71020020c8325f6b6b014853
|
[
"Apache-2.0"
] | 1
|
2021-12-15T02:09:16.000Z
|
2021-12-15T02:09:16.000Z
|
import time
import threading
import argparse
import tushare as ts
import numpy as np
import pandas as pd
from pandas import datetime as dt
from tqdm import tqdm
from utils import *
with open('../../tushare_token.txt', 'r') as f:
token = f.readline()
ts.set_token(token)
tushare_api = ts.pro_api()
# Stock list
df_list = []
for list_status in ['L', 'D', 'P']:
df_i = tushare_api.stock_basic(
exchange='',
list_status=list_status,
fields='ts_code')
df_list.append(df_i)
df_all = pd.concat(df_list)
# Balance sheet
df = pd.DataFrame()
for ts_code in tqdm(df_all['ts_code'].values):
df_i = safe_get(
tushare_api.balancesheet,
ts_code=ts_code,
fields=
'ts_code, ann_date, f_ann_date, end_date, report_type, comp_type,'
'total_share, cap_rese, undistr_porfit, surplus_rese, special_rese,'
'money_cap, trad_asset, notes_receiv, accounts_receiv, oth_receiv,'
'prepayment, div_receiv, int_receiv, inventories, amor_exp,'
'nca_within_1y, sett_rsrv, loanto_oth_bank_fi, premium_receiv,'
'reinsur_receiv, reinsur_res_receiv, pur_resale_fa, oth_cur_assets,'
'total_cur_assets, fa_avail_for_sale, htm_invest, lt_eqt_invest,'
'invest_real_estate, time_deposits, oth_assets, lt_rec, fix_assets,'
'cip, const_materials, fixed_assets_disp, produc_bio_assets,'
'oil_and_gas_assets, intan_assets, r_and_d, goodwill, lt_amor_exp,'
'defer_tax_assets, decr_in_disbur, oth_nca, total_nca, cash_reser_cb,'
'depos_in_oth_bfi, prec_metals, deriv_assets, rr_reins_une_prem,'
'rr_reins_outstd_cla, rr_reins_lins_liab, rr_reins_lthins_liab,'
'refund_depos, ph_pledge_loans, refund_cap_depos, indep_acct_assets,'
'client_depos, client_prov, transac_seat_fee, invest_as_receiv,'
'total_assets, lt_borr, st_borr, cb_borr, depos_ib_deposits,'
'loan_oth_bank, trading_fl, notes_payable, acct_payable, adv_receipts,'
'sold_for_repur_fa, comm_payable, payroll_payable, taxes_payable,'
'int_payable, div_payable, oth_payable, acc_exp, deferred_inc,'
'st_bonds_payable, payable_to_reinsurer, rsrv_insur_cont,'
'acting_trading_sec, acting_uw_sec, non_cur_liab_due_1y, oth_cur_liab,'
'total_cur_liab, bond_payable, lt_payable, specific_payables,'
'estimated_liab, defer_tax_liab, defer_inc_non_cur_liab, oth_ncl,'
'total_ncl, depos_oth_bfi, deriv_liab, depos, agency_bus_liab,'
'oth_liab, prem_receiv_adva, depos_received, ph_invest, reser_une_prem,'
'reser_outstd_claims, reser_lins_liab, reser_lthins_liab,'
'indept_acc_liab, pledge_borr, indem_payable, policy_div_payable,'
'total_liab, treasury_share, ordin_risk_reser, forex_differ,'
'invest_loss_unconf, minority_int, total_hldr_eqy_exc_min_int,'
'total_hldr_eqy_inc_min_int, total_liab_hldr_eqy, lt_payroll_payable,'
'oth_comp_income, oth_eqt_tools, oth_eqt_tools_p_shr, lending_funds,'
'acc_receivable, st_fin_payable, payables, hfs_assets, hfs_sales,'
'update_flag'
)
df_i = df_i.drop_duplicates()
df_i = df_i.reindex(index=df_i.index[::-1])
df_i.insert(0, 'code', [c[:6] for c in df_i['ts_code']])
df = df.append(df_i)
df = df.reset_index(drop=True)
df.to_csv('../../data/financial_statements/balance_sheet.csv', index=False)
| 44.644737
| 80
| 0.72178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,258
| 0.661976
|
5a79960fc035f3d47bd3d6b6b9332c5bd900eee5
| 1,208
|
py
|
Python
|
examples/wsgi/test.py
|
gelnior/couchdbkit
|
8277d6ffd00553ae0b0b2368636460d40f8d8225
|
[
"MIT"
] | 51
|
2015-04-01T14:53:46.000Z
|
2022-03-16T09:16:10.000Z
|
examples/wsgi/test.py
|
gelnior/couchdbkit
|
8277d6ffd00553ae0b0b2368636460d40f8d8225
|
[
"MIT"
] | 17
|
2015-02-04T11:25:02.000Z
|
2021-07-10T10:17:53.000Z
|
examples/wsgi/test.py
|
gelnior/couchdbkit
|
8277d6ffd00553ae0b0b2368636460d40f8d8225
|
[
"MIT"
] | 40
|
2015-01-13T23:38:01.000Z
|
2022-02-26T22:08:01.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008,2009 Benoit Chesneau <benoitc@e-engura.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import couchdbkit
from couchdbkit.contrib import WSGIHandler
import json
def app(environ, start_response):
"""Simplest possible application object"""
data = 'Hello, World!\n DB Infos : %s\n' % json.dumps(environ["COUCHDB_INFO"])
status = '200 OK'
response_headers = [
('Content-type','text/plain'),
('Content-Length', len(data))
]
start_response(status, response_headers)
return [data]
def main():
handler = WSGIHandler(app)
handler.run()
if __name__ == "__main__":
main()
| 30.974359
| 83
| 0.693709
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 799
| 0.661424
|
5a7ade7264494768c161fd0f8d10b792225101d5
| 2,480
|
py
|
Python
|
src/comments/api/views/DetailAPIView.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | null | null | null |
src/comments/api/views/DetailAPIView.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | 4
|
2021-03-30T12:35:36.000Z
|
2021-06-10T18:11:24.000Z
|
src/comments/api/views/DetailAPIView.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | 2
|
2021-02-07T16:16:36.000Z
|
2021-07-13T05:26:51.000Z
|
from django.views import View
from comments.models import Comment
from django.http import JsonResponse
from utils.decorators import fail_safe_api
from utils.models import nested_model_to_dict
from utils.request import parse_body, set_user
from django.contrib.contenttypes.models import ContentType
class DetailAPIView(View):
def dispatch(self, request, *args, **kwargs):
set_user(request)
if request.user.is_authenticated:
parse_body(request, for_method=request.method)
return super(DetailAPIView, self).dispatch(request, *args, **kwargs)
@fail_safe_api(for_model=Comment)
def get(self, request, slug):
comment = Comment.objects.get(slug=slug)
comment_dict = nested_model_to_dict(comment)
comment_dict['content_object'] = nested_model_to_dict(comment.content_object)
content = {
"status": 200,
"data" : {
"comment": comment_dict
},
"meta": {
"count" : 1
}
}
return JsonResponse(content)
@fail_safe_api(for_model=Comment, needs_authentication=True)
def post(self, request, slug):
parent = Comment.objects.get(slug=slug)
content_object = parent.content_object
content_type = parent.content_type
content = request.POST.get('content', '')
object_id = content_object.id
created_comment = Comment.objects.create(user=request.user, content=content,content_object=content_object,
content_type=content_type, parent=parent, object_id=object_id)
content = {
"status": 200,
"data" : {
"comment": nested_model_to_dict(created_comment)
},
"message" : 'created',
"meta": {
"count" : 1
}
}
return JsonResponse(content)
@fail_safe_api(for_model=Comment, needs_authentication=True)
def delete(self, request, slug):
comment = Comment.objects.get(slug=slug)
is_parent = comment.is_parent
parent = comment.content_object if is_parent else comment.parent
comment.delete()
content = {
"status" : 200,
"data" : {
"is_parent" : is_parent,
"parent_slug" : parent.slug,
},
"message" : "comment deleted"
}
return JsonResponse(content)
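
# Illustrative sketch (not part of the original file): one way to route requests to
# this view. The 'comments/' prefix and the route name are assumptions; in a real
# project this pattern would live in the app's urls.py.
from django.urls import path

urlpatterns = [
    path('comments/<slug:slug>/', DetailAPIView.as_view(), name='comment-detail'),
]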
| 26.666667
| 114
| 0.604435
| 2,178
| 0.878226
| 0
| 0
| 1,875
| 0.756048
| 0
| 0
| 181
| 0.072984
|
5a7b8772eb3240b031d703bd91a985fdc85cecd0
| 2,857
|
py
|
Python
|
src/router.py
|
mix2zeta/social-d
|
923cc2b224470e940ae6ac9cc712adb685c1b216
|
[
"MIT"
] | null | null | null |
src/router.py
|
mix2zeta/social-d
|
923cc2b224470e940ae6ac9cc712adb685c1b216
|
[
"MIT"
] | null | null | null |
src/router.py
|
mix2zeta/social-d
|
923cc2b224470e940ae6ac9cc712adb685c1b216
|
[
"MIT"
] | 1
|
2021-03-11T09:07:11.000Z
|
2021-03-11T09:07:11.000Z
|
from aiohttp import web
import urllib.parse
from conf import settings
ROUTER = {
"poke_task": {
"url": "/poke",
"GET": "request_handle.poke_task",
"POST": "request_handle.poke_task",
},
"task": {
"url": "/task/{task_id}",
"GET": "request_handle.get_task",
},
"message": {
"url": "/message/{msg_id}",
"GET": "request_handle.get_message_by_id"
},
"message-daily": {
"url": "/date/{from}/{to}/message/daily",
"GET": "request_handle.get_daily_message_count"
},
"message-top": {
"url": "/date/{from}/{to}/message/top",
"GET": "request_handle.get_account_by_message"
},
"message-engagement": {
"url": "/date/{from}/{to}/message/engagement",
"GET": "request_handle.get_message_by_engagement"
},
"wordcloud":{
"url": "/date/{from}/{to}/message/{cloud_type}",
"GET": "request_handle.get_word_cloud"
},
}
def generate_routes() -> list:
routes = []
for key, value in ROUTER.items():
if "GET" in value:
handler = value["GET"]
routes.append(
web.get(value["url"], object_at_end_of_path(handler), name=f"get-{key}")
)
if "PUT" in value:
handler = value["PUT"]
routes.append(
web.put(value["url"], object_at_end_of_path(handler), name=f"put-{key}")
)
if "POST" in value:
handler = value["POST"]
routes.append(
web.post(
value["url"], object_at_end_of_path(handler), name=f"post-{key}"
)
)
if "DELETE" in value:
handler = value["DELETE"]
routes.append(
web.delete(
value["url"], object_at_end_of_path(handler), name=f"delete-{key}"
)
)
return routes
def reverse(name: str, **kwargs) -> str:
return urllib.parse.urljoin(
settings.BASE_URL,
urllib.parse.quote_plus("." + ROUTER[name]["url"].format(**kwargs), safe="/"),
)
def object_at_end_of_path(path):
"""Attempt to return the Python object at the end of the dotted
path by repeated imports and attribute access.
"""
access_path = path.split(".")
module = None
for index in range(1, len(access_path)):
try:
# import top level module
module_name = ".".join(access_path[:-index])
module = __import__(module_name)
except ImportError:
continue
else:
for step in access_path[1:-1]: # walk down it
module = getattr(module, step)
break
if module:
return getattr(module, access_path[-1])
else:
return globals()["__builtins__"][path]
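
# Illustrative usage sketch (not part of the original file): building an aiohttp
# application from the ROUTER table above. Host and port are arbitrary; the
# request_handle module referenced in ROUTER must be importable so that
# object_at_end_of_path() can resolve the handlers.
if __name__ == "__main__":
    app = web.Application()
    app.add_routes(generate_routes())
    web.run_app(app, host="0.0.0.0", port=8080)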
| 28.287129
| 88
| 0.533427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 921
| 0.322366
|
5a7f094b28c04c830704df3edc53f45db870422e
| 3,668
|
py
|
Python
|
golly_python/manager.py
|
golly-splorts/golly-python
|
54bc277cc2aed9f35b67a6f8de1d468d9893440c
|
[
"MIT"
] | null | null | null |
golly_python/manager.py
|
golly-splorts/golly-python
|
54bc277cc2aed9f35b67a6f8de1d468d9893440c
|
[
"MIT"
] | null | null | null |
golly_python/manager.py
|
golly-splorts/golly-python
|
54bc277cc2aed9f35b67a6f8de1d468d9893440c
|
[
"MIT"
] | null | null | null |
import json
from .life import BinaryLife
class GOL(object):
team_names: list = []
columns = 0
rows = 0
def __init__(self, **kwargs):
self.load_config(**kwargs)
self.create_life()
def __repr__(self):
s = []
s.append("+" + "-" * (self.columns) + "+")
for i in range(self.rows):
row = "|"
for j in range(self.columns):
if self.life.is_alive(j, i):
color = self.life.get_cell_color(j, i)
if color == 1:
row += "#"
elif color == 2:
row += "o"
else:
row += "?"
else:
row += "."
row += "|"
s.append(row)
s.append("+" + "-" * (self.columns) + "+")
rep = "\n".join(s)
rep += "\n"
livecounts = self.count()
rep += "\nGeneration: %d" % (self.generation)
rep += "\nLive cells, color 1: %d" % (livecounts["liveCells1"])
rep += "\nLive cells, color 2: %d" % (livecounts["liveCells2"])
rep += "\nLive cells, total: %d" % (livecounts["liveCells"])
rep += "\nVictory Percent: %0.1f %%" % (livecounts["victoryPct"])
rep += "\nCoverage: %0.2f %%" % (livecounts["coverage"])
rep += "\nTerritory, color 1: %0.2f %%" % (livecounts["territory1"])
rep += "\nTerritory, color 2: %0.2f %%" % (livecounts["territory2"])
return rep
def load_config(self, **kwargs):
"""Load configuration from user-provided input params"""
if "s1" in kwargs and "s2" in kwargs:
self.ic1 = kwargs["s1"]
self.ic2 = kwargs["s2"]
else:
raise Exception("ERROR: s1 and s2 parameters must both be specified")
if "rows" in kwargs and "columns" in kwargs:
self.rows = kwargs["rows"]
self.columns = kwargs["columns"]
else:
raise Exception(
"ERROR: rows and columns parameters must be provided to GOL constructor"
)
if "team1" in kwargs and "team2" in kwargs:
self.team_names = [kwargs["team1"], kwargs["team2"]]
else:
self.team_names = ["Team 1", "Team 2"]
# Whether to stop when a victor is detected
if "halt" in kwargs:
self.halt = kwargs["halt"]
else:
self.halt = True
self.found_victor = False
# Neighbor color legacy mode was used in Seasons 1-3
if "neighbor_color_legacy_mode" in kwargs:
self.neighbor_color_legacy_mode = kwargs["neighbor_color_legacy_mode"]
else:
self.neighbor_color_legacy_mode = False
def create_life(self):
try:
s1 = json.loads(self.ic1)
except json.decoder.JSONDecodeError:
err = "Error: Could not load data as json:\n"
err += self.ic1
raise Exception(err)
try:
s2 = json.loads(self.ic2)
except json.decoder.JSONDecodeError:
err = "Error: Could not load data as json:\n"
err += self.ic1
raise Exception(err)
self.life = BinaryLife(
s1,
s2,
self.rows,
self.columns,
self.neighbor_color_legacy_mode
)
def next_step(self):
return self.life.next_step()
def count(self):
return self.life.get_stats()
@property
def running(self):
return self.life.running
@property
def generation(self):
return self.life.generation
| 30.823529
| 88
| 0.507361
| 3,624
| 0.988004
| 0
| 0
| 136
| 0.037077
| 0
| 0
| 848
| 0.231189
|
5a7f42aae312bdb1dfd1e806bfb1013a4638beeb
| 48
|
py
|
Python
|
surge_multiplier_mdp/__init__.py
|
mbattifarano/surge-multiplier-mdp
|
8a8477662a2a9b7daa7acb8b8cf486bef0ec8c05
|
[
"MIT"
] | null | null | null |
surge_multiplier_mdp/__init__.py
|
mbattifarano/surge-multiplier-mdp
|
8a8477662a2a9b7daa7acb8b8cf486bef0ec8c05
|
[
"MIT"
] | null | null | null |
surge_multiplier_mdp/__init__.py
|
mbattifarano/surge-multiplier-mdp
|
8a8477662a2a9b7daa7acb8b8cf486bef0ec8c05
|
[
"MIT"
] | null | null | null |
from .mdp_value_iteration import value_iteration
| 48
| 48
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5a7f6cebc7d1d5a0a12a5527001bd5fbb8d22d54
| 568
|
py
|
Python
|
DiplomaProject/office/admin.py
|
iamgo100/diploma
|
fc7314468631bf43774b4678890d2a315658713c
|
[
"MIT"
] | null | null | null |
DiplomaProject/office/admin.py
|
iamgo100/diploma
|
fc7314468631bf43774b4678890d2a315658713c
|
[
"MIT"
] | null | null | null |
DiplomaProject/office/admin.py
|
iamgo100/diploma
|
fc7314468631bf43774b4678890d2a315658713c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Shift, Service, Appointment
class ShiftAdmin(admin.ModelAdmin):
fields = ['date', 'master', 'room']
list_display = ('date', 'master', 'status')
class ServiceAdmin(admin.ModelAdmin):
list_display = ('service_name', 'cost', 'duration', 'room')
class AppointmentAdmin(admin.ModelAdmin):
list_display = ('service', 'client', 'date', 'time', 'shift')
admin.site.register(Shift, ShiftAdmin)
admin.site.register(Service, ServiceAdmin)
admin.site.register(Appointment, AppointmentAdmin)
| 35.5
| 66
| 0.713028
| 336
| 0.591549
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.200704
|
5a7fe776654c20e1290bc4e948072b1dcc063b7e
| 2,007
|
py
|
Python
|
util/query_jmx.py
|
perfsonar/esmond
|
391939087321c1438d54cdadee3eb936b95f3e92
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2019-10-23T01:10:19.000Z
|
2022-03-26T18:40:44.000Z
|
util/query_jmx.py
|
perfsonar/esmond
|
391939087321c1438d54cdadee3eb936b95f3e92
|
[
"BSD-3-Clause-LBNL"
] | 23
|
2018-12-05T20:30:04.000Z
|
2020-11-11T19:20:57.000Z
|
util/query_jmx.py
|
perfsonar/esmond
|
391939087321c1438d54cdadee3eb936b95f3e92
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2019-02-11T20:40:41.000Z
|
2022-03-26T18:40:50.000Z
|
#!/usr/bin/env python3
"""
Code to issue calls to the cassandra MX4J http server and get stats.
"""
import os
import sys
from optparse import OptionParser
from esmond.api.client.jmx import CassandraJMX
def main():
usage = '%prog [ -U ]'
parser = OptionParser(usage=usage)
parser.add_option('-U', '--url', metavar='URL',
type='string', dest='url', default='http://localhost:8081',
help='URL:port to cassandra mx4j server (default=%default).')
parser.add_option('-v', '--verbose',
dest='verbose', action='count', default=False,
help='Verbose output - -v, -vv, etc.')
options, args = parser.parse_args()
cjmx = CassandraJMX(options.url)
print('Heap mem:', cjmx.get_heap_memory())
print('Non-heap mem:', cjmx.get_non_heap_memory())
print('Read latency:', cjmx.get_read_latency())
print('Write latency:', cjmx.get_write_latency())
print('Range latency:', cjmx.get_range_latency())
print('GC count:', cjmx.get_gc_count())
print('GC time:', cjmx.get_gc_time())
print('Active read tasks:', cjmx.get_read_active())
print('Pending read tasks:', cjmx.get_read_pending())
print('Completed read tasks:', cjmx.get_read_completed())
print('Active write tasks:', cjmx.get_write_active())
print('Pending write tasks:', cjmx.get_write_pending())
print('Completed write tasks:',cjmx.get_write_completed())
print('Active gossip tasks:', cjmx.get_gossip_active())
print('Pending gossip tasks:', cjmx.get_gossip_pending())
print('Completed gossip tasks:',cjmx.get_gossip_completed())
print('OS load:', cjmx.get_os_load())
print('OS free mem:', cjmx.get_os_free_memory())
print('OS free swap:', cjmx.get_os_free_swap())
print('OS committed virtual mem:', cjmx.get_os_committed_virtual_memory())
print('Pending compaction', cjmx.get_compaction_pending())
print('Completed compaction', cjmx.get_compaction_complete())
if __name__ == '__main__':
main()
| 38.596154
| 78
| 0.678127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 695
| 0.346288
|
5a8074c85da0b1531e270b6b0eaa82126e705010
| 1,294
|
py
|
Python
|
apps/accounts/management/commands/amend_hostingproviders_stats.py
|
BR0kEN-/admin-portal
|
0c38dc0d790031f45bf07660bce690e972fe2858
|
[
"Apache-2.0"
] | null | null | null |
apps/accounts/management/commands/amend_hostingproviders_stats.py
|
BR0kEN-/admin-portal
|
0c38dc0d790031f45bf07660bce690e972fe2858
|
[
"Apache-2.0"
] | null | null | null |
apps/accounts/management/commands/amend_hostingproviders_stats.py
|
BR0kEN-/admin-portal
|
0c38dc0d790031f45bf07660bce690e972fe2858
|
[
"Apache-2.0"
] | null | null | null |
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
help = "Add missing id column for hostingstats."
def handle(self, *args, **options):
with connection.cursor() as cursor:
self.cursor = cursor
self.cursor.execute(
"""
START TRANSACTION;
CREATE TABLE `hostingproviders_stats_copy` (
`id` INT(11) primary key Not null auto_increment,
`id_hp` Int( 11 ) NOT NULL,
`green_checks` Int( 11 ) NOT NULL,
`green_domains` Int( 11 ) NOT NULL,
CONSTRAINT `id_hp` UNIQUE( `id_hp` ) )
CHARACTER SET = latin1
COLLATE = latin1_swedish_ci
ENGINE = InnoDB;
-------------------------------------------------------------
INSERT into hostingproviders_stats_copy(id_hp, green_checks, green_domains)
SELECT id_hp, green_checks, green_domains FROM hostingproviders_stats;
DROP table hostingproviders_stats;
ALTER table hostingproviders_stats_copy rename to hostingproviders_stats;
COMMIT;
"""
)
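
# Illustrative usage (not part of the original file): as a Django management command,
# this script is run by its module name from the project root, e.g.
#
#     python manage.py amend_hostingproviders_stats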
| 38.058824
| 91
| 0.532457
| 1,206
| 0.931994
| 0
| 0
| 0
| 0
| 0
| 0
| 985
| 0.761206
|
5a80b2e184b51cbc11327bc99c0e1506a3d4bc1b
| 2,493
|
py
|
Python
|
src/brain_atlas/diff_exp.py
|
MacoskoLab/brain-atlas
|
6db385435ea1a6e96fd019963b4f7e23148a7b9a
|
[
"MIT"
] | 2
|
2022-01-21T19:13:35.000Z
|
2022-03-24T07:46:57.000Z
|
src/brain_atlas/diff_exp.py
|
MacoskoLab/brain-atlas
|
6db385435ea1a6e96fd019963b4f7e23148a7b9a
|
[
"MIT"
] | null | null | null |
src/brain_atlas/diff_exp.py
|
MacoskoLab/brain-atlas
|
6db385435ea1a6e96fd019963b4f7e23148a7b9a
|
[
"MIT"
] | null | null | null |
import numba as nb
import numpy as np
import scipy.stats
@nb.njit(parallel=True)
def tiecorrect(rankvals):
"""
parallelized version of scipy.stats.tiecorrect
:param rankvals: p x n array of ranked data (output of rankdata function)
"""
tc = np.ones(rankvals.shape[1], dtype=np.float64)
for j in nb.prange(rankvals.shape[1]):
arr = np.sort(np.ravel(rankvals[:, j]))
idx = np.nonzero(
np.concatenate((np.array([True]), arr[1:] != arr[:-1], np.array([True])))
)[0]
t_k = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
if size >= 2:
tc[j] = 1.0 - (t_k ** 3 - t_k).sum() / (size ** 3 - size)
return tc
@nb.njit(parallel=True)
def rankdata(data):
"""
parallelized version of scipy.stats.rankdata
:param data: p x n array of data to rank, column-wise
"""
ranked = np.empty(data.shape, dtype=np.float64)
for j in nb.prange(data.shape[1]):
arr = np.ravel(data[:, j])
sorter = np.argsort(arr)
arr = arr[sorter]
obs = np.concatenate((np.array([True]), arr[1:] != arr[:-1]))
dense = np.empty(obs.size, dtype=np.int64)
dense[sorter] = obs.cumsum()
# cumulative counts of each unique value
count = np.concatenate((np.nonzero(obs)[0], np.array([len(obs)])))
ranked[:, j] = 0.5 * (count[dense] + count[dense - 1] + 1)
return ranked
def mannwhitneyu(x, y, use_continuity=True):
"""Version of Mann-Whitney U-test that runs in parallel on 2d arrays
This is the two-sided test, asymptotic algo only. Returns log p-values
"""
x = np.asarray(x)
y = np.asarray(y)
assert x.shape[1] == y.shape[1]
n1 = x.shape[0]
n2 = y.shape[0]
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[:n1, :] # get the x-ranks
u1 = n1 * n2 + (n1 * (n1 + 1)) / 2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1 * n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
# if *everything* is identical we'll raise an error, not otherwise
if np.all(T == 0):
raise ValueError("All numbers are identical in mannwhitneyu")
sd = np.sqrt(T * n1 * n2 * (n1 + n2 + 1) / 12.0)
meanrank = n1 * n2 / 2.0 + 0.5 * use_continuity
bigu = np.maximum(u1, u2)
with np.errstate(divide="ignore", invalid="ignore"):
z = (bigu - meanrank) / sd
logp = np.minimum(scipy.stats.norm.logsf(z) + np.log(2), 0)
return u2, logp
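
# Illustrative usage sketch (not part of the original file): runs the parallel
# Mann-Whitney U test on two small random count matrices. Shapes, rates, and the
# seed are arbitrary; each of the 10 columns is tested independently, as described
# in the docstring, and log p-values are returned.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.poisson(1.0, size=(50, 10))   # group 1: 50 observations x 10 features
    y = rng.poisson(2.0, size=(80, 10))   # group 2: 80 observations x 10 features
    u, logp = mannwhitneyu(x, y)
    print(u.shape, logp.shape)            # one U statistic and one log p-value per column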
| 29.329412
| 85
| 0.584436
| 0
| 0
| 0
| 0
| 1,378
| 0.552748
| 0
| 0
| 630
| 0.252708
|
5a81a24952b6eed80c202bd9ff7db7e295855534
| 2,088
|
py
|
Python
|
piece.py
|
brouxco/quarto-solver
|
12ae87f43d4a80137cb4394de9c399d8f9894da3
|
[
"0BSD"
] | null | null | null |
piece.py
|
brouxco/quarto-solver
|
12ae87f43d4a80137cb4394de9c399d8f9894da3
|
[
"0BSD"
] | null | null | null |
piece.py
|
brouxco/quarto-solver
|
12ae87f43d4a80137cb4394de9c399d8f9894da3
|
[
"0BSD"
] | null | null | null |
class Piece(object):
def __init__(self,
is_tall: bool = True,
is_dark: bool = True,
is_square: bool = True,
is_solid: bool = True,
string: str = None):
if string:
self.is_tall = (string[0] == "1")
self.is_dark = (string[1] == "1")
self.is_square = (string[2] == "1")
self.is_solid = (string[3] == "1")
else:
self.is_tall = is_tall
self.is_dark = is_dark
self.is_square = is_square
self.is_solid = is_solid
def __str__(self):
return "{0}{1}{2}{3}".format(
'1' if self.is_tall else '0',
'1' if self.is_dark else '0',
'1' if self.is_square else '0',
'1' if self.is_solid else '0'
)
def __hash__(self):
res = 0
res += 1 if self.is_tall else 0
res += 2 if self.is_dark else 0
res += 4 if self.is_square else 0
res += 8 if self.is_solid else 0
return res
def __eq__(self, other_piece):
if not isinstance(other_piece, type(self)):
return False
return self.__hash__() == other_piece.__hash__()
def has_in_common_with(self, *other_pieces):
all_pieces_are_as_tall = True
all_pieces_are_as_dark = True
all_pieces_are_as_square = True
all_pieces_are_as_solid = True
for p in other_pieces:
if not(self.is_tall == p.is_tall):
all_pieces_are_as_tall = False
if not(self.is_dark == p.is_dark):
all_pieces_are_as_dark = False
if not(self.is_square == p.is_square):
all_pieces_are_as_square = False
if not(self.is_solid == p.is_solid):
all_pieces_are_as_solid = False
return (all_pieces_are_as_tall
or all_pieces_are_as_dark
or all_pieces_are_as_square
or all_pieces_are_as_solid)
if __name__ == "__main__":
pass
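
    # Illustrative usage sketch (not part of the original file): demonstrates the
    # shared-attribute check used to detect a Quarto line. The encodings follow the
    # constructor's string format: tall/dark/square/solid flags as '1' or '0'.
    p1 = Piece(string="1101")
    p2 = Piece(string="1010")
    p3 = Piece(is_tall=True, is_dark=False, is_square=True, is_solid=False)
    print(p1.has_in_common_with(p2, p3))  # True: all three pieces are tall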
| 32.625
| 56
| 0.531609
| 2,049
| 0.981322
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 0.028736
|
5a81e0954b1a9e5e3552a3af4e53c8b36b9c007f
| 21,061
|
py
|
Python
|
tests/test_build_docs.py
|
simon-ritchie/action-py-script
|
f502ede320089562d77d13231e85e65b9de64938
|
[
"MIT"
] | null | null | null |
tests/test_build_docs.py
|
simon-ritchie/action-py-script
|
f502ede320089562d77d13231e85e65b9de64938
|
[
"MIT"
] | 16
|
2021-02-13T05:19:16.000Z
|
2021-02-23T11:40:18.000Z
|
tests/test_build_docs.py
|
simon-ritchie/action-py-script
|
f502ede320089562d77d13231e85e65b9de64938
|
[
"MIT"
] | null | null | null |
import hashlib
import os
import shutil
from random import randint
from typing import List
from retrying import retry
import build_docs
from apysc._file import file_util
from build_docs import _CodeBlock
from build_docs import _CodeBlockFlake8Error
from build_docs import _CodeBlockMypyError
from build_docs import _CodeBlockNumdoclintError
from build_docs import _RunReturnData
from build_docs import _ScriptData
from tests.testing_helper import assert_attrs
from tests.testing_helper import assert_raises
_CHECKOUT_FILE_PATHS: List[str] = [
'docs_src/hashed_vals/stage.md',
]
def teardown() -> None:
"""
The function would be called when the test ended.
"""
for checkout_file_path in _CHECKOUT_FILE_PATHS:
os.system(f'git checkout {checkout_file_path}')
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__replace_static_path_recursively() -> None:
tmp_dir_1: str = '../.tmp_test_build_docs/'
shutil.rmtree(tmp_dir_1, ignore_errors=True)
tmp_dir_2: str = os.path.join(tmp_dir_1, 'subdir/')
os.makedirs(tmp_dir_2, exist_ok=True)
html_path: str = os.path.join(tmp_dir_1, 'test.html')
with open(html_path, 'w') as f:
f.write(
'<link rel="stylesheet" type="text/css" '
'href="_static/groundwork.css" />')
js_path: str = os.path.join(tmp_dir_2, 'test.js')
with open(js_path, 'w') as f:
f.write('"_static/groundwork.css"')
pkl_path: str = os.path.join(tmp_dir_1, 'test.pkl')
with open(pkl_path, 'w') as f:
f.write('')
jslib_path: str = os.path.join(tmp_dir_1, 'jquery.min.js')
with open(jslib_path, 'w') as f:
f.write('"_static/groundwork.css"')
build_docs._replace_static_path_recursively(dir_path=tmp_dir_1)
target_file_paths: List[str] = [html_path, js_path]
for target_file_path in target_file_paths:
with open(target_file_path) as f:
file_txt: str = f.read()
assert '_static' not in file_txt
assert 'static' in file_txt
with open(jslib_path) as f:
file_txt = f.read()
assert '_static' in file_txt
shutil.rmtree(tmp_dir_1, ignore_errors=True)
class Test_CodeBlock:
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test___init__(self) -> None:
code_block: _CodeBlock = _CodeBlock(
code_type='py', code='print(100)', runnable=True)
assert_attrs(
expected_attrs={
'code_type': 'py',
'code': 'print(100)',
'runnable': True
},
any_obj=code_block)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__get_code_blocks_from_txt() -> None:
md_txt: str = (
'Hello'
'\n\n```py'
'\nprint(100)'
'\nprint(200)'
'\n```'
'\n\nWorld'
'\n```py'
'\n# runnable'
'\nprint(300)'
'\n```'
'\n'
'\n```'
'\n$ ls -l'
'\n```'
)
code_blocks: List[_CodeBlock] = build_docs._get_code_blocks_from_txt(
md_txt=md_txt)
assert len(code_blocks) == 3
assert code_blocks[0].code_type == 'py'
assert code_blocks[0].code == 'print(100)\nprint(200)'
assert not code_blocks[0].runnable
assert code_blocks[1].code == 'print(300)'
assert code_blocks[1].runnable
assert code_blocks[2].code_type == ''
assert code_blocks[2].code == '$ ls -l'
assert not code_blocks[2].runnable
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__replace_html_saving_export_path_by_doc_path() -> None:
code: str = """from apysc import Stage
from apysc import save_overall_html
stage = Stage(stage_width=300, stage_height=180, background_color='#333')
save_overall_html(
dest_dir_path='./quick_start_stage_creation')"""
code = build_docs._replace_html_saving_export_path_by_doc_path(code=code)
expected: str = """save_overall_html(
dest_dir_path='./docs_src/source/_static/quick_start_stage_creation/')"""
assert expected in code
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__get_runnable_scripts_in_md_code_blocks() -> None:
tmp_md_file_path: str = (
'../tmp_test__get_runnable_scripts_in_md_code_blocks.md')
md_txt: str = """Hello
```py
print(100)
```
World!
```py
# runnable
print(200)
save_overall_html(
dest_dir_path='quick_start_stage_creation/')
```
```
# runnable
print(300)
```
"""
with open(tmp_md_file_path, 'w') as f:
f.write(md_txt)
runnable_scripts: List[str] = \
build_docs._get_runnable_scripts_in_md_code_blocks(
md_file_path=tmp_md_file_path)
assert len(runnable_scripts) == 1
assert runnable_scripts == (
['print(200)'
'\nsave_overall_html('
"\n dest_dir_path='./docs_src/source/_static/"
"quick_start_stage_creation/',"
"\n js_lib_dir_path='../', skip_js_lib_exporting=True)"
]
)
file_util.remove_file_if_exists(file_path=tmp_md_file_path)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__exec_document_lint_and_script() -> None:
hash_file_path: str = os.path.join(
build_docs.HASHED_VALS_DIR_PATH,
'quick_start.md',
)
file_util.remove_file_if_exists(file_path=hash_file_path)
executed_scripts: List[str] = build_docs._exec_document_lint_and_script(
limit_count=10)
assert len(executed_scripts) <= 10
for executed_script in executed_scripts:
if 'save_overall_html' not in executed_script:
continue
assert './docs_src/source/_static/' in executed_script
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__remove_runnable_inline_comment_from_code_blocks() -> None:
tmp_dir_path: str = '../tmp_test_build_docs/'
shutil.rmtree(tmp_dir_path, ignore_errors=True)
tmp_subdir_path: str = os.path.join(tmp_dir_path, 'subdir/')
os.makedirs(tmp_subdir_path)
tmp_html_path_1: str = os.path.join(tmp_dir_path, 'tmp_1.html')
tmp_html_path_2: str = os.path.join(tmp_subdir_path, 'tmp_2.html')
tmp_txt_path_1: str = os.path.join(tmp_dir_path, 'tmp_1.txt')
html_txt: str = (
'<span>a</span>'
'<span></span><span class="c1"># runnable</span>'
'\n<span>b</span>'
)
for file_path in (tmp_html_path_1, tmp_html_path_2, tmp_txt_path_1):
with open(file_path, 'w') as f:
f.write(html_txt)
build_docs._remove_runnable_inline_comment_from_code_blocks(
dir_path=tmp_dir_path)
expected: str = (
'<span>a</span>'
'\n<span>b</span>'
)
for file_path in (tmp_html_path_1, tmp_html_path_2):
with open(file_path) as f:
txt: str = f.read()
assert txt == expected
with open(tmp_txt_path_1) as f:
txt = f.read()
assert txt == html_txt
shutil.rmtree(tmp_dir_path, ignore_errors=True)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__read_md_file_hashed_val_from_file() -> None:
tmp_hash_file_path: str = '../tmp_test_build_docs_1.md'
file_util.remove_file_if_exists(file_path=tmp_hash_file_path)
hashed_val: str = build_docs._read_md_file_hashed_val_from_file(
hash_file_path=tmp_hash_file_path)
assert hashed_val == ''
file_util.save_plain_txt(txt='1234567890', file_path=tmp_hash_file_path)
hashed_val = build_docs._read_md_file_hashed_val_from_file(
hash_file_path=tmp_hash_file_path)
assert hashed_val == '1234567890'
file_util.remove_file_if_exists(file_path=tmp_hash_file_path)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__read_md_file_and_hash_txt() -> None:
tmp_file_path: str = '../test_build_docs_2.md'
file_util.save_plain_txt(
txt='1234567890', file_path=tmp_file_path)
hashed_val: str = build_docs._read_md_file_and_hash_txt(
md_file_path=tmp_file_path)
assert hashed_val == hashlib.sha1('1234567890'.encode()).hexdigest()
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__get_md_under_source_file_path() -> None:
under_source_file_path: str = build_docs._get_md_under_source_file_path(
md_file_path='./doc_src/source/any/path.md')
assert under_source_file_path == 'any/path.md'
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__slice_md_file_by_hashed_val() -> None:
original_hashed_vals_dir_path: str = build_docs.HASHED_VALS_DIR_PATH
build_docs.HASHED_VALS_DIR_PATH = '../tmp_test_build_docs_3/hashed_vals/'
shutil.rmtree(build_docs.HASHED_VALS_DIR_PATH, ignore_errors=True)
os.makedirs(build_docs.HASHED_VALS_DIR_PATH, exist_ok=True)
tmp_hash_file_path_1: str = os.path.join(
build_docs.HASHED_VALS_DIR_PATH,
'tmp_1.md'
)
file_util.save_plain_txt(
txt=hashlib.sha1('0123'.encode()).hexdigest(),
file_path=tmp_hash_file_path_1)
tmp_hash_file_path_2: str = os.path.join(
build_docs.HASHED_VALS_DIR_PATH,
'tmp_2.md'
)
file_util.save_plain_txt(
txt=hashlib.sha1('4567'.encode()).hexdigest(),
file_path=tmp_hash_file_path_2)
tmp_src_dir_path: str = '../tmp_test_build_docs_4/source/'
shutil.rmtree(tmp_src_dir_path, ignore_errors=True)
os.makedirs(tmp_src_dir_path, exist_ok=True)
tmp_md_file_path_1: str = os.path.join(
tmp_src_dir_path, 'tmp_1.md')
tmp_md_file_path_2: str = os.path.join(
tmp_src_dir_path, 'tmp_2.md')
tmp_md_file_path_3: str = os.path.join(
tmp_src_dir_path, 'tmp_3.md')
tmp_md_file_path_4: str = os.path.join(
build_docs.HASHED_VALS_DIR_PATH, 'tmp_4.md')
md_file_paths: List[str] = [
tmp_md_file_path_1,
tmp_md_file_path_2,
tmp_md_file_path_3,
tmp_md_file_path_4,
]
with open(tmp_md_file_path_1, 'w') as f:
f.write('0123')
with open(tmp_md_file_path_2, 'w') as f:
f.write('0000')
with open(tmp_md_file_path_3, 'w') as f:
f.write('890')
sliced_md_file_paths: List[str]
hashed_vals: List[str]
sliced_md_file_paths, hashed_vals = \
build_docs._slice_md_file_by_hashed_val(
md_file_paths=md_file_paths)
assert sliced_md_file_paths == [tmp_md_file_path_2, tmp_md_file_path_3]
assert hashed_vals == [
hashlib.sha1('0000'.encode()).hexdigest(),
hashlib.sha1('890'.encode()).hexdigest(),
]
shutil.rmtree(tmp_src_dir_path, ignore_errors=True)
shutil.rmtree(build_docs.HASHED_VALS_DIR_PATH, ignore_errors=True)
build_docs.HASHED_VALS_DIR_PATH = original_hashed_vals_dir_path
def test__save_md_hashed_val() -> None:
original_hashed_vals_dir_path: str = build_docs.HASHED_VALS_DIR_PATH
build_docs.HASHED_VALS_DIR_PATH = '../tmp_test_build_docs_5/hashed_vals/'
expected_file_path: str = os.path.join(
build_docs.HASHED_VALS_DIR_PATH,
'any/path.md')
file_util.remove_file_if_exists(file_path=expected_file_path)
build_docs._save_md_hashed_val(
md_file_path='./docs_src/source/any/path.md', hashed_val='1234')
hashed_val: str = build_docs._read_md_file_hashed_val_from_file(
hash_file_path=expected_file_path)
assert hashed_val == '1234'
build_docs.HASHED_VALS_DIR_PATH = original_hashed_vals_dir_path
file_util.remove_file_if_exists(file_path=expected_file_path)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__append_js_lib_path_and_skip_settings() -> None:
"""_append_js_lib_path_and_skip_settings 関数のテスト。
"""
code: str = """print(200)
save_overall_html(
dest_dir_path='quick_start_stage_creation/')"""
code = build_docs._append_js_lib_path_and_skip_settings(
code=code)
expected: str = """print(200)
save_overall_html(
dest_dir_path='quick_start_stage_creation/',
js_lib_dir_path='../', skip_js_lib_exporting=True)"""
assert code == expected
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__make_script_data_list() -> None:
os.makedirs('./tmp/', exist_ok=True)
tmp_file_path_1: str = './tmp/tmp_test_build_docs_1.md'
tmp_file_path_2: str = './tmp/tmp_test_build_docs_2.md'
with open(tmp_file_path_1, 'w') as f:
f.write(
'# heading'
'\n\n```py'
'\n# runnable'
'\nprint(100)'
'\n```'
'\n\n```py'
'\n# runnable'
'\nprint(200)'
'\n```'
'\n'
)
with open(tmp_file_path_2, 'w') as f:
f.write(
'# heading'
'\n\n```py'
'\n# runnable'
'\nprint(300)'
'\n```'
'\n'
)
script_data_list: List[_ScriptData] = build_docs._make_script_data_list(
md_file_paths=[
tmp_file_path_1,
tmp_file_path_2,
],
hashed_vals=['abc', 'def'],
limit_count=None)
assert len(script_data_list) == 3
assert script_data_list[0] == {
'md_file_path': tmp_file_path_1,
'hashed_val': 'abc',
'runnable_script': 'print(100)',
}
assert script_data_list[1] == {
'md_file_path': tmp_file_path_1,
'hashed_val': 'abc',
'runnable_script': 'print(200)',
}
assert script_data_list[2] == {
'md_file_path': tmp_file_path_2,
'hashed_val': 'def',
'runnable_script': 'print(300)',
}
script_data_list = build_docs._make_script_data_list(
md_file_paths=[
tmp_file_path_1,
tmp_file_path_2,
],
hashed_vals=['abc', 'def'],
limit_count=2)
assert len(script_data_list) == 2
file_util.remove_file_if_exists(file_path=tmp_file_path_1)
file_util.remove_file_if_exists(file_path=tmp_file_path_2)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__run_code_block_script() -> None:
return_data: _RunReturnData = build_docs._run_code_block_script(
script_data={
'md_file_path': 'test.md',
'hashed_val': 'abc',
'runnable_script': 'print(200)',
})
assert return_data == {
'md_file_path': 'test.md',
'runnable_script': 'print(200)',
'stdout': '200\n',
}
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__validate_script_return_data() -> None:
build_docs._validate_script_return_data(
return_data_list=[{
'md_file_path': 'test.md',
'runnable_script': 'print(100)',
'stdout': '100\n',
}])
assert_raises(
expected_error_class=Exception,
func_or_method=build_docs._validate_script_return_data,
kwargs={'return_data_list': [{
'md_file_path': 'test.md',
'runnable_script': 'print(100)',
'stdout': 'Traceback: most recent call ...'
}]},
match='Error occurred while executing the document codeblock.')
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__save_hashed_val() -> None:
hashed_val: str = build_docs._read_md_file_hashed_val_from_file(
hash_file_path='docs_src/hashed_vals/stage.md')
os.remove('docs_src/hashed_vals/stage.md')
build_docs._save_hashed_val(
script_data_list=[{
'md_file_path': 'docs_src/source/stage.md',
'hashed_val': hashed_val,
'runnable_script': 'print(100)',
}])
saved_hashed_val: str = build_docs._read_md_file_hashed_val_from_file(
hash_file_path='docs_src/hashed_vals/stage.md')
assert saved_hashed_val == hashed_val
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__check_code_block_with_flake8() -> None:
script_data: _ScriptData = {
'md_file_path': './tmp.py',
'hashed_val': 'abc',
'runnable_script':
'a=10',
}
assert_raises(
expected_error_class=_CodeBlockFlake8Error,
func_or_method=build_docs._check_code_block_with_flake8,
kwargs={'script_data': script_data},
match=r'There is a flake8 error in the following document '
r'code block:')
script_data = {
'md_file_path': './tmp.py',
'hashed_val': 'abc',
'runnable_script': 'a = 20',
}
build_docs._check_code_block_with_flake8(script_data=script_data)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__check_code_block_with_numdoclint() -> None:
script_data: _ScriptData = {
'md_file_path': './tmp.py',
'hashed_val': 'abc',
'runnable_script':
'def func_1'
'(a):\n print(100)',
}
assert_raises(
expected_error_class=_CodeBlockNumdoclintError,
func_or_method=build_docs._check_code_block_with_numdoclint,
kwargs={'script_data': script_data},
match=r'There is a numdoclint error in the following '
r'document code block')
script_data = {
'md_file_path': './tmp.py',
'hashed_val': 'abc',
'runnable_script':
'def func_2'
'(a):'
'\n """'
'\n test function.'
'\n\n Parameters'
'\n ----------'
'\n a : int'
'\n Test argument.'
'\n """'
'\n print(100)',
}
build_docs._check_code_block_with_numdoclint(script_data=script_data)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__check_code_block_with_mypy() -> None:
script_data: _ScriptData = {
'md_file_path': './tmp.py',
'hashed_val': 'abc',
'runnable_script':
'def func_1'
'(a):\n print(100)',
}
assert_raises(
expected_error_class=_CodeBlockMypyError,
func_or_method=build_docs._check_code_block_with_mypy,
kwargs={'script_data': script_data},
match='There is a mypy error in the following document code block')
script_data = {
'md_file_path': './tmp.py',
'hashed_val': 'abc',
'runnable_script': 'print(100)',
}
build_docs._check_code_block_with_mypy(script_data=script_data)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__get_code_block_output_dir_paths() -> None:
tmp_test_dir_path: str = 'tmp/test_build_docs_1/'
shutil.rmtree(tmp_test_dir_path, ignore_errors=True)
dir_paths: List[str] = build_docs._get_code_block_output_dir_paths(
output_dir_path=tmp_test_dir_path)
assert dir_paths == []
tmp_subdir_path_1: str = os.path.join(
tmp_test_dir_path, 'test_1/')
os.makedirs(tmp_subdir_path_1, exist_ok=True)
tmp_subdir_path_2: str = os.path.join(
tmp_test_dir_path, 'test_2/')
os.makedirs(tmp_subdir_path_2, exist_ok=True)
tmp_index_path: str = os.path.join(
tmp_subdir_path_2, 'index.html',
)
with open(tmp_index_path, 'w') as f:
f.write('')
tmp_static_file_path: str = os.path.join(
tmp_test_dir_path, 'tmp_test.js')
with open(tmp_static_file_path, 'w') as f:
f.write('')
dir_paths = build_docs._get_code_block_output_dir_paths(
output_dir_path=tmp_test_dir_path)
assert dir_paths == ['tmp/test_build_docs_1/test_2/']
shutil.rmtree(tmp_test_dir_path, ignore_errors=True)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__move_code_block_outputs() -> None:
tmp_test_dir_path: str = 'tmp/test_build_docs_2/'
shutil.rmtree(tmp_test_dir_path, ignore_errors=True)
tmp_subdir_path: str = os.path.join(
tmp_test_dir_path, 'tmp_test_build_docs/')
os.makedirs(tmp_subdir_path, exist_ok=True)
tmp_index_path: str = os.path.join(tmp_subdir_path, 'index.html')
with open(tmp_index_path, 'w') as f:
f.write('')
expected_dir_path: str = './docs/static/tmp_test_build_docs/'
expected_file_path: str = os.path.join(
expected_dir_path, 'index.html',
)
shutil.rmtree(expected_dir_path, ignore_errors=True)
build_docs._move_code_block_outputs(
output_dir_path=tmp_test_dir_path)
assert os.path.isfile(expected_file_path)
assert not os.path.isdir(tmp_subdir_path)
shutil.rmtree(tmp_test_dir_path, ignore_errors=True)
shutil.rmtree(expected_dir_path, ignore_errors=True)
| 35.160267
| 78
| 0.651346
| 451
| 0.0214
| 0
| 0
| 19,330
| 0.9172
| 0
| 0
| 4,387
| 0.208161
|
5a8286acf837a481397e002bada53024ba40d6ed
| 15,551
|
py
|
Python
|
Generator/views.py
|
SmilingTornado/sfia_generator
|
f675a3fe55e3b56267cafade44ebd069bac185d7
|
[
"Apache-2.0"
] | 2
|
2020-08-19T08:43:51.000Z
|
2021-11-18T09:05:55.000Z
|
Generator/views.py
|
SmilingTornado/sfia_generator
|
f675a3fe55e3b56267cafade44ebd069bac185d7
|
[
"Apache-2.0"
] | 5
|
2020-06-06T14:15:30.000Z
|
2021-09-22T18:47:36.000Z
|
Generator/views.py
|
SmilingTornado/sfia_generator
|
f675a3fe55e3b56267cafade44ebd069bac185d7
|
[
"Apache-2.0"
] | null | null | null |
# Create your views here.
import docx
import gensim
import numpy as np
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render
from docx.shared import RGBColor, Inches, Pt
from nltk.tokenize import sent_tokenize, word_tokenize
from .models import Skill, Level
# View for home page
def index(request):
# Request to get the form
if request.method == "GET":
context = {'searched': False}
# Return the standard blank form
return render(request, 'form.html', context)
# POSTing to the form means the request body will have some data
elif request.method == "POST":
# Checks whether the POST request wants to generate a form by checking request body
if 'type' in request.POST and 'sk1' in request.POST and 'sk2' in request.POST \
and 'sk1_min' in request.POST and 'sk2_min' in request.POST \
and 'sk1_max' in request.POST and 'sk2_max' in request.POST:
# Checking validity of request
if is_valid(request):
# Generate and return the form
return generate(request)
else:
# Return the page for an invalid request
return render(request, 'invalid.html', {})
# If data was posted from the search function form.
elif 'input' in request.POST:
return search_similarities(request)
# If data was posted from the skill selector
        elif 'code_1' in request.POST and 'code_2' in request.POST:
context = {'searched': False, 'sk1_code': request.POST['code_1'], 'sk2_code': request.POST['code_2']}
return render(request, 'form.html', context)
# Any other type of POST request would be invalid
else:
return render(request, 'invalid.html', {})
else:
# Any other request would just be returned the blank form
context = {'searched': False}
return render(request, 'form.html', context)
# View for search page
def search_page(request):
# Returns the search page
return render(request, 'search.html', {})
# View to list skills
def list_skills(request):
set_1, set_2, set_3 = get_skill_sets() # Gets skills in 3 evenly split sets
return render(request, 'list_skills.html', {"set_1": set_1, "set_2": set_2,
"set_3": set_3}) # Renders and returns the page of the list of skills
# View to list skills for second skill selection
def select_second(request, code_1): # Same as list_skills but additional context is added to be rendered
set_1, set_2, set_3 = get_skill_sets() # Gets skills in 3 evenly split sets
return render(request, 'list_skills.html', {"code_1": code_1, "set_1": set_1, "set_2": set_2,
"set_3": set_3}) # Renders and returns the page of the list of skills
# View details of skill
def show_skill(request, code):
try:
skill_object = Skill.objects.get(code=code.lower()) # Get the skill from the code
levels = Level.objects.filter(skill=skill_object) # Get the levels using the skill_object as the key
context = {
'skill': skill_object,
'levels': levels
} # Prepare context for rendering onto template
return render(request, 'show_skill.html', context) # Render and return context
except: # In the case where the skill code is invalid
return render(request, 'invalid.html', {}) # Return page for invalid requests
# View details of second selected skill
def view_second(request, code_1, code_2):
try:
skill_object = Skill.objects.get(code=code_2.lower()) # Get the skill from the code
levels = Level.objects.filter(skill=skill_object) # Get the levels using the skill_object as the key
context = {
'skill': skill_object,
'levels': levels,
'code_1': code_1,
'code_2': code_2
} # Prepare context for rendering onto template
return render(request, 'show_skill.html', context) # Render and return context
except: # In the case where the skill code is invalid
return render(request, 'invalid.html', {}) # Return page for invalid requests
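# Split all skills, ordered by code, into three roughly equal column sets for display.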
def get_skill_sets():
set_1 = [] # Column 1
set_2 = [] # Column 2
set_3 = [] # Column 3
skill_objects = Skill.objects.all().order_by('code') # Get all the skills and order them by the skill code
length = len(skill_objects) # Find number of skills
for num, skill in enumerate(skill_objects, start=0):
if num < length / 3: # Checks if the skill is in the first third of the list
set_1.append(skill) # Appends to first column set
elif num < length * (2 / 3): # Checks if the skill is in the second third of the list
set_2.append(skill) # Appends to the second column set
else: # All other skills
set_3.append(skill) # Appended to the last column set
return set_1, set_2, set_3
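# Compare free-text input against skill and level descriptions using gensim TF-IDF similarity and suggest the closest matches.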
def search_similarities(request):
similarities = {} # Dictionary to store the calculated similarities
input = request.POST['input'] # Get the input from the request
# Create a list of sentences where each sentence has been broken down into a list of words
gen_docs = [[w.lower() for w in word_tokenize(text)]
for text in sent_tokenize(input)]
# Create a dictionary of unique words
dictionary = gensim.corpora.Dictionary(gen_docs)
# Generate bag of words to measure frequency of word use
corpus = [dictionary.doc2bow(gen_doc) for gen_doc in gen_docs]
# Calculate Term Frequency, Inverse Document Frequency of words
tf_idf = gensim.models.TfidfModel(corpus)
# Create similarity model
sims = gensim.similarities.Similarity(settings.BASE_DIR + '/Generator/gensim', tf_idf[corpus],
num_features=len(dictionary))
# Checking for similarities with level descriptions
for level in Level.objects.all():
skill_sim_total = 0
for sentence in sent_tokenize(level.description):
query_doc = [w.lower() for w in word_tokenize(sentence)]
query_doc_bow = dictionary.doc2bow(query_doc)
query_doc_tf_idf = tf_idf[query_doc_bow]
sum_of_sims = (np.sum(sims[query_doc_tf_idf], dtype=np.float32))
similarity = float(sum_of_sims / len(sent_tokenize(input)))
skill_sim_total += similarity
skill_sim = skill_sim_total / len(sent_tokenize(level.description))
# Check if similarities for a skill has been calculated before
if level.skill.code not in similarities:
similarities[level.skill.code] = skill_sim
# If calculated before, check if new description is more similar
elif similarities[level.skill.code] < skill_sim:
similarities[level.skill.code] = skill_sim
# Checking for similarities with skill descriptions
# Same procedure as with for levels
for skill in Skill.objects.all():
skill_sim_total = 0
for sentence in sent_tokenize(skill.description):
query_doc = [w.lower() for w in word_tokenize(sentence)]
query_doc_bow = dictionary.doc2bow(query_doc)
query_doc_tf_idf = tf_idf[query_doc_bow]
sum_of_sims = (np.sum(sims[query_doc_tf_idf], dtype=np.float32))
similarity = float(sum_of_sims / len(sent_tokenize(input)))
skill_sim_total += similarity
skill_sim = skill_sim_total / len(sent_tokenize(skill.description))
if skill.code not in similarities:
similarities[skill.code] = skill_sim
elif similarities[skill.code] < skill_sim:
similarities[skill.code] = skill_sim
# Find the most similar skill
first_match = max(similarities, key=similarities.get)
# If the maximum similarity score was 0, return the form
if (similarities[first_match] == 0):
return render(request, 'form.html', {'searched': True})
# Removes the most similar skill
similarities.pop(first_match, None)
# Finds the current maximum similarity score
second_match = max(similarities, key=similarities.get)
# If the new maximum similarity score is 0, return only the first match
if (similarities[second_match] == 0):
        return render(request, 'form.html', {'sk1_code': first_match.upper(), 'searched': True})
# Return rendered form with found matches
    context = {'sk1_code': first_match.upper(), 'sk2_code': second_match.upper(), 'searched': True}
return render(request, 'form.html', context)
# Returns whether a form generation request is valid
def is_valid(request):
# Grabbing data from request
sk1 = request.POST['sk1']
sk1_start = int(request.POST['sk1_min'])
sk1_stop = int(request.POST['sk1_max'])
sk2 = request.POST['sk2']
sk2_start = int(request.POST['sk2_min'])
sk2_stop = int(request.POST['sk2_max'])
type = request.POST['type']
# Check if request is valid
if 'type' in request.POST and 'sk1' in request.POST and 'sk2' in request.POST \
and 'sk1_min' in request.POST and 'sk2_min' in request.POST \
and 'sk1_max' in request.POST and 'sk2_max' in request.POST:
if sk1_start >= 1 and sk2_start >= 1 and sk1_stop <= 7 and sk2_stop <= 7 and (
type == 'student' or type == 'employer'):
try: # Try to retrieve the skill object
skill_object = Skill.objects.get(code=sk1.lower())
except:
return False
if sk2 != '': # If the second skill isn't blank
try: # Try to retrieve the second skill object
skill_object = Skill.objects.get(code=sk2.lower())
except:
return False
return True
else:
return False
else:
return False
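# Build the requested .docx document and return it as a downloadable attachment.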
def generate(request):
# Setting variables taken from request body
sk1 = request.POST['sk1']
sk1_start = int(request.POST['sk1_min'])
sk1_stop = int(request.POST['sk1_max'])
sk2 = request.POST['sk2']
sk2_start = int(request.POST['sk2_min'])
sk2_stop = int(request.POST['sk2_max'])
type = request.POST['type']
dedicate = False
# Check if skills are to be rendered on dedicated pages
if 'dedicate' in request.POST:
dedicate = True
# Generating the document
if type == 'employer':
doc = docx.Document(settings.BASE_DIR + '/Generator/DocxTemplates/employer_template.docx')
else:
doc = docx.Document(settings.BASE_DIR + '/Generator/DocxTemplates/student_template.docx')
if dedicate:
        # Adding a page break
add_page_break(doc)
if sk2 != '':
sk1_concat = ''.join([level['description'] for level in get_levels(sk1, [sk1_start, sk1_stop])])
sk2_concat = ''.join([level['description'] for level in get_levels(sk2, [sk2_start, sk2_stop])])
        # Put the skill with the shorter set of level descriptions first
if len(sk1_concat) <= len(sk2_concat):
# Adding skill information
add_skill_info(sk1, doc)
# Adding the first table
add_skill_table(sk1, [sk1_start, sk1_stop], doc)
            # Adding a page break
add_page_break(doc)
# Adding skill information
add_skill_info(sk2, doc)
# Adding the second table
add_skill_table(sk2, [sk2_start, sk2_stop], doc)
filename = '%s-%s.docx' % (sk1.upper(), sk2.upper())
else:
# Adding skill information
add_skill_info(sk2, doc)
# Adding the first table
add_skill_table(sk2, [sk2_start, sk2_stop], doc)
            # Adding a page break
add_page_break(doc)
# Adding skill information
add_skill_info(sk1, doc)
# Adding the second table
add_skill_table(sk1, [sk1_start, sk1_stop], doc)
filename = '%s-%s.docx' % (sk2.upper(), sk1.upper())
else:
# Adding skill information
add_skill_info(sk1, doc)
# Adding the first table
add_skill_table(sk1, [sk1_start, sk1_stop], doc)
filename = '%s.docx' % (sk1.upper())
# Saving to output
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
response['Content-Disposition'] = 'attachment; filename=' + filename
doc.save(response)
return response
# Get skill information
def get_skill(sk_code):
skill_object = Skill.objects.get(code=sk_code.lower())
# Put skill information into dictionary
skill = {
'name': skill_object.name,
'code': skill_object.code,
'description': skill_object.description,
'levels': []
}
# Put each level's information into a dictionary and append to levels list in the skills dictionary
for level in Level.objects.filter(skill=skill_object):
skill['levels'].append({
'level': level.level,
'description': level.description,
})
# Return the dictionary
return skill
# Get levels in a certain range
def get_levels(sk_code, sk_range):
sk = get_skill(sk_code)
levels = []
# Put each level's information into a dictionary and append to levels list in the skills dictionary
for i in range(sk_range[0], sk_range[1] + 1):
for level in sk['levels']:
if level['level'] == i:
description = level['description']
levels.append({'level': i, 'description': description})
break
return levels
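# Render the selected levels of a skill as a two-row table (level headings above descriptions).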
def add_skill_table(sk_code, sk_range, doc):
# Get the information for the skill
levels = get_levels(sk_code, sk_range)
# Table Generation
t = doc.add_table(2, len(levels)) # Create Table
t.autofit = True
t.style = 'Table Grid'
t.alignment = docx.enum.table.WD_TABLE_ALIGNMENT.CENTER
# Finding total length of descriptions for width calculations later
total_description_length = 0
for level in levels:
total_description_length += len(level["description"])
# Populating cells
cell_count = 0
for level in levels:
top_cell = t.cell(0, cell_count).paragraphs[0].add_run('Level ' + str(level['level']))
top_cell.bold = True
top_cell.font.name = 'Calibri'
bottom_cell = t.cell(1, cell_count).paragraphs[0].add_run(level['description'])
bottom_cell.font.name = 'Calibri'
bottom_cell.font.size = Pt(10)
cell_width = 1.25 / len(levels) + 10.75 * len(level['description']) / total_description_length
t.cell(0, cell_count).width = Inches(cell_width)
t.cell(1, cell_count).width = Inches(cell_width)
cell_count += 1
# Add the skill heading (name, code and description) to the document
def add_skill_info(sk_code, doc):
sk = get_skill(sk_code)
p = doc.add_paragraph('')
name = p.add_run(sk['name'] + ' ')
name.bold = True
name.font.size = Pt(14)
name.font.name = 'Calibri'
code = p.add_run(sk['code'].upper())
code.bold = True
code.font.size = Pt(11)
code.font.color.rgb = RGBColor(0x89, 0x89, 0x89)
code.font.name = 'Calibri'
description = p.add_run(' – ' + sk['description'])
description.font.size = Pt(10)
description.font.name = 'Calibri'
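# Insert a page break into the document.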
def add_page_break(doc):
paragraph = doc.add_paragraph('')
run = paragraph.add_run('')
run.add_break(docx.enum.text.WD_BREAK.PAGE)
| 42.02973
| 118
| 0.641952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,130
| 0.32984
|
5a83d552df37fe7fdd13e1e5236c56ad3f9e80ab
| 3,076
|
py
|
Python
|
flask_pancake/extension.py
|
arthurio/flask-pancake
|
5fc752d6e917bbe8e06be7d7a802cdeb10cca591
|
[
"MIT"
] | 4
|
2020-01-21T04:33:01.000Z
|
2021-04-27T22:56:23.000Z
|
flask_pancake/extension.py
|
arthurio/flask-pancake
|
5fc752d6e917bbe8e06be7d7a802cdeb10cca591
|
[
"MIT"
] | 16
|
2020-01-25T19:27:11.000Z
|
2020-10-13T20:09:18.000Z
|
flask_pancake/extension.py
|
arthurio/flask-pancake
|
5fc752d6e917bbe8e06be7d7a802cdeb10cca591
|
[
"MIT"
] | 2
|
2020-06-18T08:38:28.000Z
|
2021-04-28T02:53:39.000Z
|
from __future__ import annotations
import abc
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union
from cached_property import cached_property
from .constants import EXTENSION_NAME
from .registry import registry
from .utils import GroupFuncType, import_from_string, load_cookies, store_cookies
if TYPE_CHECKING:
from flask import Flask
from .flags import Flag, Sample, Switch
__all__ = ["FlaskPancake"]
class FlaskPancake:
def __init__(
self,
app: Flask = None,
*,
name: str = EXTENSION_NAME,
redis_extension_name: str = "redis",
group_funcs: Optional[
Dict[str, Union[str, Type[GroupFunc], GroupFunc, GroupFuncType]]
] = None,
        cookie_name: Optional[str] = None,
cookie_options: Dict[str, Any] = None,
) -> None:
self.redis_extension_name = redis_extension_name
self._group_funcs = group_funcs
self.name = name
self.cookie_name = cookie_name or self.name
self.cookie_options = cookie_options or {"httponly": True, "samesite": "Lax"}
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app: Flask) -> None:
app.extensions[self.name] = self
app.before_request(load_cookies(self))
app.after_request(store_cookies(self))
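    # Normalise the configured group functions into GroupFunc instances (computed once).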
@cached_property
def group_funcs(self) -> Optional[Dict[str, GroupFunc]]:
if self._group_funcs is None:
return None
ret = {}
for key, value in self._group_funcs.items():
if isinstance(value, str):
value = import_from_string(value)
if isinstance(value, type) and issubclass(value, GroupFunc):
value = value()
if isinstance(value, GroupFunc):
ret[key] = value
elif callable(value):
ret[key] = FunctionGroupFunc(value)
else:
raise ValueError(f"Invalid group function {value!r} for {key!r}.")
return ret
@property
def flags(self) -> Dict[str, Flag]:
return registry.flags(self.name)
@property
def switches(self) -> Dict[str, Switch]:
return registry.switches(self.name)
@property
def samples(self) -> Dict[str, Sample]:
return registry.samples(self.name)
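# Abstract interface: a callable returning an optional group identifier, plus the list of candidate identifiers it may produce.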
class GroupFunc(abc.ABC):
@abc.abstractmethod
def __call__(self) -> Optional[str]:
... # pragma: no cover
@abc.abstractmethod
def get_candidate_ids(self) -> List[str]:
... # pragma: no cover
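# Adapter that wraps a plain callable so it satisfies the GroupFunc interface.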
class FunctionGroupFunc(GroupFunc):
def __init__(self, func: Callable[[], Optional[str]]):
self._func = func
def __call__(self) -> Optional[str]:
return self._func()
def get_candidate_ids(self) -> List[str]:
sub_func = getattr(self._func, "get_candidate_ids", None)
if sub_func:
return sub_func()
return []
def __eq__(self, other) -> bool:
return isinstance(other, FunctionGroupFunc) and self._func == other._func
| 29.295238
| 85
| 0.624187
| 2,620
| 0.851756
| 0
| 0
| 1,161
| 0.377438
| 0
| 0
| 149
| 0.04844
|
5a857abf3570c3df69b81be2e28f99b2e77798fb
| 1,563
|
py
|
Python
|
tests/pygithub/test_targettag.py
|
ktlim/sqre-codekit
|
98122404cd9065d4d1d570867fe518042669126c
|
[
"MIT"
] | null | null | null |
tests/pygithub/test_targettag.py
|
ktlim/sqre-codekit
|
98122404cd9065d4d1d570867fe518042669126c
|
[
"MIT"
] | 23
|
2015-12-04T16:54:15.000Z
|
2019-03-15T01:14:26.000Z
|
tests/pygithub/test_targettag.py
|
ktlim/sqre-codekit
|
98122404cd9065d4d1d570867fe518042669126c
|
[
"MIT"
] | 3
|
2016-08-08T16:44:04.000Z
|
2020-04-29T00:58:00.000Z
|
#!/usr/bin/env python3
import codekit.pygithub
import github
import itertools
import pytest
@pytest.fixture
def git_author():
    return github.InputGitAuthor(name='foo', email='foo@example.org')
def test_init(git_author):
"""Test TargetTag object instantiation"""
t_tag = codekit.pygithub.TargetTag(
name='foo',
sha='bar',
message='baz',
tagger=git_author,
)
assert isinstance(t_tag, codekit.pygithub.TargetTag), type(t_tag)
def test_attributes(git_author):
"""Test TargetTag attributes"""
t_tag = codekit.pygithub.TargetTag(
name='foo',
sha='bar',
message='baz',
tagger=git_author,
)
assert t_tag.name == 'foo'
assert t_tag.sha == 'bar'
assert t_tag.message == 'baz'
assert isinstance(t_tag.tagger, github.InputGitAuthor), type(t_tag.tagger)
def test_init_required_args(git_author):
"""TargetTag requires named args"""
all_args = dict(
name='foo',
sha='bar',
message='baz',
tagger=git_author,
)
args = {}
# try all named args but one
for k, v in itertools.islice(all_args.items(), len(all_args) - 1):
args[k] = v
with pytest.raises(KeyError):
codekit.pygithub.TargetTag(**args)
def test_init_tagger_type():
"""TargetTag tagger named arg must be correct type"""
with pytest.raises(AssertionError):
codekit.pygithub.TargetTag(
name='foo',
sha='bar',
message='baz',
tagger='bonk',
)
| 22.328571
| 78
| 0.614203
| 0
| 0
| 0
| 0
| 103
| 0.065899
| 0
| 0
| 313
| 0.200256
|
5a898eeb8ca1914311a3bfe38f233e0ef651e459
| 497
|
py
|
Python
|
src/test/model/test_node.py
|
AstrorEnales/GenCoNet
|
c596d31a889f14499883fcdf74fdc67f927a806e
|
[
"MIT"
] | 2
|
2019-12-05T11:46:48.000Z
|
2022-03-09T00:11:06.000Z
|
src/test/model/test_node.py
|
AstrorEnales/GenCoNet
|
c596d31a889f14499883fcdf74fdc67f927a806e
|
[
"MIT"
] | null | null | null |
src/test/model/test_node.py
|
AstrorEnales/GenCoNet
|
c596d31a889f14499883fcdf74fdc67f927a806e
|
[
"MIT"
] | null | null | null |
import unittest
from model import node
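# Minimal concrete Node subclass used to exercise the shared label/str behaviour.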
class DummyNode(node.Node):
def __init__(self, ids: [str], names: [str]):
super().__init__(ids, names)
self.primary_id_prefix = 'TEST'
class TestMethods(unittest.TestCase):
def test_label(self):
n = DummyNode([], [])
self.assertEqual(n.label, 'DummyNode')
def test_str(self):
n = DummyNode(['TEST:1'], ['test name'])
self.assertEqual(str(n), 'DummyNode={ids: [TEST:1], names: ["test name"]}')
| 26.157895
| 83
| 0.615694
| 452
| 0.909457
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.171026
|
5a8acbff39d71356c0bdbbffc0011959d6b7ec58
| 1,109
|
py
|
Python
|
2020/Python/day06.py
|
kamoshi/Advent-of-Code
|
5b78fa467409e8b8c5a16efe31684b8ce493bcee
|
[
"MIT"
] | 1
|
2020-12-21T13:27:52.000Z
|
2020-12-21T13:27:52.000Z
|
2020/Python/day06.py
|
kamoshi/advent-of-code
|
5b78fa467409e8b8c5a16efe31684b8ce493bcee
|
[
"MIT"
] | null | null | null |
2020/Python/day06.py
|
kamoshi/advent-of-code
|
5b78fa467409e8b8c5a16efe31684b8ce493bcee
|
[
"MIT"
] | null | null | null |
import functools
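# Read input.txt and split it into groups of lines, using blank lines as separators.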
def parse_input() -> list[list[str]]:
groups = [[]]
with open("input.txt") as file:
for line in file:
line_ = line.rstrip()
if len(line_) > 0:
groups[-1].append(line_)
else:
                if groups[-1]:
groups.append([])
return groups
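# Part 1: for each group, count the characters that appear in any line, then sum the counts.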
def solve_p1(groups: list[list[str]]) -> int:
def count(group: list[str]) -> int:
chars = set()
for line in group:
for char in line:
chars.add(char)
return len(chars)
return sum(map(count, groups))
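# Part 2: for each group, count the characters common to every line (set intersection), then sum the counts.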
def solve_p2(groups: list[list[str]]) -> int:
def count_intersection(group: list[str]) -> int:
sets = []
for line in group:
new_set = set()
for char in line:
new_set.add(char)
sets.append(new_set)
result_set = functools.reduce(set.intersection, sets)
return len(result_set)
return sum(map(count_intersection, groups))
GROUPS = parse_input()
print(solve_p1(GROUPS))
print(solve_p2(GROUPS))
| 22.632653
| 61
| 0.537421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.009919
|