| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| goldberg/benchmark.py | daberg/goldberg | 0 | 12782951 |
# filename: goldberg/benchmark.py
import goldberg.algo as algo
import graph_tool.flow
import time
import memory_profiler
def benchmark_all(graph, source, target, capacity):
graph.edge_properties["capacity"] = capacity
original_g = graph
results = []
print("Running benchmarks for all implementations")
_print_separator()
print("Input stats")
print()
print("Number of vertices: {}".format(graph.num_vertices()))
print("Number of edges: {}".format(graph.num_edges()))
_print_separator()
# BGL implementation
name="BGL implementation"
graph = original_g.copy()
capacity = graph.edge_properties["capacity"]
residual_capacity, time, mem = profilerun(
graph_tool.flow.push_relabel_max_flow,
graph,
graph.vertex(original_g.vertex_index[source]),
graph.vertex(original_g.vertex_index[target]),
capacity
)
residual_capacity.a = capacity.get_array() - residual_capacity.get_array()
maxflow = sum(residual_capacity[e] for e in target.in_edges())
result = _compose_result(maxflow, time, mem)
results.append((name, result))
print("{} run stats".format(name))
print()
_print_result(result)
_print_separator()
# Stack push-relabel implementation
name="Stack push-relabel"
graph = original_g.copy()
capacity = graph.edge_properties["capacity"]
flow, time, mem = profilerun(
algo.stack_push_relabel,
graph,
graph.vertex(original_g.vertex_index[source]),
graph.vertex(original_g.vertex_index[target]),
capacity
)
maxflow = sum(flow[e] for e in target.in_edges())
result = _compose_result(maxflow, time, mem)
results.append((name, result))
print("{} run stats".format(name))
print()
_print_result(result)
_print_separator()
return results
def profilerun(flownet_function, graph, source, target, capacity):
    """
    Run flownet_function twice: once to measure wall-clock time, then again under
    memory_profiler to estimate its memory usage.
    Returns (result, elapsed time in ms, allocated memory in KiB).
    """
start_time = time.time()
ret = flownet_function(graph, source, target, capacity)
end_time = time.time()
time_diff = end_time - start_time
interval = time_diff / 1000.0
start_mem = memory_profiler.memory_usage(lambda: None, max_usage=True)[0]
end_mem = memory_profiler.memory_usage(
(flownet_function, [graph, source, target, capacity]),
interval=interval,
timeout=time_diff,
max_usage=True
)[0]
mem_diff = end_mem - start_mem
return (ret, time_diff * 1000.0, mem_diff * 1024.0)
def _compose_result(maxflow, time, memory):
result = {
"maxflow" : maxflow,
"time" : time,
"memory" : memory
}
return result
def _print_result(result):
print("Computed maximum flow: {}".format(result["maxflow"]))
print("Elapsed time: {} ms".format(result["time"]))
print("Allocated memory: {} KiB".format(result["memory"]))
def _print_separator():
print(separator)
separator = "-" * 79
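# A hedged usage sketch (illustrative only; the tiny three-vertex graph below is an
# assumption, not part of this module):
#
#   import graph_tool.all as gt
#   g = gt.Graph()
#   v0, v1, v2 = g.add_vertex(), g.add_vertex(), g.add_vertex()
#   g.add_edge(v0, v1); g.add_edge(v1, v2)
#   cap = g.new_edge_property("double")
#   cap.a = 1.0
#   results = benchmark_all(g, v0, v2, cap)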
| 2.5625 | 3 |
| stepik/3559/66578/step_2/script.py | tshemake/Software-Development | 0 | 12782952 |
print(12345678987654321 + 98765432123456789)
| 1.25 | 1 |
| askme/askme_api/views.py | seattlechem/askme | 0 | 12782953 |
"""Rest Api views."""
from rest_framework.views import APIView
from rest_framework.response import Response
from .rectotext import rec_to_text
from .search import find
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from gtts import gTTS
import os
from askme.settings import BASE_DIR
@method_decorator(csrf_exempt, name='dispatch')
class AskViewApi(APIView):
"""Using apiview."""
def post(self, request):
"""Upload audio file."""
try:
f = request.FILES['file']
            uploadedFile = open(os.path.join(BASE_DIR, "askme_api/assets/file.wav"), "wb")  # "wb" is the open() mode, not a path component
uploadedFile.write(f.read())
uploadedFile.close()
question = rec_to_text()
answer = find(question)
except KeyError:
answer = "Sorry we have some connection problems.\
I didn't catch your request"
return JsonResponse({'answer': answer})
@method_decorator(csrf_exempt, name='dispatch')
class AudioViewApi(APIView):
"""Using apiview."""
def post(self, request):
"""Upload audio file."""
try:
uploadedFile = open(os.path.join(BASE_DIR, 'askme/assets/file.wav'), 'wb')
f = request.FILES['data']
uploadedFile.write(f.read())
uploadedFile.close()
question = rec_to_text()
answer = find(question)
except KeyError:
            answer = ("I am sorry. We have some connection issues. "
                      "I couldn't get your file.")
tts = gTTS(text=answer, lang='en')
fname = os.path.join(BASE_DIR, "askme/assets/good.mp3")
tts.save(fname)
f = open(fname, "rb")
response = HttpResponse()
response.write(f.read())
response['Content-Type'] = 'audio/mp3'
response['Content-Length'] = os.path.getsize(fname)
return response
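# A hedged usage sketch (the endpoint URL below is hypothetical; the actual routing lives
# in the project's urls.py, which is not shown here):
#
#   import requests
#   with open("question.wav", "rb") as audio:
#       resp = requests.post("http://localhost:8000/api/ask/", files={"file": audio})
#   print(resp.json()["answer"])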
| 2.359375 | 2 |
| monero_glue/protocol/tsx_sign_state.py | ph4r05/monero-agent | 20 | 12782954 |
# filename: monero_glue/protocol/tsx_sign_state.py
from monero_glue.compat.micropython import const
class TState(object):
"""
Transaction state
"""
START = const(0)
INIT = const(1)
INP_CNT = const(2)
INPUT = const(3)
INPUT_DONE = const(4)
INPUT_PERM = const(5)
INPUT_VINS = const(6)
INPUT_ALL_DONE = const(7)
OUTPUT = const(8)
RSIG = const(9)
OUTPUT_DONE = const(10)
FINAL_MESSAGE = const(11)
SIGNATURE = const(12)
SIGNATURE_DONE = const(13)
FINAL = const(14)
FAIL = const(250)
def __init__(self):
self.s = self.START
self.in_mem = False
def state_save(self):
return self.s, self.in_mem
def state_load(self, x):
self.s, self.in_mem = x
def init_tsx(self):
if self.s != self.START:
raise ValueError("Illegal state")
self.s = self.INIT
def inp_cnt(self, in_mem):
if self.s != self.INIT:
raise ValueError("Illegal state")
self.s = self.INP_CNT
self.in_mem = in_mem
def input(self):
if self.s != self.INP_CNT and self.s != self.INPUT:
raise ValueError("Illegal state")
self.s = self.INPUT
def input_done(self):
if self.s != self.INPUT:
raise ValueError("Illegal state")
self.s = self.INPUT_DONE
def input_permutation(self):
if self.s != self.INPUT_DONE:
raise ValueError("Illegal state")
self.s = self.INPUT_PERM
def input_vins(self):
if self.s != self.INPUT_PERM and self.s != self.INPUT_VINS:
raise ValueError("Illegal state")
self.s = self.INPUT_VINS
def is_input_vins(self):
return self.s == self.INPUT_VINS
def input_all_done(self):
if (not self.in_mem and self.s != self.INPUT_VINS) or (
self.in_mem and self.s != self.INPUT_PERM
):
raise ValueError("Illegal state")
self.s = self.INPUT_ALL_DONE
def set_output(self):
if self.s != self.INPUT_ALL_DONE and self.s != self.OUTPUT:
raise ValueError("Illegal state")
self.s = self.OUTPUT
def set_output_done(self):
if self.s != self.OUTPUT:
raise ValueError("Illegal state")
self.s = self.OUTPUT_DONE
def set_final_message_done(self):
if self.s != self.OUTPUT_DONE:
raise ValueError("Illegal state")
self.s = self.FINAL_MESSAGE
def set_signature(self):
if self.s != self.FINAL_MESSAGE and self.s != self.SIGNATURE:
raise ValueError("Illegal state")
self.s = self.SIGNATURE
def set_signature_done(self):
if self.s != self.SIGNATURE:
raise ValueError("Illegal state")
self.s = self.SIGNATURE_DONE
def set_final(self):
if self.s != self.SIGNATURE_DONE:
raise ValueError("Illegal state")
self.s = self.FINAL
def set_fail(self):
self.s = self.FAIL
def is_terminal(self):
return self.s in [self.FINAL, self.FAIL]
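# A minimal happy-path sketch (illustrative only), following the transition checks above
# with in_mem=False:
#
#   st = TState()
#   st.init_tsx(); st.inp_cnt(in_mem=False); st.input(); st.input_done()
#   st.input_permutation(); st.input_vins(); st.input_all_done()
#   st.set_output(); st.set_output_done(); st.set_final_message_done()
#   st.set_signature(); st.set_signature_done(); st.set_final()
#   assert st.is_terminal()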
| 2.203125 | 2 |
| oldScripts/merged_model.py | hanzy1110/ProbabilisticFatigue | 0 | 12782955 |
# filename: oldScripts/merged_model.py
#%%
import pymc3 as pm
import arviz as az
import pandas as pd
import numpy as np
from pymc3.gp.util import plot_gp_dist
from scipy import stats
from typing import Dict
import theano.tensor as tt
import matplotlib.pyplot as plt
def basquin_rel(N, B,b):
return B*(N**b)
B = 10e5
b = -1e-1
Ns = np.linspace(1e4, 1e7, 200)
sigmas = np.array([basquin_rel(val, B,b) for val in Ns])
logN = np.log(Ns)
logSigma = np.log(sigmas)
variation_coeff = 1
sigmas_rand = sigmas*(1+variation_coeff*np.random.normal(0, scale = .1, size = len(sigmas)))
logSrand = np.log(sigmas_rand)
fig = plt.figure()
ax = fig.gca()
ax.plot(Ns, sigmas, label = 'mean')
ax.scatter(Ns, sigmas_rand, label = 'obs', color = 'r', marker='x')
plt.legend()
fig = plt.figure()
ax = fig.gca()
ax.plot(logN, logSigma, label = 'mean')
ax.scatter(logN, logSrand, label = 'obs', color = 'r')
plt.legend()
def mixture_density(alpha, beta, scalling, x):
logp = pm.Weibull.dist(alpha, beta).logp(x)
return scalling * tt.exp(logp)
data = pd.read_csv('cleansed_csvs/5BBOL2-137_VANO 136_OPGW_807625_19_02_2020.csv')
data.set_index(data['Frequency [Hz]'])  # note: set_index is not assigned back, so data keeps its original integer index
#%%
tot_cycles = np.array(data.iloc[-1,1:], dtype=np.float64)
amplitudes = np.array(list(data.columns)[1:], dtype=np.float64)
#%%
idxs = np.argsort(amplitudes)
tot_cycles = np.array([tot_cycles[idx] for idx in idxs])
amplitudes = np.hstack((np.zeros(1), amplitudes))
amplitudes = np.sort(amplitudes)
amplitudes /= amplitudes.mean()
# tot_cycles /= np.sum(tot_cycles)
tot_cycles /= tot_cycles.max()
# plt.plot(amplitudes[1:], tot_cycles)
plt.hist(tot_cycles, bins = amplitudes)
#%%
# amplitudes = amplitudes[1:].reshape(-1,1)
amplitudes = amplitudes[1:]
alpha_0 = np.array([1.5])
beta_0 = np.array([1.])
scalling =np.array([1])
yhat = mixture_density(alpha_0, beta_0, scalling, amplitudes).eval()
y = yhat + np.random.normal(loc=0, scale=.1, size=len(yhat))
plt.plot(amplitudes, yhat, label='Approximation')
plt.scatter(amplitudes, y, label='Approx + Noise')
plt.scatter(amplitudes, tot_cycles, label='Observed Data')
plt.legend()
#%%
with pm.Model() as GUEDE_disp_model:
alpha = pm.HalfNormal('Alpha', sigma= 1., shape=1)
beta = pm.HalfNormal('Beta', sigma= 1., shape=1)
# beta = 1
scalling = pm.HalfNormal('Scale Factor', sigma= 2., shape=1)
# scalling = 1
# alpha = pm.Beta('alpha', alpha=2, beta=2, shape=1)
# beta = pm.Beta('beta', alpha=2, beta=2, shape=1)
noise = pm.HalfNormal('Noise', sigma=1)
a = pm.Normal('a', mu=0, sigma = 10)
A = pm.HalfCauchy('A', beta = 8)
variation_coeff = pm.HalfCauchy('variation_coeff',beta=5)
mean = a*logN + A
noise_GUEDE = variation_coeff*mean
normed_disp = pm.Normal('obs',
mixture_density(alpha, beta, scalling, amplitudes),
noise,
observed=tot_cycles)
likelihood = pm.Normal('y', mu = mean, sigma = noise_GUEDE, observed = logSrand)
# trace:Dict[str,np.ndarray] = pm.sample_smc()
trace:Dict[str,np.ndarray] = pm.sample(draws=4000, chains = 4, tune=2000, target_accept=0.92)
print(az.summary(trace))
az.plot_trace(trace)
# %%
def weibull_samples(a, b, scale = 1, size=None):
uniform = np.random.uniform(size=size)
return b * (-np.log(uniform/scale)) ** (1 / a)
def theano_weibull_samples(a, b, scale = 1, size=None):
uniform = np.random.uniform(size=size)
return b * (-tt.log(uniform/scale)) ** (1 / a)
def Miner_sRule(B,b, n_samples, alpha, beta, scalling):
samples = theano_weibull_samples(a=alpha, b=beta, size=n_samples)
total_cycles = mixture_density(alpha, beta, scalling, x=samples) * n_samples
stresses = 3e9* samples
baskin = (stresses/B) ** (1/b)
damage = tt.dot(total_cycles, baskin)
return damage
label = 'damage5'
with GUEDE_disp_model:
B = tt.exp(A)
damage = pm.Deterministic(label,
Miner_sRule(B, a, 200000, alpha, beta, scalling))
samples:Dict[str,np.ndarray] = pm.sample_posterior_predictive(trace, samples = 10000, var_names=[label])
# %%
plt.hist(samples[label],bins = 40,density=True)
# %%
def indicator(x:np.ndarray):
slice_ = x[x<0]
return np.ones_like(slice_).sum()
perf = 1-samples[label]
p_failure = indicator(perf)/len(perf)
print('p_failure--->', p_failure)
# %%
| 1.929688 | 2 |
| Modulo_1/semana2/Estructura-de-Datos/list/listas-remove.py | rubens233/cocid_python | 0 | 12782956 |
# repo: rubens233/cocid_python, filename: Modulo_1/semana2/Estructura-de-Datos/list/listas-remove.py
a=["a", "b", "c"]
a.remove("a")
print(a)
| 3.171875 | 3 |
| app/workers/search/schemas/indices.py | d3vzer0/reternal-backend | 6 | 12782957 |
from pydantic import BaseModel, validator, Field
from typing import List, Dict
from datetime import datetime
class IndicesIn(BaseModel):
index: str
source: str
sourcetype: str
class IndiceList(BaseModel):
indices: List[IndicesIn]
integration: str
execution_date: datetime
@validator('execution_date', pre=True, always=True)
def _get_execution_date(cls, v):
return datetime.strptime(v, '%Y-%m-%dT%H:%M:%S.%f')
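# A hedged usage sketch (the field values below are made up for illustration; only the
# timestamp format is taken from the validator above):
#
#   payload = {
#       "indices": [{"index": "winlogbeat-*", "source": "wineventlog", "sourcetype": "windows"}],
#       "integration": "elastic",
#       "execution_date": "2021-06-01T12:00:00.000000",
#   }
#   batch = IndiceList(**payload)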
| 2.53125 | 3 |
| trexplot_oldcode.py | hammytheham/trexplot | 0 | 12782958 |
# repo: hammytheham/trexplot
# coding: utf-8
# Plotting script for TREACTMECH files written by <NAME> - hr0392 at bristol.ac.uk
#
# Run the script within the directory containing the flowdata, flowvector, stress strain, displacement files. Output by default is within same directory.
#
# Displacement gives the corner nodes, everything else gives the centre of the cells.
#
#
#
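# A hedged invocation sketch (assuming trexoptions.py is importable from the run directory):
#
#   cd /path/to/run_directory   # contains flowdata.tec, flowvector.tec, stress_strain.tec, displacement.tec
#   python trexplot_oldcode.py  # writes PDFs/PNGs into the same directory, depending on the op_* flags
#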
import pandas as pd
import os
import numpy as np
import matplotlib.dates as mdates
import datetime
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import sys
from trexoptions import * #import the option file
cwd = os.getcwd()
def flowdata_import():
"""
Imports the flowdata file from current working directory. Column names are largely preserved. Takes in only the last time step values.
Returns a dictionary 'flowfaces' that contains the flowdata for each of the default and user specificed faces.
"""
flowdata=pd.read_csv(cwd+'/flowdata.tec',sep=r"\s*",skiprows=[0],engine='python')
flowdata_modified= flowdata[flowdata.columns[:-1]]
flowdata_modified.columns = flowdata.columns[1:]
flowdata=flowdata_modified.rename(index=str,columns={'"X(m)"':"X", '"Y(m)"':"Y", '"Z(m)"':"Z", '"P(Pa)"':"Pressure(Pa)", '"T(C)"':"Temperature(C)",
'"SatGas"':"SatGas",'"SatLiq"':"SatLiq",'"X1"':"X1", '"X2"':"X2", '"Pcap(Pa)"':"Pcap", '"DGas_kg/m3"':"DGas_kg/m3",
'"DLiq_kg/m3"':"DLiq_kg/m3", '"Porosity"':"Porosity", '"Perm_X(m2)"':"Perm_X(m2)", '"Perm_Y(m2)"':"Perm_Y(m2)",
'"Perm_Z(m2)"':"Perm_Z(m2)", '"Krel_Gas"':"Krel_Gas", '"Krel_Liq"':"Krel_Liq", '"HGas(J/kg)"':"HGas(J/kg)",
'"HLiq(J/kg)"':"HLiq(J/kg)", '"Cp(J/kg/C)"':"Cp(J/kg/C)", '"TC(W/m/C)"':"TC(W/m/C)", '"DBlk_kg/m3"':"DBlk_kg/m3",
'"Tdif(m2/s)"':"Tdif(m2/s)"})
#Last time step - top, bottom, side walls
val=int(flowdata.loc[flowdata["X"] == 'Zone'][-1:].index[0])#value of the last time zone
lastval=int(flowdata.index[-1])
length=lastval - val #length of last time zone
zone=flowdata[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
top,tpval =zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z) #2D array of the top surface
bot,btval =zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)#bottom surface
MaxY,MxYval=zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)#MaxY face
MinY,MnYval=zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)#MinY face
MaxX,MxXval=zone.loc[zone["X"] == max(zone.X) ],max(zone.X)#MaxX face
MinX,MnXval=zone.loc[zone["X"] == min(zone.X) ],min(zone.X)#MinX face
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]],zone.Y.unique()[int(len(zone.Y.unique())/2)]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]],zone.X.unique()[int(len(zone.X.unique())/2)]
flowfaces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True:
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]]
flowfaces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]]
flowfaces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
return flowfaces
def flowvector_import():
"""
Imports the flowvector file from current working directory. Column names are largely preserved. Takes in only the last time step values.
Returns a dictionary 'vecfaces' that contains the vector data for each of the default and user specificed faces.
"""
flowvector=pd.read_csv(cwd+'/flowvector.tec',sep=r"\s*",skiprows=[0],engine='python')
flowvector_modified= flowvector[flowvector.columns[:-1]]
flowvector_modified.columns = flowvector.columns[1:]
flowvector=flowvector_modified.rename(index=str,columns={'"X(m)"':"X", '"Y(m)"':"Y",'"Z(m)"':"Z",
'"FluxLiq"':"FluxLiq", '"FluxLiq_X"':"FluxLiq_X",'"FluxLiq_Y"':"FluxLiq_Y", '"FluxLiq_Z"':"FluxLiq_Z",
'"PorVelLiq"':"PorVelLiq", '"PorVelLiqX"':"PorVelLiqX",'"PorVelLiqY"':"PorVelLiqY", '"PorVelLiqZ"':"PorVelLiqZ",
'"FluxGas"':"FluxGas",'"FluxGas_X"':"FluxGas_X",'"FluxGas_Y"':"FluxGas_Y", '"FluxGas_Z"':"FluxGas_Z",
'"PorVelGas"':"PorVelGas",'"PorVelGasX"':"PorVelGasX",'"PorVelGasY"':"PorVelGasY", '"PorVelGasZ"':"PorVelGasZ",
'"HeatFlux"':"HeatFlux", '"HeatFlux_X"':"HeatFlux_X",'"HeatFlux_Y"':"HeatFlux_Y", '"HeatFlux_Z"':"HeatFlux_Z"})
val=int(flowvector.loc[flowvector["X"] == 'Zone'][-1:].index[0])
lastval=int(flowvector.index[-1])
length=lastval - val
zone=flowvector[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
top,tpval =zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z)
bot,btval =zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)
MaxY,MxYval=zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)
MinY,MnYval=zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)
MaxX,MxXval=zone.loc[zone["X"] == max(zone.X) ],max(zone.X)
MinX,MnXval=zone.loc[zone["X"] == min(zone.X) ],min(zone.X)
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]],zone.Y.unique()[int(len(zone.Y.unique())/2)]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]],zone.X.unique()[int(len(zone.X.unique())/2)]
vecfaces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True:
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]]
vecfaces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]]
vecfaces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
return vecfaces
def displace_import():
"""
Imports the displacement file from current working directory. Column names are largely preserved. Takes in only the last time step values.
Returns a dictionary 'dispfaces' that contains the vector data for each of the default and user specificed faces.
Note I added one to xsec user and half values as you get an extra datapoint for displacement output files.
"""
column_names=["X","Y","Z","Disp_x","Disp_y","Disp_z"]
displace=pd.read_csv(cwd+'/displacement.tec',sep=r"\s+",skiprows=[0,1],usecols=[0,1,2,3,4,5],
names=column_names,engine='python')
val=int(displace.loc[displace["X"] == 'Zone'][-1:].index[0])
lastval=int(displace.index[-1])
length=lastval - val
zone=displace[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
top,tpval =zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z)
bot,btval =zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)
MaxY,MxYval=zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)
MinY,MnYval=zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)
MaxX,MxXval=zone.loc[zone["X"] == max(zone.X) ],max(zone.X)
MinX,MnXval=zone.loc[zone["X"] == min(zone.X) ],min(zone.X)
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)+1]],zone.Y.unique()[int(len(zone.Y.unique())/2)+1]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)+1]],zone.X.unique()[int(len(zone.X.unique())/2)+1]
dispfaces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True: #added one to xsec half values as you get an extra datapoint
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]+1]
dispfaces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]+1]
dispfaces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
return dispfaces
def corner_val_import():
"""
By default in trexoptions.py op_corner=False the corner values from the displacement output are used.
If you set op_corner =True and input into arrays you can override/if no displacement.tec has been output.
The reason is that flowvector, data etc all output the value of the centre of the cell. Plotting
functions require us to know the indices.
These values arent used in the facechoose function (see plotting running order) unless its actually for
displacement values (which are recorded as the corner indices).
The function returns a shaped 3D mesh 'vals' with each value in the mesh the actual co-ordinate location.
"""
if op_corner == False:
column_names=["X","Y","Z"]
displace=pd.read_csv(cwd+'/displacement.tec',sep=r"\s+",skiprows=[0,1],usecols=[0,1,2],
names=column_names,engine='python')
val=int(displace.loc[displace["X"] == 'Zone'][-1:].index[0])
lastval=int(displace.index[-1])
length=lastval - val
zone=displace[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
zone['xyz'] = list(zip(zone.X,zone.Y,zone.Z))
vals=zone.xyz.values.reshape(len(zone.X.unique()),len(zone.Y.unique()),len(zone.Z.unique()))
if op_corner == True:
corner_x=op_corner_x
corner_y=op_corner_y
corner_z=op_corner_z
a=[]
for z in corner_z:
for y in corner_y:
for x in corner_x:
a.append(tuple([x,y,z]))
df1 = pd.DataFrame(data=pd.Series(a))
vals=df1.values.reshape(len(corner_z),len(corner_y),len(corner_x))
return vals
def stress_strain_import():
"""
Imports the stress-strain file from current working directory. Column names are largely preserved. Takes in only the last time step values.
Returns a dictionary 'stressfaces' that contains the stress_strain data for each of the default and user specificed faces.
"""
column_names=["X","Y","Z","Sigma_xx","Sigma_yy","Sigma_zz","Sigma_yz","Sigma_xz","Sigma_xy",
"Strain_xx","Strain_yy","Strain_zz","Strain_yz", "Strain_xz", "Strain_xy","Vol_Strain",
"E_fail_xx", "E_fail_yy", "E_fail_zz","E_fail_yz2","E_fail_xz2","E_fail_xy2","E_fail_vol"]
stress=pd.read_csv(cwd+'/stress_strain.tec',sep=r"\s+",skiprows=[1],names=column_names,engine='python')
val=int(stress.loc[stress["X"] == 'Zone'][-1:].index[0])
lastval=int(stress.index[-1])
length=lastval - val
zone=stress[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore',
downcast='float')
top,tpval = zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z)
bot,btval = zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)
MaxY,MxYval= zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)
MinY,MnYval= zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)
MaxX,MxXval= zone.loc[zone["X"] == max(zone.X) ],max(zone.X)
MinX,MnXval= zone.loc[zone["X"] == min(zone.X) ],min(zone.X)
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]],zone.Y.unique()[int(len(zone.Y.unique())/2)]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]],zone.X.unique()[int(len(zone.X.unique())/2)]
stressfaces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,
'Max-X':MaxX,'Min-X':MinX,'tpval' : tpval, 'btval' : btval,
'MxYval' : MxYval, 'MnYval' : MnYval, 'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True:
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]]
stressfaces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]]
stressfaces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
return stressfaces
def face_choose(axis1,axis2,param):
"""
Returns a shapped array based on the input parameter (e.g. porosity) for the face (e.g. top face)
"""
face_data=np.reshape(param.values,(len(axis1.unique()),len(axis2.unique())))
return face_data
def plot_pcolormesh(axis1,axis2,facedata,data,name,name2,surlabel,xlabel,ylabel,rotate):
"""
Plots parameter values for each cell using the pcolormesh function. Uses the corner point values output by corner_point_vals()
"""
if rotate == True:
axis2,axis1 = np.meshgrid(axis1,axis2)
else:
axis1,axis2 = np.meshgrid(axis1,axis2)
fig,ax=plt.subplots(1,1,figsize=(10,10))
ax.set_title(label=('%(name)s surface (%(surlabel)s %(name2).1f) - %(data)s - min/max/mean (%(min).4e /%(max).4e / %(mean).2e) '
%{'name':name,'surlabel':surlabel,'name2':name2,'data':data,'min':np.min(facedata),
'max':np.max(facedata),'mean':np.mean(facedata) }),pad=15)
print ("axis1",axis1.size,"axis2",axis2.size,"facedata",facedata.size)
c=ax.pcolormesh(axis1,axis2,facedata,edgecolors='k',cmap='jet', vmin=np.min(facedata), vmax=np.max(facedata))
ax.set_xlabel('%(xlabel)s'%{'xlabel':xlabel})
ax.set_ylabel('%(ylabel)s'%{'ylabel':ylabel})
cbar=fig.colorbar(c, ax=ax)
cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel('%(data)s'%{'data':data}, rotation=270)
return fig
def plot_contour(axis1,axis2,facedata,data,name,name2,surlabel,xlabel,ylabel,rotate):
"""
Plots contours of the parameter values. Uses the centre of cell cordinates.
Optionally, easily could overlay this output ontop of plot_colormesh
"""
if rotate == True:
axis2,axis1 = np.meshgrid(axis1,axis2)
else:
axis1,axis2 = np.meshgrid(axis1,axis2)
fig, ax = plt.subplots()
cont = ax.contour(axis1, axis2, facedata, cmap='gist_earth', vmin=facedata.min(), vmax=facedata.max()) #see cont.levels for info on changing
ax.set_title(label=('%(name)s surface (%(surlabel)s %(name2).1f) - %(data)s - min/max/mean (%(min).4e /%(max).4e / %(mean).2e) '
%{'name':name,'surlabel':surlabel,'name2':name2,'data':data,'min':np.min(facedata),
'max':np.max(facedata),'mean':np.mean(facedata) }),pad=15)
ax.set_xlabel('%(xlabel)s'%{'xlabel':xlabel})
ax.set_ylabel('%(ylabel)s'%{'ylabel':ylabel})
ax.clabel(cont,fmt='%1.1e')
return fig
def plot_flowvectors_no_cont(axis1,axis2,axis3,axis4,facedata,data,name,name2,surlabel,xlabel,ylabel,rotate):
"""
Following based on this: https://stackoverflow.com/questions/25342072/computing-and-drawing-vector-fields
Uses both the centre (or corner for displacement) values (graident function) and the corner values (plotting) for constructing the quiver plot
"""
if rotate == True:
dy,dx=np.gradient(facedata, axis2, axis1)
axis4,axis3 = np.meshgrid(axis3,axis4)
else:
dy,dx=np.gradient(facedata, axis2, axis1)
axis3,axis4 = np.meshgrid(axis3,axis4)
fig, ax = plt.subplots()
quiv=ax.quiver(axis3, axis4, dx, dy,facedata)
ax.set_title(label=('%(name)s surface (%(surlabel)s %(name2).1f) - %(data)s - min/max/mean (%(min).4e /%(max).4e / %(mean).2e) '
%{'name':name,'surlabel':surlabel,'name2':name2,'data':data,'min':np.min(facedata),
'max':np.max(facedata),'mean':np.mean(facedata) }),pad=15)
ax.set_xlabel('%(xlabel)s'%{'xlabel':xlabel})
ax.set_ylabel('%(ylabel)s'%{'ylabel':ylabel})
cbar=fig.colorbar(quiv, ax=ax)
cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel('%(data)s'%{'data':data}, rotation=270)
return fig
def plot_flowvectors_cont(axis1,axis2,axis3,axis4,facedata,data,name,name2,surlabel,xlabel,ylabel,rotate):
"""
Following based on this: https://stackoverflow.com/questions/25342072/computing-and-drawing-vector-fields
Uses both the centre (or corner for displacement) values (graident function) and the corner values (plotting) for constructing the quiver plot
Overlay of contours of the parameter data. Not of the gradient.
"""
if rotate == True:
dy,dx=np.gradient(facedata, axis2, axis1)
axis4,axis3 = np.meshgrid(axis3,axis4)
axis2,axis1 = np.meshgrid(axis1,axis2)
else:
dy,dx=np.gradient(facedata, axis2, axis1)
axis3,axis4 = np.meshgrid(axis3,axis4)
axis1,axis2 = np.meshgrid(axis1,axis2)
fig, ax = plt.subplots()
quiv=ax.quiver(axis3, axis4, dx, dy,facedata)
cont = ax.contour(axis1, axis2, facedata, cmap='gist_earth', vmin=facedata.min(), vmax=facedata.max()) #see cont.levels for info on changing
ax.set_title(label=('%(name)s surface (%(surlabel)s %(name2).1f) - %(data)s - min/max/mean (%(min).4e /%(max).4e / %(mean).2e) '
%{'name':name,'surlabel':surlabel,'name2':name2,'data':data,'min':np.min(facedata),
'max':np.max(facedata),'mean':np.mean(facedata) }),pad=15)
ax.set_xlabel('%(xlabel)s'%{'xlabel':xlabel})
ax.set_ylabel('%(ylabel)s'%{'ylabel':ylabel})
cbar=fig.colorbar(quiv, ax=ax)
cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel('%(data)s'%{'data':data}, rotation=270)
ax.clabel(cont,fmt='%1.1e')
return fig
flowdata_params=[]
flowvector_params=[]
displacement_params=[]
stress_strain_params=[]
if op_Porosity == True:flowdata_params.append('Porosity')
if op_Perm_X == True:flowdata_params.append('Perm_X(m2)')
if op_Perm_Y == True:flowdata_params.append('Perm_Y(m2)')
if op_Perm_Z == True:flowdata_params.append('Perm_Z(m2)')
if op_Pressure == True:flowdata_params.append('Pressure(Pa)')
if op_Temperature == True:flowdata_params.append('Temperature(C)')
if op_SatGas == True:flowdata_params.append('SatGas')
if op_SatLiq == True:flowdata_params.append('SatLiq')
if op_X1 == True:flowdata_params.append('X1')
if op_X2 == True:flowdata_params.append('X2')
if op_Pcap == True:flowdata_params.append('Pcap')
if op_DGas == True:flowdata_params.append('DGas_kg/m3')
if op_DLiq == True:flowdata_params.append('DLiq_kg/m3')
if op_Krel_Gas == True:flowdata_params.append('Krel_Gas')
if op_Krel_Liq == True:flowdata_params.append('Krel_Liq')
if op_HGas == True:flowdata_params.append('HGas(J/kg)')
if op_HLiq == True:flowdata_params.append('HLiq(J/kg)')
if op_Cp == True:flowdata_params.append('Cp(J/kg/C)')
if op_TC == True:flowdata_params.append('TC(W/m/C)')
if op_DBlk == True:flowdata_params.append('DBlk_kg/m3')
if op_Tdif == True:flowdata_params.append('Tdif(m2/s)')
if op_FluxLiq == True:flowvector_params.append('FluxLiq')
if op_FluxLiq_X == True:flowvector_params.append('FluxLiq_X')
if op_FluxLiq_Y == True:flowvector_params.append('FluxLiq_Y')
if op_FluxLiq_Z == True:flowvector_params.append('FluxLiq_Z')
if op_PorVelLiq == True:flowvector_params.append('PorVelLiq')
if op_PorVelLiqX == True:flowvector_params.append('PorVelLiqX')
if op_PorVelLiqY == True:flowvector_params.append('PorVelLiqY')
if op_PorVelLiqZ == True:flowvector_params.append('PorVelLiqZ')
if op_FluxGas == True:flowvector_params.append('FluxGas')
if op_FluxGas_X == True:flowvector_params.append('FluxGas_X')
if op_FluxGas_Y == True:flowvector_params.append('FluxGas_Y')
if op_FluxGas_Z == True:flowvector_params.append('FluxGas_Z')
if op_PorVelGas == True:flowvector_params.append('PorVelGas')
if op_PorVelGasX == True:flowvector_params.append('PorVelGasX')
if op_PorVelGasY == True:flowvector_params.append('PorVelGasY')
if op_PorVelGasZ == True:flowvector_params.append('PorVelGasZ')
if op_HeatFlux == True:flowvector_params.append('HeatFlux')
if op_HeatFlux_X == True:flowvector_params.append('HeatFlux_X')
if op_HeatFlux_Y == True:flowvector_params.append('HeatFlux_Y')
if op_HeatFlux_Z == True:flowvector_params.append('HeatFlux_Z')
if op_Disp_x == True:displacement_params.append('Disp_x')
if op_Disp_y == True:displacement_params.append('Disp_y')
if op_Disp_z == True:displacement_params.append('Disp_z')
if op_Sigma_xx == True:stress_strain_params.append('Sigma_xx')
if op_Sigma_yy == True:stress_strain_params.append('Sigma_yy')
if op_Sigma_zz == True:stress_strain_params.append('Sigma_zz')
if op_Sigma_yz == True:stress_strain_params.append('Sigma_yz')
if op_Sigma_xz == True:stress_strain_params.append('Sigma_xz')
if op_Sigma_xy == True:stress_strain_params.append('Sigma_xy')
if op_Strain_xx == True:stress_strain_params.append('Strain_xx')
if op_Strain_yy == True:stress_strain_params.append('Strain_yy')
if op_Strain_zz == True:stress_strain_params.append('Strain_zz')
if op_Strain_yz == True:stress_strain_params.append('Strain_yz')
if op_Strain_xz == True:stress_strain_params.append('Strain_xz')
if op_Strain_xy == True:stress_strain_params.append('Strain_xy')
if op_Vol_Strain == True:stress_strain_params.append('Vol_Strain')
if op_E_fail_xx == True:stress_strain_params.append('E_fail_xx')
if op_E_fail_yy == True:stress_strain_params.append('E_fail_yy')
if op_E_fail_zz == True:stress_strain_params.append('E_fail_zz')
if op_E_fail_yz2 == True:stress_strain_params.append('E_fail_yz2')
if op_E_fail_xz2 == True:stress_strain_params.append('E_fail_xz2')
if op_E_fail_xy2 == True:stress_strain_params.append('E_fail_xy2')
if op_E_fail_vol == True:stress_strain_params.append('E_fail_vol')
def corner_point_vals():
"""
Collects the shaped 3D mesh 'vals' with each value in the mesh the actual co-ordinate location from the displacement file.
corner_val_import returns mesh where cordinates are arranged in a shapped mesh of format [z,y,x]. Here we index
e.g. Top_X - top surfaces (so z=0m and 1st value [0]). We also want to look at it along the 1st y-column
[0]. i.e a[[0],[0],:]
We flatten this and then return a list of the x values of the coordinates which are tuples in (x,y,z) format
Returns a dictionary cpvs containing the corner point values for each face's X,Y or Z index
Yeh this indexing is really horrible to think about or look at. I'm sure there are better not so custom ways to do this....
"""
a=corner_val_import()
cpvs={'Top_X':[x[0] for x in a[[0],[0],:].flatten()],
'Top_Y':[x[1] for x in a[[0],:,[0]].flatten()],
'Bot_X':[x[0] for x in a[[-1],[0],:].flatten()],
'Bot_Y':[x[1] for x in a[[-1],:,[0]].flatten()],
'Max-Y_Z':[x[2] for x in a[:,[-1],[0]].flatten()],
'Max-Y_X':[x[0] for x in a[[0],[-1],:].flatten()],
'Min-Y_Z':[x[2] for x in a[:,[0],[0]].flatten()],
'Min-Y_X':[x[0] for x in a[[0],[0],:].flatten()],
'Max-X_Z':[x[2] for x in a[:,[0],[-1]].flatten()],
'Max-X_Y':[x[1] for x in a[[0],:,[-1]].flatten()],
'Min-X_Z':[x[2] for x in a[:,[0],[0]].flatten()],
'Min-X_Y':[x[1] for x in a[[0],:,[0]].flatten()],
'xsec_y_half_Z':[x[2] for x in a[:,[0],[(a[[0],[0],:].size//2)]].flatten()],
'xsec_y_half_Y':[x[1] for x in a[[0],:,[(a[[0],[0],:].size//2)]].flatten()],
'xsec_x_half_Z':[x[2] for x in a[:,[(a[[0],:,[0]].size//2)],[0]].flatten()],
'xsec_x_half_X':[x[0] for x in a[[0],[(a[[0],:,[0]].size//2)],:].flatten()]}
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
cpvs.update({'xsec_y_user_'+str(xsec_user_yvals[i])+'_Z':[x[2] for x in a[:,[0],[xsec_user_yvals[i]]].flatten()],
'xsec_y_user_'+str(xsec_user_yvals[i])+'_Y':[x[1] for x in a[[0],:,[xsec_user_yvals[i]]].flatten()]})
if op_xsec_X_user == True:
for i in list(range(len(xsec_user_xvals))):
cpvs.update({'xsec_x_user_'+str(xsec_user_xvals[i])+'_Z':[x[2] for x in a[:,[xsec_user_xvals[i]],[0]].flatten()],
'xsec_x_user_'+str(xsec_user_xvals[i])+'_X':[x[0] for x in a[[0],[xsec_user_xvals[i]],:].flatten()]})
return cpvs
def centre_vals(axis):
"""Short function that simply returns the unique values of an axis - typically the centre cordinate values"""
centre=axis.unique()
return centre
def plotting(faces,name,name2,surlabel,dim1,dim2,xlabel,ylabel,data,rotate):
"""
Master controlling plotting script, run the individual plotting functions e.g. flow_vectors_cont
Returns a dictionary containing the relevant figure
"""
a=face_choose(faces[name][dim1],faces[name][dim2],faces[name][data])
c=corner_point_vals()[str(name+'_'+dim1)]#displacement values
f=corner_point_vals()[str(name+'_'+dim2)]
h=centre_vals(faces[name][dim1])#cell centres (flowdata,vector etc) or edges (displacement)
j=centre_vals(faces[name][dim2])
    # build the dictionary incrementally so that disabled plot types do not raise a NameError;
    # figures are kept as keys so callers that iterate the dict still receive figure objects
    figdict={}
    if colored_cells==True:
        figdict[plot_pcolormesh(f,c,a,data,name,faces[name2],surlabel,xlabel,ylabel,rotate)]='pcolor'
    if contour_plot==True:
        figdict[plot_contour(j,h,a,data,name,faces[name2],surlabel,xlabel,ylabel,rotate)]='contour'
    if flow_vectors_no_cont==True:
        figdict[plot_flowvectors_no_cont(j,h,f,c,a,data,name,faces[name2],surlabel,xlabel,ylabel,rotate)]='vectors_no_cont'
    if flow_vectors_cont==True:
        figdict[plot_flowvectors_cont(j,h,f,c,a,data,name,faces[name2],surlabel,xlabel,ylabel,rotate)]='vectors_cont'
    return figdict
def pdfplotting(faces,params,file_name):
"""
Pdf plotting - opens a pdf file, loops through parameters for selected faces and calls plotting() function
Writes the resulting figure to the pdf file.
"""
pp=PdfPages(filename=file_name)
if op_Top == True:
for i in list(params):
for b in plotting(faces ,'Top' ,'tpval' ,'Z=','Y','X','X(m)','Y(m)',i,rotate=False):
pp.savefig(b)
plt.close('all')
if op_Bot == True:
for i in list(params):
for b in plotting(faces ,'Bot' ,'btval' ,'Z=','Y','X','X(m)','Y(m)',i,rotate=False):
pp.savefig(b)
plt.close('all')
if op_Max_Y== True:
for i in list(params):
for b in plotting(faces ,'Max-Y','MxYval','Y=','Z','X','X(m)','Z(m)',i,rotate=False):
pp.savefig(b)
plt.close('all')
if op_Min_Y== True:
for i in list(params):
for b in plotting(faces ,'Min-Y','MnYval','Y=','Z','X','X(m)','Z(m)',i,rotate=False):
pp.savefig(b)
plt.close('all')
if op_Max_X== True:
for i in list(params):
for b in plotting(faces ,'Max-X','MxXval','X=','Y','Z','Y(m)','Z(m)',i,rotate=True ):
pp.savefig(b)
plt.close('all')
if op_Min_X== True:
for i in list(params):
for b in plotting(faces ,'Min-X','MnXval','X=','Y','Z','Y(m)','Z(m)',i,rotate=True ):
pp.savefig(b)
plt.close('all')
if op_xsec_Y_half == True:
for i in list(params):
for a in plotting(faces ,'xsec_y_half','xsec_y_val_half','X=','Y','Z','Y(m)','Z(m)',i,rotate=True ) :
pp.savefig(a)
plt.close('all')
if op_xsec_X_half == True:
for i in list(params):
for a in plotting(faces ,'xsec_x_half','xsec_x_half_val','Y=','Z','X','X(m)','Z(m)',i,rotate=False ):
pp.savefig(a)
plt.close('all')
if op_xsec_X_user == True:
for a in list(range(len(xsec_user_xvals))):
for i in list(params):
for b in plotting(faces,'xsec_x_user_'+str(xsec_user_xvals[a]),
'xsec_x_user_val'+str(xsec_user_xvals[a]),
'Y=','Z','X','X(m)','Z(m)',i,rotate=False ):
pp.savefig(b)
plt.close('all')
if op_xsec_Y_user == True:
for a in list(range(len(xsec_user_yvals))):
for i in list(params):
for b in plotting(faces,'xsec_y_user_'+str(xsec_user_yvals[a]),
'xsec_y_user_val'+str(xsec_user_yvals[a]),
'X=','Y','Z','Y(m)','Z(m)',i,rotate=True ):
pp.savefig(b)
plt.close('all')
pp.close()
def pngplotting(faces,params):
"""
Save the figure output to a png file in a sub folder called 'trexplot_output_pngs' in the current working directory.
"""
if op_Top == True:
for i in list(params):
for b in plotting(faces ,'Top' ,'tpval' ,'Z=','Y','X','X(m)','Y(m)',i,rotate=False):
b.savefig(cwd+'/trexplot_output_pngs/'+'Top'+str(i))
plt.close('all')
if op_Bot == True:
for i in list(params):
for b in plotting(faces ,'Bot' ,'btval' ,'Z=','Y','X','X(m)','Y(m)',i,rotate=False):
b.savefig(cwd+'/trexplot_output_pngs/'+'Bot'+str(i))
plt.close('all')
if op_Max_Y== True:
for i in list(params):
for b in plotting(faces ,'Max-Y','MxYval','Y=','Z','X','X(m)','Z(m)',i,rotate=False):
b.savefig(cwd+'/trexplot_output_pngs/'+'Max-Y'+str(i))
plt.close('all')
if op_Min_Y== True:
for i in list(params):
for b in plotting(faces ,'Min-Y','MnYval','Y=','Z','X','X(m)','Z(m)',i,rotate=False):
b.savefig(cwd+'/trexplot_output_pngs/'+'Min-Y'+str(i))
plt.close('all')
if op_Max_X== True:
for i in list(params):
for b in plotting(faces ,'Max-X','MxXval','X=','Y','Z','Y(m)','Z(m)',i,rotate=True ):
b.savefig(cwd+'/trexplot_output_pngs/'+'Max-X'+str(i))
plt.close('all')
if op_Min_X== True:
for i in list(params):
for b in plotting(faces ,'Min-X','MnXval','X=','Y','Z','Y(m)','Z(m)',i,rotate=True ):
b.savefig(cwd+'/trexplot_output_pngs/'+'Min-X'+str(i))
plt.close('all')
if op_xsec_Y_half == True:
for i in list(params):
            for b in plotting(faces ,'xsec_y_half','xsec_y_val_half','X=','Y','Z','Y(m)','Z(m)',i,rotate=True ):
b.savefig(cwd+'/trexplot_output_pngs/'+'xsec_y_half'+str(i))
plt.close('all')
if op_xsec_X_half == True:
for i in list(params):
            for b in plotting(faces ,'xsec_x_half','xsec_x_half_val','Y=','Z','X','X(m)','Z(m)',i,rotate=False ):
b.savefig(cwd+'/trexplot_output_pngs/'+'xsec_x_half'+str(i))
plt.close('all')
if op_xsec_X_user == True:
for a in list(range(len(xsec_user_xvals))):
for i in list(params):
for b in plotting(faces,'xsec_x_user_'+str(xsec_user_xvals[a]),
'xsec_x_user_val'+str(xsec_user_xvals[a]),
'Y=','Z','X','X(m)','Z(m)',i,rotate=False ):
b.savefig(cwd+'/trexplot_output_pngs/'+ 'xsec_x_user_val'+str(xsec_user_xvals[a])+str(i))
plt.close('all')
if op_xsec_Y_user == True:
for a in list(range(len(xsec_user_yvals))):
for i in list(params):
for b in plotting(faces,'xsec_y_user_'+str(xsec_user_yvals[a]),
'xsec_y_user_val'+str(xsec_user_yvals[a]),
'X=','Y','Z','Y(m)','Z(m)',i,rotate=True ):
b.savefig(cwd+'/trexplot_output_pngs/'+ 'xsec_y_user_val'+str(xsec_user_yvals[a])+str(i))
plt.close('all')
def fig_return(faces,params):
    """
    Return a dictionary containing figures from which they can be modified in other programs.
    Keys combine the face name, the parameter and the plot type reported by plotting().
    """
    fig_dictionary={}
    if op_Top == True:
        for i in list(params):
            for b,plot_type in plotting(faces ,'Top' ,'tpval' ,'Z=','Y','X','X(m)','Y(m)',i,rotate=False).items():
                fig_dictionary.update({'fig_Top'+str(i)+'_'+plot_type:b})
    if op_Bot == True:
        for i in list(params):
            for b,plot_type in plotting(faces ,'Bot' ,'btval' ,'Z=','Y','X','X(m)','Y(m)',i,rotate=False).items():
                fig_dictionary.update({'fig_Bot'+str(i)+'_'+plot_type:b})
    if op_Max_Y== True:
        for i in list(params):
            for b,plot_type in plotting(faces ,'Max-Y' ,'MxYval' ,'Y=','Z','X','X(m)','Z(m)',i,rotate=False).items():
                fig_dictionary.update({'fig_Max-Y'+str(i)+'_'+plot_type:b})
    if op_Min_Y== True:
        for i in list(params):
            for b,plot_type in plotting(faces ,'Min-Y' ,'MnYval' ,'Y=','Z','X','X(m)','Z(m)',i,rotate=False).items():
                fig_dictionary.update({'fig_Min-Y'+str(i)+'_'+plot_type:b})
    if op_Max_X== True:
        for i in list(params):
            for b,plot_type in plotting(faces ,'Max-X' ,'MxXval' ,'X=','Y','Z','Y(m)','Z(m)',i,rotate=True ).items():
                fig_dictionary.update({'fig_Max-X'+str(i)+'_'+plot_type:b})
    if op_Min_X== True:
        for i in list(params):
            for b,plot_type in plotting(faces ,'Min-X' ,'MnXval' ,'X=','Y','Z','Y(m)','Z(m)',i,rotate=True ).items():
                fig_dictionary.update({'fig_Min-X'+str(i)+'_'+plot_type:b})
    if op_xsec_Y_half == True:
        for i in list(params):
            for b,plot_type in plotting(faces ,'xsec_y_half','xsec_y_val_half','X=','Y','Z','Y(m)','Z(m)',i,rotate=True).items():
                fig_dictionary.update({'fig_xsec_y_half'+str(i)+'_'+plot_type:b})
    if op_xsec_X_half == True:
        for i in list(params):
            for b,plot_type in plotting(faces ,'xsec_x_half','xsec_x_half_val','Y=','Z','X','X(m)','Z(m)',i,rotate=False ).items():
                fig_dictionary.update({'fig_xsec_x_half'+str(i)+'_'+plot_type:b})
    if op_xsec_X_user == True:
        for a in list(range(len(xsec_user_xvals))):
            for i in list(params):
                for b,plot_type in plotting(faces,'xsec_x_user_'+str(xsec_user_xvals[a]),
                'xsec_x_user_val'+str(xsec_user_xvals[a]),
                'Y=','Z','X','X(m)','Z(m)',i,rotate=False ).items():
                    fig_dictionary.update({'xsec_x_user_val'+str(xsec_user_xvals[a])+str(i)+'_'+plot_type:b})
    if op_xsec_Y_user == True:
        for a in list(range(len(xsec_user_yvals))):
            for i in list(params):
                for b,plot_type in plotting(faces,'xsec_y_user_'+str(xsec_user_yvals[a]),
                'xsec_y_user_val'+str(xsec_user_yvals[a]),
                'X=','Y','Z','Y(m)','Z(m)',i,rotate=True ).items():
                    fig_dictionary.update({'xsec_y_user_val'+str(xsec_user_yvals[a])+str(i)+'_'+plot_type:b})
    return fig_dictionary
def main():
if op_Flowdata==True:
flowfaces=flowdata_import()
if op_png==True:
pngplotting(flowfaces,flowdata_params)
if op_pdf==True:
pdfplotting(flowfaces,flowdata_params,cwd+"/flow_data.pdf")
if op_fig==True:
fig_return(flowfaces,flowdata_params)
if op_Flowvector==True:
flowvecfaces=flowvector_import()
if op_png==True:
pngplotting(flowvecfaces,flowvector_params)
if op_pdf==True:
pdfplotting(flowvecfaces,flowvector_params,cwd+"/flow_vector.pdf")
# if op_fig==True:
# fig_return(flowvecfaces,flowvector_params)
if op_Displacement==True:
dispfaces=displace_import()
if op_png==True:
pngplotting(dispfaces,displacement_params)
if op_pdf==True:
pdfplotting(dispfaces,displacement_params,cwd+"/displacement.pdf")
# if op_fig==True:
# fig_return(dispfaces,displacement_params)
if op_Stress_Strain==True:
stressfaces=stress_strain_import()
if op_png==True:
pngplotting(stressfaces,stress_strain_params)
if op_pdf==True:
pdfplotting(stressfaces,stress_strain_params,cwd+"/stress_strain.pdf")
# if op_fig==True:
# fig_return(stressfaces,stress_strain_params)
# if op_fig==True:
# return fig_dictionary
if __name__ == "__main__":
main()
# In[ ]:
#Old code
#corner_val_import()[3] #returns all the x and z values for the 3rd value of op_corner_z i.e. 6 or -900 - the reshape order above i dont get but works
#corner_val_import()[:,:,[1]].flatten() #how to select[z,y,x] and then flatten
##face and data choosing
#def corner_points(cpvs,emptylist):
# #function makes a crude iumperfect mesh
# B=cpvs
# A=emptylist
# for i in range(len(B)):
# if i == 0:
# A[0]=B[i]-((B[i+1]-B[i])/2)
# A[i+1]=B[i+1]-((B[i+1]-B[i])/2)
# if (i+1)==len(B):
# break
# else:
# A[i+1]=B[i+1]-((B[i+1]-B[i])/2)
# print("cp script",A)
# return A
#
#
#
#def corner_point_vals(axis):
# last=np.array(float(axis.unique()[-1])+(float(axis.unique()[-1])-float(axis.unique()[-2])))
# cpvs=np.append(axis.unique(),last)
# print(cpvs)
# emptylist=np.zeros(len(cpvs))
# return cpvs,emptylist
#corner_val_import()[:,:,[1]].flatten() #how to select[z,y,x]
#stressfaces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,
#def plotting(faces,name,name2,surlabel,dim1,dim2,xlabel,ylabel,data,rotate):
# a=face_choose(faces[name][dim1],faces[name][dim2],faces[name][data])
# b=corner_point_vals(faces[name][dim1])
# print("B",b[0],"B1",b[1]) #return the corner points and empty list same size
# c=corner_points(b[0],b[1]) # returns an array of the correct size filled with correct corners
# print("C",c)
# e=corner_point_vals(faces[name][dim2])
# f=corner_points(e[0],e[1])
# fig=plot_pcolormesh(f,c,a,data,name,faces[name2],surlabel,xlabel,ylabel,rotate)
# return fig
#def axes_values():
# #By default the corner values from the displacement output are used.
# #google read in xyz coordinate file and make a resulting 3D mesh out of it.
# #might aswell do it properly!! Future proof plus eric has more complex meshes.
#
# if op_corner == True:
# xvals=op_corner_x
# yvals=op_corner_y
# zvals=op_corner_z
# else:
# xvals=displace_import()['Top']['X'].unique()
# yvals=displace_import()['Top']['X'].unique()
# zvals=displace_import()['Top']['X'].unique()
#
# #difa=np.diff(axis1[:])
# #print('difa=',difa)
# #difb=np.diff(axis2[:])
# #print('difb=',difb)
# dy,dx=np.gradient(facedata, axis2, axis1)
# axis2,axis1 = np.meshgrid(axis1,axis2)
#
# #dy[np.isinf(dy)]=np.nan
# #dx[np.isinf(dx)]=np.nan
# print(dy,dx)
# In[ ]:
array=[[5.327318e-08, 5.302213e-08, 5.264198e-08, 5.232477e-08, 5.226435e-08
,5.258210e-08, 5.325536e-08, 5.410714e-08, 5.487324e-08, 5.532210e-08],
[4.953998e-08, 4.913573e-08, 4.851350e-08, 4.795346e-08, 4.774013e-08,
4.807313e-08, 4.894058e-08, 5.009330e-08, 5.113477e-08, 5.174467e-08],
[4.280405e-08, 4.201194e-08, 4.078997e-08, 3.962570e-08, 3.900056e-08,
3.931232e-08, 4.059173e-08, 4.244279e-08, 4.411359e-08, 4.509325e-08],
[3.548336e-08, 3.381851e-08, 3.131880e-08, 2.888704e-08, 2.730930e-08,
2.739829e-08, 2.930735e-08, 3.251616e-08, 3.536279e-08, 3.705464e-08],
[3.500417e-08, 3.167505e-08, 2.698106e-08, 2.250465e-08, 1.925050e-08,
1.868078e-08, 2.108715e-08, 2.641765e-08, 3.093883e-08, 3.376233e-08],
[3.829746e-08, 3.372373e-08, 2.752396e-08, 2.174541e-08, 1.738411e-08,
1.632301e-08, 1.880493e-08, 2.553067e-08, 3.106102e-08, 3.467288e-08],
[3.978388e-08, 3.471880e-08, 2.794453e-08, 2.168303e-08, 1.691082e-08,
1.566582e-08, 1.815470e-08, 2.540173e-08, 3.130537e-08, 3.522191e-08],
[4.169603e-08, 3.606093e-08, 2.863494e-08, 2.183823e-08, 1.660694e-08,
1.515640e-08, 1.763109e-08, 2.546118e-08, 3.177640e-08, 3.604230e-08],
[4.410161e-08, 3.780208e-08, 2.963374e-08, 2.224080e-08, 1.649566e-08,
1.481601e-08, 1.725275e-08, 2.573638e-08, 3.250728e-08, 3.717480e-08],
[6.536926e-07, 5.937457e-07, 5.085931e-07, 4.250558e-07, 3.587753e-07,
3.242687e-07, 3.190302e-07, 2.625842e-08, 3.353554e-08, 3.866824e-08],
[9.520179e-05, 1.094854e-04, 1.194284e-04, 1.099747e-04, 8.360293e-05,
4.614698e-05, 5.924715e-06, 4.173997e-08, 3.490613e-08, 4.058102e-08],
[3.640606e-05, 9.214843e-05, 1.146954e-04, 1.063602e-04, 7.358278e-05,
3.175129e-05, 5.873365e-06, 4.257781e-08, 3.667121e-08, 4.298280e-08],
[6.332070e-07, 5.813288e-07, 5.036060e-07, 4.240752e-07, 3.599401e-07,
3.255439e-07, 3.182350e-07, 2.993201e-08, 3.889183e-08, 4.595719e-08],
[3.944654e-08, 3.513427e-08, 2.901323e-08, 2.219682e-08, 3.109058e-07,
3.500287e-07, 4.238882e-07, 5.178232e-07, 6.159438e-07, 6.885575e-07],
[3.861711e-08, 3.470954e-08, 2.903090e-08, 3.828296e-08, 8.048252e-05,
1.259718e-04, 1.641670e-04, 1.778327e-04, 1.617571e-04, 1.398119e-04],
[3.809521e-08, 3.453472e-08, 2.924624e-08, 3.887079e-08, 3.825505e-05,
1.088375e-04, 1.580838e-04, 1.710754e-04, 1.385573e-04, 5.521310e-05],
[3.783447e-08, 3.457314e-08, 2.963125e-08, 2.365725e-08, 3.137766e-07,
3.520025e-07, 4.232712e-07, 5.124309e-07, 6.007149e-07, 6.616466e-07],
[3.779601e-08, 3.479368e-08, 3.016164e-08, 2.447899e-08, 1.688958e-08,
1.840337e-08, 2.337519e-08, 2.980264e-08, 3.612878e-08, 4.082953e-08],
[3.794715e-08, 3.516983e-08, 3.081637e-08, 2.541763e-08, 1.846540e-08,
1.961266e-08, 2.407896e-08, 2.992447e-08, 3.567364e-08, 3.988821e-08],
[3.826045e-08, 3.567910e-08, 3.157718e-08, 2.645390e-08, 2.005500e-08,
2.090930e-08, 2.492847e-08, 3.026053e-08, 3.550527e-08, 3.930720e-08],
[4.079975e-08, 3.913685e-08, 3.626867e-08, 3.260099e-08, 2.884180e-08,
2.843189e-08, 3.028694e-08, 3.316273e-08, 3.604072e-08, 3.798938e-08],
[4.907877e-08, 4.811656e-08, 4.639948e-08, 4.425569e-08, 4.228165e-08,
4.159599e-08, 4.209853e-08, 4.325655e-08, 4.450283e-08, 4.532603e-08],
[5.567425e-08, 5.506284e-08, 5.397009e-08, 5.263876e-08, 5.144292e-08,
5.083017e-08, 5.085334e-08, 5.129416e-08, 5.184568e-08, 5.221874e-08],
[5.914666e-08, 5.868727e-08, 5.787003e-08, 5.688669e-08, 5.599869e-08,
5.545781e-08, 5.533023e-08, 5.550704e-08, 5.579456e-08, 5.599997e-08]]
#array= np.multiply(array,1000000)
# In[ ]:
difa= [0 ,100 ,200 ,300 ,400, 500, 600, 700, 800, 900]
difb= [1 ,1 ,1 ,1, 1, 1, 1, 1, 1, 1 , 1, 1, 1 , 1
,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1]
#difb= [100. ,100. ,100. ,100. , 50., 10., 10., 10., 10. , 10. , 10., 10., 10. , 10.
# ,10. ,10. ,10. ,10. ,10. ,10. ,100. ,100. ,100. ,100.]
# In[ ]:
dy,dx=np.gradient(array,difb,difa)
dx
# In[ ]:
x = np.linspace(-90000000,9000000, 30)
x
# In[ ]:
#def plot_flowvectors(axis1,axis2,axis3,axis4,facedata,data,name,name2,surlabel,xlabel,ylabel,rotate):
# if rotate == True:
# dy,dx=np.gradient(facedata, axis2, axis1)
# axis4,axis3 = np.meshgrid(axis3,axis4)
# else:
# dy,dx=np.gradient(facedata, axis2, axis1)
# axis3,axis4 = np.meshgrid(axis4,axis3)
#
# fig, ax = plt.subplots()
# ax.quiver(axis3, axis4, dx, dy,facedata)
# ax.set(aspect=1, title='Quiver Plot')
# plt.show()
# return fig
#def plot_flowvectors(axis1,axis2,axis3,axis4,facedata,data,name,name2,surlabel,xlabel,ylabel,rotate):
# if rotate == True:
# dy,dx=np.gradient(facedata, axis2, axis1)
# axis4,axis3 = np.meshgrid(axis3,axis4)
# else:
# dy,dx=np.gradient(facedata, axis2, axis1)
# axis3,axis4 = np.meshgrid(axis4,axis3)
#
# fig, ax = plt.subplots()
# im=ax.imshow(facedata, extent=[axis3.min(), axis3.max(), axis4.min(), axis4.max()])
# ax.quiver(axis3, axis4, dx, dy,facedata)
# ax.set(aspect=1, title='Quiver Plot')
# fig.colorbar(im)
# plt.show()
# return fig
#def plot_flowvectors_no_cont(axis1,axis2,axis3,axis4,facedata,data,name,name2,surlabel,xlabel,ylabel,rotate):
# """
# Following based on this: https://stackoverflow.com/questions/25342072/computing-and-drawing-vector-fields
# Uses
#
# """
# if rotate == True:
# dy,dx=np.gradient(facedata, axis2, axis1)
# axis4,axis3 = np.meshgrid(axis3,axis4)
# #axis2,axis1 = np.meshgrid(axis1,axis2)
#
# else:
# dy,dx=np.gradient(facedata, axis2, axis1)
# axis3,axis4 = np.meshgrid(axis3,axis4)
# #axis1,axis2 = np.meshgrid(axis1,axis2)
#
# if op_Bot == True:
# for i in list(params):
# b in plotting(faces ,'Bot' ,'btval' ,'Z=','Y','X','X(m)','Y(m)',i,rotate=False).savefig(cwd+'/trexplot_output_pngs/'+'Bot'+str(i))
# b.savefig(cwd+'/trexplot_output_pngs/'+'Bot'+str(i))
# plt.close('all')
#
| 2.359375 | 2 |
| solutions_automation/vdc/dashboard/monitoring.py | threefoldtech/js-sdk | 13 | 12782959 |
from solutions_automation.vdc.dashboard.common import CommonChatBot
from jumpscale.packages.vdc_dashboard.chats.monitoringstack import InstallMonitoringStack
class MonitoringStackAutomated(CommonChatBot, InstallMonitoringStack):
pass
| 1.15625 | 1 |
| 002-V2rayPool/core/conf.py | xhunmon/PythonIsTools | 18 | 12782960 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
import os
class Config:
__v2ray_core_path = None
__v2ray_node_path = None
def __init__(self):
self.config = configparser.ConfigParser()
parent_dir = os.path.dirname(os.path.abspath(__file__))
self.config_path = os.path.join(Config.__v2ray_core_path, 'config.json')
self.json_path = os.path.join(parent_dir, 'json_template')
# self.config.read(self.config_path)
def get_path(self, key):
# return self.config.get('path', key)
return self.config_path
def get_data(self, key):
return self.config.get('data', key)
def set_data(self, key, value):
self.config.set('data', key, value)
self.config.write(open(self.config_path, "w"))
@staticmethod
def set_v2ray_core_path(dir: str):
"""设置当前v2ray_core程序的目录"""
Config.__v2ray_core_path = dir
@staticmethod
def get_v2ray_core_path():
"""获取当前v2ray_core程序的目录"""
return Config.__v2ray_core_path
@staticmethod
def set_v2ray_node_path(dir: str):
"""设置当前v2ray保存节点的目录"""
Config.__v2ray_node_path = dir
@staticmethod
def get_v2ray_node_path():
"""获取当前v2ray保存节点的目录"""
return Config.__v2ray_node_path
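# A hedged usage sketch (paths are made up; the core path must be set before Config() is
# instantiated, because __init__ joins it with 'config.json'):
#
#   Config.set_v2ray_core_path("/opt/v2ray")
#   Config.set_v2ray_node_path("/opt/v2ray/nodes")
#   cfg = Config()
#   print(cfg.get_path("config"))   # -> /opt/v2ray/config.json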
| 2.75 | 3 |
| src/update_all_courses_utils.py | PrincetonUSG/tigersnatch | 1 | 12782961 |
# gh_stars: 1-10
# ----------------------------------------------------------------------
# update_all_courses_utils.py
# Contains utilities for update_all_courses.py for the purpose of
# multiprocessing (top-level functions required).
# ----------------------------------------------------------------------
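# A hedged usage sketch (the term code and argument format below are assumptions; the real
# driver is update_all_courses.py, which is not shown here):
#
#   term = "1242"
#   codes = get_all_dept_codes(term)
#   n_courses, n_sections = process_dept_codes(",".join(codes), term, hard_reset=False)
#
# Because these are top-level functions, update_all_courses.py can also hand them to a
# multiprocessing.Pool worker.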
from mobileapp import MobileApp
from database import Database
from sys import stderr
import time
_api = MobileApp()
# return all department codes (e.g. COS, ECE, etc.)
def get_all_dept_codes(term):
# hidden feature of MobileApp API (thanks to <NAME>
# from OIT for helping us find this)
res = _api.get_courses(term=term, subject="list")
try:
codes = tuple([k["code"] for k in res["term"][0]["subjects"]])
        # probe the first two entries so that an empty or malformed response raises
        codes[0] and codes[1]
except:
raise Exception("failed to get all department codes")
return codes
# fetches and inserts new course information into the database
def process_dept_codes(dept_codes: str, current_term_code: str, hard_reset: bool):
try:
db = Database()
old_enrollments = list(
db._db.enrollments.find(
{}, {"_id": 0, "classid": 1, "last_notif": 1, "prev_enrollment": 1}
)
)
# precompute dictionary of times of last notif
old_last_notifs = {}
old_prev_enrollments = {}
for enrollment in old_enrollments:
if "last_notif" in enrollment:
old_last_notifs[enrollment["classid"]] = enrollment["last_notif"]
if "prev_enrollment" in enrollment:
old_prev_enrollments[enrollment["classid"]] = enrollment[
"prev_enrollment"
]
courses = _api.get_courses(term=current_term_code, subject=dept_codes)
if "subjects" not in courses["term"][0]:
raise RuntimeError("no query results")
if hard_reset:
db.reset_db()
else:
db.soft_reset_db()
n_courses = 0
n_sections = 0
# iterate through all subjects, courses, and classes
for subject in courses["term"][0]["subjects"]:
print("> processing dept code", subject["code"])
for course in subject["courses"]:
courseid = course["course_id"]
if db.courses_contains_courseid(courseid):
print("already processed courseid", courseid, "- skipping")
continue
# "new" will contain a single course document to be entered
                # in the courses (and, in part, the mappings) collection
new = {
"courseid": courseid,
"displayname": subject["code"] + course["catalog_number"],
"displayname_whitespace": subject["code"]
+ " "
+ course["catalog_number"],
"title": course["title"],
"time": time.time(),
"has_reserved_seats": course["detail"]["seat_reservations"] == "Y",
}
for x in course["crosslistings"]:
new["displayname"] += "/" + x["subject"] + x["catalog_number"]
new["displayname_whitespace"] += (
"/" + x["subject"] + " " + x["catalog_number"]
)
print("inserting", new["displayname"], "into mappings")
db.add_to_mappings(new)
del new["time"]
all_new_classes = []
lecture_idx = 0
for class_ in course["classes"]:
meetings = class_["schedule"]["meetings"][0]
section = class_["section"]
# skip dummy sections (end with 99)
if section.endswith("99"):
continue
# skip 0-capacity sections
if int(class_["capacity"]) == 0:
continue
classid = class_["class_number"]
                    # in the (very rare) occurrence that a class does not have any meetings...
start_time_ = (
"Unknown"
if "start_time" not in meetings
else meetings["start_time"]
)
end_time_ = (
"Unknown"
if "end_time" not in meetings
else meetings["end_time"]
)
days_ = (
["Unknown"] if len(meetings["days"]) == 0 else meetings["days"]
)
# new_class will contain a single lecture, precept,
# etc. for a given course
new_class = {
"classid": classid,
"section": section,
"type_name": class_["type_name"],
"start_time": start_time_,
"end_time": end_time_,
"days": " ".join(days_),
"enrollment": int(class_["enrollment"]),
"capacity": int(class_["capacity"]),
}
# new_class_enrollment will contain enrollment and
# capacity for a given class within a course
new_class_enrollment = {
"classid": classid,
"courseid": courseid,
"section": section,
"enrollment": int(class_["enrollment"]),
"capacity": int(class_["capacity"]),
"swap_out": [],
}
if not hard_reset and classid in old_last_notifs:
print("preserving time of last notif for class", classid)
new_class_enrollment["last_notif"] = old_last_notifs[classid]
if not hard_reset and classid in old_prev_enrollments:
print("preserving previous enrollment for class", classid)
new_class_enrollment["prev_enrollment"] = old_prev_enrollments[
classid
]
print(
"inserting",
new["displayname"],
new_class["section"],
"into enrollments",
)
db.add_to_enrollments(new_class_enrollment)
# pre-recorded lectures are marked as 01:00 AM start
if new_class["start_time"] == "01:00 AM":
new_class["start_time"] = "Pre-Recorded"
new_class["end_time"] = ""
# lectures should appear before other section types
if class_["type_name"] == "Lecture":
all_new_classes.insert(lecture_idx, new_class)
lecture_idx += 1
else:
all_new_classes.append(new_class)
n_sections += 1
for i, new_class in enumerate(all_new_classes):
new[f'class_{new_class["classid"]}'] = new_class
print("inserting", new["displayname"], "into courses")
db.add_to_courses(new)
n_courses += 1
print(f"> processed {n_courses} courses and {n_sections} sections")
print(f"> performed a {'hard' if hard_reset else 'soft'} reset")
return n_courses, n_sections
except Exception as e:
print(f"failed to get new course data with exception message {e}", file=stderr)
return 0, 0
| 2.34375
| 2
|
examples/ws_subscriptions.py
|
PetrZufan/cryptoxlib-aio
| 90
|
12782962
|
import logging
import os
import asyncio
from cryptoxlib.CryptoXLib import CryptoXLib
from cryptoxlib.PeriodicChecker import PeriodicChecker
from cryptoxlib.Pair import Pair
from cryptoxlib.clients.binance.BinanceClient import BinanceClient
from cryptoxlib.clients.binance.BinanceWebsocket import OrderBookSymbolTickerSubscription
from cryptoxlib.version_conversions import async_run
LOG = logging.getLogger("cryptoxlib")
LOG.setLevel(logging.INFO)
LOG.addHandler(logging.StreamHandler())
print(f"Available loggers: {[name for name in logging.root.manager.loggerDict]}\n")
async def order_book_update(response: dict) -> None:
pass
class Subscriptions:
def __init__(self):
self.subscriptions = [
[
OrderBookSymbolTickerSubscription(Pair("BTC", "USDT"), callbacks = [self.call1]),
OrderBookSymbolTickerSubscription(Pair("ETH", "USDT"), callbacks = [self.call1])
],
[
OrderBookSymbolTickerSubscription(Pair("BNB", "USDT"), callbacks = [self.call2]),
OrderBookSymbolTickerSubscription(Pair("XRP", "USDT"), callbacks = [self.call2])
],
[
OrderBookSymbolTickerSubscription(Pair("ADA", "USDT"), callbacks = [self.call3]),
OrderBookSymbolTickerSubscription(Pair("DOT", "USDT"), callbacks = [self.call3])
]
]
self.subscription_set_ids = []
self.timers = [
PeriodicChecker(100),
PeriodicChecker(100),
PeriodicChecker(100)
]
async def call1(self, response : dict):
if self.timers[0].check():
print(response)
async def call2(self, response : dict):
if self.timers[1].check():
print(response)
async def call3(self, response : dict):
if self.timers[2].check():
print(response)
# global container for various subscription compositions
sub = Subscriptions()
async def main_loop(client: BinanceClient) -> None:
i = 0
sleep_sec = 1
while True:
if i == 3:
print("Unsubscribing BTC/USDT")
await client.unsubscribe_subscriptions([sub.subscriptions[0][0]])
if i == 6:
print("Unsubscribing BNB/USDT")
await client.unsubscribe_subscriptions([sub.subscriptions[1][0]])
if i == 9:
print("Unsubscribing ADA/USDT and DOT/USDT")
await client.unsubscribe_subscription_set(sub.subscription_set_ids[2])
if i == 12:
print("Unsubscribing all")
await client.unsubscribe_all()
if i == 15:
print("Subscribe BNB/BTC")
await client.add_subscriptions(sub.subscription_set_ids[0],
[OrderBookSymbolTickerSubscription(Pair("BNB", "BTC"), callbacks = [sub.call1])])
if i == 18:
print("Subscribe ETH/USDT again")
await client.add_subscriptions(sub.subscription_set_ids[0],
[OrderBookSymbolTickerSubscription(Pair("ETH", "USDT"), callbacks = [sub.call1])])
if i == 21:
print("Subscribe ADA/USDT and XRP/USDT again")
await client.add_subscriptions(sub.subscription_set_ids[1],
[OrderBookSymbolTickerSubscription(Pair("ADA", "USDT"), callbacks = [sub.call2]),
OrderBookSymbolTickerSubscription(Pair("XRP", "USDT"), callbacks = [sub.call2])])
if i == 24:
print("Shutting down websockets.")
await client.shutdown_websockets()
if i == 27:
print("Quitting the main loop.")
break
i += 1
await asyncio.sleep(sleep_sec)
async def run():
api_key = os.environ['APIKEY']
sec_key = os.environ['SECKEY']
client = CryptoXLib.create_binance_client(api_key, sec_key)
# initialize three independent websockets
sub.subscription_set_ids.append(client.compose_subscriptions(sub.subscriptions[0]))
sub.subscription_set_ids.append(client.compose_subscriptions(sub.subscriptions[1]))
sub.subscription_set_ids.append(client.compose_subscriptions(sub.subscriptions[2]))
try:
await asyncio.gather(*[
client.start_websockets(),
main_loop(client)
])
except Exception as e:
print(f"Out: {e}")
await client.close()
print("Exiting.")
if __name__ == "__main__":
async_run(run())
| 2.25
| 2
|
get_sim_face_net.py
|
chenyu3050/fullbboxrestoration
| 3
|
12782963
|
<filename>get_sim_face_net.py
from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
import numpy as np
import os
from PIL import Image
import torch.nn as nn
def preprocess(path=None,im=None):
if path is not None:
tmp = np.array(Image.open(path))
else:
tmp = im
if len(tmp.shape) > 2:
tmp = np.transpose(tmp,(2,0,1))
tmp = torch.FloatTensor(tmp)
tmp -= tmp.min()
tmp /= tmp.max()
tmp = tmp * 2 - 1
tmp = nn.Upsample(size = (160,160))(tmp.expand(3,tmp.shape[-1],tmp.shape[-1])[None,...])
return tmp
def get_sim(device, path1=None, path2=None, im1=None, im2=None):
resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)
    # Note: this MTCNN detector is created but never applied below; preprocess()
    # resizes the whole image rather than cropping a detected face.
    mtcnn = MTCNN(
image_size=160, margin=0, min_face_size=20,
thresholds=[0.6,0.7,0.7], factor=0.709,
device=device
)
with torch.no_grad():
if path1 is not None:
emb1 = resnet(preprocess(path=path1).to(device))
else:
emb1 = resnet(preprocess(im=im1).to(device))
if path2 is not None:
emb2 = resnet(preprocess(path=path2).to(device))
else:
emb2 = resnet(preprocess(im=im2).to(device))
return torch.cosine_similarity(emb1,emb2).item()
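# --- Hedged usage sketch (file names are illustrative; pretrained weights are downloaded on first use) ---
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# similarity = get_sim(device, path1="face_a.png", path2="face_b.png")
# print("cosine similarity:", similarity)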
| 2.5625
| 3
|
pyptv/client.py
|
Sumedh-k/pyptv
| 0
|
12782964
|
<reponame>Sumedh-k/pyptv
#!/usr/bin/env python
from datetime import datetime
import hmac
from hashlib import sha1
import json
import urlparse # TODO: remove in favour of better lib
import urllib
import requests
from pyptv.platform_ import Platform # don't clobber the builtin platform
from pyptv.direction import Direction
from pyptv.stop import StopFactory
from pyptv.line import LineFactory
from pyptv.run import RunFactory
from pyptv.outlet import OutletFactory
from pyptv.disruption import DisruptionFactory
from pyptv.location import parse_location
from pyptv.utils import parse_datetime_tz
API_BASE_URL = "http://timetableapi.ptv.vic.gov.au/"
class PTVClient(object):
MODES = {"train": 0,
"tram": 1,
"bus": 2,
"vline": 3,
"nightrider": 4,
"ticket_outlet": 100,
}
FLAGS = {"RR": "Reservations Required",
"GC": "Guaranteed Connection",
"DOO": "Drop Off Only",
"PUO": "Pick Up Only",
"MO": "Mondays only",
"TU": "Tuesdays only",
"WE": "Wednesdays only",
"TH": "Thursdays only",
"FR": "Fridays only",
"SS": "School days only",
}
def __init__(self, developer_id=None, api_key=None):
self.developer_id = developer_id
self.api_key = api_key
def api_request(self, api_path, timed=True):
""" Call some api end point
API request will have proper signing key appended.
"""
parsed = urlparse.urlparse(api_path)
# parse out current query
query = urlparse.parse_qsl(parsed.query)
# add timestamp
if timed:
now = datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
query.append(('timestamp', now))
# add developer id
query.append(('devid', self.developer_id))
unsigned_query = urllib.urlencode(query)
unsigned_parsed = parsed._replace(query=unsigned_query)
unsigned_path = unsigned_parsed.geturl()
digest = hmac.new(self.api_key, unsigned_path, sha1)
signature = digest.hexdigest()
query.append(('signature', signature))
signed_query = urllib.urlencode(query)
signed_parsed = unsigned_parsed._replace(query=signed_query)
signed_path = signed_parsed.geturl()
signed_url = urlparse.urljoin(API_BASE_URL, signed_path)
req = requests.get(signed_url)
data = json.loads(req.content)
return data
# API methods:
def healthcheck(self):
""" send off a health check to check the status of the system, the
local clock and the API credentials.
"""
return self.api_request("/v2/healthcheck")
def stops_nearby(self, location, mode=None, limit=None, with_distance=False):
""" Get stops near a location.
location: one of (lat, lon), a Location object, or something that has a
location property (which would be a Location object)
        mode: (optional) filter results for only this transport mode
limit: (optional) only return this many results
with_distance: (optional) return tuples of (Stop, distance)
"""
lat, lon = parse_location(location)
base_path = "/v2/nearme/latitude/{lat}/longitude/{lon}"
path = base_path.format(lat=lat, lon=lon)
stops = self.api_request(path)
stop_factory = StopFactory(self)
out = [stop_factory.create(**stop['result']) for stop in stops]
# only provide certain stop types if we are provided with a mode
if mode is not None:
out = [stop for stop in out if stop.transport_type == mode]
# enforce limit if provided
if limit is not None:
out = out[:limit]
# convert into tuple of (Stop, distance)
if with_distance:
out = [(stop, location.distance(stop.location)) for stop in out]
return out
def transport_pois_by_map(self, poi, location1,
location2, griddepth, limit=20):
""" list of points of interest within a map grid defined by location1
& location2
poi: either a transport mode or outlet. A list of poi types can be
            passed in as a comma-separated string
location1 & location2:
- are one of (lat, lon), a Location object, or something that has a
location property (which would be a Location object).
- define the top left corner (location1) and bottom right corner
(location2) of a rectangle on a map
griddepth: number of cell blocks per cluster
limit: minimum number of POIs required to create a cluster as well as
the maximum number of POIs returned
"""
lat1, lon1 = parse_location(location1)
lat2, lon2 = parse_location(location2)
base_path = "/v2/poi/{poi}/lat1/{lat1}/long1/{lon1}/" + \
"lat2/{lat2}/long2/{lon2}/" + \
"griddepth/{griddepth}/limit/{limit}"
poi_ids = ','.join([str(self.MODES[p]) for p in poi.split(',')])
path = base_path.format(poi=poi_ids, lat1=lat1, lon1=lon1,
lat2=lat2, lon2=lon2,
griddepth=griddepth, limit=limit)
data = self.api_request(path)
stop_factory = StopFactory(self)
outlet_factory = OutletFactory(self)
out = {}
for k, v in data.items():
if k == "locations":
out['locations'] = []
for location in v:
# either a Stop of an Outlet
if 'transport_type' in location:
item = stop_factory.create(**location)
else:
outlet_type = location.pop('outlet_type')
item = outlet_factory.create(transport_type=outlet_type,
**location)
out['locations'].append(item)
else:
out[k] = v
return out
def search(self, term):
""" all stops and lines that match the search term
"""
path = "/v2/search/%s" % urllib.quote(term)
data = self.api_request(path)
stop_factory = StopFactory(self)
line_factory = LineFactory(self)
out = []
for result in data:
if result['type'] == 'stop':
out.append(stop_factory.create(**result['result']))
elif result['type'] == 'line':
out.append(line_factory.create(**result['result']))
else:
out.append(result)
return out
def lines_by_mode(self, mode, name=None):
""" all the lines for a particular transport mode """
base_path = "/v2/lines/mode/{mode}"
mode_id = self.MODES[mode]
path = base_path.format(mode=mode_id)
if name is not None:
path += "?name=%s" % name
data = self.api_request(path)
line_factory = LineFactory(self)
out = []
for line in data:
out.append(line_factory.create(**line))
return out
def stops_on_a_line(self, mode, line):
""" all the stops for a particular transport mode on a given line
mode: transport mode
line: the line_id of a particular line
"""
base_path = "/v2/mode/{mode}/line/{line}/stops-for-line"
mode_id = self.MODES[mode]
path = base_path.format(mode=mode_id, line=line)
data = self.api_request(path)
stop_factory = StopFactory(self)
out = []
for line in data:
out.append(stop_factory.create(**line))
return out
def _process_departures(self, departures):
""" common reponse parser for handling a list of departures """
line_factory = LineFactory(self)
stop_factory = StopFactory(self)
run_factory = RunFactory(self)
out = []
for departure in departures:
# - platform
# -- direction
# --- line
platform_details = departure['platform']
direction_details = platform_details.pop('direction')
line_details = direction_details.pop('line')
line = line_factory.create(**line_details)
direction_details['line'] = line
direction = Direction(**direction_details)
platform_details['direction'] = direction
# --- stop
stop_details = platform_details.pop('stop')
stop = stop_factory.create(**stop_details)
platform_details['stop'] = stop
platform = Platform(**platform_details)
# - run
run_details = departure['run']
run = run_factory.create(**run_details)
timetable = parse_datetime_tz(departure["time_timetable_utc"])
if departure["time_realtime_utc"] is not None:
realtime = parse_datetime_tz(departure["time_realtime_utc"])
else:
realtime = None
if departure['flags']:
flags = ', '.join([self.FLAGS[f] for f
in departure['flags'].split('-')
if f != 'E'])
else:
flags = None
out.append({"platform": platform,
"run": run,
"flags": flags,
"time_timetable_utc": timetable,
"time_realtime_utc": realtime,
})
return out
def broad_next_departures(self, mode, stop, limit=5):
""" departure times at a particular stop, irrespective of line or
direction.
mode: transport mode
stop: stop_id of a stop
limit: max results to return
"""
base_path = "/v2/mode/{mode}/stop/{stop}/" + \
"departures/by-destination/limit/{limit}"
mode_id = self.MODES[mode]
path = base_path.format(mode=mode_id, stop=stop, limit=limit)
departures = self.api_request(path)
return self._process_departures(departures["values"])
def specific_next_departures(self, mode, line, stop,
direction, limit=5, for_utc=None):
""" departure times at a particular stop for a given line and direction
mode: transport mode
line: line_id of transport line
stop: stop_id of a stop on the line
direction: direction_id of run's direction
limit: max results to return
for_utc: (optional) date and time of the request
"""
base_path = "/v2/mode/{mode}/line/{line}/stop/{stop}/" + \
"directionid/{direction}/departures/all/limit/{limit}"
mode_id = self.MODES[mode]
path = base_path.format(mode=mode_id, line=line, stop=stop,
direction=direction, limit=limit)
if for_utc is not None:
path += "?for_utc=%s" % for_utc
departures = self.api_request(path)
return self._process_departures(departures["values"])
    def specific_next_departures_gtfs(self, mode, route_id, stop, direction,
                                      limit=5, for_utc=None):
        """ TODO: explain how this differs from previous method """
        # 'limit' parameter added so the format() call below has a value for {limit}
        base_path = "/v2/mode/{mode}/route_id/{route_id}/stop/{stop}/" + \
                    "direction/{direction}/departures/all/limit/{limit}"
        path = base_path.format(mode=mode, route_id=route_id, stop=stop,
                                direction=direction, limit=limit)
if for_utc is not None:
path += "?for_utc=%s" % for_utc
departures = self.api_request(path)
return self._process_departures(departures["values"])
def stopping_pattern(self, mode, run, stop, for_utc=None):
""" stopping pattern for a particular run from a given stop
mode: transport mode
run: transport run_id
stop: stop_id of a stop
for_utc: (optional) date and time of the request
"""
base_path = "/v2/mode/{mode}/run/{run}/stop/{stop}/stopping-pattern"
mode_id = self.MODES[mode]
path = base_path.format(mode=mode_id, run=run, stop=stop)
if for_utc is not None:
path += "?for_utc=%s" % for_utc
data = self.api_request(path)
return self._process_departures(data['values'])
def disruptions(self, modes="general"):
""" planned and unplanned disruptions on the transport network.
        modes: one or more of the following in a comma-separated string format:
general
metro-bus
metro-train
metro-tram
regional-bus
regional-coach
regional-train
"""
path = "/v2/disruptions/modes/%s" % modes
data = self.api_request(path)
factory = DisruptionFactory(self)
out = []
for mode, items in data.items():
for item in items:
out.append(factory.create(transport_type=mode, **item))
return out
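# --- Hedged usage sketch (credentials and coordinates are illustrative placeholders) ---
# client = PTVClient(developer_id='1234567', api_key='<your-ptv-api-key>')
# print(client.healthcheck())
# for stop in client.stops_nearby((-37.8183, 144.9671), mode='train', limit=3):
#     print(stop)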
| 2.25
| 2
|
roboticstoolbox/mobile/ParticleFilter.py
|
Russ76/robotics-toolbox-python
| 0
|
12782965
|
#!/usr/bin/env python3
"""
Python EKF Planner
@Author: <NAME>, original MATLAB code and Python version
@Author: <NAME>, initial MATLAB port
Based on code by <NAME>, Oxford University,
http://www.robots.ox.ac.uk/~pnewman
"""
from collections import namedtuple
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from spatialmath import base
"""
Monte-carlo based localisation for estimating vehicle pose based on
odometry and observations of known landmarks.
"""
# TODO: refactor this and EKF, RNG, history, common plots, animation, movie
class ParticleFilter:
def __init__(self, robot, sensor, R, L, nparticles=500, seed=0, x0=None,
verbose=False, animate=False, history=True, workspace=None):
"""
Particle filter
:param robot: robot motion model
:type robot: :class:`VehicleBase` subclass,
:param sensor: vehicle mounted sensor model
:type sensor: :class:`SensorBase` subclass
:param R: covariance of the zero-mean Gaussian noise added to the particles at each step (diffusion)
:type R: ndarray(3,3)
:param L: covariance used in the sensor likelihood model
:type L: ndarray(2,2)
:param nparticles: number of particles, defaults to 500
:type nparticles: int, optional
:param seed: random number seed, defaults to 0
:type seed: int, optional
:param x0: initial state, defaults to [0, 0, 0]
:type x0: array_like(3), optional
:param verbose: display extra debug information, defaults to False
:type verbose: bool, optional
:param history: retain step-by-step history, defaults to True
:type history: bool, optional
:param workspace: dimension of workspace, see :func:`~spatialmath.base.graphics.expand_dims`
:type workspace: scalar, array_like(2), array_like(4)
This class implements a Monte-Carlo estimator or particle filter for
vehicle state, based on odometry, a landmark map, and landmark
observations. The state of each particle is a possible vehicle
configuration :math:`(x,y,\theta)`. Bootstrap particle resampling is
used.
The working area is defined by ``workspace`` or inherited from the
landmark map attached to the ``sensor`` (see
:func:`~spatialmath.base.graphics.expand_dims`):
============== ======= =======
``workspace`` x-range y-range
============== ======= =======
A (scalar) -A:A -A:A
[A, B] A:B A:B
[A, B, C, D] A:B C:D
============== ======= =======
Particles are initially distributed uniform randomly over this area.
Example::
V = np.diag([0.02, np.radians(0.5)]) ** 2
robot = Bicycle(covar=V, animation="car", workspace=10)
robot.control = RandomPath(workspace=robot)
map = LandmarkMap(nlandmarks=20, workspace=robot.workspace)
W = np.diag([0.1, np.radians(1)]) ** 2
sensor = RangeBearingSensor(robot, map, covar=W, plot=True)
R = np.diag([0.1, 0.1, np.radians(1)]) ** 2
L = np.diag([0.1, 0.1])
pf = ParticleFilter(robot, sensor, R, L, nparticles=1000)
pf.run(T=10)
map.plot()
robot.plot_xy()
pf.plot_xy()
plt.plot(pf.get_std()[:100,:])
        .. note:: Set ``seed=None`` to get different behaviour from run to run; a fixed integer seed (e.g. ``seed=0``) gives repeatable runs.
:seealso: :meth:`run`
"""
self._robot = robot
self._sensor = sensor
self.R = R
self.L = L
self.nparticles = nparticles
self._animate = animate
# self.dim = sensor.map.dim
self._history = []
self.x = ()
self.weight = ()
self.w0 = 0.05
self._x0 = x0
# create a private random number stream if required
self._random = np.random.default_rng(seed)
self._seed = seed
self._keep_history = history # keep history
self._htuple = namedtuple("PFlog", "t odo xest std weights")
if workspace is not None:
self._dim = base.expand_dims(workspace)
else:
self._dim = sensor.map.workspace
self._workspace = self.robot.workspace
self._init()
def __str__(self):
#ParticleFilter.char Convert to string
#
# PF.char() is a string representing the state of the ParticleFilter
# object in human-readable form.
#
# See also ParticleFilter.display.
def indent(s, n=2):
spaces = ' ' * n
return s.replace('\n', '\n' + spaces)
s = f"ParticleFilter object: {self.nparticles} particles"
s += '\nR: ' + base.array2str(self.R)
s += '\nL: ' + base.array2str(self.L)
if self.robot is not None:
s += indent("\nrobot: " + str(self.robot))
if self.sensor is not None:
s += indent("\nsensor: " + str(self.sensor))
return s
@property
def robot(self):
"""
Get robot object
:return: robot used in simulation
:rtype: :class:`VehicleBase` subclass
"""
return self._robot
@property
def sensor(self):
"""
Get sensor object
:return: sensor used in simulation
:rtype: :class:`SensorBase` subclass
"""
return self._sensor
@property
def map(self):
"""
Get map object
:return: map used in simulation
:rtype: :class:`LandmarkMap` subclass
"""
return self._map
@property
def verbose(self):
"""
Get verbosity state
:return: verbosity
:rtype: bool
"""
return self._verbose
@property
def history(self):
"""
Get EKF simulation history
:return: simulation history
:rtype: list of namedtuples
At each simulation timestep a namedtuple of is appended to the history
list. It contains, for that time step, estimated state and covariance,
and sensor observation.
:seealso: :meth:`get_t` :meth:`get_xy` :meth:`get_std`
:meth:`get_Pnorm`
"""
return self._history
@property
def workspace(self):
"""
Size of robot workspace
:return: workspace bounds [xmin, xmax, ymin, ymax]
:rtype: ndarray(4)
Returns the bounds of the workspace as specified by constructor
option ``workspace``
"""
return self._workspace
@property
def random(self):
"""
Get private random number generator
:return: NumPy random number generator
:rtype: :class:`numpy.random.Generator`
Has methods including:
- ``integers(low, high, size, endpoint)``
- ``random(size)``
- ``uniform``
- ``normal(mean, std, size)``
- ``multivariate_normal(mean, covar, size)``
The generator is initialized with the seed provided at constructor
time every time ``init`` is called.
"""
return self._random
def _init(self, x0=None):
#ParticleFilter.init Initialize the particle filter
#
# PF.init() initializes the particle distribution and clears the
# history.
#
# Notes::
# - If initial particle states were given to the constructor the states are
# set to this value, else a random distribution over the map is used.
# - Invoked by the run() method.
self.robot.init()
self.sensor.init()
#clear the history
self._history = []
# create a new private random number generator
if self._seed is not None:
self._random = np.random.default_rng(self._seed)
self._t = 0
# initialize particles
if x0 is None:
x0 = self._x0
if x0 is None:
# create initial particle distribution as uniformly randomly distributed
# over the map workspace and heading angles
x = self.random.uniform(self.workspace[0], self.workspace[1], size=(self.nparticles,))
y = self.random.uniform(self.workspace[2], self.workspace[3], size=(self.nparticles,))
t = self.random.uniform(-np.pi, np.pi, size=(self.nparticles,))
self.x = np.c_[x, y, t]
self.weight = np.ones((self.nparticles,))
def run(self, T=10, x0=None):
"""
Run the particle filter simulation
:param T: maximum simulation time in seconds
:type T: float
:param animate: animate motion of vehicle, defaults to False
:type animate: bool, optional
:param movie: name of movie file to create, defaults to None
:type movie: str, optional
Simulates the motion of a vehicle (under the control of a driving agent)
and the EKF estimator. The steps are:
- initialize the filter, vehicle and vehicle driver agent, sensor
- for each time step:
- step the vehicle and its driver agent, obtain odometry
- take a sensor reading
- execute the EKF
- save information as a namedtuple to the history list for later display
:seealso: :meth:`history` :meth:`landmark` :meth:`landmarks`
:meth:`get_xy` :meth:`get_t` :meth:`get_std`
:meth:`plot_xy`
"""
self._init(x0=x0)
# anim = Animate(opt.movie)
# display the initial particles
if self._animate:
self.h, = plt.plot(self.x[:, 0], self.x[:, 1], 'go', zorder=0, markersize=3, markeredgecolor='none', alpha=0.3, label='particle')
# set(self.h, 'Tag', 'particles')
# self.robot.plot()
# iterate over time
for i in range(round(T / self.robot.dt)):
self._step()
# anim.add()
# anim.close()
def _step(self):
#fprintf('---- step\n')
odo = self.robot.step() # move the robot
# update the particles based on odometry
self._predict(odo)
# get a sensor reading
z, lm_id = self.sensor.reading()
if z is not None:
self._observe(z, lm_id)
#fprintf(' observe beacon #d\n', lm_id)
self._select()
# our estimate is simply the mean of the particles
x_est = self.x.mean(axis=0)
std_est = self.x.std(axis=0)
        # std is more complex for angles, need to account for 2pi wrap
        std_est[2] = np.sqrt(
            np.sum(base.angdiff(self.x[:, 2], x_est[2]) ** 2) / (self.nparticles - 1))
# display the updated particles
# set(self.h, 'Xdata', self.x(:,1), 'Ydata', self.x(:,2), 'Zdata', self.x(:,3))
if self._animate:
self.h.set_xdata(self.x[:, 0])
self.h.set_ydata(self.x[:, 1])
# if ~isempty(self.anim)
# self.anim.add()
if self._keep_history:
hist = self._htuple(
self.robot._t,
odo.copy(),
x_est,
std_est,
self.weight.copy()
)
self._history.append(hist)
def plot_pdf(self):
"""
Plot particle PDF
Displays a discrete PDF of vehicle position. Creates a 3D plot where
the x- and y-axes are the estimated vehicle position and the z-axis is
        the particle weight. Each particle is represented by a vertical line
segment of height equal to particle weight.
"""
ax = base.plotvol3()
for (x, y, t), weight in zip(self.x, self.weight):
# ax.plot([x, x], [y, y], [0, weight], 'r')
ax.plot([x, x], [y, y], [0, weight], 'skyblue', linewidth=3)
ax.plot(x, y, weight, 'k.', markersize=6)
plt.grid(True)
plt.xlabel('X')
plt.ylabel('Y')
plt.xlim()
ax.set_zlabel('particle weight')
ax.view_init(29, 59)
def _predict(self, odo):
# step 2
# update the particle state based on odometry and a random perturbation
# Straightforward code:
#
# for i=1:self.nparticles
# x = self.robot.f( self.x(i,:), odo)' + sqrt(self.R)*self.randn[2,0]
# x[2] = angdiff(x[2])
# self.x(i,:) = x
#
# Vectorized code:
self.x = self.robot.f(self.x, odo) + \
self.random.multivariate_normal((0, 0, 0), self.R, size=self.nparticles)
self.x[:, 2] = base.angdiff(self.x[:, 2])
def _observe(self, z, lm_id):
# step 3
# predict observation and score the particles
# Straightforward code:
#
# for p = 1:self.nparticles
# # what do we expect observation to be for this particle?
# # use the sensor model h(.)
# z_pred = self.sensor.h( self.x(p,:), lm_id)
#
# # how different is it
# innov[0] = z[0] - z_pred[0]
# innov[1] = angdiff(z[1], z_pred[1])
#
# # get likelihood (new importance). Assume Gaussian but any PDF works!
# # If predicted obs is very different from actual obs this score will be low
# # ie. this particle is not very good at predicting the observation.
# # A lower score means it is less likely to be selected for the next generation...
# # The weight is never zero.
# self.weight(p) = exp(-0.5*innov'*inv(self.L)*innov) + 0.05
# end
#
# Vectorized code:
invL = np.linalg.inv(self.L)
z_pred = self.sensor.h(self.x, lm_id)
z_pred[:, 0] = z[0] - z_pred[:, 0]
z_pred[:, 1] = base.angdiff(z[1], z_pred[:, 1])
LL = -0.5 * np.r_[invL[0,0], invL[1,1], 2*invL[0,1]]
e = np.c_[z_pred[:, 0]**2, z_pred[:, 1]**2, z_pred[:,0] * z_pred[:, 1]] @ LL
self.weight = np.exp(e) + self.w0
def _select(self):
# step 4
# select particles based on their weights
#
# particles with large weights will occupy a greater percentage of the
        # y axis in a cumulative plot
cdf = np.cumsum(self.weight) / self.weight.sum()
# so randomly (uniform) choosing y values is more likely to correspond to
# better particles...
iselect = self.random.uniform(0, 1, size=(self.nparticles,))
# find the particle that corresponds to each y value (just a look up)
interpfun = sp.interpolate.interp1d(cdf, np.arange(self.nparticles),
assume_sorted=True, kind='nearest', fill_value='extrapolate')
        inextgen = interpfun(iselect).astype(int)  # np.int was removed in NumPy 1.24; use builtin int
# copy selected particles for next generation..
self.x = self.x[inextgen, :]
def get_t(self):
"""
Get time from simulation
:return: simulation time vector
:rtype: ndarray(n)
Return simulation time vector, starts at zero. The timestep is an
attribute of the ``robot`` object.
"""
return np.array([h.t for h in self._history])
def get_xyt(self):
r"""
Get estimated vehicle trajectory
:return: vehicle trajectory where each row is configuration :math:`(x, y, \theta)`
:rtype: ndarray(n,3)
:seealso: :meth:`plot_xy` :meth:`run` :meth:`history`
"""
        return np.array([h.xest for h in self._history])  # full (x, y, theta) rows, matching the docstring
def get_std(self):
r"""
Get standard deviation of particles
:return: standard deviation of vehicle position estimate
:rtype: ndarray(n,2)
Return the standard deviation :math:`(\sigma_x, \sigma_y)` of the
particle cloud at each time step.
:seealso: :meth:`get_xyt`
"""
return np.array([h.std for h in self._history])
def plot_xy(self, block=False, **kwargs):
r"""
Plot estimated vehicle position
:param args: position arguments passed to :meth:`~matplotlib.axes.Axes.plot`
:param kwargs: keywords arguments passed to :meth:`~matplotlib.axes.Axes.plot`
:param block: hold plot until figure is closed, defaults to False
:type block: bool, optional
Plot the estimated vehicle path in the xy-plane.
        :seealso: :meth:`get_xyt`
"""
xyt = self.get_xyt()
plt.plot(xyt[:, 0], xyt[:, 1], **kwargs)
# plt.show(block=block)
| 2.6875
| 3
|
pytorch/AdaBoost/ada_boost.py
|
OpenSourceAI/Algorithm
| 1
|
12782966
|
<filename>pytorch/AdaBoost/ada_boost.py<gh_stars>1-10
# -*- coding: UTF-8 -*-
# File Name:ada_boost_torch
# Author : <NAME>
# Date:2019/2/18
# Description :
__author__ = '<NAME>'
import torch
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Decision stump (single-level decision tree)
class DecisionStump:
def __init__(self, X, y):
self.X = X
self.y = y
self.N = self.X.size()[0]
def train(self, W, steps=100):
min_v = float("inf")
threshold_value = 0
threshold_pos = 0
threshold_tag = 0
self.W = torch.Tensor(W)
        for i in range(self.N):  # value is the chosen threshold, errcnt the weighted error count
value, errcnt = self.find_min(i, 1, steps)
if (errcnt < min_v):
min_v = errcnt
threshold_value = value
threshold_pos = i
threshold_tag = 1
for i in range(self.N): # -1
value, errcnt = self.find_min(i, -1, steps)
if (errcnt < min_v):
min_v = errcnt
threshold_value = value
threshold_pos = i
threshold_tag = -1
self.threshold_value = threshold_value
self.threshold_pos = threshold_pos
self.threshold_res = threshold_tag
print(self.threshold_value, self.threshold_pos, self.threshold_res)
return min_v
def find_min(self, i, tag, steps):
t = 0
tmp = self.predintrain(self.X, i, t, tag).transpose(0, 1)
# print(type(tmp))
# print(type(self.y))
# ttt = tmp != self.y
# print("====", (tmp.cpu() != self.y.cpu()).size())
# print(self.W.size())
# errcnt = torch.sum((tmp != self.y).float() * self.W)
# print now
        buttom = torch.min(self.X[i, :])  # minimum of this feature (lower bound)
        up = torch.max(self.X[i, :])  # maximum of this feature (upper bound)
        minerr = float("inf")  # initialize minerr to infinity
        value = 0  # value holds the best threshold
        st = (up - buttom) / steps  # step size between candidate thresholds
for t in torch.arange(buttom, up, st):
tmp = self.predintrain(self.X, i, t, tag).transpose(0, 1)
tmp = tmp.float()
errcnt = torch.sum((tmp != self.y).float() * self.W)
if errcnt < minerr:
minerr = errcnt
value = t
return value, minerr
    def predintrain(self, test_set, i, t, tag):  # prediction during training with threshold t
test_set = test_set.view(self.N, -1)
pre_y = torch.ones((test_set.size()[1], 1))
pre_y[test_set[i, :] * tag < t * tag] = -1
return pre_y
    def pred(self, test_X):  # prediction of the weak classifier
        test_X = torch.Tensor(test_X).view(self.N, -1)  # reshape to N rows; -1 infers the number of columns
pre_y = torch.ones((torch.Tensor(test_X).size()[1], 1))
pre_y[test_X[self.threshold_pos, :] * self.threshold_res < self.threshold_value * self.threshold_res] = -1
return pre_y
class AdaBoost:
def __init__(self, X, y, Weaker=DecisionStump):
self.X = torch.Tensor(X)
self.y = torch.Tensor(y).flatten()
self.Weaker = Weaker
self.sums = torch.zeros(self.y.shape)
'''
        W holds the sample weights; initially a uniform distribution, i.e. every sample gets 1/n
'''
self.W = torch.ones((self.X.size()[1], 1)).flatten() / self.X.size()[1]
        self.Q = 0  # actual number of weak classifiers
    # M is the maximum number of weak classifiers; adjustable in main()
    def train(self, M=5):
        self.G = {}  # dict of weak classifiers
        self.alpha = {}  # coefficient of each weak classifier
for i in range(M):
self.G.setdefault(i)
self.alpha.setdefault(i)
        for i in range(M):  # self.G[i] is the i-th weak classifier
self.G[i] = self.Weaker(self.X, self.y)
            e = self.G[i].train(self.W)  # train this weak classifier with the current weights
            self.alpha[i] = 1.0 / 2 * torch.log((1 - e) / e)  # compute this classifier's coefficient
            res = self.G[i].pred(self.X)  # res is this classifier's output
            # compute training accuracy for the current round
            print("weak classifier acc", accuracy_score(self.y,
                                                        res), "\n======================================================")
            # Z is the normalization factor
            Z = self.W * torch.exp(-self.alpha[i] * self.y * res.transpose(1, 0))
            self.W = (Z / Z.sum()).flatten()  # update the sample weights
self.Q = i
            # errorcnt returns the number of misclassified points; 0 means a perfect fit
            if (self.errorcnt(i) == 0):
                print("%d weak classifiers are enough to bring the error rate down to 0" % (i + 1))
                break
    def errorcnt(self, t):  # return the number of misclassified points
self.sums = self.sums + self.G[t].pred(self.X).flatten() * self.alpha[t]
pre_y = torch.zeros_like(torch.Tensor(self.sums))
pre_y[self.sums >= 0] = 1
pre_y[self.sums < 0] = -1
t = (pre_y != self.y).sum()
return t
    def pred(self, test_X):  # prediction of the final ensemble classifier
test_X = torch.Tensor(test_X)
sums = torch.zeros(test_X.size()[1])
for i in range(self.Q + 1):
sums = sums + self.G[i].pred(test_X).flatten() * self.alpha[i]
pre_y = torch.zeros_like(torch.Tensor(sums))
pre_y[sums >= 0] = 1
pre_y[sums < 0] = -1
return pre_y
def main():
# load data
dataset = np.loadtxt('data.txt', delimiter=",")
x = dataset[:, 0:8]
y = dataset[:, 8]
# prepare train data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
# prepare test and train data
x_train = x_train.transpose()
y_train[y_train == 1] = 1
y_train[y_train == 0] = -1
x_test = x_test.transpose()
y_test[y_test == 1] = 1
y_test[y_test == 0] = -1
# train
ada = AdaBoost(x_train, y_train)
ada.train(50)
# predict
y_pred = ada.pred(x_test)
y_pred = y_pred.numpy()
print("total test", len(y_pred))
print("true pred", len(y_pred[y_pred == y_test]))
print("acc", accuracy_score(y_test, y_pred))
if __name__ == '__main__':
main()
| 2.34375
| 2
|
web/pipeline/migrations/0064_auto_20200826_2058.py
|
stevenstuber/CIT
| 10
|
12782967
|
# Generated by Django 2.2.13 on 2020-08-26 20:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0063_merge_20200826_2021'),
]
operations = [
migrations.RenameField(
model_name='censussubdivision',
old_name='households_owner_spending_30_pct_income',
new_name='households_owner_pct_spending_30_pct_income',
),
migrations.RenameField(
model_name='censussubdivision',
old_name='households_tenant_spending_30_pct_income',
new_name='households_tenant_pct_spending_30_pct_income',
),
migrations.AddField(
model_name='censussubdivision',
name='households_owner_count_mortgage',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='households_owner_count_spending_30_pct_income',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='households_tenant_count_spending_30_pct_income',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='households_tenant_count_subsidized_housing',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='pop_count_0_14',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='pop_count_14_65',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='pop_count_65',
field=models.IntegerField(null=True),
),
]
| 1.617188
| 2
|
src/node_manger.py
|
TillJohanndeiter/quorum-cluster
| 0
|
12782968
|
<reponame>TillJohanndeiter/quorum-cluster
"""
Provides NodeManger, which is responsible for handling events in the network.
"""
import time
from synchronized_set import SynchronizedSet
from observer import Observer
from src.message_dict import MessageDict, DEFAULT_MESSAGE, DISPATCH_MESSAGE, \
JSON_SEPARATOR, HANDSHAKE_MESSAGE, MESSAGE_SEPARATOR
from src.pinger import INCOMING_MESSAGE, CONNECTION_LOST, PingMan
from src.handshake import NEW_ENTERING_NODE, Handshake
from src.beans import NodeInformation, node_information_from_json
from src.vote_strategy import VoteStrategy, NEW_MASTER, NO_MAJORITY_SHUTDOWN
TIME_BETWEEN_HANDSHAKE = 2
class NodeManger(Observer):
"""
    Handles any kind of network event: lost nodes, dispatching and re-entering nodes
"""
def __init__(self, own_information: NodeInformation,
ping_man: PingMan,
handshaker: Handshake,
message_dict: MessageDict,
connected: SynchronizedSet,
vote_strategy: VoteStrategy):
super(NodeManger, self).__init__()
self.own_information = own_information
self.ping_man = ping_man
self.handshaker = handshaker
self.message_dict = message_dict
self.connected = connected
self.dispatched = SynchronizedSet(set())
self.lost = SynchronizedSet(set())
self.vote_strategy = vote_strategy
self.master = None
self.running = False
def start(self):
"""
        Calculate the initial master, then start the PingMan and Handshake
        instances.
:return: None
"""
self.vote_strategy.calc_new_master(self.connected, self.dispatched, self.lost)
try:
self.running = True
time.sleep(TIME_BETWEEN_HANDSHAKE)
self.ping_man.start()
time.sleep(TIME_BETWEEN_HANDSHAKE)
self.handshaker.start()
except KeyboardInterrupt:
pass
def kill(self):
"""
End PingMan and Handshake directly.
:return: None
"""
self.ping_man.kill()
self.handshaker.kill()
self.running = False
def dispatch(self):
"""
Will first end Handshake, then wait unit everybody received dispatch message
and then end pingman to ensure that everybody know that shutdown was wanted.
:return: None
"""
self.running = False
self.handshaker.kill()
self.message_dict.add_dispatch_message(self.own_information, self.connected)
self.message_dict.wait_until_everybody_received(self.__own_dispatch_message())
self.connected.clear()
self.ping_man.kill()
def __own_dispatch_message(self):
return DISPATCH_MESSAGE + JSON_SEPARATOR + self.own_information.to_json()
def update(self, update_value):
"""
Handle changes in the network by react to specific events
from observed instances
:param update_value: Notification about what happened
:return: None
"""
update_value = update_value[0]
event = update_value.name
if event == NEW_ENTERING_NODE:
self.__handle_entering_node(update_value)
elif event == INCOMING_MESSAGE:
self.__handle_messages(update_value)
elif event == CONNECTION_LOST:
self.__handle_connection_lost(update_value)
elif event == NEW_MASTER:
old_master, new_master = update_value.value
self.master = new_master
elif event == NO_MAJORITY_SHUTDOWN:
self.dispatch()
def __handle_connection_lost(self, new_value):
"""
        React to a lost connection by removing the node from connected and adding
        it to lost, unless it has already dispatched. If more nodes are lost than
        connected, start dispatching; otherwise trigger a new master calculation.
:param new_value:
:return:
"""
lost_node = new_value.value
self.message_dict.delete_message_for_node(lost_node)
if lost_node in self.connected and lost_node not in self.dispatched:
self.connected.remove(lost_node)
self.lost.add(lost_node)
if len(self.lost) > len(self.connected):
print('{} dispatching because more lost than connected'.format(
self.own_information.name))
if self.running:
self.dispatch()
else:
self.vote_strategy.calc_new_master(self.connected,
self.dispatched,
self.lost)
def __handle_messages(self, new_value):
"""
        Sorts the messages so that handshakes are handled first, to avoid missing
        information. Each message is then handled by __handle_message.
:param new_value:
:return:
"""
messages = str(new_value.value).split(MESSAGE_SEPARATOR)
messages.sort(key=lambda x: x.startswith(HANDSHAKE_MESSAGE))
for message in messages:
self.__handle_message(message)
def __handle_message(self, msg):
"""
React to different type of messages.
:param msg: incoming messages
:return: None
"""
subject = msg.split(JSON_SEPARATOR)[0]
if subject == DEFAULT_MESSAGE:
json = msg.split(JSON_SEPARATOR)[1]
node_info = node_information_from_json(json)
self.vote_strategy.vote_for(node_info, self.connected,
self.dispatched, self.lost)
json = msg.split(JSON_SEPARATOR)[1]
node_info = node_information_from_json(json)
if subject == DISPATCH_MESSAGE:
self.__handle_dispatch_msg(node_info)
self.message_dict.delete_message_for_node(node_info)
elif subject == HANDSHAKE_MESSAGE:
self.__handle_handshake_message(node_info)
def __handle_handshake_message(self, node_info):
"""
Add Node to connected and remove old node form dispatched if
new node has same name. After this will initiate calc of new master.
:param node_info: new node
:return: None
"""
if node_info != self.own_information:
self.message_dict.add_node(node_info)
self.__remove_node_from_dispatch_if_same_name(node_info)
self.connected.add(node_info)
#print('{} add {} to connected len of connected {}. Json {}'.format(self.own_information.name,
# node_info.name,
# len(self.connected), node_info.to_json()))
if node_info in self.dispatched:
self.dispatched.remove(node_info)
if node_info in self.lost:
self.lost.remove(node_info)
self.vote_strategy.calc_new_master(self.connected,
self.dispatched,
self.lost)
def __handle_dispatch_msg(self, node_info):
"""
Remove node from lost or connected and add to dispatch. Will also initiate calculation of new master
:param node_info: dispatching node
:return: None
"""
print('{} Dispatched from {}'.format(self.own_information.name, node_info.name))
if node_info in self.connected:
self.connected.remove(node_info)
if node_info in self.lost:
self.lost.remove(node_info)
self.dispatched.add(node_info)
if len(self.lost) > len(self.connected):
print('{} dispatching because more lost than connected'.format(
self.own_information.name))
if self.running:
self.dispatch()
else:
self.vote_strategy.calc_new_master(self.connected,
self.dispatched,
self.lost)
def __remove_node_from_dispatch_if_same_name(self, node_info):
"""
        Check whether the entering node has the same name as a dispatched node
        and remove the dispatched entry if so.
:param node_info: node to check
:return: None
"""
for old_node in self.dispatched.copy():
if old_node.name == node_info.name and old_node in self.dispatched:
self.dispatched.remove(old_node)
def __handle_entering_node(self, new_value):
"""
Will add new node to connected, add handshake in message dict for new node and
initiate calculation of new master
:param new_value: update with node
:return: None
"""
node_info = new_value.value
if node_info != self.own_information:
self.message_dict.add_handshake_message(own=self.own_information, target=node_info)
self.__remove_node_from_dispatch_if_same_name(node_info)
self.connected.add(node_info)
self.vote_strategy.calc_new_master(self.connected,
self.dispatched,
self.lost)
| 2.390625
| 2
|
kata/odds.py
|
PauloBernal/Codewars
| 0
|
12782969
|
# Solution by PauloBA
def odds(values):
oddNums = []
for i in values:
if i % 2 == 1:
oddNums.append(i)
return oddNums
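# Hedged usage example (not part of the original kata file):
if __name__ == "__main__":
    print(odds([1, 2, 3, 4, 5]))  # -> [1, 3, 5]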
| 3.578125
| 4
|
app/routes/api.py
|
skielred/osmosis
| 2
|
12782970
|
from datetime import datetime
import logging
from flask import jsonify, request
from app import app, db
from app.models import Player, Chart, Score
from app.ranking import update_pb_for_score, update_player_osmos
from app.lazer import process_lazer_payload
from . import dumb_decryption
@app.route('/versions')
def versions():
return jsonify({
'osu': app.config.get('REQUIRED_OSU_VERSION', 0),
'pusher': app.config.get('REQUIRED_PUSHER_VERSION', 0),
})
@app.route('/lazer', methods=['POST'])
def lazer_score():
return score(process_lazer_payload(request.json), decrypt=False)
@app.route('/score', methods=['POST'])
def score(data=None, decrypt=True):
data = data or request.data
if not data:
logging.warning('Empty request')
return 'No data', 400
print('got a score!!')
print(data)
data = dumb_decryption(data) if decrypt else data
try:
player = Player.query.get(data['player']['id'])
if not player:
player = Player(data['player'])
else:
player.update_fields(data['player'])
chart = Chart.query.get(data['chart']['chart_id'])
if not chart:
chart = Chart(data['chart'])
else:
chart.update_fields(data['chart'])
data['score']['hash'] = data['chart'].get('hash')
score = Score(data['score'], chart)
score.achieved_at = datetime.utcnow()
score.player = player
score.version = 6
if not score.is_supported():
db.session.rollback()
print('score ignored because not supported')
return 'Not OK'
db.session.add_all([player, chart, score])
db.session.commit()
print('pushed to db! ({} played by {})'.format(
chart.name, player.username
))
score.set_osmos()
print('osmos set')
print('updating pb if needed')
if update_pb_for_score(player, score):
print('updated pb returned true')
update_player_osmos(player)
player.playcount += 1
db.session.commit()
except Exception as e:
db.session.rollback()
logging.warning(f'Malformed score payload: \n{data}')
        logging.warning(e, exc_info=True)
        raise  # re-raise: logging.warning() returns None, so 'raise logging.warning(...)' was a bug
return 'OK'
| 2.359375
| 2
|
mdso/data/gen_toeplitz_Rmat.py
|
artk2019/AISTAT_2019_107
| 0
|
12782971
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generate various Similarity matrix
through the MatrixGenerator methods
gen_matrix for synthetic data, and
gen_E_coli_matrix for DNA data.
"""
import numpy as np
# from scipy import sparse as sp
from scipy.linalg import toeplitz
def gen_lambdas(type_matrix, n):
'''
Generates lambdas to define a toeplitz matrix with
diagonal elements t_k = lambdas[k]
'''
array_lambdas = np.zeros(n)
if type_matrix == 'LinearBanded':
# Bandwidth = 10% ?
cov = int(np.floor(n/10))
array_lambdas[:cov] = cov - abs(np.arange(cov))
elif type_matrix == 'LinearStrongDecrease':
alpha = 0.1
array_lambdas = np.exp(-alpha*np.arange(n))
elif type_matrix == 'CircularBanded':
# Bandwidth = 10% ?
cov = int(np.floor(n/10))
array_lambdas[:cov] = cov - abs(np.arange(cov))
array_lambdas[-cov:] = array_lambdas[:cov][::-1]
elif type_matrix == 'CircularStrongDecrease':
alpha = 0.1
array_lambdas = np.exp(-alpha*np.arange(n))
p = int(np.floor(n/2))
array_lambdas[-p:] = array_lambdas[:p][::-1]
else:
raise ValueError("Unrecognized type_matrix !")
return(array_lambdas)
def gen_toeplitz_sim(lambdas):
'''Build Toeplitz strong-R-matrix'''
return(toeplitz(lambdas))
#
#
# def sym_max(X):
# """
# Returns symmetrization of sparse matrix X.
# X_sym = max(X, X.T) rather than X + X.T to avoid adding up values when
# there are duplicates in the overlap file.
# If X is triangular, max(X, X.T) and X + X.T are equal.
#
# TODO : check how many values are not symmetric
# and separate cases where Aij = 0 ...
# """
#
# dif_mat = X - X.T
# dif_mat.data = np.where(dif_mat.data < 0, 1, 0)
# return X - X.multiply(dif_mat) + X.T.multiply(dif_mat)
class MatrixGenerator():
# Apply permutation
def apply_perm(self, perm):
'''
Apply a permutation to the similarity matrix.
perm is given as a numpy array
'''
n_ = self.n
# check size is ok
if np.shape(perm)[0] != n_:
            raise ValueError('the size of the permutation matrix does not '
                             'match that of the similarity matrix.')
# check perm is a permutation
if not (np.sort(perm) == np.arange(n_)).all():
            raise ValueError('perm is not considered as a '
                             'permutation matrix of [0; ...; n-1]')
self.sim_matrix = self.sim_matrix[perm]
self.sim_matrix = self.sim_matrix.T[perm]
self.sim_matrix = self.sim_matrix.T
return self
# Add additive noise
def add_sparse_noise(self, noise_prop, noise_eps,
law='uniform'):
'''
        Add symmetric sparse noise to the similarity matrix.
        noise_prop controls the support of the sparse noise
        noise_eps controls the eps amplitude of the noise
'''
n_ = self.n
# first find a random support
N = np.tril(np.random.rand(n_, n_))
idx = np.where(N > noise_prop)
N[idx] = 0
# allocate value on the support
[ii, jj] = np.where(N != 0)
if law == 'gaussian':
N[np.where(N != 0)] = noise_eps * np.abs(
np.random.normal(0, 1, len(ii)))
elif law == 'uniform':
N[np.where(N != 0)] = noise_eps*np.random.rand(1, len(ii))
        # symmetrize the noise
N = N + N.T
# Add noise to similarity matrix
self.sim_matrix += N
return self
def gen_matrix(self, n, type_matrix='LinearBanded',
apply_perm=True, perm=None,
noise_prop=1, noise_ampl=0, law='uniform'):
self.n = n
lambdas = gen_lambdas(type_matrix, n)
self.sim_matrix = gen_toeplitz_sim(lambdas)
if apply_perm:
            if perm is None:  # generate permutation if not provided ('not perm' would fail on arrays)
perm = np.random.permutation(n)
self.apply_perm(perm)
self.true_perm = perm
else:
self.true_perm = np.arange(n)
if noise_ampl > 0:
normed_fro = np.sqrt(np.mean(self.sim_matrix**2))
self.add_sparse_noise(noise_prop, noise_ampl*normed_fro, law=law)
return self
#
# def gen_E_coli_matrix(self, apply_perm=False):
# """
# generate similarity matrix from <NAME>i ONT reads [ref Loman et al.]
# TODO :
# - change the path to data folder if this is a package ?
# - recompute reads_pos with minimap2 instead of BWA.
# """
# # Read data matrix
# data_dir = './data/'
# mat_fn = data_dir + 'ecoli_mat.csv'
# pos_fn = data_dir + 'ecoli_ref_pos.csv'
# mat_idxs = np.genfromtxt(mat_fn, delimiter=',')
# reads_pos = np.genfromtxt(pos_fn, delimiter=',')
# n_reads = reads_pos.shape[0]
# sim_mat = sp.coo_matrix((mat_idxs[:, 2],
# (mat_idxs[:, 0]-1, mat_idxs[:, 1]-1)),
# shape=(n_reads, n_reads),
# dtype='float64').tocsr()
# sim_mat = sym_max(sim_mat)
# # Remove unaligned reads (unknown ground turh position)
# in_idx = np.argwhere(reads_pos < 7e6)[:, 0]
# sim_lil = sim_mat.tolil()
# self.n = len(in_idx)
# if apply_perm:
# perm = np.random.permutation(self.n)
# self.true_perm = perm
# in_idx = in_idx[perm]
# else:
# self.true_perm = np.arange(self.n)
# sim_lil = sim_lil[in_idx, :][:, in_idx]
# self.sim_matrix = sim_lil.tocsr()
#
# return self
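# Hedged usage sketch (not part of the original file); parameter values are illustrative.
if __name__ == "__main__":
    gen = MatrixGenerator().gen_matrix(200, type_matrix='CircularBanded',
                                       apply_perm=True, noise_prop=0.5, noise_ampl=0.1)
    print("similarity matrix shape:", gen.sim_matrix.shape)
    print("true permutation (first entries):", gen.true_perm[:5])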
| 2.875
| 3
|
core/templatetags/team.py
|
6ba/bbgo
| 22
|
12782972
|
<filename>core/templatetags/team.py
from django import template
# from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from teams.models import Team
register = template.Library()
@register.inclusion_tag('teams/show_team.html', takes_context=True)
def show_team(context, id):
"""Show team"""
user = context['request'].user
article = get_object_or_404(Team, pk=id)
slot_users = article.slot_users.all()
return {
'user': user,
'table': article.table,
'article_id': article.id,
'article_user': article.user,
'slot_in': article.slot,
'empty_slots': article.slot_total - article.slot,
'slot_users': slot_users,
}
| 1.953125
| 2
|
mapeo/migrations/0002_auto_20151117_1648.py
|
shiminasai/plataforma_FADCANIC
| 0
|
12782973
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mapeo', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='organizaciones',
name='slug',
field=models.SlugField(default=1, max_length=450, editable=False),
preserve_default=False,
),
migrations.AlterField(
model_name='organizaciones',
name='tipo',
field=models.IntegerField(choices=[(1, b'Organizaci\xc3\xb3n que apoya y participa con la Campa\xc3\xb1a'), (2, b'Comit\xc3\xa9 comunal'), (3, b'Diplomado de promotor\xc3\xada'), (4, b'Diplomado de comunicaci\xc3\xb3n')]),
),
]
| 1.742188
| 2
|
00_TrackerApp/backend/repositories/user.py
|
JADSN/ReactCodes
| 0
|
12782974
|
<filename>00_TrackerApp/backend/repositories/user.py
# from datetime import datetime, timezone
# from schemas import UserItem
# from models import UserItem
# from dependencies import Session, Depends, HTTPException, status, get_db, oauth2_scheme
# from tokenizer import TokenData, SECRET_KEY, ALGORITHM
# from hasher import hash_password
# from jose import jwt, JWTError
# def read_first_user(email: str, db: Session = Depends(get_db)):
# item = db.query(UserItem).where(UserItem.email == email)
# if not item.first():
# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
# detail=f"Item with id {id} not found")
# db.commit()
# user = item.first()
# return UserItem(email=user.email, password=<PASSWORD>)
# def create(req_body: UserItem, db: Session = Depends(get_db)):
# hashed_password = <PASSWORD>_password(req_body.password)
# new_data = UserItem(
# email=req_body.email, password=<PASSWORD>)
# db.add(new_data)
# db.commit()
# db.refresh(new_data)
# return new_data
| 2.28125
| 2
|
dit_helpdesk/hierarchy/migrations/0011_auto_20200910_1653.py
|
uktrade/dit-helpdesk
| 3
|
12782975
|
# Generated by Django 2.2.13 on 2020-09-10 15:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("hierarchy", "0010_auto_20200910_1650")]
operations = [
migrations.AlterUniqueTogether(
name="subheading",
unique_together={("commodity_code", "description", "nomenclature_tree")},
)
]
| 1.476563
| 1
|
src/ghutil/cli/gist/clone.py
|
jwodder/ghutil
| 6
|
12782976
|
<reponame>jwodder/ghutil
import click
from ghutil.git import clone_repo
from ghutil.types import Gist
@click.command()
@click.option('--https', 'url', flag_value='https', help='Clone over HTTPS')
@click.option('--ssh', 'url', flag_value='ssh', default=True,
help='Clone over SSH [default]')
@Gist.argument('gist', implicit=False)
@click.argument('dir', required=False)
def cli(gist, dir, url): # noqa: B002
""" Clone a gist """
if url == 'https':
clone_url = gist.data["git_pull_url"]
elif url == 'ssh':
# Why isn't this URL returned by the API?
clone_url = f'git@gist.github.com:{gist.id}.git'
clone_repo(clone_url, dir)
| 2.78125
| 3
|
data_structures/binary_search_tree.py
|
dushyantss/william-fiset-data-structures-playlist
| 0
|
12782977
|
<reponame>dushyantss/william-fiset-data-structures-playlist
from __future__ import annotations  # allows 'Node' in annotations before the class is fully defined
from typing import Optional
class Node:
    def __init__(self, value, /, *, left: Optional[Node] = None, right: Optional[Node] = None):
self.value = value
self.left = left
self.right = right
class BinarySearchTree:
def __init__(self):
self.root: Optional[Node] = None
self.node_count = 0
def __len__(self):
return self.node_count
def add(self, value) -> bool:
if self.contains(value):
return False
self.root = self.__do_add(self.root, value)
self.node_count += 1
return True
def remove(self, value) -> bool:
if self.contains(value):
self.root = self.__do_remove(self.root, value)
self.node_count -= 1
return True
return False
def contains(self, value) -> bool:
return self.__do_contains(self.root, value)
def __do_add(self, node: Optional[Node], value) -> Node:
if not node:
node = Node(value)
elif value < node.value:
node.left = self.__do_add(node.left, value)
else:
node.right = self.__do_add(node.right, value)
return node
def __do_remove(self, node: Optional[Node], value) -> Optional[Node]:
if node is None:
return None
if value < node.value:
node.left = self.__do_remove(node.left, value)
elif value > node.value:
node.right = self.__do_remove(node.right, value)
elif node.left is None:
node = node.right
elif node.right is None:
node = node.left
else:
successor = self.__dig_left(node.right)
node.value = successor.value
node.right = self.__do_remove(node.right, successor.value)
return node
def __dig_left(self, node: Node) -> Node:
while node.left is not None:
node = node.left
return node
def __do_contains(self, node: Optional[Node], value) -> bool:
if node is None:
return False
if value < node.value:
return self.__do_contains(node.left, value)
elif value > node.value:
return self.__do_contains(node.right, value)
else:
return True
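# Minimal usage sketch (illustrative only, not part of the original file):
#     bst = BinarySearchTree()
#     for v in (5, 3, 8, 1):
#         bst.add(v)
#     assert bst.contains(3) and len(bst) == 4
#     assert bst.remove(3) and not bst.contains(3)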
| 3.71875
| 4
|
python programs/variable.py
|
saddam-gif/Python-crushcourse
| 0
|
12782978
|
print("Hello World")
message = "Be humble"
print(message)
message = "Be humble to the Everyone"
print(message)
| 2.6875
| 3
|
modules/objectdetection/object_detection.py
|
tcastel-ippon/object-detection-serverless
| 5
|
12782979
|
<filename>modules/objectdetection/object_detection.py<gh_stars>1-10
import cv2
import numpy as np
def get_output_layers(net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1]
for i in net.getUnconnectedOutLayers()]
return output_layers
def draw_prediction(
img,
class_id,
classes,
confidence,
x,
y,
x_plus_w,
y_plus_h,
colors):
label = str(classes[class_id]) + str(format(confidence * 100, '.2f')) + '%'
color = colors[class_id]
cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
cv2.putText(img, label, (x - 10, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
def draw_bounding_boxes(
image,
boxes,
indices,
class_ids,
classes,
confidences,
colors):
for i in indices:
i = i[0]
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
draw_prediction(
image,
class_ids[i],
classes,
confidences[i],
round(x),
round(y),
round(
x + w),
round(
y + h),
colors)
def keep_relevant_predictions(outs, width, height):
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
return boxes, confidences, class_ids
def object_detection(image, classes, model_weight_path, model_conf_path):
net = cv2.dnn.readNetFromDarknet(model_conf_path, model_weight_path)
# prepare model
scale = 0.00392
blob = cv2.dnn.blobFromImage(
image, scale, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
# Apply model
outs = net.forward(get_output_layers(net))
# Keep relevant predictions
boxes, confidences, class_ids = keep_relevant_predictions(
outs=outs, width=image.shape[1], height=image.shape[0])
# Apply NMS
conf_threshold = 0.5
nms_threshold = 0.4
indices = cv2.dnn.NMSBoxes(
boxes,
confidences,
conf_threshold,
nms_threshold)
# Draw Bounding Box
colors = np.random.uniform(0, 255, size=(len(classes), 3))
draw_bounding_boxes(
image,
boxes,
indices,
class_ids,
classes,
confidences,
colors)
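# Minimal usage sketch (file names and the class list are hypothetical; the
# function draws the boxes in-place on the passed image):
#     classes = open("coco.names").read().splitlines()
#     img = cv2.imread("input.jpg")
#     object_detection(img, classes, "yolov3.weights", "yolov3.cfg")
#     cv2.imwrite("detected.jpg", img)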
| 2.640625
| 3
|
dvc/fs/_callback.py
|
dtrifiro/dvc
| 0
|
12782980
|
from contextlib import ExitStack
from functools import wraps
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, overload
import fsspec
from funcy import cached_property
if TYPE_CHECKING:
from typing import BinaryIO, Callable, TextIO, Union
from typing_extensions import ParamSpec
from dvc.progress import Tqdm
from dvc.ui._rich_progress import RichTransferProgress
_P = ParamSpec("_P")
_R = TypeVar("_R")
class FsspecCallback(fsspec.Callback):
"""FsspecCallback usable as a context manager, and a few helper methods."""
@overload
def wrap_attr(self, fobj: "BinaryIO", method: str = "read") -> "BinaryIO":
...
@overload
def wrap_attr(self, fobj: "TextIO", method: str = "read") -> "TextIO":
...
def wrap_attr(
self, fobj: "Union[TextIO, BinaryIO]", method: str = "read"
) -> "Union[TextIO, BinaryIO]":
from tqdm.utils import CallbackIOWrapper
wrapped = CallbackIOWrapper(self.relative_update, fobj, method)
return wrapped
def wrap_fn(self, fn: "Callable[_P, _R]") -> "Callable[_P, _R]":
@wraps(fn)
def wrapped(*args: "_P.args", **kwargs: "_P.kwargs") -> "_R":
res = fn(*args, **kwargs)
self.relative_update()
return res
return wrapped
def wrap_and_branch(self, fn: "Callable") -> "Callable":
"""
Wraps a function, and pass a new child callback to it.
When the function completes, we increment the parent callback by 1.
"""
wrapped = self.wrap_fn(fn)
@wraps(fn)
def func(path1: str, path2: str):
kw: Dict[str, Any] = {}
with self.branch(path1, path2, kw):
return wrapped(path1, path2, **kw)
return func
def __enter__(self):
return self
def __exit__(self, *exc_args):
self.close()
def close(self):
"""Handle here on exit."""
def relative_update(self, inc: int = 1) -> None:
inc = inc if inc is not None else 0
return super().relative_update(inc)
def absolute_update(self, value: int) -> None:
value = value if value is not None else self.value
return super().absolute_update(value)
@classmethod
def as_callback(
cls, maybe_callback: Optional["FsspecCallback"] = None
) -> "FsspecCallback":
if maybe_callback is None:
return DEFAULT_CALLBACK
return maybe_callback
@classmethod
def as_tqdm_callback(
cls,
callback: Optional["FsspecCallback"] = None,
**tqdm_kwargs: Any,
) -> "FsspecCallback":
return callback or TqdmCallback(**tqdm_kwargs)
@classmethod
def as_rich_callback(
cls, callback: Optional["FsspecCallback"] = None, **rich_kwargs
):
return callback or RichCallback(**rich_kwargs)
def branch(
self,
path_1: str,
path_2: str,
kwargs: Dict[str, Any],
child: "FsspecCallback" = None,
) -> "FsspecCallback":
child = kwargs["callback"] = child or DEFAULT_CALLBACK
return child
class NoOpCallback(FsspecCallback, fsspec.callbacks.NoOpCallback):
pass
class TqdmCallback(FsspecCallback):
def __init__(
self,
size: Optional[int] = None,
value: int = 0,
progress_bar: "Tqdm" = None,
**tqdm_kwargs,
):
tqdm_kwargs["total"] = size or -1
self._tqdm_kwargs = tqdm_kwargs
self._progress_bar = progress_bar
self._stack = ExitStack()
super().__init__(size=size, value=value)
@cached_property
def progress_bar(self):
from dvc.progress import Tqdm
progress_bar = (
self._progress_bar
if self._progress_bar is not None
else Tqdm(**self._tqdm_kwargs)
)
return self._stack.enter_context(progress_bar)
def __enter__(self):
return self
def close(self):
self._stack.close()
def set_size(self, size):
# Tqdm tries to be smart when to refresh,
# so we try to force it to re-render.
super().set_size(size)
self.progress_bar.refresh()
def call(self, hook_name=None, **kwargs):
self.progress_bar.update_to(self.value, total=self.size)
def branch(
self,
path_1: str,
path_2: str,
kwargs,
child: Optional[FsspecCallback] = None,
):
child = child or TqdmCallback(bytes=True, desc=path_1)
return super().branch(path_1, path_2, kwargs, child=child)
class RichCallback(FsspecCallback):
def __init__(
self,
size: Optional[int] = None,
value: int = 0,
progress: "RichTransferProgress" = None,
desc: str = None,
bytes: bool = False, # pylint: disable=redefined-builtin
unit: str = None,
disable: bool = False,
) -> None:
self._progress = progress
self.disable = disable
self._task_kwargs = {
"description": desc or "",
"bytes": bytes,
"unit": unit,
"total": size or 0,
"visible": False,
"progress_type": None if bytes else "summary",
}
self._stack = ExitStack()
super().__init__(size=size, value=value)
@cached_property
def progress(self):
from dvc.ui import ui
from dvc.ui._rich_progress import RichTransferProgress
if self._progress is not None:
return self._progress
progress = RichTransferProgress(
transient=True,
disable=self.disable,
console=ui.error_console,
)
return self._stack.enter_context(progress)
@cached_property
def task(self):
return self.progress.add_task(**self._task_kwargs)
def __enter__(self):
return self
def close(self):
self.progress.clear_task(self.task)
self._stack.close()
def call(self, hook_name=None, **kwargs):
self.progress.update(
self.task,
completed=self.value,
total=self.size,
visible=not self.disable,
)
def branch(
self, path_1, path_2, kwargs, child: Optional[FsspecCallback] = None
):
child = child or RichCallback(
progress=self.progress, desc=path_1, bytes=True
)
return super().branch(path_1, path_2, kwargs, child=child)
DEFAULT_CALLBACK = NoOpCallback()
| 2.125
| 2
|
azure-de10nano-document/sensor-aggregation-reference-design-for-azure/sw/software-code/modules/RfsModule/test/suite_test.py
|
daisukeiot/terasic-de10-nano-kit
| 0
|
12782981
|
<reponame>daisukeiot/terasic-de10-nano-kit
# Copyright (C) 2021 Intel Corporation
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import sys
sys.path.append('../')
import unittest
import gsensor_test as g_test
import rfssensor_test as rfs_test
import sensor_test as s_test
import threshold_test as th_test
import thresholdcontroller_test as thc_test
g_suite = unittest.TestLoader().loadTestsFromTestCase(g_test.TestGsensor)
rfs_suite = unittest.TestLoader().loadTestsFromTestCase(rfs_test.TestRfsSensor)
s_suite = unittest.TestLoader().loadTestsFromTestCase(s_test.TestSensor)
th_suite = unittest.TestLoader().loadTestsFromTestCase(th_test.TestThreshold)
thc_suite = unittest.TestLoader().loadTestsFromTestCase(thc_test.TestThresholdController)
suite = unittest.TestSuite([g_suite,rfs_suite,s_suite,th_suite,thc_suite])
runner = unittest.TextTestRunner()
runner.run(suite)
| 1.960938
| 2
|
python/ossid/utils/bop_utils.py
|
r-pad/OSSID_code
| 1
|
12782982
|
<gh_stars>1-10
from datetime import time
import os
import csv
import numpy as np
import pandas as pd
import subprocess
from ossid.config import BOP_TOOLKIT_PATH
def saveResultsBop(
results, output_folder, result_name, dataset_name,
split_name='test', pose_key = 'pose', score_key='score', time_key = 'time',
input_unit = 'm', # If the input is in meters, it will be converted to millimeters
run_eval_script=False, # if True, run the BOP evaluation script
):
'''
Convert a list of dict containing the pose estimation results to a csv for BOP evaluation script.
'''
result_name = result_name.replace("_", "-")
output_filename = "%s_%s-%s.csv" % (result_name, dataset_name, split_name)
output_path = os.path.join(
output_folder,
output_filename
)
csv_file = open(output_path, mode="w")
fieldnames = ["scene_id","im_id","obj_id","score","R","t","time"]
csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
csv_writer.writeheader()
for r in results:
mat = r[pose_key].copy()
mat[:3, 3] = mat[:3, 3] * 1000.0
score = r[score_key] if score_key in r else 1
time = r[time_key] if time_key in r else -1
csv_writer.writerow({
"scene_id": r['scene_id'],
"im_id": r['im_id'],
"obj_id": r['obj_id'],
"score": score,
"R": " ".join([str(_) for _ in mat[:3, :3].flatten()]),
"t": " ".join([str(_) for _ in mat[:3, 3].flatten()]),
"time": time,
})
print("BOP logs saved to:", output_path)
csv_file.close()
if run_eval_script:
print("Executing the BOP evaluation script in the background")
os.system("cd %s; PYTHONPATH='/' python scripts/eval_bop19.py --renderer_type=cpp --result_filenames=%s" % (BOP_TOOLKIT_PATH, output_filename))
def readResultsBop(path):
df = pd.read_csv(path)
results = []
for i, r in df.iterrows():
pose = np.eye(4)
R = np.asarray([float(_) for _ in r['R'].split(" ")]).reshape((3, 3))
t = np.asarray([float(_) for _ in r['t'].split(" ")])
pose[:3, :3] = R
pose[:3, 3] = t
results.append({
"obj_id": int(r['obj_id']),
"scene_id": int(r['scene_id']),
"im_id": int(r['im_id']),
"score": float(r['score']),
"time": float(r['time']),
"pose": pose
})
return results
| 2.4375
| 2
|
gui.py
|
nehal2000/spell-checker
| 0
|
12782983
|
<filename>gui.py
from tkinter import *
import subprocess
from subprocess import *
def end():
screen.destroy()
def clear():
inputT.delete("1.0",END)
outputT.delete("1.0",END)
def check():
a= inputT.get("1.0","end")
m=len(a)
a=a[:m-1]
if " " in a or not a.isalpha():
def exit_popup():
er.destroy()
inputT.delete("1.0",END)
er= Toplevel(screen)
x = screen.winfo_x()+150
y = screen.winfo_y()+100
w = 450
h = 250
er.geometry("%dx%d+%d+%d" % (w, h, x, y))
er.title("FATAL ERROR")
label1=Label(er,text="INVALID INPUT",font=("Calibri", 22, "bold"), fg="red").place(x=130,y=25)
label2=Label(er,text="Enter a single word",font=("Calibri", 15, "bold")).place(x=140,y=70)
label3=Label(er,text="Input word must contain alphabets only",font =("Calibri", 15, "bold")).place(x=50,y=110)
but=Button(er,text="CONTINUE",bg="red",width="7",height="2",command=exit_popup).place(x=190,y=165)
er.mainloop()
else :
subprocess.call(["g++","edit.cpp","soundex.cpp","spell_check.cpp"])
value = a + '\n'
value = bytes(value, 'UTF-8') # Needed in Python 3.
proc = Popen("./a.out", stdin=PIPE, stdout=PIPE)
out, err = proc.communicate(input=value)
out = str(out)
out=out[2:len(out)-2]
out=out.split(',')
for i in out:
outputT.insert(END,i+'\t')
screen = Tk()
screen.geometry("750x500")
screen.configure(bg="black")
screen.title("SPELL CHECKER")
inputL=Label(screen, text="Enter the word", width="15", height="2", font=("Calibri",15, "bold"), bg="black", fg="white")
inputL.place(x=55, y=60)
outputL=Label(screen,text="Output",width="15",height="2",font=("Calibri",15, "bold"), bg="black", fg="white")
outputL.place(x=25, y=120)
inputT=Text(screen, width=45, height="2")
inputT.place(x=215, y=62)
outputT=Text(screen, width=80, height="12")
outputT.place(x=55,y=165)
checkb=Button(text="CHECK", width="7", height="2", bg="white", command=check)
checkb.place(x=600,y=58)
clearb=Button(text="CLEAR", width="7", bg="white", height="2", command=clear)
clearb.place(x=325,y=405)
exitb=Button(text="EXIT", width="5",bg="red", height="1", command=end)
exitb.place(x=650,y=430)
screen.mainloop()
| 3.390625
| 3
|
test/test_npu/test_ne.py
|
Ascend/pytorch
| 1
|
12782984
|
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import torch
import numpy as np
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestNe(TestCase):
def cpu_op_exec_scalar(self, input1, other):
output = torch.ne(input1, other)
output = output.numpy()
return output
def npu_op_exec_scalar(self,input1, other):
output = torch.ne(input1, other)
output1 = output.to("cpu")
output2 = output1.numpy()
return output2
def cpu_op_exec(self, input1, other):
output = torch.ne(input1, other)
output = output.numpy()
return output
def npu_op_exec(self,input1, other):
output = torch.ne(input1, other)
output = output.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_(self,input1, other):
torch.ne_(input1,other)
output = input1.numpy()
return output
def npu_op_exec_(self,input1, other):
torch.ne_(input1, other)
output = input1.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_scalar_(self,input1, other):
torch.ne_(input1,other)
output = input1.numpy()
return output
def npu_op_exec_scalar_(self,input1, other):
torch.ne_(input1, other)
output = input1.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_scalar_out(self,input1,other, out):
torch.ne(input1,other, out=out)
output = out.numpy()
return output
def npu_op_exec_scalar_out(self,input1, other, out):
torch.ne(input1, other, out=out)
output = out.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_out(self,input1,other, out):
torch.ne(input1,other, out=out)
output = out.numpy()
return output
def npu_op_exec_out(self,input1, other, out):
torch.ne(input1, other, out=out)
output = out.to("cpu")
output = output.numpy()
return output
def test_ne_scalar_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2,4, 3)], 3],
[[np.float32, 3, (2, 3)], 2],
[[np.float32, 0, (3, 2)], 8],
[[np.int8, 0 , (4, 3)],3],
[[np.uint8, -1, (2,4, 3)],3],
[[np.int32, 0, (2, 6)],6]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_output = self.cpu_op_exec_scalar(cpu_input1, item[1])
npu_output = self.npu_op_exec_scalar(npu_input1, item[1])
self.assertRtolEqual(cpu_output, npu_output)
def test_ne_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2, 4, 3)], [np.float32,0 , (2, 4, 3)]],
[[np.float32, 3, (2, 3)], [np.float32, 3, (2, 3)]],
[[np.float32, 0, (3, 2)], [np.float32, 0, (3, 2)]],
[[np.int8, 0 , (4, 3)], [np.int8, 0 , (4, 3)]],
[[np.uint8, -1, (2,4, 3)], [np.uint8, -1, (2,4, 3)]],
[[np.int32, 0, (2, 6)], [np.int32, 0, (2, 6)]],
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_input2, npu_input2 = create_common_tensor(item[1], 1, 10)
cpu_output = self.cpu_op_exec(cpu_input1, cpu_input2)
npu_output = self.npu_op_exec(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
def test_ne_scalar_out_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2, 4, 3)], 2, [np.bool, 0 , (2, 4, 3)]],
[[np.float32, 3, (2, 3)], 3, [np.bool, -1, (2, 3)]],
[[np.float32, 0, (3, 2)], 4, [np.bool, 0, (3, 2)]],
[[np.int8, 0 , (4, 3)], 5, [np.bool, 0 , (4, 3)]],
[[np.uint8, -1, (2,4, 3)], 6, [np.bool, -1, (2,4, 3)]],
[[np.int32, 0, (2, 6)], 7, [np.bool, 0, (2, 6)]]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_out, npu_out = create_common_tensor(item[2], 1, 10)
cpu_output = self.cpu_op_exec_scalar_out(cpu_input1, item[1], cpu_out)
npu_output = self.npu_op_exec_scalar_out(npu_input1, item[1], npu_out)
self.assertRtolEqual(cpu_output, npu_output)
def test_ne_out_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2, 4, 3)], [np.float32,0 , (2, 4, 3)], [np.bool, 0 , (2, 4, 3)]],
[[np.float32, 3, (2, 3)], [np.float32, 3, (2, 3)], [np.bool, -1, (2, 3)]],
[[np.float32, 0, (3, 2)], [np.float32, 0, (3, 2)], [np.bool, 0, (3, 2)]],
[[np.int8, 0 , (4, 3)], [np.int8, 0 , (4, 3)], [np.bool, 0 , (4, 3)]],
[[np.uint8, -1, (2,4, 3)], [np.uint8, -1, (2,4, 3)], [np.bool, -1, (2,4, 3)]],
[[np.int32, 0, (2, 6)], [np.int32, 0, (2, 6)], [np.bool, 0, (2, 6)]]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_input2, npu_input2 = create_common_tensor(item[1], 1, 10)
cpu_out, npu_out = create_common_tensor(item[2], 1, 10)
cpu_output = self.cpu_op_exec_out(cpu_input1, cpu_input2, cpu_out)
npu_output = self.npu_op_exec_out(npu_input1, npu_input2, npu_out)
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestNe, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
| 2.109375
| 2
|
web.py
|
mukul-git/dbms
| 1
|
12782985
|
<reponame>mukul-git/dbms
from flask import Flask, escape, request
app = Flask(__name__)
@app.route('/')
def hello():
name = request.args.get("name", "World")
return f'Hello, {escape(name)}!'
@app.route('/stats')
def stats():
name = request.args.get("table", None)
if name is not None:
return f'Table {escape(name)} stats'
return 'All tables...'
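# Minimal usage sketch (assumes the standard Flask CLI; host/port are the defaults):
#     FLASK_APP=web.py flask run
#     curl "http://127.0.0.1:5000/?name=Ada"          -> Hello, Ada!
#     curl "http://127.0.0.1:5000/stats?table=users"  -> Table users stats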
| 2.265625
| 2
|
Lib/site-packages/win32/Demos/win32console_demo.py
|
raychorn/svn_Python-2.5.1
| 0
|
12782986
|
<gh_stars>0
import win32console, win32con
import traceback, time
virtual_keys={}
for k,v in win32con.__dict__.items():
if k.startswith('VK_'):
virtual_keys[v]=k
free_console=True
try:
win32console.AllocConsole()
except win32console.error, err_tuple:
if err_tuple[0]!=5:
raise
## only free console if one was created successfully
free_console=False
stdout=win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
stdin=win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)
newbuffer=win32console.CreateConsoleScreenBuffer()
newbuffer.SetConsoleActiveScreenBuffer()
newbuffer.SetConsoleTextAttribute(win32console.FOREGROUND_RED|win32console.FOREGROUND_INTENSITY
|win32console.BACKGROUND_GREEN|win32console.BACKGROUND_INTENSITY)
newbuffer.WriteConsole('This is a new screen buffer\n')
## test setting screen buffer and window size
## screen buffer size cannot be smaller than window size
window_size=newbuffer.GetConsoleScreenBufferInfo()['Window']
coord=win32console.PyCOORDType(X=window_size.Right+20, Y=window_size.Bottom+20)
newbuffer.SetConsoleScreenBufferSize(coord)
window_size.Right+=10
window_size.Bottom+=10
newbuffer.SetConsoleWindowInfo(Absolute=True,ConsoleWindow=window_size)
## write some records to the input queue
x=win32console.PyINPUT_RECORDType(win32console.KEY_EVENT)
x.Char=u'X'
x.KeyDown=True
x.RepeatCount=1
x.VirtualKeyCode=0x58
x.ControlKeyState=win32con.SHIFT_PRESSED
z=win32console.PyINPUT_RECORDType(win32console.KEY_EVENT)
z.Char=u'Z'
z.KeyDown=True
z.RepeatCount=1
z.VirtualKeyCode=0x5a
z.ControlKeyState=win32con.SHIFT_PRESSED
stdin.WriteConsoleInput([x,z,x])
newbuffer.SetConsoleTextAttribute(win32console.FOREGROUND_RED|win32console.FOREGROUND_INTENSITY
|win32console.BACKGROUND_GREEN|win32console.BACKGROUND_INTENSITY)
newbuffer.WriteConsole('Press some keys, click some characters with the mouse\n')
newbuffer.SetConsoleTextAttribute(win32console.FOREGROUND_BLUE|win32console.FOREGROUND_INTENSITY
|win32console.BACKGROUND_RED|win32console.BACKGROUND_INTENSITY)
newbuffer.WriteConsole('Hit "End" key to quit\n')
breakout=False
while not breakout:
input_records=stdin.ReadConsoleInput(10)
for input_record in input_records:
if input_record.EventType==win32console.KEY_EVENT:
if input_record.KeyDown:
if input_record.Char=='\0':
newbuffer.WriteConsole(virtual_keys.get(input_record.VirtualKeyCode, 'VirtualKeyCode: %s' %input_record.VirtualKeyCode))
else:
newbuffer.WriteConsole(input_record.Char)
if input_record.VirtualKeyCode==win32con.VK_END:
breakout=True
break
elif input_record.EventType==win32console.MOUSE_EVENT:
if input_record.EventFlags==0: ## 0 indicates a button event
if input_record.ButtonState!=0: ## exclude button releases
pos=input_record.MousePosition
# switch the foreground and background colors of the character that was clicked
attr=newbuffer.ReadConsoleOutputAttribute(Length=1, ReadCoord=pos)[0]
new_attr=attr
if attr&win32console.FOREGROUND_BLUE:
new_attr=(new_attr&~win32console.FOREGROUND_BLUE)|win32console.BACKGROUND_BLUE
if attr&win32console.FOREGROUND_RED:
new_attr=(new_attr&~win32console.FOREGROUND_RED)|win32console.BACKGROUND_RED
if attr&win32console.FOREGROUND_GREEN:
new_attr=(new_attr&~win32console.FOREGROUND_GREEN)|win32console.BACKGROUND_GREEN
if attr&win32console.BACKGROUND_BLUE:
new_attr=(new_attr&~win32console.BACKGROUND_BLUE)|win32console.FOREGROUND_BLUE
if attr&win32console.BACKGROUND_RED:
new_attr=(new_attr&~win32console.BACKGROUND_RED)|win32console.FOREGROUND_RED
if attr&win32console.BACKGROUND_GREEN:
new_attr=(new_attr&~win32console.BACKGROUND_GREEN)|win32console.FOREGROUND_GREEN
newbuffer.WriteConsoleOutputAttribute((new_attr,),pos)
else:
newbuffer.WriteConsole(str(input_record))
time.sleep(0.1)
stdout.SetConsoleActiveScreenBuffer()
newbuffer.Close()
if free_console:
win32console.FreeConsole()
| 2.328125
| 2
|
filters/puzzle.py
|
s3h10r/egw-plugins
| 1
|
12782987
|
<filename>filters/puzzle.py
#!/usr/bin/env python
#coding=utf-8
"""
usage example:
egw --generator psychedelic -o /tmp/2.png --filter puzzle -c ./einguteswerkzeug/einguteswerkzeug.conf --template ./einguteswerkzeug/templates/square/roland-deason-tPWHTBzQVIM-unsplash.jpg -s 200 --alpha-blend 0.8 --border-size 1 --nopolaroid --noframe --title="hallo" && feh /tmp/2.png
egw --generator psychedelic -o /tmp/2.png --filter puzzle -c ./einguteswerkzeug/einguteswerkzeug.conf --template ./einguteswerkzeug/templates/square/roland-deason-tPWHTBzQVIM-unsplash.jpg -s 200 --alpha-blend 0.8 --border-size 0.8 --nopolaroid --noframe --title="hallo" && feh /tmp/2.png
"""
import logging
import math
import random
import string
import sys
from PIL import Image, ImageDraw
from PIL import Image
from einguteswerkzeug.plugins import EGWPluginFilter
# --- configure logging
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
handler = logging.StreamHandler() # console-handler
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
# ---
fmeta = {
"name" : "puzzle",
"version" : "0.0.8",
"description" : "",
"author" : ""
}
class Puzzle(EGWPluginFilter):
def __init__(self, **kwargs):
super().__init__(**fmeta)
# defining mandatory kwargs (in addition to those of the base class)
add_kwargs = { 'seed' : random.randrange(sys.maxsize), 'block_size' : 100}
self._define_mandatory_kwargs(self, **add_kwargs)
self.kwargs = kwargs
def _generate_image(self):
return _puzzle(**self.kwargs)
filter = Puzzle()
assert isinstance(filter,EGWPluginFilter)
def _puzzle(image = None, seed = None, block_size = 100):
"""
10 <= block_size >= 2000
"""
min_block_size = 10
max_block_size = 2000
if not seed:
seed = random.randrange(sys.maxsize)
random.seed(seed)
log.info("seed: {}".format(seed))
im = image
width, height = im.size
if width != height:
raise Exception("sorry, only square-sized image can be processed.")
# --- adjusting block_size if necessary
if width % block_size != 0:
log.warning("{} % {} = {}. => adjusting blocksize.".format(width, block_size, width % block_size))
block_size_orig = block_size
while (width % block_size != 0) and block_size <= max_block_size:
block_size += 1
if (width % block_size != 0):
block_size = block_size_orig
while (width % block_size != 0) and (block_size >= min_block_size):
block_size -= 1
assert(width % block_size == 0)
log.info("block_size adjusted to: {}".format(block_size))
# ---
im2 = Image.new("RGB", (width, height), "black")
blocks = []
for x in range(int(width / block_size)):
for y in range(int(height / block_size)):
blocks.append(im.crop((x * block_size, y * block_size, (x + 1) * block_size, (y + 1) * block_size)))
random.shuffle(blocks)
for x in range(int(width / block_size)):
for y in range(int(height / block_size)):
im2.paste(blocks.pop().rotate(90 * random.randint(0,3)), (x * block_size, y * block_size))
return im2
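# Minimal usage sketch of the underlying filter function (illustrative only):
#     from PIL import Image
#     im = Image.open("square_input.jpg")   # must be a square image
#     out = _puzzle(image=im, block_size=100)
#     out.save("puzzled.jpg")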
| 2.09375
| 2
|
solutions/python3/1202.py
|
sm2774us/amazon_interview_prep_2021
| 42
|
12782988
|
<filename>solutions/python3/1202.py
import collections
from typing import List
class Solution:
def smallestStringWithSwaps(self, s: str, pairs: List[List[int]]) -> str:
class UF:
def __init__(self, n): self.p = list(range(n))
def union(self, x, y): self.p[self.find(x)] = self.find(y)
def find(self, x):
if x != self.p[x]: self.p[x] = self.find(self.p[x])
return self.p[x]
uf, res, m = UF(len(s)), [], collections.defaultdict(list)
for x,y in pairs:
uf.union(x,y)
for i in range(len(s)):
m[uf.find(i)].append(s[i])
for comp_id in m.keys():
m[comp_id].sort(reverse=True)
for i in range(len(s)):
res.append(m[uf.find(i)].pop())
return ''.join(res)
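# Minimal usage sketch (illustrative only): union-find groups the swappable
# indices, then each group hands out its characters in sorted order.
#     Solution().smallestStringWithSwaps("dcab", [[0, 3], [1, 2]])  # -> "bacd"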
| 3.359375
| 3
|
pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_python_scalar.py
|
wangyum/anaconda
| 0
|
12782989
|
<filename>pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_python_scalar.py<gh_stars>0
import sys
import unittest
from dynd import nd, ndt
from datetime import date
if sys.version_info >= (3, 0):
unicode = str
class TestPythonScalar(unittest.TestCase):
def test_bool(self):
# Boolean true/false
a = nd.array(True)
self.assertEqual(nd.type_of(a), ndt.bool)
self.assertEqual(type(nd.as_py(a)), bool)
self.assertEqual(nd.as_py(a), True)
a = nd.array(False)
self.assertEqual(nd.type_of(a), ndt.bool)
self.assertEqual(type(nd.as_py(a)), bool)
self.assertEqual(nd.as_py(a), False)
def test_int(self):
# Integer that fits in 32 bits
a = nd.array(10)
self.assertEqual(nd.type_of(a), ndt.int32)
self.assertEqual(type(nd.as_py(a)), int)
self.assertEqual(nd.as_py(a), 10)
a = nd.array(-2000000000)
self.assertEqual(nd.type_of(a), ndt.int32)
self.assertEqual(type(nd.as_py(a)), int)
self.assertEqual(nd.as_py(a), -2000000000)
# Integer that requires 64 bits
a = nd.array(2200000000)
self.assertEqual(nd.type_of(a), ndt.int64)
self.assertEqual(nd.as_py(a), 2200000000)
a = nd.array(-2200000000)
self.assertEqual(nd.type_of(a), ndt.int64)
self.assertEqual(nd.as_py(a), -2200000000)
def test_float(self):
# Floating point
a = nd.array(5.125)
self.assertEqual(nd.type_of(a), ndt.float64)
self.assertEqual(type(nd.as_py(a)), float)
self.assertEqual(nd.as_py(a), 5.125)
def test_complex(self):
# Complex floating point
a = nd.array(5.125 - 2.5j)
self.assertEqual(nd.type_of(a), ndt.complex_float64)
self.assertEqual(type(nd.as_py(a)), complex)
self.assertEqual(nd.as_py(a), 5.125 - 2.5j)
def test_string(self):
a = nd.array('abcdef')
self.assertEqual(nd.type_of(a), ndt.string)
self.assertEqual(type(nd.as_py(a)), unicode)
self.assertEqual(nd.as_py(a), u'abcdef')
a = nd.array(u'abcdef')
self.assertEqual(nd.type_of(a), ndt.string)
self.assertEqual(type(nd.as_py(a)), unicode)
self.assertEqual(nd.as_py(a), u'abcdef')
def test_utf_encodings(self):
# Ensure all of the UTF encodings work ok for a basic string
x = u'\uc548\ub155 hello'
# UTF-8
a = nd.array(x)
a = a.cast(ndt.make_fixed_string(16, 'utf_8'))
a = a.eval()
self.assertEqual(nd.type_of(a), ndt.make_fixed_string(16, 'utf_8'))
self.assertEqual(type(nd.as_py(a)), unicode)
self.assertEqual(nd.as_py(a), x)
# UTF-16
a = nd.array(x)
a = a.cast(ndt.make_fixed_string(8, 'utf_16'))
a = a.eval()
self.assertEqual(nd.type_of(a), ndt.make_fixed_string(8, 'utf_16'))
self.assertEqual(type(nd.as_py(a)), unicode)
self.assertEqual(nd.as_py(a), x)
# UTF-32
a = nd.array(x)
a = a.cast(ndt.make_fixed_string(8, 'utf_32'))
a = a.eval()
self.assertEqual(nd.type_of(a), ndt.make_fixed_string(8, 'utf_32'))
self.assertEqual(type(nd.as_py(a)), unicode)
self.assertEqual(nd.as_py(a), x)
def test_len(self):
# Can't get the length of a zero-dimensional dynd array
a = nd.array(10)
self.assertRaises(ValueError, len, a)
if __name__ == '__main__':
unittest.main()
| 2.609375
| 3
|
exalt/search.py
|
DeepDarkOdyssey/exalt
| 0
|
12782990
|
<reponame>DeepDarkOdyssey/exalt<gh_stars>0
from typing import Iterable, Union, List, TypeVar
from collections import OrderedDict
import re
Node = TypeVar("TrieNode")
class TrieNode(object):
def __init__(self, key: str = ""):
self.key = key
self.next = {}
self.is_word = False
@property
def is_leaf(self):
return len(self.next) == 0
def insert(self, word: str):
node = self
for char in word:
if char not in node.next:
node.next[char] = TrieNode(char)
node = node.next[char]
node.is_word = True
def insert_many(self, words: Iterable[str]):
for word in words:
self.insert(word)
def find(self, target: str):
node = self
for char in target:
if char not in node.next:
return False
node = node.next[char]
if node.is_word:
return "FullMatch"
else:
return "Partial"
def delete(self, target: str):
nodes = []
node = self
for char in target:
if char not in node.next:
return False
node = node.next[char]
nodes.append(node)
node = nodes.pop(-1)
if not node.is_word:
return False
else:
node.is_word = False
removed_word = []
while True:
try:
node = nodes.pop(-1)
except IndexError:
break
if node.is_leaf:
removed_word.insert(0, nodes[-1].next.pop(node.key).key)
return removed_word
def show(self, word: str = ""):
if self.is_word:
print(word)
for node in self.next.values():
node.show(word + node.key)
class ACNode(TrieNode):
def __init__(self, key: str = "", depth: int = 0):
super().__init__(key)
self.depth = depth
self.fail = None
def insert(self, word: str):
curr = self
for char in word:
if char not in curr.next:
curr.next[char] = ACNode(char, curr.depth + 1)
curr = curr.next[char]
curr.is_word = True
class ACAutomaton(object):
def __init__(self, words: Iterable[str]):
self.root = ACNode()
self.root.insert_many(words)
self.add_fails()
def add_fails(self):
queue = []
for node in self.root.next.values():
node.fail = self.root
queue.append(node)
while len(queue) > 0:
curr: ACNode = queue.pop(0)
fail_to = curr.fail
for key, node in curr.next.items():
while True:
if fail_to is not None and key in fail_to.next:
node.fail = fail_to.next[key]
break
elif fail_to is None:
node.fail = self.root
break
else:
fail_to = fail_to.fail
queue.append(node)
def search(self, target: str):
result = []
curr = self.root
i = 0
while i < len(target):
char = target[i]
if char in curr.next:
curr = curr.next[char]
if curr.is_word:
result.append((i - curr.depth + 1, i))
i += 1
else:
if curr.fail is None:
curr = self.root
i += 1
else:
curr = curr.fail
return result
class FuzzyACAutomaton(ACAutomaton):
def __init__(self, words: Iterable[str], verbose=False):
super().__init__(words)
self.verbose = verbose
def search(self, target: str, max_skip: int = 2):
result = []
curr = self.root
matched_chars = []
num_chars_skipped = []
num_depth_skipped = []
i = 0
while i < len(target):
should_fail = True
char = target[i]
if char in curr.next:
should_fail = False
curr = curr.next[char]
assert target[i] == curr.key
matched_chars.append(char)
num_chars_skipped.append(0)
num_depth_skipped.append(0)
if self.verbose:
print(f"id:{i}\tkey:{curr.key}\tdepth:{curr.depth}\tmatched_chars:{matched_chars}\tnum_chars_skipped:{num_chars_skipped}\tnum_depth_skipped:{num_depth_skipped}")
if curr.is_word:
if self.verbose:
print('MATCHED!***********************')
result.append(
(
i - len(matched_chars), i,
"".join((node.key for node in curr.lineage[1:])),
)
)
matched_chars.clear()
num_chars_skipped.clear()
num_depth_skipped.clear()
i += 1
elif curr.depth >= 2:
previews = target[i : i + max_skip + 1]
wildcard = {}
nodes = list(curr.next.values())
for _ in range(max_skip + 1):
buffer = []
for node in nodes:
if node.key not in wildcard:
wildcard[node.key] = node
for k, n in node.next.items():
if k not in wildcard:
buffer.append(n)
nodes = buffer
preview_matched_chars = []
preview_num_chars_skipped = []
preview_num_depth_skipped = []
for j, p in enumerate(previews):
if p in wildcard:
should_fail = False
prev_depth = curr.depth
i += j
curr = wildcard[p]
assert target[i] == curr.key
preview_matched_chars.append(curr.key)
preview_num_chars_skipped.append(0)
preview_num_depth_skipped.append(curr.depth - prev_depth -1)
if self.verbose:
print(f"id:{i}\tkey:{curr.key}\tdepth:{curr.depth}\tmatched_chars:{matched_chars}\tnum_chars_skipped:{num_chars_skipped}\tnum_depth_skipped:{num_depth_skipped}")
if curr.is_word:
if self.verbose:
print('MATCHED!***********************')
print(''.join([node.key for node in curr.lineage[1:]]))
matched_chars.extend(preview_matched_chars)
num_chars_skipped.extend(preview_num_chars_skipped)
num_depth_skipped.extend(preview_num_depth_skipped)
result.append(
(
i - len(matched_chars), i,
"".join((node.key for node in curr.lineage[1:])),
)
)
matched_chars.clear()
num_chars_skipped.clear()
num_depth_skipped.clear()
i += 1
break
else:
preview_matched_chars.append('')
preview_num_chars_skipped.append(1)
preview_num_depth_skipped.append(0)
if should_fail:
if self.verbose:
print(
f"Match Failed\tchar:{char}\tnext:{list(curr.next.keys())}\tLineage:{[node.key for node in curr.lineage[1:]]}"
)
curr = curr.fail
if curr is None or curr.is_root:
if self.verbose:
print("Restart matching!")
curr = self.root
matched_chars.clear()
num_chars_skipped.clear()
num_depth_skipped.clear()
i += 1
return result
def brute_force(target: str, pattern: str) -> int:
for i in range(len(target)):
j = 0
while j < len(pattern) and i < len(target):
if pattern[j] == target[i]:
i += 1
j += 1
else:
break
if j == len(pattern):
return i - j
class KMP(object):
def __init__(self, pattern: str):
next_array = [-1] * len(pattern)
i, j = 0, -1
while i < len(pattern) - 1:
if j == -1 or pattern[i] == pattern[j]:
i += 1
j += 1
next_array[i] = j
else:
j = next_array[j]
self.pattern = pattern
self.next = next_array
print(self.next)
def match(self, target: str) -> int:
i, j = 0, 0
while i < len(target) and j < len(self.pattern):
if j == -1 or target[i] == self.pattern[j]:
i += 1
j += 1
else:
j = self.next[j]
if j == len(self.pattern):
return i - j
if __name__ == "__main__":
# words = "banana bananas bandana band apple all beast".split()
# root = TrieNode()
# root.insert_many(words)
# root.show()
# root.show_tree()
# print('*' * 50)
# root.delete('bandana')
# root.show()
import time
words = ["abd", "abdk", "abchijn", "chnit", "ijabdf", "ijaij"]
automaton = FuzzyACAutomaton(words)
# automaton = ACAutomaton(words)
# automaton.root.show()
target = "abchnijab dfk"
tick = time.time()
for start, end in automaton.search(target):
print(target[start : end + 1])
tock = time.time()
print(f'{tock - tick:.4f}s')
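# Minimal KMP usage sketch (illustrative only; match() returns None when there is no occurrence):
#     kmp = KMP("abd")
#     kmp.match("xxabdy")   # -> 2, index of the first occurrence
#     kmp.match("xyz")      # -> None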
| 2.8125
| 3
|
tests/potential/test__get_symbol_pairs.py
|
eragasa/mexm-base
| 1
|
12782991
|
<reponame>eragasa/mexm-base
import pytest
from collections import OrderedDict
from mexm.potential import get_symbol_pairs
cases = OrderedDict()
cases['1sym_str'] = OrderedDict([
('symbols','Ni'),
('expected_pairs',[['Ni','Ni']])
])
cases['1sym_list'] = OrderedDict([
('symbols', ['Ni']),
('expected_pairs',[['Ni','Ni']])
])
cases['2sym_list'] = OrderedDict([
('symbols', ['Ni','Al']),
('expected_pairs',[ ['Ni', 'Ni'],
['Ni', 'Al'],
['Al', 'Al'] ])
])
cases['3sym_list'] = OrderedDict([
('symbols', ['Fe', 'Ni', 'Cr']),
('expected_pairs',[['Fe', 'Fe'],
['Fe', 'Ni'],
['Fe', 'Cr'],
['Ni', 'Ni'],
['Ni', 'Cr'],
['Cr', 'Cr'] ])
])
@pytest.mark.parametrize(
"symbols,expected_pairs",
[tuple(v for v in case.values()) for case in cases.values()]
)
def test__get_symbol_pairs(symbols, expected_pairs):
assert expected_pairs == get_symbol_pairs(symbols)
if __name__ == "__main__":
pass
| 2.265625
| 2
|
assessment_plan_modeling/ap_parsing/augmentation_lib_test.py
|
pedersor/google-research
| 0
|
12782992
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for augmentation_lib."""
from typing import Tuple
from absl.testing import absltest
from assessment_plan_modeling.ap_parsing import ap_parsing_lib
from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib
def tuple_fragment(fragment):
return (str(fragment.labeled_char_span), fragment.text, fragment.prefix_delim,
fragment.suffix_delim)
def fragments_tuple(cluster):
return tuple(
set([tuple_fragment(fragment) for fragment in cluster.fragments]))
class StructuredAPTest(absltest.TestCase):
def test_build(self):
ap = "\n".join(
["50 yo m with hx of dm2, copd", "dm2: on insulin", "- RISS"])
labeled_char_spans = [
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=29,
end_char=32),
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION,
start_char=34,
end_char=44),
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
start_char=47,
end_char=51),
]
expected = aug_lib.StructuredAP(
prefix_text="50 yo m with hx of dm2, copd",
problem_clusters=[
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=29,
end_char=32),
text="dm2",
prefix_delim=aug_lib._DefaultDelims.PROBLEM_TITLE_PREFIX,
suffix_delim=aug_lib._DefaultDelims.PROBLEM_TITLE_SUFFIX),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=34,
end_char=44),
text="on insulin",
prefix_delim=aug_lib._DefaultDelims
.PROBLEM_DESCRIPTION_PREFIX,
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
start_char=47,
end_char=51),
text="RISS",
prefix_delim=aug_lib._DefaultDelims.ACTION_ITEM_PREFIX,
suffix_delim=""),
])
])
structured_ap = aug_lib.StructuredAP.build(ap, labeled_char_spans)
self.assertEqual(structured_ap, expected)
def test_compile(self):
structured_ap = aug_lib.StructuredAP(
prefix_text="50 yo m with hx of dm2, copd",
problem_clusters=[
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=29,
end_char=32),
text="dm2",
prefix_delim="\n*. ",
suffix_delim=": "),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=34,
end_char=44),
text="on insulin",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
start_char=47,
end_char=51),
text="RISS",
prefix_delim="\n- ",
suffix_delim=""),
])
])
expected = "50 yo m with hx of dm2, copd\n*. dm2: on insulin\n- RISS"
result, _ = structured_ap.compile()
self.assertEqual(result, expected)
def test_compile_with_labels(self):
structured_ap = aug_lib.StructuredAP(
prefix_text="50 yo m with hx of dm2, copd",
problem_clusters=[ # spans are kept from *original* text.
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=29,
end_char=32),
text="dm2",
prefix_delim="\n*. ",
suffix_delim=": "),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=34,
end_char=44),
text="on insulin",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
start_char=47,
end_char=51),
text="RISS",
prefix_delim="\n- ",
suffix_delim=""),
])
])
expected = (
"50 yo m with hx of dm2, copd\n*. dm2: on insulin\n- RISS",
[
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=32,
end_char=35), # span_text="dm2"
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION,
start_char=37,
end_char=47), # span_text="on insulin"
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
start_char=50,
end_char=54), # span_text="RISS"
])
result_ap_text, result_labeled_char_spans = structured_ap.compile()
self.assertEqual((result_ap_text, result_labeled_char_spans), expected)
class AugmentationsTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.problem_clusters = [
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=29,
end_char=32),
text="dm2",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=34,
end_char=44),
text="on insulin",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType.MEDICATIONS,
start_char=47,
end_char=51),
text="RISS",
prefix_delim="",
suffix_delim="")
]),
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=52,
end_char=58),
text="anemia",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType
.OBSERVATIONS_LABS,
start_char=59,
end_char=64),
text="trend",
prefix_delim="",
suffix_delim="")
]),
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=65,
end_char=69),
text="COPD",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType.MEDICATIONS,
start_char=70,
end_char=74),
text="nebs",
prefix_delim="",
suffix_delim="")
]),
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=75,
end_char=81),
text="sepsis",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=82,
end_char=93),
text="dd pna, uti",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=94,
end_char=117),
text="yesterday without fever",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType.MEDICATIONS,
start_char=118,
end_char=127),
text="cont. abx",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType
.OBSERVATIONS_LABS,
start_char=128,
end_char=131),
text="cis",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType.CONSULTS,
start_char=132,
end_char=142),
text="id consult",
prefix_delim="",
suffix_delim="")
])
]
self.ap = aug_lib.StructuredAP(
problem_clusters=self.problem_clusters, prefix_text="")
def test_shuffle_clusters(self):
aug = aug_lib.ShuffleClusters()
augmented_ap = aug(self.ap, seed=0)
set_problem_clusters = set(
[fragments_tuple(cluster) for cluster in self.problem_clusters])
set_aug_clusters = set(
[fragments_tuple(cluster) for cluster in augmented_ap.problem_clusters])
self.assertEqual(set_problem_clusters, set_aug_clusters)
def test_shuffle_fragments(self):
aug = aug_lib.ShuffleFragments()
augmented_ap = aug(self.ap, seed=0)
self.assertEqual(
fragments_tuple(self.problem_clusters[0]),
fragments_tuple(augmented_ap.problem_clusters[0]))
def test_number_title_augmentation(self):
aug = aug_lib.NumberTitlesAugmentation(["\n{:d})"])
augmented_ap = aug(self.ap, seed=0)
expected = self.ap
for i, cluster in enumerate(expected.problem_clusters):
cluster.fragments[0].prefix_delim = f"\n{i+1})"
self.assertEqual(expected, augmented_ap)
def test_change_delim_augmentation(self):
aug = aug_lib.ChangeDelimAugmentation(
fragment_types=[
ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION,
ap_parsing_lib.LabeledSpanType.ACTION_ITEM
],
delims=["*"])
augmented_ap = aug(self.ap, seed=0)
expected = self.ap
for cluster in expected.problem_clusters:
for fragment in cluster.fragments:
fragment.prefix_delim = "*"
self.assertEqual(expected, augmented_ap)
def test_apply_augmentations(self):
augs = aug_lib.AugmentationSequence(
name="test",
augmentation_sequence=[
aug_lib.NumberTitlesAugmentation(["\n{}."]),
aug_lib.ChangeDelimAugmentation(
[ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION],
["\n-- "]),
aug_lib.ChangeDelimAugmentation(
[ap_parsing_lib.LabeledSpanType.ACTION_ITEM], ["\n--- "])
])
results = aug_lib.apply_augmentations(self.ap, augs, seed=0)
expected = self.ap
for i, cluster in enumerate(expected.problem_clusters):
for fragment in cluster.fragments:
if fragment.labeled_char_span.span_type == ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE:
prefix_delim = f"\n{i+1}."
elif fragment.labeled_char_span.span_type == ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION:
prefix_delim = "\n-- "
elif fragment.labeled_char_span.span_type == ap_parsing_lib.LabeledSpanType.ACTION_ITEM:
prefix_delim = "\n--- "
fragment.prefix_delim = prefix_delim
self.assertEqual(expected, results)
if __name__ == "__main__":
absltest.main()
| 2.234375
| 2
|
INEGIpy/setup.py
|
andreslomeliv/DatosMex
| 1
|
12782993
|
<filename>INEGIpy/setup.py
# -*- coding: utf-8 -*-
from setuptools import setup
NAME = 'INEGIpy'
DESCRIPTION = 'Wrap de Python para los APIs del INEGI'
URL = 'https://github.com/andreslomeliv/DatosMex/tree/master/INEGIpy'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.1.0'
# required libraries
REQUIRED = [
'requests','pandas','matplotlib','seaborn','geopandas','shapely'
]
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=['INEGIpy'],
install_requires=REQUIRED,
license='MIT'
)
| 1.148438
| 1
|
svbench/loaders.py
|
kcleal/svbench
| 10
|
12782994
|
<reponame>kcleal/svbench
from svbench import CallSet, Col
import numpy as np
from sys import stderr
__all__ = ["load_dysgu", "load_lumpy", "load_delly", "load_manta", "load_sniffles", "load_whamg"]
def load_dysgu(pth, dataset, caller="dysgu"):
c = Col("FORMAT", "PROB", bins=np.arange(0, 1.01, 0.025))
k = Col("FILTER", op="eq", thresh=None)
print(f"Loading dysgu, stratified using {c}, keep records with {k}", file=stderr)
return CallSet(dataset=dataset, caller=caller).load_vcf(pth, stratify=c, keep=[k])
def load_lumpy(pth, dataset, caller="lumpy"):
c = Col("INFO", "SU", bins=range(0, 30, 1))
print("Loading lumpy, stratified using. (All records kept)", c, file=stderr)
return CallSet(dataset=dataset, caller=caller).load_vcf(pth, stratify=c)
def load_delly(pth, dataset, caller="delly"):
c = Col("QUAL", bins=range(0, 1500, 50))
k = Col("FILTER", op="eq", thresh=None)
print(f"Loading delly, stratified using {c}, keep records with {k}", file=stderr)
return CallSet(dataset=dataset, caller=caller).load_vcf(pth, stratify=c, keep=[k])
def load_whamg(pth, dataset, caller="whamg"):
c = Col("INFO", "A", bins=range(0, 20, 1))
k = Col("FILTER", op="eq", thresh=None)
print(f"Loading whamg, stratified using {c}, keep records with {k}", file=stderr)
return CallSet(dataset=dataset, caller=caller).load_vcf(pth, stratify=c, keep=[k])
def load_manta(pth, dataset, caller="manta"):
c = Col("QUAL", bins=range(0, 1500, 50))
k = Col("FILTER", op="eq", thresh=None)
print(f"Loading manta, stratified using {c}, keep records with {k}", file=stderr)
return CallSet(dataset=dataset, caller=caller).load_vcf(pth, stratify=c, keep=[k])
def load_sniffles(pth, dataset, caller="sniffles"):
c = Col("INFO", "RE", bins=np.arange(0, 20, 1))
k = Col("FILTER", op="eq", thresh=None)
print(f"Loading sniffles, stratified using {c}, keep records with {k}", file=stderr)
return CallSet(dataset=dataset, caller=caller).load_vcf(pth, stratify=c, keep=[k])
def get_svim(pth, dataset, caller="svim"):
c = Col("QUAL", bins=np.arange(0, 20, 1))
k = Col("FILTER", op="eq", thresh=None)
print(f"Loading svim, stratified using {c}, keep records with {k}", file=stderr)
return CallSet(dataset=dataset, caller=caller).load_vcf(pth, stratify=c, keep=[k])
| 2.078125
| 2
|
Modules/LeetCode/Task9.py
|
Itsuke/Learning-Python
| 0
|
12782995
|
<gh_stars>0
'''
https://leetcode.com/discuss/interview-question/1667337/FacebookMeta-or-Phone-Screen-or-New-Grad-or-Binary-Tree-%2B-Backtrack-problem
https://leetcode.com/problems/expression-add-operators/
123456789 = 100
Using standard integer arithmetic operators +, -, what are those different solutions you can find by inserting the operators between some digits?
EX)
-1+2-3+4+5+6+78+9
123-45-67+89 # 100
Example 1:
Input: num = "123", target = 6
Output: ["1+2+3"]
Explanation: "1+2+3" evaluate to 6.
'''
#12:51 - gave up, nothing came to my mind
'''
funtion no idea
List<String> res = new ArrayList<>();
private int helper(String nums, int index, int sum, int S, Map<String, Integer> map, String path){
String encodeString = index + "->" + sum;
if (map.containsKey(encodeString)){
return map.get(encodeString);
}
if (index == nums.length){
if (sum == S){
res.add(path);
return 1;
}else {
return 0;
}
}
int total = 0;
for(int j=index+1; j<=nums.length(); j++) {
int curNum = Integer.parseInt(nums.substring(i,j));
total += helper(nums, j, sum - curNum, S, map, path + "-" + String.valueOf(curNum));
total += helper(nums, j, sum + curNum, S, map, path + "+" + String.valueOf(curNum));
}
map.put(encodeString,total);
return total;
}
'''
# 12:56
'''
def _calculate_possible_variants(number_string, curr_num, target, sum, index, formula, formula_table)
if index == (len(number_string) - 1) and sum == target
formula_table.append[forumla]
# add
sum += curr_num
formula += "+" + number_string[index]
calculate_possible_variants(number_string, number_string[index+1], target, sum, index, formula, formula_table)
# subtraction
sum -= int(number_string[index])
formula += "-" + number_string[index]
calculate_possible_variants(number_string, number_string[index+1], target, sum, index, formula, formula_table)
# higher number
curr_num = int(str(curr_num) + number_string[index+1])
calculate_possible_variants
def calculate_possible_variants(numbers, target)
_calculate_possible_variants(numbers, int(numbers[0]), target, 0, 0, "", [])
'''
def _calculate_possible_variants(number_string, curr_num, target, my_sum, index, formula,
formula_table):
def _add(my_sum, formula, formula_table):
my_sum += int(curr_num)
formula += "+" + number_string[index]
formula_table = _calculate_possible_variants(number_string, number_string[index + 1], target, my_sum, index + 1,
formula, formula_table)
return formula_table
def _subtract(my_sum, formula, formula_table):
my_sum -= int(number_string[index])
formula += "-" + number_string[index]
formula_table = _calculate_possible_variants(number_string, number_string[index + 1], target, my_sum, index +1,
formula, formula_table)
return formula_table
def _combine_numbers(curr_num, formula, formula_table):
curr_num = int(str(curr_num) + number_string[index + 1])
formula += curr_num
formula_table = _calculate_possible_variants(number_string, number_string[index + 1], target, my_sum, index+1,
formula, formula_table)
return formula_table
_add(my_sum, formula, formula_table)
_subtract(my_sum, formula, formula_table)
_combine_numbers(curr_num, formula, formula_table)
if index == (len(number_string) - 1) and my_sum == target:
formula_table.append[forumla]
return formula_table
elif index == (len(number_string) - 1):
return formula_table
return formula_table
def calculate_possible_variants(numbers, target):
_calculate_possible_variants(numbers, int(numbers[0]), target, 0, 0, "", [])
calculate_possible_variants("123", 6)
# 13:20 - not working
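# A minimal working sketch of the digit-expression problem described above.
# This is NOT the original attempt and NOT the memoized approach from the
# Java-like notes; it is a plain backtracking solution that inserts '+', '-',
# or nothing (digit concatenation) between the digits, with an optional
# leading '-'.
def plus_minus_variants(number_string, target):
    found = []

    def backtrack(index, total, formula):
        if index == len(number_string):
            if total == target:
                found.append(formula)
            return
        # try every chunk of one or more digits starting at `index`
        for end in range(index + 1, len(number_string) + 1):
            chunk = number_string[index:end]
            value = int(chunk)
            if index == 0:
                backtrack(end, value, chunk)
                backtrack(end, -value, "-" + chunk)
            else:
                backtrack(end, total + value, formula + "+" + chunk)
                backtrack(end, total - value, formula + "-" + chunk)

    backtrack(0, 0, "")
    return found

# Example: plus_minus_variants("123", 6) -> ["1+2+3"]
# Example: "123-45-67+89" is among plus_minus_variants("123456789", 100)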
| 3.59375
| 4
|
tests/unit/core/test_test_helpers_unit.py
|
pranav-byte/tight
| 9
|
12782996
|
# Copyright (c) 2017 lululemon athletica Canada inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tight.core.test_helpers as test_helpers
from tight.providers.aws.clients import boto3_client
def test_no_boom():
assert True, 'Module can be imported.'
def test_prepare_pills_record():
test_helpers.prepare_pills('record', 'some/path', boto3_client.session())
boto3_pill = getattr(test_helpers, 'boto3_pill')
dynamo_pill = getattr(test_helpers, 'pill')
# Do it again and make sure objects are the same
test_helpers.prepare_pills('record', 'some/path', boto3_client.session())
boto3_pill_cached = getattr(test_helpers, 'boto3_pill')
dynamo_pill_cached = getattr(test_helpers, 'pill')
assert boto3_pill == boto3_pill_cached, 'boto3 pill is cached'
assert dynamo_pill == dynamo_pill_cached, 'dynamo pill is cached'
def test_prepare_pills_playback():
test_helpers.prepare_pills('playback', 'some/path', boto3_client.session())
boto3_pill = getattr(test_helpers, 'boto3_pill')
dynamo_pill = getattr(test_helpers, 'pill')
# Do it again and make sure objects are the same
test_helpers.prepare_pills('playback', 'some/path', boto3_client.session())
boto3_pill_cached = getattr(test_helpers, 'boto3_pill')
dynamo_pill_cached = getattr(test_helpers, 'pill')
assert boto3_pill == boto3_pill_cached, 'boto3 pill is cached'
assert dynamo_pill == dynamo_pill_cached, 'dynamo pill is cached'
def test_placebos_path_playback():
result = test_helpers.placebos_path('/some/absolute/path.py', 'my_namespace')
assert result == '/some/absolute/placebos/my_namespace'
def test_placebos_path_record(tmpdir):
test_file = '{}/some_test.py'.format(tmpdir)
with open(test_file, 'w') as tmp_test_file:
tmp_test_file.write('')
tmpdir.mkdir('placebos')
result = test_helpers.placebos_path(test_file, 'some_test', mode='record')
assert result == '{}/placebos/some_test'.format(tmpdir)
assert os.path.isdir(result), 'Namespaced placebos directory exists'
def test_placebos_path_record_placebos_exist(tmpdir):
test_file = '{}/some_test.py'.format(tmpdir)
with open(test_file, 'w') as tmp_test_file:
tmp_test_file.write('')
tmpdir.mkdir('placebos')
result = test_helpers.placebos_path(test_file, 'some_test', mode='record')
assert result == '{}/placebos/some_test'.format(tmpdir)
assert os.path.isdir(result), 'Namespaced placebos directory exists'
disappearing_file = '{}/i_should_not_exist.txt'.format(result)
with open(disappearing_file, 'w') as file_to_make_disappear:
file_to_make_disappear.write('make me disappear')
assert os.listdir(result)[0] == 'i_should_not_exist.txt'
result2 = test_helpers.placebos_path(test_file, 'some_test', mode='record')
assert len(os.listdir(result2)) == 0
| 1.984375
| 2
|
relevanceai/dataset/io/export/dict.py
|
RelevanceAI/RelevanceAI
| 21
|
12782997
|
from relevanceai.utils.decorators.analytics import track
from relevanceai.dataset.read import Read
class DictExport(Read):
@track
def to_dict(self, orient: str = "records"):
"""
Returns the raw list of dicts from Relevance AI
Parameters
----------
None
Returns
-------
list of documents in dictionary format
Example
-------
.. code-block::
from relevanceai import Client
client = Client()
dataset_id = "sample_dataset_id"
df = client.Dataset(dataset_id)
dict = df.to_dict(orient="records")
"""
if orient == "records":
return self.get_all_documents()
else:
raise NotImplementedError
| 3.03125
| 3
|
models/modules.py
|
LaudateCorpus1/learning-compressible-subspaces
| 6
|
12782998
|
<filename>models/modules.py
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2021 Apple Inc. All Rights Reserved.
#
from typing import Union
import torch
import torch.nn as nn
import torch.nn.functional as F
# Convolutions
StandardConv = nn.Conv2d
class SubspaceConv(nn.Conv2d):
def forward(self, x):
# call get_weight, which samples from the subspace, then use the
# corresponding weight.
w = self.get_weight()
x = F.conv2d(
x,
w,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
return x
class TwoParamConv(SubspaceConv):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.weight1 = nn.Parameter(torch.zeros_like(self.weight))
def initialize(self, initialize_fn):
initialize_fn(self.weight)
initialize_fn(self.weight1)
class LinesConv(TwoParamConv):
def get_weight(self):
w = (1 - self.alpha) * self.weight + self.alpha * self.weight1
return w
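# Note (editorial sketch, not from the original file): the Lines* modules read an
# `alpha` attribute that is assumed to be assigned externally, e.g. by the training
# loop, before each forward pass; get_weight() then samples the point
# w = (1 - alpha) * weight + alpha * weight1 on the line between the two weight
# endpoints. A runnable sketch of this is given at the end of this file.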
# BatchNorms
StandardBN = nn.BatchNorm2d
class SubspaceBN(nn.BatchNorm2d):
def forward(self, input):
# call get_weight, which samples from the subspace, then use the
# corresponding weight.
w, b = self.get_weight()
# The rest is code in the PyTorch source forward pass for batchnorm.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
if self.num_batches_tracked is not None:
self.num_batches_tracked = self.num_batches_tracked + 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(
self.num_batches_tracked
)
else: # use exponential moving average
exponential_average_factor = self.momentum
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (
self.running_var is None
)
return F.batch_norm(
input,
# If buffers are not to be tracked, ensure that they won't be
# updated
self.running_mean
if not self.training or self.track_running_stats
else None,
self.running_var
if not self.training or self.track_running_stats
else None,
w,
b,
bn_training,
exponential_average_factor,
self.eps,
)
class TwoParamBN(SubspaceBN):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.weight1 = nn.Parameter(torch.empty([self.num_features]))
self.bias1 = nn.Parameter(torch.empty([self.num_features]))
torch.nn.init.ones_(self.weight1)
torch.nn.init.zeros_(self.bias1)
class LinesBN(TwoParamBN):
def get_weight(self):
w = (1 - self.alpha) * self.weight + self.alpha * self.weight1
b = (1 - self.alpha) * self.bias + self.alpha * self.bias1
return w, b
# InstanceNorm
def StandardIN(*args, affine=True, **kwargs):
return nn.InstanceNorm2d(*args, affine=affine, **kwargs)
def _process_num_groups(num_groups: Union[str, int], num_channels: int) -> int:
if num_groups == "full":
num_groups = num_channels # Set it equal to num_features.
else:
num_groups = int(num_groups)
# If num_groups is greater than num_features, we reduce it.
num_groups = min(num_channels, num_groups)
return num_groups
class SubspaceIN(nn.InstanceNorm2d):
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = False,
track_running_stats: bool = False,
) -> None:
# Override @affine to be true.
super().__init__(
num_features,
eps=eps,
momentum=momentum,
affine=True,
track_running_stats=track_running_stats,
)
def forward(self, input):
# call get_weight, which samples from the subspace, then use the
# corresponding weight.
w, b = self.get_weight()
# The rest is code in the PyTorch source forward pass for instancenorm.
assert self.running_mean is None or isinstance(
self.running_mean, torch.Tensor
)
assert self.running_var is None or isinstance(
self.running_var, torch.Tensor
)
return F.instance_norm(
input,
self.running_mean,
self.running_var,
w,
b,
self.training or not self.track_running_stats,
self.momentum,
self.eps,
)
class TwoParamIN(SubspaceIN):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.weight1 = nn.Parameter(torch.empty([self.num_features]))
self.bias1 = nn.Parameter(torch.empty([self.num_features]))
torch.nn.init.ones_(self.weight1)
torch.nn.init.zeros_(self.bias1)
class LinesIN(TwoParamIN):
def get_weight(self):
w = (1 - self.alpha) * self.weight + self.alpha * self.weight1
b = (1 - self.alpha) * self.bias + self.alpha * self.bias1
return w, b
# GroupNorm
def StandardGN(*args, affine=True, **kwargs):
num_groups = kwargs.pop("num_groups", "full")
num_groups = _process_num_groups(num_groups, args[0])
return nn.GroupNorm(num_groups, *args, affine=affine, **kwargs)
class SubspaceGN(nn.GroupNorm):
def __init__(
self,
num_features: int,
eps: float = 1e-5,
*,
num_groups: Union[str, int],
) -> None:
num_groups = _process_num_groups(num_groups, num_features)
# Override @affine to be true.
super().__init__(
num_groups,
num_features,
eps=eps,
affine=True,
)
self.num_features = num_features
def forward(self, input):
# call get_weight, which samples from the subspace, then use the
# corresponding weight.
w, b = self.get_weight()
return F.group_norm(input, self.num_groups, w, b, self.eps)
class TwoParamGN(SubspaceGN):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.weight1 = nn.Parameter(torch.empty([self.num_features]))
self.bias1 = nn.Parameter(torch.empty([self.num_features]))
torch.nn.init.ones_(self.weight1)
torch.nn.init.zeros_(self.bias1)
class LinesGN(TwoParamGN):
def get_weight(self):
w = (1 - self.alpha) * self.weight + self.alpha * self.weight1
b = (1 - self.alpha) * self.bias + self.alpha * self.bias1
return w, b
def _get_num_parameters(conv):
in_channels = conv.in_channels
out_channels = conv.out_channels
if hasattr(conv, "in_channels_list"):
in_channels_ratio = in_channels / max(conv.in_channels_list)
out_channels_ratio = out_channels / max(conv.out_channels_list)
else:
in_channels_ratio = in_channels / conv.in_channels_max
out_channels_ratio = out_channels / conv.out_channels_max
ret = conv.weight.numel()
ret = max(1, round(ret * in_channels_ratio * out_channels_ratio))
return ret
# Adaptive modules (which adjust their number of channels at inference time).
# This code contains the norm implementation used in our unstructured sparsity
# experiments and baselines. Note that normally, we disallow storing or
# recomputing BatchNorm statistics. However, we retain the ability to store
# individual BatchNorm statistics purely for sanity-checking purposes (to ensure
# our implementation produces similar results to the Universal Slimming paper,
# when BatchNorms are stored). But, we don't use these results in any analysis.
class AdaptiveNorm(nn.modules.batchnorm._NormBase):
def __init__(
self,
bn_class,
bn_func,
mode,
*args,
ratio=1,
width_factors_list=None,
**kwargs,
):
assert mode in ("BatchNorm", "InstanceNorm", "GroupNorm")
kwargs_cpy = kwargs.copy()
try:
track_running_stats = kwargs_cpy.pop("track_running_stats")
except KeyError:
track_running_stats = False
try:
self.num_groups = kwargs_cpy.pop("num_groups")
except KeyError:
self.num_groups = None
super().__init__(
*args,
affine=True,
track_running_stats=track_running_stats,
**kwargs_cpy,
)
num_features = args[0]
self.width_factors_list = width_factors_list
self.num_features_max = num_features
if mode == "BatchNorm" and self.width_factors_list is not None:
print(
f"Storing extra BatchNorm layers. This should only be used"
f"for sanity checking, since it violates our goal of"
f"arbitrarily fine-grained compression levels at inference"
f"time."
)
self.bn = nn.ModuleList(
[
bn_class(i, affine=False)
for i in [
max(1, round(self.num_features_max * width_factor))
for width_factor in self.width_factors_list
]
]
)
if mode == "GroupNorm":
if self.num_groups is None:
raise ValueError("num_groups is required")
if self.num_groups not in ("full", 1):
# This must be "full" or 1, or the tensor might not be divisible
# by @self.num_groups.
raise ValueError(f"Invalid num_groups={self.num_groups}")
self.ratio = ratio
self.width_factor = None
self.ignore_model_profiling = True
self.bn_func = bn_func
self.mode = mode
def get_weight(self):
return self.weight, self.bias
def forward(self, input):
weight, bias = self.get_weight()
c = input.shape[1]
if (
self.mode == "BatchNorm"
and self.width_factors_list is not None
and self.width_factor in self.width_factors_list
):
# Normally, we expect width_factors_list to be empty, because we
# only want to use it if we are running sanity checks (e.g.
# recreating the original performance or something).
idx = self.width_factors_list.index(self.width_factor)
kwargs = {
"input": input,
"running_mean": self.bn[idx].running_mean[:c],
"running_var": self.bn[idx].running_var[:c],
"weight": weight[:c],
"bias": bias[:c],
"training": self.training,
"momentum": self.momentum,
"eps": self.eps,
}
elif self.mode in ("InstanceNorm", "BatchNorm"):
# Sanity check, since we're not tracking running stats.
running_mean = self.running_mean
if self.running_mean is not None:
running_mean = running_mean[:c]
running_var = self.running_var
if self.running_var is not None:
running_var = running_var[:c]
kwargs = {
"input": input,
"running_mean": running_mean,
"running_var": running_var,
"weight": weight[:c],
"bias": bias[:c],
"momentum": self.momentum,
"eps": self.eps,
}
if self.mode == "BatchNorm":
kwargs["training"] = self.training
elif self.mode == "GroupNorm":
num_groups = self.num_groups
if num_groups == "full":
num_groups = c
kwargs = {
"input": input,
"num_groups": num_groups,
"weight": weight[:c],
"bias": bias[:c],
"eps": self.eps,
}
else:
raise NotImplementedError(f"Invalid mode {self.mode}.")
return self.bn_func(**kwargs)
class AdaptiveBN(AdaptiveNorm):
def __init__(self, *args, **kwargs):
norm_class = nn.BatchNorm2d
norm_func = F.batch_norm
super().__init__(norm_class, norm_func, "BatchNorm", *args, **kwargs)
class AdaptiveIN(AdaptiveNorm):
def __init__(self, *args, **kwargs):
norm_class = nn.InstanceNorm2d
norm_func = F.instance_norm
super().__init__(norm_class, norm_func, "InstanceNorm", *args, **kwargs)
class LinesAdaptiveIN(AdaptiveIN):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.weight1 = nn.Parameter(torch.Tensor(self.num_features))
self.bias1 = nn.Parameter(torch.Tensor(self.num_features))
torch.nn.init.ones_(self.weight1)
torch.nn.init.zeros_(self.bias1)
def get_weight(self):
w = (1 - self.alpha) * self.weight + self.alpha * self.weight1
b = (1 - self.alpha) * self.bias + self.alpha * self.bias1
return w, b
class AdaptiveConv2d(nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False,
first_layer=False,
last_layer=False,
ratio=None,
):
self.first_layer = first_layer
self.last_layer = last_layer
if ratio is None:
ratio = [1, 1]
super(AdaptiveConv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
)
if groups == in_channels:
assert in_channels == out_channels
self.depthwise = True
else:
self.depthwise = False
self.in_channels_max = in_channels
self.out_channels_max = out_channels
self.width_factor = None
self.ratio = ratio
def get_weight(self):
return self.weight
def forward(self, input):
if not self.first_layer:
self.in_channels = input.shape[1]
if not self.last_layer:
self.out_channels = max(
1, round(self.out_channels_max * self.width_factor)
)
self.groups = self.in_channels if self.depthwise else 1
weight = self.get_weight()
weight = weight[: self.out_channels, : self.in_channels, :, :]
assert self.bias is None
bias = None
y = nn.functional.conv2d(
input,
weight,
bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
return y
def get_num_parameters(self):
return _get_num_parameters(self)
class LinesAdaptiveConv2d(AdaptiveConv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.weight1 = nn.Parameter(torch.empty_like(self.weight))
assert self.bias is None
torch.nn.init.ones_(self.weight1)
def get_weight(self):
w = (1 - self.alpha) * self.weight + self.alpha * self.weight1
return w
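if __name__ == "__main__":
    # Editorial usage sketch, not part of the original module. It assumes the
    # surrounding training code is responsible for assigning `alpha` (subspace
    # position) and `width_factor` (channel fraction); the values below are
    # arbitrary and only illustrate the expected call pattern.
    x = torch.randn(2, 8, 32, 32)
    conv = LinesConv(8, 16, kernel_size=3, padding=1, bias=False)
    conv.initialize(nn.init.kaiming_normal_)
    conv.alpha = 0.25  # sample a point on the line between weight and weight1
    print(conv(x).shape)  # torch.Size([2, 16, 32, 32])
    ada = AdaptiveConv2d(8, 16, 3, padding=1, first_layer=True)
    ada.width_factor = 0.5  # keep roughly half of the output channels
    print(ada(x).shape)  # torch.Size([2, 8, 32, 32])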
| 2.671875
| 3
|
scripts/gen_chainercv_test.py
|
disktnk/chainer-compiler
| 116
|
12782999
|
<reponame>disktnk/chainer-compiler
"""Tests for ChainerCV related custom ops."""
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
import onnx
import onnx_script
import test_case
_has_chainercv = True
try:
import chainercv_rpn
except ImportError:
    _has_chainercv = False
def aranges(*shape):
r = np.prod(shape)
v = np.arange(r).reshape(shape).astype(np.float32)
v -= r / 2 + 0.1
return v
def _get_scales():
return (1 / 4, 1 / 8, 1 / 16, 1 / 32, 1 / 64)
def _get_hs(num_channels):
hs = []
for h, w in [(200, 272), (100, 136), (50, 68), (25, 34), (13, 17)]:
hs.append(aranges(1, num_channels, h, w))
return hs
def _get_rpn_locs_confs():
locs = []
confs = []
for i in [163200, 40800, 10200, 2550, 663]:
locs.append(aranges(1, i, 4))
confs.append(aranges(1, i))
return locs, confs
def chainercv_test_rpn_decode(test_name):
rpn = chainercv_rpn.RPN(_get_scales())
hs = _get_hs(1)
locs, confs = _get_rpn_locs_confs()
anchors = rpn.anchors(h.shape[2:] for h in hs)
in_shape = (1, 3, 800, 1088)
rois, roi_indices = rpn.decode(
[chainer.Variable(l) for l in locs],
[chainer.Variable(c) for c in confs],
anchors, in_shape)
gb = onnx_script.GraphBuilder(test_name)
hs_v = [gb.input('hs_%d' % i, h) for i, h in enumerate(hs)]
locs_v = [gb.input('loc_%d' % i, l) for i, l in enumerate(locs)]
confs_v = [gb.input('conf_%d' % i, c) for i, c in enumerate(confs)]
in_shape_v = gb.input('in_shape', np.array(in_shape))
rois_v = 'rois'
roi_indices_v = 'roi_indices'
gb.ChainerDoSomething(hs_v + locs_v + confs_v + [in_shape_v],
outputs=[rois_v, roi_indices_v],
function_name='ChainerCVRPNDecode')
gb.output(rois_v, rois)
gb.output(roi_indices_v, roi_indices)
gb.gen_test()
class TestCase(test_case.TestCase):
def __init__(self, name, func, **kwargs):
super(TestCase, self).__init__('out', name, **kwargs)
self.func = func
def get_tests():
    if not _has_chainercv:
return []
tests = []
def test(name, func, **kwargs):
tests.append(TestCase(name, func, **kwargs))
test('chainercv_test_rpn_decode', chainercv_test_rpn_decode)
return tests
| 2.1875
| 2
|
project/server/__init__.py
|
mjhea0/flask-challenge
| 2
|
12783000
|
# project/server/__init__.py
import os
from flask import Flask, make_response, jsonify
app = Flask(__name__)
app_settings = os.getenv(
'APP_SETTINGS',
'project.server.config.DevelopmentConfig'
)
app.config.from_object(app_settings)
from project.server.api.routes import api_blueprint
app.register_blueprint(api_blueprint)
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST')
return response
@app.errorhandler(400)
def bad_request(error):
    return make_response(jsonify({
        'status': '400', 'error': 'Bad request'}), 400)
@app.errorhandler(404)
def page_not_found(error):
return make_response(jsonify({
'status': '404', 'error': 'Not Found'}), 404)
@app.errorhandler(500)
def internal_server(error):
return make_response(jsonify({
'status': '500', 'error': 'Something went wrong'}), 500)
| 2.546875
| 3
|
src/full_constraint.py
|
LC-John/RangeAnalysis
| 4
|
12783001
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 13:10:21 2018
@author: DrLC
"""
from symtab import build_symtab
import constraint
import cfg
import os, sys
from interval import interval
class FCG(object):
def __init__(self, _cfg={}, _main="", _symtab={}):
assert _main in _cfg.keys()
self.__entry_set_enable = True
self.__name = _main
main = constraint.CG(_cfg[_main], _symtab[_main], _main)
self.__main = main.get_constraint_nodes()
self.__entry = self.__entry_reorder(main.get_entry_nodes(), _symtab[_main]['decl'])
self.__return = main.get_return_node()
self.__call = []
tb_del = []
for i in self.__main:
if type(i) is constraint.CallNode:
tb_del.append(i)
tmp_func_name = i.get_name().strip().split("(")[0].strip()
assert tmp_func_name in _cfg.keys()
tmp_func_cg = constraint.CG(_cfg[tmp_func_name],
_symtab[tmp_func_name],
tmp_func_name)
self.__call += tmp_func_cg.get_constraint_nodes()
tmp_func_entry = self.__entry_reorder(tmp_func_cg.get_entry_nodes(),
_symtab[tmp_func_name]['decl'])
for en, prv in zip(tmp_func_entry, i.get_prev()):
if en is None:
continue
prv.del_next(i)
prv.add_next(en)
en.add_prev(prv)
tmp_func_return = tmp_func_cg.get_return_node()
assert len(i.get_next()) == 1
nxt = i.get_next()[0]
nxt.del_prev(i)
nxt.add_prev(tmp_func_return)
tmp_func_return.add_next(nxt)
for i in tb_del:
self.__main.remove(i)
self.__constraint = self.__main + self.__call
self.__simplify()
tb_del = []
for i in self.__main:
if i not in self.__constraint:
tb_del.append(i)
for i in tb_del:
self.__main.remove(i)
tb_del = []
for i in self.__call:
if i not in self.__constraint:
tb_del.append(i)
for i in tb_del:
self.__call.remove(i)
def __entry_reorder(self, _entry=[], _funcdecl=None):
ret = []
for a in _funcdecl.get_args():
match_flag = False
for e in _entry:
if e.get_name().startswith(a.get_name()):
ret.append(e)
match_flag = True
break
if not match_flag:
ret.append(None)
return ret
def __simplify(self):
while self.__backward_simplify_iter():
pass
while self.__forward_simplify_iter():
pass
tb_del = []
for c in self.__constraint:
if len(c.get_next()) == 0 and len(c.get_prev()) == 0:
tb_del.append(c)
for c in tb_del:
self.__constraint.remove(c)
def __backward_simplify_iter(self):
ret = []
for c in self.__constraint:
if c.get_prev() == []:
continue
tb_del_ = []
for prv in c.get_prev():
if prv not in self.__constraint:
tb_del_.append(prv)
for i in tb_del_:
c.del_prev(i)
if c.get_prev() == []:
ret.append(c)
for i in ret:
self.__constraint.remove(i)
if i in self.__entry:
self.__entry.remove(i)
if len(ret) == 0:
return False
else:
return True
def __forward_simplify_iter(self):
ret = []
if self.__return is None:
ret = self.__constraint
else:
for c in self.__constraint:
if (type(c) is constraint.VarNode and len(c.get_next()) == 0
and (not self.__return.get_number() == c.get_number())):
ret.append(c)
for prv in c.get_prev():
prv.del_next(c)
for i in ret:
self.__constraint.remove(i)
if i in self.__entry:
self.__entry.remove(i)
if len(ret) == 0:
return False
else:
return True
def debug(self):
print ()
print ('Full constraint graph ' + self.__name)
if len(self.__entry) == 0:
print ('No entry')
else:
print ('Entry: ', end="")
for i in self.__entry:
print (str(i.get_number()), end=" ")
print ()
if self.__return is None:
print ("No return")
else:
print ("Return: "+str(self.__return.get_number()))
if len(self.__constraint) == 0:
print ("No constraint")
else:
print ("Constraint:")
for c in self.__constraint:
c.debug()
def set_entry_range(self, _range={}):
assert len(_range) == len(self.__entry)
assert self.__entry_set_enable
for en, r in zip(self.__entry, _range):
if en is None:
continue
assert en.get_name().startswith(r[2])
for prv in en.get_prev():
self.__constraint.remove(prv)
en.clr_prev()
tmp_node = constraint.RangeNode(r[0], r[1])
self.__constraint.append(tmp_node)
en.add_prev(tmp_node)
tmp_node.add_next(en)
self.__entry = []
self.__entry_set_enable = False
for c in self.__constraint:
if type(c) is constraint.RangeNode and len(c.get_prev()) == 0:
assert (len(c.get_next()) == 1)
if type(c.get_next()[0]) is constraint.VarNode:
c.get_next()[0].set_minmax_widen(c.get_interval())
self.__entry.append(c.get_next()[0])
def get_name(self): return self.__name
def get_constraint_nodes(self): return self.__main + self.__call
def get_entry_nodes(self): return self.__entry
def get_return_node(self): return self.__return
def print_help():
print ()
print ("+---------------------------------+")
print ("| |")
print ("| Full Constraint Graph |")
print ("| by DrLC |")
print ("| |")
print ("+---------------------------------+")
print ()
print ("Transfer .ssa file to constraint graph, and embed the function calling.")
print ()
print ("Use this command to run.")
print (" python3 %s [-P|--path SSA_FILE_PATH]" % sys.argv[0])
print ()
exit(0)
def get_op():
args = sys.argv
if '-h' in args or '--help' in args:
print_help()
if len(args) == 1:
path = '../benchmark/t9.ssa'
elif len(args) == 3 and args[1] in ['-P', '--path']:
path = args[2]
else:
print_help()
return path
if __name__ == "__main__":
path = get_op()
sym_tab = build_symtab(path)
with open(path, 'r') as f:
lines = f.readlines()
_cfg_ = {}
for key in sym_tab.keys():
_cfg_[key] = cfg.CFG(lines[sym_tab[key]["lines"][1]:sym_tab[key]["lines"][2]],
key)
cg = FCG(_cfg_, "foo", sym_tab)
cg.debug()
| 2.265625
| 2
|
setup.py
|
FrankLeeeee/powerpack
| 0
|
12783002
|
<reponame>FrankLeeeee/powerpack
#!/usr/bin/env python
from setuptools import find_packages, setup
import omnipack
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
if __name__ == '__main__':
setup(
name='omnipack',
version=omnipack.__version__,
description='A robust collection of useful scripts',
long_description=readme(),
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/FrankLeeeee/powerpack',
keywords='Python, scripts',
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
license='Apache License 2.0',
install_requires=omnipack.read_lines('requirements/requirements.txt'),
zip_safe=False,
entry_points={'console_scripts': ['omnipack = omnipack.command:cli']})
| 1.382813
| 1
|
Python/DP/ Word Break II/Word Break II DP BOTTOM UP.py
|
khanhhuynguyenvu/LeetcodeDaily
| 0
|
12783003
|
<filename>Python/DP/ Word Break II/Word Break II DP BOTTOM UP.py
class Solution(object):
    def wordBreak(self, s, wordDict):
        """Bottom-up DP: dp[i] holds every segmentation of s[i:] into dictionary words."""
        wordDict = set(wordDict)
        n = len(s)
        dp = [[] for _ in range(n + 1)]
        dp[n] = [[]]  # the empty suffix has exactly one (empty) segmentation
        for i in range(n - 1, -1, -1):
            for j in range(i + 1, n + 1):
                word = s[i:j]
                if word in wordDict:
                    for post_text in dp[j]:
                        dp[i].append([word] + post_text)
        return [" ".join(track) for track in dp[0]]
main = Solution()
print(main.wordBreak("pineapplepenapple",
["apple", "pen", "applepen", "pine", "pineapple"]))
| 3.484375
| 3
|
dleamse/dleamse_faiss_index_writer.py
|
luoxi123/DLEAMSE
| 3
|
12783004
|
<gh_stars>1-10
# -*- coding:utf-8 -*-
import os
import sys
import unittest
import numpy as np
import pandas as pd
import faiss
import ast
import click
DEFAULT_IVF_NLIST = 100
class FaissWriteIndex:
def __init__(self):
self.tmp = None
print("Initialized a faiss index class.")
    def create_index_for_embedded_spectra(self, database_usi_ids_file, ids_embedded_spectra_path, output_path):
        """
        Create a FAISS index for embedded spectra.
        :param database_usi_ids_file:
        :param ids_embedded_spectra_path:
        :param output_path:
        :return:
        """
raw_ids, raw_usi, embedded_file_list = [], [], []
if os.path.exists(database_usi_ids_file):
database_data = pd.read_csv(database_usi_ids_file, index_col=None)
database_ids = database_data["ids"].values.tolist()
database_usi = database_data["usi"].values.tolist()
# database_ids = np.load(database_ids_file).tolist()
else:
database_ids, database_usi = [], []
print("\"" + database_usi_ids_file + " \" does not exist, it will be created!")
index = self.make_faiss_index_ivf64()
if str(ids_embedded_spectra_path).endswith("/"):
dir_path = ids_embedded_spectra_path
else:
dir_path = ids_embedded_spectra_path + "/"
embedded_spectra_file_list = os.listdir(ids_embedded_spectra_path)
for i in range(len(embedded_spectra_file_list)):
if embedded_spectra_file_list[i].endswith("_embedded.txt"):
embedded_file_list.append(embedded_spectra_file_list[i])
for j in range(len(embedded_file_list)):
embedded_spectra_data = pd.read_csv(dir_path + embedded_file_list[j], sep="\t", index_col=None)
ids_data = embedded_spectra_data["ids"].values
usi_data = embedded_spectra_data["usi"].values.tolist()
spectra_vectors = embedded_spectra_data["embedded_spectra"].values
tmp_data = []
for vec in spectra_vectors:
tmp_data.append(ast.literal_eval(vec))
tmp_spectra_vectors = np.vstack(tmp_data)
tmp_data.clear()
# Self checking
self_update_id_bool = False
if len(ids_data.tolist()) != len(set(ids_data.tolist())):
self_update_new_ids = []
self_raw_ids_dict = dict.fromkeys(ids_data.tolist())
for self_new_id in ids_data:
self_tmp_id = self_new_id
while self_raw_ids_dict.keys().__contains__(self_tmp_id):
self_tmp_id += 1
if self_tmp_id != self_new_id:
self_update_id_bool = True
self_update_new_ids.append(self_tmp_id)
print("Need to self-update ids? {}".format(self_update_id_bool))
# Check with database_ids
final_ids, update_id_bool = self.check_ids_with_database(database_ids, self_update_new_ids)
else:
# Check with database_ids
final_ids, update_id_bool = self.check_ids_with_database(database_ids, ids_data.tolist())
if update_id_bool is True or self_update_id_bool is True:
update_ids_df = pd.DataFrame({"ids": final_ids})
ids_vstack_df = pd.concat([update_ids_df, embedded_spectra_data["usi"], embedded_spectra_data["embedded_spectra"]], axis=1)
                # Replace the trailing 'embedded.txt' suffix (str.strip would drop characters, not the suffix).
                store_embed_new_file = dir_path + str(embedded_file_list[j])[:-len('embedded.txt')] + 'new_ids_embedded.txt'
ids_vstack_df.to_csv(store_embed_new_file, sep="\t", header=True, index=None,
columns=["ids", "usi", "embedded_spectra"])
print("Update ids for " + str(embedded_file_list[j]) + ", and save in new file:" + store_embed_new_file)
# index train and add_with_ids
index.train(tmp_spectra_vectors.astype('float32'))
index.add_with_ids(tmp_spectra_vectors.astype('float32'), np.array(final_ids))
raw_ids.extend(final_ids)
raw_usi.extend(usi_data)
database_ids.extend(final_ids)
database_usi.extend(usi_data)
print("Wrote all database usi and ids to {}".format(database_usi_ids_file))
new_database_data = pd.DataFrame({"ids": database_ids, "usi": database_usi}, columns=["ids", "usi"])
new_database_data.to_csv(database_usi_ids_file, header=True, index=False)
print("output_path***",output_path)
if "../" in output_path:
ids_save_file = ".."+output_path.strip('.index') + '_ids_usi.csv'
else:
ids_save_file = output_path.strip('index').strip(".") + '_ids_usi.csv'
print("ids_save_file***",ids_save_file)
print("Wrote FAISS index usi and ids to {}".format(ids_save_file))
new_data_df = pd.DataFrame({"ids": raw_ids, "usi": raw_usi})
new_data_df.to_csv(ids_save_file, header=True, index=False)
self.write_faiss_index(index, output_path)
def merge_indexes(self, input_indexes, output):
"""
:param input_indexes:
:param output:
:return:
"""
all_ids_usi = None
index = None
i = 0
for input_index in input_indexes:
dirname, filename = os.path.split(os.path.abspath(input_index))
# ids
# ids_file = input_index.strip(".index")+ "_ids.npy"
            ids_file = dirname + "/" + filename[:-len('.index')] + "_ids_usi.csv"  # assumes the '.index' suffix
ids_usi_data = pd.read_csv(ids_file, index_col=None)
if i == 0:
all_ids_usi = ids_usi_data
i += 1
else:
all_ids_usi = pd.concat([all_ids_usi, ids_usi_data])
# index
input_index_data = faiss.read_index(input_index)
if not index:
index = input_index_data
else:
num = ids_usi_data.shape[0]
index.merge_from(input_index_data, num)
# Wrote to output file
# output_path, output_file = os.path.split(os.path.abspath(output))
dirname, filename = os.path.split(os.path.abspath(output))
        ids_save_file = dirname + "/" + filename[:-len('.index')] + '_ids_usi.csv'  # assumes the '.index' suffix
# ids_save_file = output_path + "/" + output.strip('.index') + '_ids.npy'
all_ids_usi.to_csv(ids_save_file, index=None)
print("Wrote FAISS index database ids to {}".format(ids_save_file))
self.write_faiss_index(index, output)
def check_ids_with_database(self, database_ids, self_update_new_ids):
# Check with database_ids
update_id_bool = False
final_ids, update_new_ids = [], []
if len(database_ids) != 0:
raw_ids_dict = dict.fromkeys(database_ids)
for new_id in self_update_new_ids:
tmp_id = new_id
while raw_ids_dict.keys().__contains__(tmp_id):
tmp_id += 1
if tmp_id != new_id:
update_id_bool = True
update_new_ids.append(tmp_id)
final_ids = update_new_ids
print("Need to update ids? {}".format(update_id_bool))
else:
final_ids = self_update_new_ids
return final_ids, update_id_bool
def make_faiss_index_flat(self, n_dimensions, index_type='ivfflat'):
"""
Make a fairly general-purpose FAISS index
:param n_dimensions:
:param index_type: Type of index to build: flat or ivfflat. ivfflat is much faster.
:return:
"""
print("Making index of type {}".format(index_type))
# if faiss.get_num_gpus():
# gpu_resources = faiss.StandardGpuResources()
# if index_type == 'flat':
# config = faiss.GpuIndexFlatConfig()
# index = faiss.GpuIndexFlatL2(gpu_resources, n_dimensions, config)
# elif index_type == 'ivfflat':
# config = faiss.GpuIndexIVFFlatConfig()
# index = faiss.GpuIndexIVFFlat(gpu_resources, n_dimensions, DEFAULT_IVF_NLIST, faiss.METRIC_L2, config)
# else:
# raise ValueError("Unknown index_type %s" % index_type)
# else:
print("Using CPU.")
if index_type == 'flat':
index = faiss.IndexFlatL2(n_dimensions)
elif index_type == 'ivfflat':
quantizer = faiss.IndexFlatL2(n_dimensions)
index = faiss.IndexIVFFlat(quantizer, n_dimensions, DEFAULT_IVF_NLIST, faiss.METRIC_L2)
else:
raise ValueError("Unknown index_type %s" % index_type)
return index
def make_faiss_index_idmap(self, n_dimensions):
"""
Make a fairly general-purpose FAISS index
:param n_dimensions:
:return:
"""
print("Making index ...")
tmp_index = faiss.IndexFlatL2(n_dimensions)
index = faiss.IndexIDMap(tmp_index)
return index
def make_faiss_index_ivf64(self):
"""
Save a FAISS index. If we're on GPU, have to convert to CPU index first
:return:
"""
index = faiss.index_factory(32, "IVF64,Flat")
return index
def write_faiss_index(self, index, out_filepath):
"""
Save a FAISS index. If we're on GPU, have to convert to CPU index first
:param out_filepath:
:param index:
:return:
"""
# if faiss.get_num_gpus():
# print("Converting index from GPU to CPU...")
# index = faiss.index_gpu_to_cpu(index)
faiss.write_index(index, out_filepath)
print("Wrote FAISS index to {}".format(out_filepath))
def read_faiss_index_gpu(self, index_filepath):
"""
Load a FAISS index. If we're on GPU, then convert it to GPU index
:param index_filepath:
:return:
"""
print("read_faiss_index start.")
index = faiss.read_index(index_filepath)
if faiss.get_num_gpus():
print("read_faiss_index: Converting FAISS index from CPU to GPU.")
index = faiss.index_cpu_to_gpu(faiss.StandardGpuResources(), 0, index)
return index
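# Editorial usage sketch (the paths below are placeholders, not files shipped with
# this module):
#
#   writer = FaissWriteIndex()
#   writer.create_index_for_embedded_spectra(
#       "database_usi_ids.csv",   # created on first use if it does not exist
#       "embedded_spectra/",      # directory containing *_embedded.txt files
#       "spectra.index",          # output FAISS index; ids/usi go to spectra_ids_usi.csv
#   )
#   writer.merge_indexes(["a.index", "b.index"], "merged.index")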
| 2.515625
| 3
|
flyrc/numeric.py
|
mrflea/flyrc
| 0
|
12783005
|
# This is based upon numerics returned from the Charybdis ircd, which
# complies with RFC numerics.
# Numerics 001 .. 099 are sent from the server to a local client.
RPL_WELCOME = "001"
RPL_YOURHOST = "002"
RPL_CREATED = "003"
RPL_MYINFO = "004"
RPL_ISUPPORT = "005"
RPL_SNOMASK = "008"
RPL_REDIR = "010"
RPL_MAP = "015"
RPL_MAPMORE = "016"
RPL_MAPEND = "017"
RPL_SAVENICK = "043"
# Numerics 200 .. 399 are sent as replies to commands executed by the client.
RPL_TRACELINK = "200"
RPL_TRACECONNECTING = "201"
RPL_TRACEHANDSHAKE = "202"
RPL_TRACEUNKNOWN = "203"
RPL_TRACEOPERATOR = "204"
RPL_TRACEUSER = "205"
RPL_TRACESERVER = "206"
RPL_TRACENEWTYPE = "208"
RPL_TRACECLASS = "209"
RPL_STATSLINKINFO = "211"
RPL_STATSCOMMANDS = "212"
RPL_STATSCLINE = "213"
RPL_STATSNLINE = "214"
RPL_STATSILINE = "215"
RPL_STATSKLINE = "216"
RPL_STATSQLINE = "217"
RPL_STATSYLINE = "218"
RPL_ENDOFSTATS = "219"
RPL_STATSPLINE = "220"
RPL_UMODEIS = "221"
RPL_STATSFLINE = "224"
RPL_STATSDLINE = "225"
RPL_SERVLIST = "234"
RPL_SERVLISTEND = "235"
RPL_STATSLLINE = "241"
RPL_STATSUPTIME = "242"
RPL_STATSOLINE = "243"
RPL_STATSHLINE = "244"
RPL_STATSSLINE = "245"
RPL_STATSXLINE = "247"
RPL_STATSULINE = "248"
RPL_STATSDEBUG = "249"
RPL_STATSCONN = "250"
RPL_LUSERCLIENT = "251"
RPL_LUSEROP = "252"
RPL_LUSERUNKNOWN = "253"
RPL_LUSERCHANNELS = "254"
RPL_LUSERME = "255"
RPL_ADMINME = "256"
RPL_ADMINLOC1 = "257"
RPL_ADMINLOC2 = "258"
RPL_ADMINEMAIL = "259"
RPL_TRACELOG = "261"
RPL_ENDOFTRACE = "262"
RPL_LOAD2HI = "263"
RPL_LOCALUSERS = "265"
RPL_GLOBALUSERS = "266"
RPL_PRIVS = "270"
RPL_WHOISCERTFP = "276"
RPL_ACCEPTLIST = "281"
RPL_ENDOFACCEPT = "282"
RPL_NONE = "300"
RPL_AWAY = "301"
RPL_USERHOST = "302"
RPL_ISON = "303"
RPL_TEXT = "304"
RPL_UNAWAY = "305"
RPL_NOWAWAY = "306"
RPL_WHOISUSER = "311"
RPL_WHOISSERVER = "312"
RPL_WHOISOPERATOR = "313"
RPL_WHOWASUSER = "314"
RPL_ENDOFWHOWAS = "369"
RPL_WHOISCHANOP = "316"
RPL_WHOISIDLE = "317"
RPL_ENDOFWHOIS = "318"
RPL_WHOISCHANNELS = "319"
RPL_LISTSTART = "321"
RPL_LIST = "322"
RPL_LISTEND = "323"
RPL_CHANNELMODEIS = "324"
RPL_CHANNELMLOCK = "325"
RPL_CHANNELURL = "328"
RPL_CREATIONTIME = "329"
RPL_WHOISLOGGEDIN = "330"
RPL_NOTOPIC = "331"
RPL_TOPIC = "332"
RPL_TOPICWHOTIME = "333"
RPL_WHOISACTUALLY = "338"
RPL_INVITING = "341"
RPL_SUMMONING = "342"
RPL_INVITELIST = "346"
RPL_ENDOFINVITELIST = "347"
RPL_EXCEPTLIST = "348"
RPL_ENDOFEXCEPTLIST = "349"
RPL_VERSION = "351"
RPL_WHOREPLY = "352"
RPL_WHOSPCRPL = "354"
RPL_ENDOFWHO = "315"
RPL_NAMREPLY = "353"
RPL_WHOWASREAL = "360"
RPL_ENDOFNAMES = "366"
RPL_KILLDONE = "361"
RPL_CLOSING = "362"
RPL_CLOSEEND = "363"
RPL_LINKS = "364"
RPL_ENDOFLINKS = "365"
RPL_BANLIST = "367"
RPL_ENDOFBANLIST = "368"
RPL_INFO = "371"
RPL_MOTD = "372"
RPL_INFOSTART = "373"
RPL_ENDOFINFO = "374"
RPL_MOTDSTART = "375"
RPL_ENDOFMOTD = "376"
RPL_WHOISHOST = "378"
RPL_YOUREOPER = "381"
RPL_REHASHING = "382"
RPL_MYPORTIS = "384"
RPL_NOTOPERANYMORE = "385"
RPL_RSACHALLENGE = "386"
RPL_TIME = "391"
RPL_USERSSTART = "392"
RPL_USERS = "393"
RPL_ENDOFUSERS = "394"
RPL_NOUSERS = "395"
RPL_HOSTHIDDEN = "396"
# Numerics 400 .. 599 are errors for commands.
ERR_NOSUCHNICK = "401"
ERR_NOSUCHSERVER = "402"
ERR_NOSUCHCHANNEL = "403"
ERR_CANNOTSENDTOCHAN = "404"
ERR_TOOMANYCHANNELS = "405"
ERR_WASNOSUCHNICK = "406"
ERR_TOOMANYTARGETS = "407"
ERR_NOORIGIN = "409"
ERR_INVALIDCAPCMD = "410"
ERR_NORECIPIENT = "411"
ERR_NOTEXTTOSEND = "412"
ERR_NOTOPLEVEL = "413"
ERR_WILDTOPLEVEL = "414"
ERR_TOOMANYMATCHES = "416"
ERR_UNKNOWNCOMMAND = "421"
ERR_NOMOTD = "422"
ERR_NOADMININFO = "423"
ERR_FILEERROR = "424"
ERR_NONICKNAMEGIVEN = "431"
ERR_ERRONEUSNICKNAME = "432"
ERR_NICKNAMEINUSE = "433"
ERR_BANNICKCHANGE = "435"
ERR_NICKCOLLISION = "436"
ERR_UNAVAILRESOURCE = "437"
ERR_NICKTOOFAST = "438"
ERR_SERVICESDOWN = "440"
ERR_USERNOTINCHANNEL = "441"
ERR_NOTONCHANNEL = "442"
ERR_USERONCHANNEL = "443"
ERR_NOLOGIN = "444"
ERR_SUMMONDISABLED = "445"
ERR_USERSDISABLED = "446"
ERR_NOTREGISTERED = "451"
ERR_ACCEPTFULL = "456"
ERR_ACCEPTEXIST = "457"
ERR_ACCEPTNOT = "458"
ERR_NEEDMOREPARAMS = "461"
ERR_ALREADYREGISTRED = "462"
ERR_NOPERMFORHOST = "463"
ERR_PASSWDMISMATCH = "464"
ERR_YOUREBANNEDCREEP = "465"
ERR_YOUWILLBEBANNED = "466"
ERR_KEYSET = "467"
ERR_LINKCHANNEL = "470"
ERR_CHANNELISFULL = "471"
ERR_UNKNOWNMODE = "472"
ERR_INVITEONLYCHAN = "473"
ERR_BANNEDFROMCHAN = "474"
ERR_BADCHANNELKEY = "475"
ERR_BADCHANMASK = "476"
ERR_NEEDREGGEDNICK = "477"
ERR_BANLISTFULL = "478"
ERR_BADCHANNAME = "479"
ERR_THROTTLE = "480"
ERR_NOPRIVILEGES = "481"
ERR_CHANOPRIVSNEEDED = "482"
ERR_CANTKILLSERVER = "483"
ERR_ISCHANSERVICE = "484"
ERR_BANNEDNICK = "485"
ERR_NONONREG = "486"
ERR_VOICENEEDED = "489"
ERR_NOOPERHOST = "491"
ERR_OWNMODE = "494"
ERR_UMODEUNKNOWNFLAG = "501"
ERR_USERSDONTMATCH = "502"
ERR_GHOSTEDCLIENT = "503"
ERR_USERNOTONSERV = "504"
ERR_WRONGPONG = "513"
ERR_DISABLED = "517"
ERR_HELPNOTFOUND = "524"
# Numerics 600 .. 999: assorted extended numerics.
RPL_WHOISSECURE = "671"
RPL_MODLIST = "702"
RPL_ENDOFMODLIST = "703"
RPL_HELPSTART = "704"
RPL_HELPTXT = "705"
RPL_ENDOFHELP = "706"
ERR_TARGCHANGE = "707"
RPL_ETRACEFULL = "708"
RPL_ETRACE = "709"
RPL_KNOCK = "710"
RPL_KNOCKDLVR = "711"
ERR_TOOMANYKNOCK = "712"
ERR_CHANOPEN = "713"
ERR_KNOCKONCHAN = "714"
ERR_KNOCKDISABLED = "715"
ERR_TARGUMODEG = "716"
RPL_TARGNOTIFY = "717"
RPL_UMODEGMSG = "718"
RPL_OMOTDSTART = "720"
RPL_OMOTD = "721"
RPL_ENDOFOMOTD = "722"
ERR_NOPRIVS = "723"
RPL_TESTMASK = "724"
RPL_TESTLINE = "725"
RPL_NOTESTLINE = "726"
RPL_TESTMASKGECOS = "727"
RPL_QUIETLIST = "728"
RPL_ENDOFQUIETLIST = "729"
RPL_MONONLINE = "730"
RPL_MONOFFLINE = "731"
RPL_MONLIST = "732"
RPL_ENDOFMONLIST = "733"
ERR_MONLISTFULL = "734"
RPL_RSACHALLENGE2 = "740"
RPL_ENDOFRSACHALLENGE2 = "741"
ERR_MLOCKRESTRICTED = "742"
RPL_SCANMATCHED = "750"
RPL_SCANUMODES = "751"
RPL_LOGGEDIN = "900"
RPL_LOGGEDOUT = "901"
ERR_NICKLOCKED = "902"
RPL_SASLSUCCESS = "903"
ERR_SASLFAIL = "904"
ERR_SASLTOOLONG = "905"
ERR_SASLABORTED = "906"
ERR_SASLALREADY = "907"
ERR_LAST_ERR_MSG = "999"
| 1.625
| 2
|
crc/scripts/failing_script.py
|
sartography/cr-connect-workflow
| 2
|
12783006
|
from crc.scripts.script import Script
from crc.services.failing_service import FailingService
class FailingScript(Script):
def get_description(self):
return """It fails"""
def do_task_validate_only(self, task, *args, **kwargs):
pass
def do_task(self, task, *args, **kwargs):
FailingService.fail_as_service()
| 2.359375
| 2
|
booktoshare.py
|
gozik/booktoshare
| 0
|
12783007
|
<filename>booktoshare.py
import click
from flask import current_app
from app import db, create_app
from app.models.auth import User, Role
from app.models.books import Book
def register(app):
@app.cli.command("create_admin")
@click.argument("password")
def create_admin(password):
pass # TODO
app = create_app()
register(app)
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User,
'Book': Book}
| 2.09375
| 2
|
distances/migrations/0001_initial.py
|
tkettu/rokego
| 0
|
12783008
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-21 14:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Exercise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sport', models.CharField(choices=[('SK', 'Skiing'), ('RU', 'Running'), ('CY', 'Cycling')], default='RU', max_length=2)),
('time', models.TimeField()),
('distance', models.DecimalField(decimal_places=2, max_digits=6)),
],
),
]
| 1.78125
| 2
|
server/models/base.py
|
AndrewIjano/distributed-tic-tac-toe
| 0
|
12783009
|
class BaseModel:
@property
def key(self):
raise NotImplementedError()
def serialize(self):
return vars(self)
| 2.296875
| 2
|
matrix operation.py
|
Arjitg450/Python-Programs
| 0
|
12783010
|
<reponame>Arjitg450/Python-Programs<filename>matrix operation.py<gh_stars>0
import numpy as np
names=['arjit','brett','chetan','deval','farukh','govind','harshit']
ndict={'arjit':'0','brett':'1','chetan':'2','deval':'3','farukh':'4',
'govind':'5','harshit':'6'}
year=['2010','2011','2012','2013','2014','2015','2016']
ydict={'2010':'0','2011':'1','2012':'2','2013':'3','2014':'4','2015':'5',
'2016':'6'}
#marksobtained_in_english
arjit=[90.3,88,22,56,78,89,85]
brett=[23,81,23,56,73,86,84]
chetan=[23,81,23,56,73,86,84]
deval=[23,81,23,56,73,86,84]
farukh=[23,81,23,56,73,86,84]
govind=[23,81,23,56,73,86,84]
harshit=[23,81,23,56,73,86,84]
marksobtained_in_english=np.array([arjit,brett,chetan,deval,farukh,govind,harshit])
#marksobtained_in_maths
arjit=[90.3,88,22,56,78,89,85]
brett=[23,81,23,56,73,86,84]
chetan=[23,81,23,56,73,86,84]
deval=[23,81,23,56,73,86,84]
farukh=[23,81,23,56,73,86,84]
govind=[23,81,23,56,73,86,84]
harshit=[23,81,23,56,73,86,84]
marksobtained_in_maths=np.array([arjit,brett,chetan,deval,farukh,govind,harshit])
# Percentage of total marks (each subject is out of 100, so the combined total is out of 200).
marksobtained = (marksobtained_in_maths + marksobtained_in_english) / 200 * 100
print(marksobtained)
print(marksobtained[0][4])
| 2.953125
| 3
|
libhandy/lists/switches.py
|
jeteokeeffe/gtk-python-examples
| 0
|
12783011
|
import gi
gi.require_version('Handy', '0.0')
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Handy
class MyWindow(Gtk.Window):
def __init__(self):
# https://lazka.github.io/pgi-docs/Gtk-3.0/classes/Window.html
Gtk.Window.__init__(self)
self.set_title("Switches Example")
self.connect("destroy", Gtk.main_quit)
self.set_size_request(350, 350)
# Create List Box
# https://lazka.github.io/pgi-docs/Gtk-3.0/classes/ListBox.html
box = Gtk.ListBox()
box.set_selection_mode(Gtk.SelectionMode.NONE)
# use the libhandy function to add separators to listbox rows
# https://lazka.github.io/pgi-docs/#Handy-0.0/functions.html#Handy.list_box_separator_header
box.set_header_func(Handy.list_box_separator_header)
# Add some rows
box.add(self.addrow("London"))
box.add(self.addrow("Berlin"))
box.add(self.addrow("Prague"))
# Add List box to main window
self.add(box)
def addrow(self, title):
# https://lazka.github.io/pgi-docs/#Handy-0.0/classes/ActionRow.html
row = Handy.ActionRow()
row.set_title(title)
# Add action to row
switch = Gtk.Switch.new()
switch.set_valign(Gtk.Align.CENTER)
row.add_action(switch)
return row
# https://lazka.github.io/pgi-docs/#Handy-0.0/functions.html#Handy.init
Handy.init()
window = MyWindow()
window.show_all()
Gtk.main()
| 2.671875
| 3
|
convert sph.py
|
mn270/Recognition-phenomena-TIMIT-
| 0
|
12783012
|
<reponame>mn270/Recognition-phenomena-TIMIT-<filename>convert sph.py
from sphfile import SPHFile
import glob
import os
""""Convert SPH file to wav"""
dialects_path = "/home/marcin/Pobrane/TIMIT"
root_dir = os.path.join(dialects_path, '**/*.WAV')
wav_files = glob.glob(root_dir, recursive=True)
for wav_file in wav_files:
sph = SPHFile(wav_file)
txt_file = ""
txt_file = wav_file[:-3] + "TXT"
f = open(txt_file,'r')
for line in f:
words = line.split(" ")
start_time = (int(words[0])/16000)
end_time = (int(words[1])/16000)
print("writing file ", wav_file)
sph.write_wav(wav_file.replace(".WAV",".wav"),start_time,end_time)
| 3.078125
| 3
|
background/urls.py
|
Ultimatum22/MediaPanel
| 0
|
12783013
|
<reponame>Ultimatum22/MediaPanel<filename>background/urls.py
from django.conf.urls import url, patterns
from background import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^update/', views.update),
)
| 1.6875
| 2
|
arachne/resources/interface.py
|
shrine-maiden-heavy-industries/arachne
| 3
|
12783014
|
<filename>arachne/resources/interface.py<gh_stars>1-10
# SPDX-License-Identifier: BSD-3-Clause
from amaranth.build import *
from . import assert_width
__all__ = (
'JTAGResource',
'EthernetResource',
'CANResource',
)
def JTAGResource(*args, tck, tms, tdi, tdo, conn = None, attrs = None):
ios = [
Subsignal('tck', Pins(tck, dir = 'i', conn = conn, assert_width = 1)),
Subsignal('tms', Pins(tms, dir = 'i', conn = conn, assert_width = 1)),
Subsignal('tdi', Pins(tdi, dir = 'i', conn = conn, assert_width = 1)),
Subsignal('tdo', Pins(tdo, dir = 'oe', conn = conn, assert_width = 1)),
]
if attrs is not None:
ios.append(attrs)
return Resource.family(*args, default_name = 'jtag', ios = ios)
def EthernetResource(*args, rxck, rxd, txck, txd, rx_dv = None, rx_err = None, rx_ctl = None,
tx_en = None, tx_err = None, tx_ctl = None, col = None, crs = None,
mdc = None, mdio = None, conn = None, attrs = None, mdio_attrs = None):
assert_width(rxd, (4, 8))
assert_width(txd, (4, 8))
ios = [
Subsignal('rx_clk', Pins(rxck, dir = 'i', conn = conn, assert_width = 1)),
Subsignal('rx_dat', Pins(rxd, dir = 'i', conn = conn)),
Subsignal('tx_clk', Pins(txck, dir = 'i', conn = conn, assert_width = 1)),
Subsignal('tx_dat', Pins(txd, dir = 'o', conn = conn)),
]
if rx_dv is not None and rx_err is not None:
assert rx_ctl is None
ios.append(Subsignal('rx_dv', Pins(rx_dv, dir = 'i', conn = conn, assert_width = 1)))
ios.append(Subsignal('rx_err', Pins(rx_err, dir = 'i', conn = conn, assert_width = 1)))
elif rx_ctl is not None:
ios.append(Subsignal('rx_ctl', Pins(rx_ctl, dir = 'i', conn = conn, assert_width = 1)))
else:
raise AssertionError('Must specify either MII RXDV + RXER pins or RGMII RXCTL')
if tx_en is not None and tx_err is not None:
assert tx_ctl is None
ios.append(Subsignal('tx_en', Pins(tx_en, dir = 'o', conn = conn, assert_width = 1)))
ios.append(Subsignal('tx_err', Pins(tx_err, dir = 'o', conn = conn, assert_width = 1)))
if col is not None:
ios.append(Subsignal('col', Pins(col, dir = 'i', conn = conn, assert_width = 1)))
if crs is not None:
ios.append(Subsignal('crs', Pins(crs, dir = 'i', conn = conn, assert_width = 1)))
elif tx_ctl is not None:
assert col is None and crs is None
ios.append(Subsignal('tx_ctl', Pins(tx_ctl, dir = 'o', conn = conn, assert_width = 1)))
else:
raise AssertionError('Must specify either MII TXDV + TXER pins or RGMII TXCTL')
assert (rx_dv is not None and rx_err is not None) == (tx_en is not None and tx_err is not None)
assert (rx_ctl is not None) == (tx_ctl is not None)
if mdc is not None and mdio is not None:
ios.append(Subsignal('mdc', Pins(mdc, dir = 'o', conn = conn, assert_width = 1), mdio_attrs))
ios.append(Subsignal('mdio', Pins(mdio, dir = 'io', conn = conn, assert_width = 1), mdio_attrs))
if attrs is not None:
ios.append(attrs)
return Resource.family(*args, default_name = 'eth', ios = ios)
def CANResource(*args, rx, tx, conn = None, attrs = None):
ios = [
Subsignal('rx', Pins(rx, dir = 'o', conn = conn)),
Subsignal('tx', Pins(tx, dir = 'o', conn = conn)),
]
if attrs is not None:
ios.append(attrs)
return Resource.family(*args, default_name = 'can', ios = ios)
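# Editorial usage sketch: the pin numbers, connector and Attrs below are placeholders
# for a real platform definition, not values taken from this repository.
#
#   resources = [
#       JTAGResource(0, tck='1', tms='2', tdi='3', tdo='4',
#                    conn=('pmod', 0), attrs=Attrs(IO_STANDARD='SB_LVCMOS')),
#       CANResource(0, rx='5', tx='6', conn=('pmod', 1)),
#   ]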
| 1.96875
| 2
|
src/common/lie/numpy/__init__.py
|
yewzijian/MultiReg
| 3
|
12783015
|
from .liegroupbase import LieGroupBase
from .so3 import SO3
from .so3q import SO3q
from .se3 import SE3
from .se3q import SE3q
| 1.007813
| 1
|
events/migrations/0001_initial.py
|
Akash1S/meethub
| 428
|
12783016
|
# Generated by Django 2.0.4 on 2018-04-21 15:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('description', models.TextField(max_length=500)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.TextField(max_length=500)),
('created_date', models.DateField(auto_now=True)),
('created_time', models.TimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('created_date', 'created_time'),
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('details', models.TextField(max_length=1000)),
('venue', models.CharField(max_length=50)),
('date', models.DateField(help_text='Please use the following format: <em>YYYY-MM-DD</em>.')),
('time', models.TimeField()),
('attendees', models.ManyToManyField(blank=True, related_name='attending', to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='events.Category')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'events',
'verbose_name': 'event',
},
),
migrations.AddField(
model_name='comment',
name='event',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='events.Event'),
),
]
| 1.867188
| 2
|
src/validate_plist_xml/__init__.py
|
jgstew/validate_plist_xml
| 0
|
12783017
|
"""
To support using validate_plist_xml as a module.
"""
from .validate_plist_xml import main
__version__ = "1.0.4"
| 1.164063
| 1
|
kenchi/plotting.py
|
Y-oHr-N/kenchi
| 19
|
12783018
|
<filename>kenchi/plotting.py
import numpy as np
from scipy.stats import gaussian_kde
from sklearn.metrics import auc, roc_curve
from sklearn.utils.validation import check_array, check_symmetric, column_or_1d
__all__ = [
'plot_anomaly_score', 'plot_graphical_model',
'plot_partial_corrcoef', 'plot_roc_curve'
]
def plot_anomaly_score(
anomaly_score, ax=None, bins='auto', figsize=None,
filename=None, hist=True, kde=True, threshold=None,
title=None, xlabel='Samples', xlim=None, ylabel='Anomaly score',
ylim=None, **kwargs
):
"""Plot the anomaly score for each sample.
Parameters
----------
anomaly_score : array-like of shape (n_samples,)
Anomaly score for each sample.
ax : matplotlib Axes, default None
Target axes instance.
bins : int, str or array-like, default 'auto'
Number of hist bins.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
hist : bool, default True
If True, plot a histogram of anomaly scores.
kde : bool, default True
If True, plot a gaussian kernel density estimate.
threshold : float, default None
Threshold.
title : string, default None
Axes title. To disable, pass None.
xlabel : string, default 'Samples'
X axis title label. To disable, pass None.
xlim : tuple, default None
Tuple passed to ``ax.xlim``.
ylabel : string, default 'Anomaly score'
Y axis title label. To disable, pass None.
ylim : tuple, default None
Tuple passed to ``ax.ylim``.
**kwargs : dict
Other keywords passed to ``ax.plot``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from kenchi.datasets import load_wdbc
>>> from kenchi.outlier_detection import MiniBatchKMeans
>>> from kenchi.plotting import plot_anomaly_score
>>> X, _ = load_wdbc(random_state=0, return_X_y=True)
>>> det = MiniBatchKMeans(random_state=0).fit(X)
>>> anomaly_score = det.anomaly_score(X, normalize=True)
>>> plot_anomaly_score(
... anomaly_score, threshold=det.threshold_, linestyle='', marker='.'
... ) # doctest: +ELLIPSIS
<matplotlib.axes._subplots.AxesSubplot object at 0x...>
>>> plt.show() # doctest: +SKIP
.. figure:: images/plot_anomaly_score.png
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def _get_ax_hist(ax):
locator = ax.get_axes_locator()
if locator is None:
# Create an axes on the right side of ax
divider = make_axes_locatable(ax)
ax_hist = divider.append_axes(
'right', '20%', pad=0.1, sharey=ax
)
return ax_hist
for ax_hist in ax.get_figure().get_axes():
locator_hist = ax_hist.get_axes_locator()
if ax_hist is ax:
continue
if locator_hist is None:
continue
if locator_hist._axes_divider is locator._axes_divider:
return ax_hist
anomaly_score = column_or_1d(anomaly_score)
if ax is None:
_, ax = plt.subplots(figsize=figsize)
ax.grid(True, linestyle=':')
if xlim is None:
n_samples, = anomaly_score.shape
xlim = (0., n_samples - 1.)
ax.set_xlim(xlim)
if ylim is None:
ylim = (0., 1.05 * np.max(anomaly_score))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
line, = ax.plot(anomaly_score, **kwargs)
color = line.get_color()
if threshold is not None:
ax.hlines(threshold, xlim[0], xlim[1], color=color)
if hist or kde:
ax_hist = _get_ax_hist(ax)
ax_hist.grid(True, linestyle=':')
ax_hist.tick_params(axis='y', labelleft=False)
ax_hist.set_ylim(ylim)
if hist:
# Draw a histogram
ax_hist.hist(
anomaly_score,
alpha = 0.4,
bins = bins,
color = color,
density = True,
orientation = 'horizontal'
)
if kde:
kernel = gaussian_kde(anomaly_score)
ylocs = np.linspace(ylim[0], ylim[1])
# Draw a gaussian kernel density estimate
ax_hist.plot(kernel(ylocs), ylocs, color=color)
if 'label' in kwargs:
ax.legend(loc='upper left')
if filename is not None:
ax.get_figure().savefig(filename)
return ax
def plot_roc_curve(
y_true, y_score, ax=None, figsize=None,
filename=None, title='ROC curve', xlabel='FPR', ylabel='TPR',
**kwargs
):
"""Plot the Receiver Operating Characteristic (ROC) curve.
Parameters
----------
y_true : array-like of shape (n_samples,)
True Labels.
y_score : array-like of shape (n_samples,)
Target scores.
ax : matplotlib Axes, default None
Target axes instance.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
title : string, default 'ROC curve'
Axes title. To disable, pass None.
xlabel : string, default 'FPR'
X axis title label. To disable, pass None.
ylabel : string, default 'TPR'
Y axis title label. To disable, pass None.
**kwargs : dict
Other keywords passed to ``ax.plot``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from kenchi.datasets import load_wdbc
>>> from kenchi.outlier_detection import MiniBatchKMeans
>>> from kenchi.plotting import plot_roc_curve
>>> X, y = load_wdbc(random_state=0, return_X_y=True)
>>> det = MiniBatchKMeans(random_state=0).fit(X)
>>> score_samples = det.score_samples(X)
>>> plot_roc_curve(y, score_samples) # doctest: +ELLIPSIS
<matplotlib.axes._subplots.AxesSubplot object at 0x...>
>>> plt.show() # doctest: +SKIP
.. figure:: images/plot_roc_curve.png
"""
import matplotlib.pyplot as plt
fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
if ax is None:
_, ax = plt.subplots(figsize=figsize)
ax.grid(True, linestyle=':')
ax.set_xlim(0., 1.)
ax.set_ylim(0., 1.05)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if 'label' in kwargs:
kwargs['label'] += f' (area={roc_auc:1.3f})'
else:
kwargs['label'] = f'area={roc_auc:1.3f}'
ax.plot(fpr, tpr, **kwargs)
ax.legend(loc='lower right')
if filename is not None:
ax.get_figure().savefig(filename)
return ax
def plot_graphical_model(
G, ax=None, figsize=None, filename=None,
random_state=None, title='GGM', **kwargs
):
"""Plot the Gaussian Graphical Model (GGM).
Parameters
----------
G : networkx Graph
GGM.
ax : matplotlib Axes, default None
Target axes instance.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
random_state : int, RandomState instance, default None
Seed of the pseudo random number generator.
title : string, default 'GGM'
Axes title. To disable, pass None.
**kwargs : dict
Other keywords passed to ``nx.draw_networkx``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> from kenchi.plotting import plot_graphical_model
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> A = make_sparse_spd_matrix(dim=20, norm_diag=True, random_state=0)
>>> G = nx.from_numpy_matrix(A)
>>> plot_graphical_model(G, random_state=0) # doctest: +ELLIPSIS
<matplotlib.axes._subplots.AxesSubplot object at 0x...>
>>> plt.show() # doctest: +SKIP
.. figure:: images/plot_graphical_model.png
"""
import matplotlib.pyplot as plt
import networkx as nx
if ax is None:
_, ax = plt.subplots(figsize=figsize)
if title is not None:
ax.set_title(title)
node_size = np.array([30. * (d + 1.) for _, d in G.degree])
pos = nx.spring_layout(G, seed=random_state)
width = np.abs([3. * w for _, _, w in G.edges(data='weight')])
# Add the draw_networkx kwargs here
kwargs.setdefault('cmap', 'Spectral')
kwargs.setdefault('node_size', node_size)
kwargs.setdefault('pos', pos)
kwargs.setdefault('width', width)
    # Draw the Gaussian graphical model
nx.draw_networkx(G, ax=ax, **kwargs)
# Turn off tick visibility
ax.tick_params('x', labelbottom=False, bottom=False)
ax.tick_params('y', labelleft=False, left=False)
if filename is not None:
ax.get_figure().savefig(filename)
return ax
def plot_partial_corrcoef(
partial_corrcoef, ax=None, cbar=True, figsize=None,
filename=None, title='Partial correlation', **kwargs
):
"""Plot the partial correlation coefficient matrix.
Parameters
----------
partial_corrcoef : array-like of shape (n_features, n_features)
Partial correlation coefficient matrix.
ax : matplotlib Axes, default None
Target axes instance.
cbar : bool, default True.
If True, draw a colorbar.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
title : string, default 'Partial correlation'
Axes title. To disable, pass None.
**kwargs : dict
Other keywords passed to ``ax.pcolormesh``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from kenchi.plotting import plot_partial_corrcoef
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> A = make_sparse_spd_matrix(dim=20, norm_diag=True, random_state=0)
>>> plot_partial_corrcoef(A) # doctest: +ELLIPSIS
<matplotlib.axes._subplots.AxesSubplot object at 0x...>
>>> plt.show() # doctest: +SKIP
.. figure:: images/plot_partial_corrcoef.png
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
partial_corrcoef = check_array(partial_corrcoef)
partial_corrcoef = check_symmetric(partial_corrcoef, raise_exception=True)
if ax is None:
_, ax = plt.subplots(figsize=figsize)
if title is not None:
ax.set_title(title)
# Add the pcolormesh kwargs here
kwargs.setdefault('cmap', 'RdBu')
kwargs.setdefault('edgecolors', 'white')
kwargs.setdefault('vmin', -1.)
kwargs.setdefault('vmax', 1.)
# Draw the heatmap
mesh = ax.pcolormesh(
np.ma.masked_equal(partial_corrcoef, 0.), **kwargs
)
ax.set_aspect('equal')
ax.set_facecolor('grey')
# Invert the y axis to show the plot in matrix form
ax.invert_yaxis()
if cbar:
# Create an axes on the right side of ax
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', '5%', pad=0.1)
ax.get_figure().colorbar(mesh, cax=cax)
if filename is not None:
ax.get_figure().savefig(filename)
return ax
| 2.578125
| 3
|
django_backend/fleet/serializers.py
|
CapacitacionDesoft/travels-log
| 2
|
12783019
|
from rest_framework import serializers
from .models import Driver, Vehicle
class DriverSerializers(serializers.ModelSerializer):
class Meta:
model = Driver
fields = '__all__'
class VehicleSerializers(serializers.ModelSerializer):
class Meta:
model = Vehicle
fields = '__all__'
| 2.15625
| 2
|
first.py
|
FilipCvetko/Testingrepo
| 0
|
12783020
|
<gh_stars>0
print("H222I.")
| 1.101563
| 1
|
easistrain/func_get_image_matrix.py
|
woutdenolf/easistrain
| 0
|
12783021
|
<gh_stars>0
import h5py
import numpy as np
### This function gets the image matrix from the h5 file and converts it to float64 ###
### root_data: the path of the folder where the h5 file is saved
### h5file: the name of the h5 file from which the image matrix will be extracted
### scan: the name of the group in which the relevant measurements are saved
### detector_name: the name of the detector
def get_image_matrix(root_data, h5file, scan, detector_name):
r_h5file = h5py.File(root_data + "/" + h5file, "r")
image = r_h5file["/" + scan + "/measurement/" + detector_name]
if np.ndim(image) == 2:
print(np.shape(image))
image_matrix = np.float64(image)
else:
print(np.shape(image))
print("### The image matrix is not a 2D squared matrix")
image_matrix = np.float64(image[0, :, :])
return image_matrix
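# A minimal usage sketch; the folder, file, scan and detector names below are
# illustrative assumptions, not values from the original project.
if __name__ == "__main__":
    example = get_image_matrix(
        root_data="/data/experiment",  # hypothetical folder
        h5file="scan_0001.h5",         # hypothetical h5 file
        scan="1.1",                    # hypothetical scan group
        detector_name="eiger",         # hypothetical detector name
    )
    print(example.dtype, example.shape)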
| 3.265625
| 3
|
useful/text_to_mp3/test04.py
|
xmark2/practice
| 0
|
12783022
|
<gh_stars>0
from gtts import gTTS
import os
import glob
from pathlib import Path
def readfile(file):
with open(file, encoding="utf-8") as f:
data = f.read()
return data
def list_files(path,file_type):
# path = './input'
# files = [f for f in glob.glob(path + "**/*."+file_type, recursive=True)]
files = glob.glob(path + "/**/*."+file_type, recursive=True)
# for subpath in subpaths:
# subfiles = [f for f in glob.glob(subpath + "**/*."+file_type, recursive=True)]
# # files = files +subfiles
# print(subfiles)
return files
# def list_files2(path,file_type):
# for file_type in Path(path).rglob('*'):
# print(file_type)
def textmp3(file):
    print(file)
    try:
        mytext = readfile(file)
        tts = gTTS(text=mytext, lang='hu')
        # Only replace the extension so 'txt' elsewhere in the path is untouched
        filename = file.replace('.txt', '.mp3')
        tts.save(filename)
        print(filename + " saved")
    except Exception as e:
        # Report files that could not be read or converted instead of failing silently
        print("skipped {}: {}".format(file, e))
# os.system(filename+"mpg321.mp3")
def main():
# print(list_files('txt'))
print(list_files('./input','txt'))
# subpaths = os.listdir('./input')
# subpaths = ['./input/'+x for x in subpaths]
# filesNew = []
# for subpath in subpaths:
# filesNew.extend(list_files(subpath,'txt'))
files = list_files('./input','txt')
for file in files:
# print(file)
textmp3(file)
# print(subpaths)
# textmp3('szoveg1.txt')
if __name__ == '__main__':
main()
# print('hello')
| 3.078125
| 3
|
src/lib/interfaces/iqueueprocessor.py
|
itsmylin/wechat-robinhood
| 13
|
12783023
|
<filename>src/lib/interfaces/iqueueprocessor.py
# Define an interface for possible future implementations of queue processors for other messenger platforms
class IQueueProcessor(object):
    def process_message(self, message):
        raise NotImplementedError('process_message must be implemented by a subclass')
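# A minimal sketch of a concrete implementation of the interface above; the
# class and its behaviour are illustrative assumptions, not part of the project.
class EchoQueueProcessor(IQueueProcessor):
    def process_message(self, message):
        # Trivial example: hand the message straight back to the caller
        return message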
| 2.5
| 2
|
tencect_s_class/pg03_excel/excel.py
|
ww35133634/chenxusheng
| 0
|
12783024
|
"""
python 操作 Excel
"""
import xlrd
import xlwt
# # Read the Excel file
# workbook = xlrd.open_workbook(filename)
# # Get all sheet names
# sheet_names = workbook.sheets()
#
# # # Get a worksheet by its index
# sheet0 = workbook.sheets()[0]
# # # or
# sheet1 = workbook.sheet_by_index(1)
# # # Get a worksheet by its name
# sheet3 = workbook.sheet1
# # for i in sheet_names:
# print(sheet3)
def read_xlrd(excelTestFile):
    data = xlrd.open_workbook(excelTestFile)  # Open and read the file at the given path
    table = data.sheet_by_index(0)  # Select the sheet by index; it can also be selected by its name, e.g. table = data.sheet_by_name('用户表')
    for rowNum in range(table.nrows):  # Number of valid rows in the sheet, counting from 0
        rowVale = table.row_values(rowNum)  # row_values returns a list of all values in this row
        for colNum in range(table.ncols):  # Number of valid columns in the sheet, counting from 0
if rowNum > 0 and colNum == 0:
print(int(rowVale[0]))
else:
print(rowVale[colNum])
print("#############################")
if __name__ == '__main__':
excelFile = r"C:\Users\CXS\Desktop\公司文件\机架信息.xlsm"
read_xlrd(excelTestFile=excelFile)
| 3.71875
| 4
|
zoinks/cogs/coolsville.py
|
geoffhouy/zoinks
| 0
|
12783025
|
<reponame>geoffhouy/zoinks
import zoinks.bot
from zoinks.bot import ZOINKS
import discord
from discord.ext import commands
import logging
logger = logging.getLogger(__name__)
COOLSVILLE_GUILD_ID = 0
COOLSVILLE_RULES_CHANNEL_ID = 0
COOLSVILLE_NOTIFICATIONS_CHANNEL_ID = 0
COOLSVILLE_GUEST_ROLE_ID = 0
COOLSVILLE_CONTENT_CREATOR_ROLE_ID = 0
COOLSVILLE_PIN_DISABLED_CHANNEL_IDS = (0, 1)
def message_from_video_embed(video_embed: dict, member: discord.Member):
"""Builds a message from the specified embedded video.
:param video_embed: The embedded video in the original message.
:param member: The member who sent the original message.
:type video_embed: dict
:type member: discord.Member
:return: The new message with content and an embed.
:rtype: tuple
"""
provider = video_embed.get('provider').get('name')
embed = discord.Embed(
title=video_embed.get('title') if provider == 'YouTube' else video_embed.get('description'),
description=video_embed.get('description') if provider == 'YouTube' else '',
url=video_embed.get('url'),
color=0xFF0000 if provider == 'YouTube' else 0x6441A4)
embed.set_author(
name=video_embed.get('author').get('name') if provider == 'YouTube' else video_embed.get('title').split('-')[0],
url=video_embed.get('author').get('url') if provider == 'YouTube' else video_embed.get('url'))
embed.set_image(
url=video_embed.get('thumbnail').get('url'))
content = f'Hey @everyone, {member.mention}'
if provider == 'YouTube':
content = f'{content} uploaded a new YouTube video!'
else:
content = f'{content} is now live on Twitch!'
return content, embed
class Coolsville:
"""Represents a cog for a Discord bot.
This cog provides utilities exclusively for the Coolsville server. The above module constants dictate which
guild, channels, and roles will be used.
"""
def __init__(self, bot: ZOINKS):
"""Constructs a new Coolsville object.
:param bot: The currently running Discord bot.
:type bot: ZOINKS
"""
self.bot = bot
self.pin_threshold = 10
logger.info(f'{self.__class__.__name__} loaded')
async def on_member_join(self, member):
"""Automates the new member experience.
1. Assigns the 'Guest' role to the new member.
2. Sends the new member a message suggesting to read the '#rules' channel.
Note: In Coolsville, the 'Guest' role can only read and write in the '#rules' channel.
:param member: The member that joined the guild.
:type member: discord.Member
:return: None
"""
if member.guild.id != COOLSVILLE_GUILD_ID:
return
if member.bot:
return
await member.add_roles(discord.Object(id=COOLSVILLE_GUEST_ROLE_ID))
rules_channel = self.bot.get_guild(id=COOLSVILLE_GUILD_ID).get_channel(channel_id=COOLSVILLE_RULES_CHANNEL_ID)
await member.send(embed=discord.Embed(
title='👋 Welcome',
description=f'Like, welcome to {member.guild}!\n\nPlease remember to read over '
f'{rules_channel.mention} to familiarize yourself with what\'s allowed in '
f'{member.guild}.\n\n If you have any comments, questions, or concerns, '
'please contact an Administrator or a Moderator.\n\nEnjoy your stay!',
color=zoinks.bot.color))
logger.info(f'{member} joined {member.guild}')
async def on_member_update(self, before, after):
"""Notifies guild members that a 'Content Creator' just started streaming on Twitch.
:param before: The member before being updated.
:param after: The member after being updated.
:type before: discord.Member
:type after: discord.Member
:return: None
"""
if after.guild.id != COOLSVILLE_GUILD_ID:
return
if COOLSVILLE_CONTENT_CREATOR_ROLE_ID not in [role.id for role in after.roles]:
return
if (isinstance(after.activity, discord.Streaming) and
after.activity.twitch_name is not None and
not isinstance(before.activity, discord.Streaming)):
notifications_channel = self.bot.get_guild(
id=COOLSVILLE_GUILD_ID).get_channel(
id=COOLSVILLE_NOTIFICATIONS_CHANNEL_ID)
await notifications_channel.send(
content=f'Hey @everyone, {after.mention} is now live on Twitch!',
embed=discord.Embed(
title=f'🎥 Twitch',
description=f'@everyone, {after.activity.name} is playing `{after.activity.details}`!',
url=f'https://www.twitch.tv/{after.activity.twitch_name}',
color=0x6441A4))
async def on_message(self, message):
"""Notifies guild members that a 'Content Creator' just uploaded a YouTube video or started streaming on Twitch.
When a direct message containing a link to the YouTube video, the Twitch stream, or any combination of either
is received from a 'Content Creator', the bot will highlight all guild members in the specified
'#notifications' channel.
:param message: The message being processed.
:type message: discord.Message
:return: None
"""
if not isinstance(message.channel, discord.DMChannel):
return
guild = self.bot.get_guild(id=COOLSVILLE_GUILD_ID)
if message.author not in guild.members:
return
member = guild.get_member(user_id=message.author.id)
if COOLSVILLE_CONTENT_CREATOR_ROLE_ID not in [role.id for role in member.roles]:
return
if not message.embeds:
return
notifications_channel = guild.get_channel(channel_id=COOLSVILLE_NOTIFICATIONS_CHANNEL_ID)
for video_embed in message.embeds:
video_embed = video_embed.to_dict()
provider = video_embed.get('provider')
if provider is None or (provider.get('name') != 'Twitch' and provider.get('name') != 'YouTube'):
continue
content, embed = message_from_video_embed(video_embed, member)
await notifications_channel.send(content=content, embed=embed)
async def on_raw_reaction_add(self, payload):
"""Pins a message after receiving (self.pin_threshold) pins of the same emoji.
:param payload: The details of the reaction.
:type payload: discord.RawReactionActionEvent
:return: None
"""
if payload.guild_id != COOLSVILLE_GUILD_ID:
return
if payload.channel_id in COOLSVILLE_PIN_DISABLED_CHANNEL_IDS:
return
channel = self.bot.get_channel(id=payload.channel_id)
if len(await channel.pins()) == 50:
return
message = await channel.get_message(id=payload.message_id)
if message.pinned:
return
reaction = next((reaction for reaction in message.reactions if reaction.count >= self.pin_threshold), None)
reactor = self.bot.get_guild(COOLSVILLE_GUILD_ID).get_member(user_id=payload.user_id)
if reaction is not None:
await message.pin()
await channel.send(embed=discord.Embed(
title='📌 Pin',
description=f'Congratulations {message.author.mention}, '
f'your message has been pinned after receiving a {reaction.emoji} from {reactor.mention}!',
color=zoinks.bot.color
))
@commands.command(hidden=True)
@commands.has_role(name='Guest')
@commands.check(lambda ctx:
ctx.guild and
ctx.guild.id == COOLSVILLE_GUILD_ID and
ctx.channel.id == COOLSVILLE_RULES_CHANNEL_ID)
async def verify(self, ctx):
"""Grants basic access to guests."""
await ctx.author.remove_roles(discord.Object(id=COOLSVILLE_GUEST_ROLE_ID))
await ctx.author.send(embed=discord.Embed(
title='✅ Verified', description=f'You\'ve been verified in {ctx.guild}!', color=zoinks.bot.color))
await ctx.message.delete()
logger.info(f'{ctx.author} verified in {ctx.guild}')
def setup(bot):
bot.add_cog(Coolsville(bot))
| 2.828125
| 3
|
source/slask/denavit_hartenberg_symbolic.py
|
johnsjob/master-thesis
| 0
|
12783026
|
from __future__ import division
#--------------------------#
import sys
#--------------------------#
import numpy as n  # used by custom_round below
import sympy as s
from sympy import cos, sin, pi
from sympy.matrices import Matrix as mat
#=====================================================#
from helperfunctions import matmul_series
sys.path.append("../int/misc-tools/")
import parsingtools as parse
#--------------------------------------------------------------------#
def diff_mat(M,param):
diff = lambda y: s.diff(y,param)
sh = M.shape
return mat(map(diff, M)).reshape(*sh)
#--------------------------------------------------------------------#
def matmul_series( *args ):
return reduce(s.Matrix.multiply, args)
#--------------------------------------------------------------------#
def _spin_tensor_diff_mat_ang(R,ang):
return matmul_series(diff_mat(mat_rot_z(ang),ang), mat_rot_z(ang).T)
#--------------------------------------------------------------------#
def spin_tensor(R,ang):
dang_dt = s.sympify('d'+str(ang)+'/dt')
return _spin_tensor_diff_mat_ang(R,ang) * dang_dt
#--------------------------------------------------------------------#
def mat_trans_x( tx ):
return mat([[1, 0, 0, tx],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
#--------------------------------------------------------------------#
def mat_trans_z( tz ):
return mat([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, tz],
[0, 0, 0, 1]])
#--------------------------------------------------------------------#
def mat_rot_x( ang ):
    # NOTE: ang is passed to cos/sin as-is; no degree-to-radian conversion is performed here
c = cos(ang)
s = sin(ang)
return mat([[1, 0, 0, 0],
[0, c, -s, 0],
[0, s, c, 0],
[0, 0, 0, 1]])
#--------------------------------------------------------------------#
def mat_rot_z( ang ):
    # NOTE: ang is passed to cos/sin as-is; no degree-to-radian conversion is performed here
c = cos(ang)
s = sin(ang)
return mat([[c, -s, 0, 0],
[s, c, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
#--------------------------------------------------------------------#
def transform_to_next(A, alpha, D, theta):
Rz_J = mat_rot_z(theta)
Tz_J = mat_trans_z(D)
Tx_I = mat_trans_x(A)
Rx_I = mat_rot_x(alpha)
return matmul_series(Rz_J, Tz_J, Tx_I, Rx_I)
#--------------------------------------------------------------------#
def DH_params( *params ):
nbr_of_sections = int(len(params) / 4)
if len(params) == 1 and type(params[0]) in [list, tuple]:
raise ArithmeticError("Function does not use lists or tuples, please unpack using *.")
elif not (len(params) % 4 == 0):
raise ArithmeticError("Invalid number of Denavit-Hartenberg parameters.")
matrices = []
for k in xrange(0, nbr_of_sections):
A, alpha, D, theta = params[4*k:4*k+4]
matrices.append( transform_to_next(A, alpha, D, theta) )
return matmul_series(*matrices)
#--------------------------------------------------------------------#
def calc_tool_IRB120(a=None,b=None,c=None,d=None,e=None,f=None):
if a is None:
a = s.sympify('a')
if b is None:
b = s.sympify('b')
if c is None:
c = s.sympify('c')
if d is None:
d = s.sympify('d')
if e is None:
e = s.sympify('e')
if f is None:
f = s.sympify('f')
flange = DH_params(
0, 90,0.290,180+a,
0.270,0,0,90+b,
-0.070, 90, 0, 180+c,
0, 90, 0.302, 180+d,
0, 90, 0, 180+e,
0, 0, 0.072, 0+f
)
return flange
#--------------------------------------------------------------------#
def custom_round(v, prec = 1e-4):
coef = 1 / prec
return n.round(v * coef) / coef
#--------------------------------------------------------------------#
if __name__ == '__main__':
a = s.sympify('a')
| 2.109375
| 2
|
learning_object/collection/manager/modify_one.py
|
dsvalenciah/ROAp
| 4
|
12783027
|
<gh_stars>1-10
"""
Contains utility functions for modifying learning objects.
"""
from datetime import datetime
from manager.exceptions.learning_object import (
LearningObjectNotFoundError, LearningObjectSchemaError,
LearningObjectMetadataSchemaError
)
import re
from marshmallowjson.marshmallowjson import Definition
from manager.exceptions.user import UserPermissionError
from manager.schemas.learning_object import LearningObject
from manager.utils.i18n_error import ErrorTranslator
def check_user_permission(user, learning_object):
learning_object_creator_id = learning_object.get('creator_id')
user_id = user.get('_id')
user_role = user.get('role')
_ = user.get('language')
if user_role != 'administrator':
if user_id != learning_object_creator_id:
raise UserPermissionError(
_('User is not own of this learning object.')
)
def get_lom_schema(db_client, lom_schema_id):
return db_client.lom_schema.find_one({'_id': lom_schema_id}).get('lom')
def modify_one(db_client, old_learning_object_id, new_learning_object, user):
"""Modify learning object."""
old_learning_object = db_client.learning_objects.find_one({
'_id': old_learning_object_id
})
_ = user.get('language')
if not old_learning_object:
raise LearningObjectNotFoundError(_('Learning Object _id not found.'))
check_user_permission(user, old_learning_object)
old_lom_schema = get_lom_schema(
db_client,
old_learning_object.get('lom_schema_id')
)
LearningObjectMetadata = Definition(old_lom_schema).top()
new_learning_object, errors = LearningObject(
exclude=['_id', 'creator_id', 'lom_schema_id']
).dump(new_learning_object)
if errors:
raise LearningObjectSchemaError(errors)
new_learning_object_metadata = new_learning_object.get('metadata')
errors = LearningObjectMetadata.validate(
new_learning_object_metadata
)
if errors:
print(LearningObjectMetadata.fields['annotation'].schema.fields)
errors_translator = ErrorTranslator(_)
raise LearningObjectMetadataSchemaError(errors_translator.i18n_error(errors))
new_learning_object.update({
'modified': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
})
db_client.learning_objects.update_one(
{'_id': old_learning_object.get('_id')},
{'$set': new_learning_object}
)
| 2.109375
| 2
|
packs/reamaze/actions/article_create.py
|
userlocalhost2000/st2contrib
| 164
|
12783028
|
<gh_stars>100-1000
from lib.actions import BaseAction
class ArticleCreate(BaseAction):
def run(self, title, body, topic=None, status=0):
if topic:
topic = self._convert_slug(topic)
path = '/topics/%s/articles' % topic
else:
path = '/articles'
payload = self._create_article(title=title, body=body, status=status)
response = self._api_post(path, json=payload)
return response
def _create_article(self, title, body, status=0):
payload = {
'article': {
'title': title,
'body': body,
'status': int(status)
}
}
return payload
| 2.515625
| 3
|
day-08/part-1/silvestre.py
|
badouralix/adventofcode-2018
| 31
|
12783029
|
from tool.runners.python import SubmissionPy
class SilvestreSubmission(SubmissionPy):
def run(self, s):
arr = list(map(int, s.splitlines()[0].split()))
def sum_meta(arr, i_node):
n_child = arr[i_node]
n_meta = arr[i_node + 1]
i_next = i_node + 2
ret = 0
for _ in range(n_child):
i_next, tmp = sum_meta(arr, i_next)
ret += tmp
return i_next + n_meta, ret + sum(arr[i_next:i_next+n_meta])
return sum_meta(arr, 0)[1]
| 2.453125
| 2
|
ABC/181/C.py
|
yu9824/AtCoder
| 0
|
12783030
|
# list(map(int, input().split()))
# int(input())
import sys
sys.setrecursionlimit(10 ** 9)
from itertools import combinations
def main(*args):
N, XY = args
def difference(p1, p2):
return p2[0] - p1[0], p2[1] - p1[1]
for c in combinations(XY, r = 3):
p1, p2, p3 = c # p = (x, y)
diff12 = difference(p1, p2)
diff13 = difference(p1, p3)
if diff12[1] * diff13[0] == diff13[1] * diff12[0]:
print('Yes')
break
else:
print('No')
if __name__ == '__main__':
N = int(input())
args = [N]
args.append({tuple(map(int, input().split())) for n in range(N)})
main(*args)
| 2.671875
| 3
|
Preprocessor.py
|
beckylum0216/MurdochNet_Yale_tf
| 0
|
12783031
|
<reponame>beckylum0216/MurdochNet_Yale_tf
import cv2
import numpy as np
class ProcessImage(object):
def __init__(self, imgWidth, imgHeight, rawImage):
self.a = 0
self.b = 0
self.c = self.a + imgWidth
self.d = self.b + imgHeight
self.width = imgWidth
self.height = imgHeight
self.image = rawImage
def DetectFace(self):
face_cascade = cv2.CascadeClassifier("./haar_cascade/haarcascade_frontalface_default.xml")
self.faces = face_cascade.detectMultiScale(self.image, 1.05, 3)
for x, y, w, h in self.faces:
cv2.rectangle(self.image, (x, y), (x + w, y + h), (0, 255, 255), 2)
print("x: ", x, " y: ", y, " w: ", w, " h: ",h)
#cv2.imshow("detected face", self.image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
return self.faces
def CropImage(self):
self.croppedImg = np.empty([self.faces[0][2], self.faces[0][3]], np.uint8)
self.width = self.faces[0][2]
self.height = self.faces[0][3]
for ii in range(self.faces[0][2]):
for jj in range(self.faces[0][3]):
self.croppedImg[ii][jj] = self.image[ii + self.faces[0][1]][jj + self.faces[0][0]]
#print("cropped image: ", croppedImg[ii][jj])
#cv2.imshow("cropped image", self.croppedImg)
return self.croppedImg
def ScaleImage(self, targetImage, targetWidth, targetHeight):
dimension = (targetWidth, targetHeight)
resizedImg = cv2.resize(targetImage, dimension, interpolation=cv2.INTER_AREA)
# cv2.imshow("resized img", resizedImg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
return resizedImg
def ApplyGaborFilter(self, targetImage):
gabor_filter = cv2.getGaborKernel((self.width, self.height), 2.0, np.pi/8, 20.0, 0.5, 0, ktype=cv2.CV_32F)
self.gaborImg = cv2.filter2D(targetImage, cv2.CV_8UC3, gabor_filter)
#cv2.imshow("cropped gabor: ", self.gaborImg)
return self.gaborImg
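# A minimal usage sketch of the pipeline above; the image file and target size
# are illustrative assumptions, not part of the original project.
if __name__ == "__main__":
    raw = cv2.imread("face_sample.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical image file
    processor = ProcessImage(raw.shape[1], raw.shape[0], raw)
    if len(processor.DetectFace()) > 0:
        cropped = processor.CropImage()
        resized = processor.ScaleImage(cropped, 64, 64)
        filtered = processor.ApplyGaborFilter(resized)
        print(filtered.shape)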
| 2.90625
| 3
|
setup.py
|
VJftw/invoke-tools
| 2
|
12783032
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from os import path
from subprocess import check_output
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
version = check_output('git describe --abbrev=0'.split(' ')).decode(
'utf-8').strip()
setup(
name='invoke-tools',
version=version,
description='A set of tools to use the Invoke task runner easier in a work-flow.',
url='https://github.com/VJftw/invoke-tools',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
zip_safe=False,
packages=find_packages(),
install_requires=['docker', 'invoke', 'psutil', 'py-cpuinfo', 'gitpython', 'requests'],
extras_require={
'test': ['nose', 'coverage', 'rednose']
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='invoke tools'
)
| 1.695313
| 2
|
transformer_anatomy/downstream_multi_head_exp.py
|
heartcored98/Trasnformer_Anatomy
| 16
|
12783033
|
import sys
import time
import pandas as pd
from os import listdir
from os.path import isfile, join
import json
from .tasks import *
PATH_BERT = '/home/users/whwodud98/pytorch-pretrained-BERT'
sys.path.insert(0, PATH_BERT)
PATH_SENTEVAL = '/home/users/whwodud98/bert/SentEval'
PATH_TO_DATA = '/home/users/whwodud98/bert/SentEval/data/'
PATH_TO_CACHE = '/home/users/whwodud98/bert/cache/'
sys.path.insert(0, PATH_SENTEVAL)
import senteval
def get_results(dir_path='./mlp_results'):
columns = ['data_path', 'cache_path', 'result_path', 'batch_size', 'cbatch_size', 'nhid', 'optim', 'kfold',
'tenacity', 'usepytorch', 'epoch_size', 'device']
filenames = [f for f in listdir(dir_path) if isfile(join(dir_path, f)) if '.json' in f]
list_result = []
for filename in filenames:
with open(join(dir_path, filename), 'r') as infile:
# print(filename)
results = json.load(infile)
for key, result in results.items():
list_result.append(result)
df = pd.DataFrame(list_result)[['acc', 'devacc', 'devpearson', 'pearson', 'head', 'layer', 'task', 'model_name', 'location']]
for column in columns:
try:
df = df.drop(columns=column)
        except KeyError:
            # Column not present in this result set; skip it
            pass
return df
def get_top_heads(model_name, task, metric='devacc', dir_path='./ds_linear_head_wise_results'):
df = get_results(dir_path=dir_path)
df = df.loc[df['model_name'] == model_name]
print(df)
df = df.loc[df['head'] >= 0]
df = df.loc[df['task'] == task] # Choose task
df = df.sort_values(by=[metric], ascending=False)
list_head = []
for index, row in df.iterrows():
list_head.append((row['layer'], row['head']))
return list_head
def save_exp_result(exp_result, task):
del exp_result['model']
exp_key = '{}_{}'.format(exp_result['num_head'], exp_result['location'])
result_name = "{}_{}.json".format(exp_result['model_name'], task)
result_dir = exp_result['result_path']
onlyfiles = [f for f in listdir(result_dir) if isfile(join(result_dir, f))]
if result_name in onlyfiles:
with open(join(result_dir, result_name), 'r') as f:
results = json.load(f)
with open(join(result_dir, result_name), 'w') as f:
results[exp_key] = exp_result
json.dump(results, f)
print("Append exp result at {} with key {}".format(result_name, exp_key))
else:
results = {}
with open(join(result_dir, result_name), 'w') as f:
results[exp_key] = exp_result
json.dump(results, f)
print("Create new exp result at {} with key {}".format(result_name, exp_key))
def prepare(params, _):
task = params['current_task']
model = params['model']
location = params['location']
model.prepare(task, location)
def batcher(params, batch):
model = params['model']
location = params['location']
head_size = params['head_size']
sentences = [' '.join(s) for s in batch]
embedding = model.encode(sentences, params['heads'], head_size, location)
return embedding
def experiment(model, task, args):
ts = time.time()
params = vars(args)
params['model'] = model
params['classifier'] = {'nhid': args.nhid,
'optim': args.optim,
'tenacity': args.tenacity,
'epoch_size': args.epoch_size,
'dropout': args.dropout,
'batch_size': args.cbatch_size}
params['heads'] = get_top_heads(args.model_name, task)[:args.num_head] # select first top n-heads
se = senteval.engine.SE(params, batcher, prepare)
result = se.eval([task])
if task in ['SICKRelatedness']:
params['devpearson'] = result[task]['devpearson']
params['pearson'] = result[task]['pearson']
elif task in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark']:
params['pearson'] = result[task]['all']['pearson']['mean']
else:
params['devacc'] = result[task]['devacc']
params['acc'] = result[task]['acc']
model.save_cache(task, args.location)
te = time.time()
print("result: {}, took: {:3.1f} sec".format(result, te - ts))
return params
| 2.0625
| 2
|
blog/urls.py
|
shinnlove/datacheck
| 1
|
12783034
|
<filename>blog/urls.py<gh_stars>1-10
from django.urls import path,include
import blog.views
from django.conf.urls import url
urlpatterns=[
path('hello_world',blog.views.hello_world),
path('content',blog.views.article_content),
url(r'^index/', blog.views.index),
url(r'^index2/', blog.views.index2),
#url(r'all',blog.all,name='all'),
]
| 1.898438
| 2
|
apps/core/models/photos.py
|
CosmosTUe/Cosmos
| 1
|
12783035
|
<gh_stars>1-10
from django.db import models
from django.urls import reverse
class PhotoAlbum(models.Model):
title = models.CharField(max_length=255)
date = models.DateField()
album_cover = models.ImageField(upload_to="photos")
def get_absolute_url(self):
return reverse("cosmos_core:photo_album-list")
def __str__(self):
return "PhotoAlbum: {" + self.title + "}"
class PhotoObject(models.Model):
photo = models.ImageField(upload_to="photos")
album = models.ForeignKey(PhotoAlbum, on_delete=models.CASCADE, related_name="has_photos")
def __str__(self):
return "PhotoObject: {" + str(self.photo) + "}"
| 2.21875
| 2
|
test_Reader.py
|
mgood13/bme590hrm
| 0
|
12783036
|
def test_csvfinder():
"""Tests that the finder finds all the correct files
:returns test_files: List of .csv files
"""
from Reader import csvfinder
test_files = csvfinder()
assert test_files.count('poorform') == 0
assert test_files.count('wrongend') == 0
assert test_files.count('false.csv') == 1
assert test_files.count('mess.csv') == 1
assert test_files.count('poorform.csv') == 1
assert test_files.count('tab.csv') == 1
assert test_files.count('test_data1.csv') == 1
assert test_files.count('words.csv') == 1
assert test_files.count('test1.csv') == 1
assert test_files.count('test2.csv') == 1
return test_files
def test_csvchecker():
"""Tests that the check function discards and keeps correct files
:returns check_files: Dictionary of valid csv files
"""
from Reader import csvchecker
test_files = test_csvfinder()
check_files = csvchecker(test_files)
assert 'false.csv' not in check_files
assert 'mess.csv' not in check_files
assert 'poorform.csv' not in check_files
assert 'tab.csv' not in check_files
assert 'false.csv' not in check_files
assert 'test2.csv' not in check_files
assert 'test1.csv' in check_files
assert 'test_data1.csv' in check_files
assert 'words.csv' in check_files
return check_files
def test_floatcheck():
"""Tests to ensure that certain files are removed from the csv file list
"""
from Reader import floatcheck
check_files = test_csvchecker()
float_files = floatcheck(check_files)
assert 'words.csv' not in float_files
assert 'test1.csv' not in float_files
assert 'test_data1.csv' in float_files
| 3.625
| 4
|
scripts/validate_schemas.py
|
slebras/sample_service_validator_config
| 0
|
12783037
|
import sys
import yaml
from jsonschema import validate
if len(sys.argv) != 3:
    raise RuntimeError('Please provide a validation file and a JSON schema file as arguments to validate_schemas.py')
merged_file = sys.argv[1]
json_schema_file = sys.argv[2]
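# Example invocation (the file names here are hypothetical):
#   python scripts/validate_schemas.py validation_files/merged.yml metadata_schema.yml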
with open(json_schema_file) as f:
_META_VAL_JSONSCHEMA = yaml.safe_load(f)
# _META_VAL_JSONSCHEMA = {
# 'type': 'object',
# 'definitions': {
# 'validator_set': {
# 'type': 'object',
# # validate values only
# 'additionalProperties': {
# 'type': 'object',
# 'properties': {
# 'key_metadata': {
# 'type': 'object',
# 'additionalProperties': {
# 'type': ['number', 'boolean', 'string', 'null']
# }
# },
# 'validators': {
# 'type': 'array',
# 'items': {
# 'type': 'object',
# 'properties': {
# 'module': {'type': 'string'},
# 'callable_builder': {'type': 'string'},
# 'parameters': {'type': 'object'}
# },
# 'additionalProperties': False,
# 'required': ['module', 'callable_builder']
# }
# }
# },
# 'required': ['validators']
# }
# },
# 'additionalProperties': False,
# },
# 'properties': {
# 'validators': {'$ref': '#/definitions/validator_set'},
# 'prefix_validators': {'$ref': '#/definitions/validator_set'},
# },
# 'additionalProperties': False
# }
files = [
"validation_files/ENIGMA-noops.yml",
"validation_files/SESAR-noops.yml",
"validation_files/SESAR.yml",
"validation_files/ENIGMA.yml",
"validation_files/miscellaneous.yml",
merged_file,
"metadata_validation.yml"
]
for file in files:
with open(file) as f:
cfg = yaml.safe_load(f)
validate(instance=cfg, schema=_META_VAL_JSONSCHEMA)
| 2.328125
| 2
|
setup.py
|
tudou0002/NEAT
| 1
|
12783038
|
<filename>setup.py
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt', 'r') as f:
install_requires = list()
for line in f:
re = line.strip()
if re:
install_requires.append(re)
setuptools.setup(
name="nameextractor",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
description="female name extractor",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
),
install_requires=install_requires
)
| 1.953125
| 2
|
util/util.py
|
LSnyd/MedMeshCNN
| 15
|
12783039
|
from __future__ import print_function
import torch
import numpy as np
import os
# from torch_scatter import scatter_add
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
MESH_EXTENSIONS = [
'.obj',
]
def is_mesh_file(filename):
return any(filename.endswith(extension) for extension in MESH_EXTENSIONS)
def pad(input_arr, target_length, val=0, dim=1):
shp = input_arr.shape
npad = [(0, 0) for _ in range(len(shp))]
npad[dim] = (0, target_length - shp[dim])
return np.pad(input_arr, pad_width=npad, mode='constant', constant_values=val)
def seg_accuracy(predicted, ssegs, meshes):
correct = 0
ssegs = ssegs.squeeze(-1)
correct_mat = ssegs.gather(2, predicted.cpu().unsqueeze(dim=2))
for mesh_id, mesh in enumerate(meshes):
correct_vec = correct_mat[mesh_id, :mesh.edges_count, 0]
edge_areas = torch.from_numpy(mesh.get_edge_areas())
correct += (correct_vec.float() * edge_areas).sum()
return correct
def intersection_over_union(preds, target, num_classes):
preds, target = torch.nn.functional.one_hot(preds, num_classes), torch.nn.functional.one_hot(target, num_classes)
iou = torch.zeros(num_classes, dtype=torch.float32)
for idx, pred in enumerate(preds):
i = (pred & target[idx]).sum(dim=0)
u = (pred | target[idx]).sum(dim=0)
iou = iou.add(i.cpu().to(torch.float) / u.cpu().to(torch.float))
return iou
def mean_iou_calc(pred, target, num_classes):
#Removal of padded labels marked with -1
slimpred = []
slimtarget = []
for batch in range(pred.shape[0]):
if (target[batch] == -1).any():
slimLabels = target[batch][target[batch]!=-1]
slimtarget.append(slimLabels)
slimpred.append(pred[batch][:slimLabels.size()[0]])
pred = torch.stack(slimpred,0)
target = torch.stack(slimtarget, 0)
iou = intersection_over_union(pred, target, num_classes)
mean_iou = iou.mean(dim=-1)
return mean_iou, iou
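# A minimal sanity-check sketch for mean_iou_calc (illustrative tensors only,
# not data from the original project):
#
#     pred = torch.tensor([[0, 1, 1, 0]])
#     target = torch.tensor([[0, 1, 0, -1]])  # -1 marks a padded label that is removed
#     mean_iou, iou = mean_iou_calc(pred, target, num_classes=2)
#     # -> mean_iou == 0.5, iou == tensor([0.5, 0.5])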
def print_network(net):
"""Print the total number of parameters in the network
Parameters:
network
"""
print('---------- Network initialized -------------')
num_params = 0
for param in net.parameters():
num_params += param.numel()
print('[Network] Total number of parameters : %.3f M' % (num_params / 1e6))
print('-----------------------------------------------')
def get_heatmap_color(value, minimum=0, maximum=1):
minimum, maximum = float(minimum), float(maximum)
ratio = 2 * (value - minimum) / (maximum - minimum)
b = int(max(0, 255 * (1 - ratio)))
r = int(max(0, 255 * (ratio - 1)))
g = 255 - b - r
return r, g, b
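# For example: get_heatmap_color(0.0) -> (0, 0, 255) (blue), get_heatmap_color(0.5) -> (0, 255, 0) (green),
# get_heatmap_color(1.0) -> (255, 0, 0) (red); value is assumed to lie within [minimum, maximum]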
def normalize_np_array(np_array):
min_value = np.min(np_array)
max_value = np.max(np_array)
return (np_array - min_value) / (max_value - min_value)
def calculate_entropy(np_array):
entropy = 0
np_array /= np.sum(np_array)
for a in np_array:
if a != 0:
entropy -= a * np.log(a)
entropy /= np.log(np_array.shape[0])
return entropy
def pad_with(vector, pad_width, iaxis, kwargs):
pad_value = kwargs.get('padder', 10)
vector[:pad_width[0]] = pad_value
vector[-pad_width[1]:] = pad_value
def myindexrowselect(groups, mask_index, device):
sparseIndices = groups._indices()
newIndices = []
for i, value in enumerate(mask_index):
#Get index from relevant indices
index = (sparseIndices[0] == value).nonzero()
#Get rows by index
sparseRow = [sparseIndices[:, value] for value in index]
sparseRow = torch.cat(sparseRow,1)[1]
singleRowIndices = torch.squeeze(torch.full((1,len(sparseRow)),i, dtype=torch.long),0).to(sparseRow.device)
indices = torch.stack((singleRowIndices,sparseRow))
newIndices.append(indices)
allNewIndices = torch.cat(newIndices,1)
#Create new tensor
groups = torch.sparse_coo_tensor(indices=allNewIndices,
values=torch.ones(allNewIndices.shape[1], dtype=torch.float),
size=(len(mask_index), groups.shape[1]))
return groups
| 2.203125
| 2
|
kitsune/questions/urls_api.py
|
AndrewDVXI/kitsune
| 929
|
12783040
|
from rest_framework import routers
from kitsune.questions.api import QuestionViewSet, AnswerViewSet
router = routers.SimpleRouter()
router.register(r"question", QuestionViewSet)
router.register(r"answer", AnswerViewSet)
urlpatterns = router.urls
| 1.632813
| 2
|
tenable.py
|
rohit-k-das/vulnerability-management-reporter
| 0
|
12783041
|
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import datetime
import logging
import time
import concurrent.futures
from dataclasses import dataclass, field
from typing import List, Tuple, Dict
import re
import netaddr
import configparser
import os
logger = logging.getLogger(__name__)
MAX_THREADS = 14  # Maximum number of worker threads used for multi-threading
# Read credentials for tenable
Config = configparser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),'settings.ini'))
tenable_client_id = Config.get('Settings', 'Tenable_Client_Id')
tenable_secret_id = Config.get('Settings', 'Tenable_Secret_Id')
tenable_gcp_tag = Config.get('Settings', 'Tenable_GCP_tag')
tenable_workstations_tag = Config.get('Settings', 'Tenable_Workstation_tag')
tenable_api = "https://cloud.tenable.com"
# Generate a session that retries connection errors up to 3 times with exponential backoff (factor 0.5 s)
def session_generator() -> requests.sessions.Session:
session = requests.Session()
retry = Retry(connect=3, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
@dataclass # A class to contain all the necessary fields to create report
class TenableVulnerability:
plugin_name: str
resolution: str
additional_links: List[str]
ip: str
dns: str
os: str
cves: List[str]
plugin_family: str
exploit_available: bool
cvss_score: float
temporal_score: float
vpr_score: float
zone: str = field(init=False, default='') # Critical/Non-critical Asset
nessus_criticiality: str = field(init=False, default='')
vulnerability_type: str = field(init=False, default='')
actual_criticality: str = field(init=False, default='')
host_risk: str = field(init=False, default='')
platform: str = field(init=False, default='')
def __post_init__(self):
self.get_type()
self.nessus_criticiality_insight()
self.platform_based_on_os()
self.modify_solution()
def get_type(self):
self.vulnerability_type = 'config'
if self.plugin_family.lower() == 'Windows : Microsoft Bulletins'.lower():
self.vulnerability_type = 'package'
elif 'update ' in self.resolution.lower() or 'Update Set' in self.plugin_name or 'upgrade ' in self.resolution.lower() or 'MS KB' in self.plugin_name:
self.vulnerability_type = 'package'
elif 'Apply the client registry key workaround and the server registry key workaround suggested by Microsoft in the advisory.' == self.resolution:
self.vulnerability_type = 'config'
elif re.search('.* KB\d{3,} .*', self.resolution, flags=re.I) or 'patch' in self.resolution:
self.vulnerability_type = 'package'
def platform_based_on_os(self):
if 'windows' in self.os.lower():
self.platform = 'Windows'
elif 'mac' in self.os.lower():
self.platform = 'Mac'
else:
self.platform = 'Linux'
def modify_solution(self):
if self.vulnerability_type == 'package' and self.platform == 'Windows':
if 'Microsoft has released the following security updates to address this issue:' in self.resolution or 'Apply the following security updates ' in self.resolution or 'Apply Service Stack ' in self.resolution or 'Microsoft has released KB' in self.resolution or 'Install Microsoft KB' in self.resolution:
get_security_kb = re.findall(r"KB\d{4,}", self.resolution, flags=re.IGNORECASE)
if not get_security_kb:
get_security_kb = re.findall(r"\d{4,}", self.resolution, flags=re.IGNORECASE)
get_security_kb = ["KB%s" % security_kb for security_kb in get_security_kb]
if get_security_kb:
self.resolution = ','.join(get_security_kb).replace("'", '').replace('"', '').replace(' ', '')
elif 'Apply '.lower() in self.resolution.lower():
#
get_security_kb = re.findall(r".*Security .* (KB\d{4,}) or Cumulative.*", self.resolution, flags=re.IGNORECASE)
if not get_security_kb:
# Apply security update KB4022715 as well as refer to the KB article for additional information
get_security_kb = re.findall(r".*Security .* (KB\d{4,})", self.resolution, flags=re.IGNORECASE)
if not get_security_kb:
# Apply Cumulative Update KB4056890 or KB4057142 as well as || Apply Cumulative Update KB4493509 *
get_security_kb = re.findall(r".*Cumulative .* (KB\d{4,})", self.resolution, flags=re.IGNORECASE)
if get_security_kb:
self.resolution = ','.join(get_security_kb).replace("'", '').replace('"', '').replace(' ', '')
elif 'MS' in self.plugin_name and 'KB' not in self.resolution:
get_security_bulletin_number = re.findall(r"^MS\d{2,}-\d{3,}", self.plugin_name, flags=re.IGNORECASE)
if len(get_security_bulletin_number) == 1:
year = get_security_bulletin_number[0].split('-')[0].replace('MS', '')
link = "https://docs.microsoft.com/en-us/security-updates/SecurityBulletins/20%s/%s" % (year, get_security_bulletin_number[0].lower())
self.resolution = link
elif 'ADV' in self.plugin_name and ' KB' not in self.resolution:
get_ADV = re.findall(r"^ADV\d{4,}", self.plugin_name, flags=re.IGNORECASE)
if len(get_ADV) == 1:
ADV = get_ADV[0].split(':')[0]
link = "https://portal.msrc.microsoft.com/en-US/security-guidance/advisory/%s" % ADV.upper()
self.resolution = link
elif ('Microsoft has released a set of ' in self.resolution or 'Apply the appropriate patches according to the' in self.resolution or 'Microsoft has released security updates for ' in self.resolution or 'Microsoft has released a security update to ' in self.resolution):
self.resolution = self.additional_links[0]
elif self.vulnerability_type == 'package' and self.platform == 'Linux':
"""
Modify Linux Solution that you want in the report as per your Linux box
"""
if 'Update ' in self.resolution:
self.resolution = 'yum update -y '
for cve in self.cves:
self.resolution = '%s --cve %s ' % (self.resolution, cve)
def nessus_criticiality_insight(self):
if self.exploit_available:
if self.vpr_score > 0.0:
if self.vpr_score >= 7:
self.nessus_criticiality = 'High'
elif self.vpr_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
elif self.temporal_score > 0.0:
if self.temporal_score >= 7:
self.nessus_criticiality = 'High'
elif self.temporal_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
elif self.cvss_score > 0.0:
if self.cvss_score >= 7:
self.nessus_criticiality = 'High'
elif self.cvss_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
else:
if self.vpr_score > 0.0:
if self.vpr_score >= 7:
self.nessus_criticiality = 'Medium'
elif self.vpr_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
elif self.temporal_score > 0.0:
if self.temporal_score >= 7:
self.nessus_criticiality = 'Medium'
elif self.temporal_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
elif self.cvss_score > 0.0:
if self.cvss_score >= 7:
self.nessus_criticiality = 'Medium'
elif self.cvss_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
def get_host_risk(self):
"""
Use a combination of host dns (self.dns) and the zone that host is in to define host risk
For example:
haproxy_box = re.search('.*haproxy.*', self.dns, flags=re.IGNORECASE)
web_box = re.search('.*web.*', self.dns, flags=re.IGNORECASE)
app_box = re.search('.*app.*', self.dns, flags=re.IGNORECASE)
proxy_box = re.search('^proxy.*', self.dns, flags=re.IGNORECASE)
if self.zone == 'DMZ':
if self.platform == 'Linux':
if web_box or haproxy_box:
self.host_risk = 'High'
elif app_box:
self.host_risk = 'Medium'
elif self.zone == 'Secure':
if self.platform == 'Linux':
if app_box:
self.host_risk = 'High'
elif proxy_box:
self.host_risk = 'Medium'
"""
def actual_criticality_insight(self):
if self.host_risk == 'High':
if self.nessus_criticiality == 'Low':
self.actual_criticality = 'Medium'
elif self.nessus_criticiality == 'Medium':
self.actual_criticality = 'High'
elif self.nessus_criticiality == 'High':
self.actual_criticality = 'High'
elif self.host_risk == 'Medium':
if self.nessus_criticiality == 'Low':
self.actual_criticality = 'Medium'
elif self.nessus_criticiality == 'Medium':
self.actual_criticality = 'Medium'
elif self.nessus_criticiality == 'High':
self.actual_criticality = 'High'
elif self.host_risk == 'Low':
if self.nessus_criticiality == 'Low':
self.actual_criticality = 'Low'
elif self.nessus_criticiality == 'Medium':
self.actual_criticality = 'Low'
elif self.nessus_criticiality == 'High':
self.actual_criticality = 'Medium'
else:
self.actual_criticality = 'Unknown'
# Initiate download of all vulnerable assets
def initiate_download_vulnerabilities(tag: str) -> str:
logger.info("Initiating download of %s vulnerabilities seen in the last 15 days" % tag)
uuid = None
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
session = session_generator()
data = {
"num_assets": 1000,
"filters": {
"severity": ["low", "medium", "high", "critical"],
"since": int((datetime.datetime.now() - datetime.timedelta(days=15)).strftime("%s")),
"tag.Source": [tag]
}
}
resp = session.post("%s/vulns/export" % tenable_api, headers=headers, json=data)
response = resp.json()
if resp.ok:
uuid = response['export_uuid']
elif resp.status_code == 429:
logger.warning("Exceed rate limit.")
time.sleep(60)
# TO DO:
# Check header to see if spits out retry.
# print(resp.header)
uuid = initiate_download_vulnerabilities(tag)
else:
logger.error('ERROR %s: %s' % (resp.status_code, resp.text))
logger.error('Unable to make rest call to initiate download all %s vulnerabilities' % tag)
return uuid
# Check if report is ready for download
def check_vulnerabilities_download_status(uuid: str) -> Tuple[str,List[int]]:
logger.info("Checking download status of vulnerabilities for file %s" % uuid)
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
session = session_generator()
status = None
chunks = []
resp = session.get("%s/vulns/export/%s/status" % (tenable_api, uuid), headers=headers)
if resp.ok:
response = resp.json()
status = response['status']
if status == 'FINISHED':
chunks.extend(response['chunks_available'])
elif resp.status_code == 429:
logger.warning("Exceed rate limit.")
time.sleep(60)
# TO DO:
# Check header to see if spits out retry.
# print(resp.header)
status, chunks = check_vulnerabilities_download_status(uuid)
else:
logger.error('ERROR %s: %s' % (resp.status_code, resp.text))
logger.error('Unable to make rest call to get status of file download %s' % uuid)
return status, chunks
def parse_vulnerabilities(vulnerability: Dict) -> TenableVulnerability:
if 'exploit_available' in vulnerability['plugin'] and vulnerability['plugin']['exploit_available']:
exploit_available = True
else:
exploit_available = False
if 'cvss3_temporal_score' in vulnerability['plugin']:
temporal_score = vulnerability['plugin']['cvss3_temporal_score']
elif 'cvss_temporal_score' in vulnerability['plugin']:
temporal_score = vulnerability['plugin']['cvss_temporal_score']
else:
temporal_score = 0.0
if 'cvss3_base_score' in vulnerability['plugin']:
base_score = vulnerability['plugin']['cvss3_base_score']
elif 'cvss_base_score' in vulnerability['plugin']:
base_score = vulnerability['plugin']['cvss_base_score']
else:
base_score = 0.0
if 'vpr' in vulnerability['plugin']:
if 'score' in vulnerability['plugin']['vpr']:
vpr = vulnerability['plugin']['vpr']['score']
else:
vpr = 0.0
else:
vpr = 0.0
if 'see_also' in vulnerability['plugin']:
additional_links = [vulnerability['plugin']['see_also'][0]]
else:
additional_links = []
if 'cve' in vulnerability['plugin']:
cves = vulnerability['plugin']['cve']
else:
cves = []
vulnobj = TenableVulnerability(
vulnerability['plugin']['name'],
vulnerability['plugin']['solution'].replace('\r', '').replace('\n', ' '),
additional_links,
vulnerability['asset']['ipv4'],
vulnerability['asset']['fqdn'] or vulnerability['asset']['hostname'],
vulnerability['asset']['operating_system'][0] or '',
cves,
vulnerability['plugin']['family'],
exploit_available,
base_score,
temporal_score,
vpr
)
return vulnobj
# Get vulnerability of only those whose agents were last seen in 30 days
def download_vulnerabilities(uuid: str, chunk_id: int, agents: List[str]) -> List[TenableVulnerability]:
vulnerabilities = []
logger.info("Fetching list of vulnerabilities for chunk %d" % chunk_id)
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
session = session_generator()
resp = session.get("%s/vulns/export/%s/chunks/%d" % (tenable_api, uuid, chunk_id), headers=headers)
if resp.ok:
response = resp.json()
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
fs = [executor.submit(parse_vulnerabilities, vulnerability) for vulnerability in response if (agents and 'agent_uuid' in vulnerability['asset'] and vulnerability['asset']['agent_uuid'] in agents) or not agents]
for future in concurrent.futures.as_completed(fs):
vulnerabilities.append(future.result())
elif resp.status_code == 429:
logger.warning("Exceed rate limit.")
time.sleep(60)
# TO DO:
# Check header to see if spits out retry.
# print(resp.header)
vulnerabilities = download_vulnerabilities(uuid, chunk_id, agents)
else:
logger.error('ERROR %s: %s' % (resp.status_code, resp.text))
logger.error('Unable to make rest call to download vulnerabilities for chunk %d' % chunk_id)
return vulnerabilities
# Get any scanner id as all devices agents are associated with all scanners
def get_any_scanner_id() -> int:
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
session = session_generator()
r = session.get("%s/scanners" % tenable_api, headers=headers)
if r.ok:
response = r.json()
scanner_id = response['scanners'][0]['id']
logger.info("Received Tenable Scanner ID")
return scanner_id
else:
logger.error('Unable to make rest call to get scanner id')
logger.error('ERROR %s: %s' % (r.status_code, r.text))
return 0
# Fetch the groups (id and text) associated with the scanner
def get_agent_groups(scanner_id: int) -> Dict[int, str]:
logger.info("Fetching all agent groups...")
agent_group_ids = {}
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
session = session_generator()
agent_group_request = session.get("%s/scanners/%d/agent-groups" % (tenable_api, scanner_id), headers=headers)
if agent_group_request.ok:
agent_group_response = agent_group_request.json()
for agent_group in agent_group_response['groups']:
agent_group_ids[agent_group['id']] = agent_group['name']
logger.info("Completed collecting all agent groups")
return agent_group_ids
# Fetches all agents in a particular agent group
def get_agents_in_agent_group(scanner_id: int, group_id: int) -> List[str]:
agents = []
offset = 0
session = session_generator()
logger.info("Getting all agents belonging to group id %d", group_id)
while True:
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
agent_request = session.get(
"%s/scanners/%d/agent-groups/%s?limit=5000&offset=%d" % (tenable_api, scanner_id, group_id, offset),
headers=headers)
if agent_request.ok:
agent_response = agent_request.json()
for agent in agent_response['agents']:
if 'last_scanned' in agent and agent['last_scanned'] and agent['last_scanned'] >= int((datetime.datetime.now() - datetime.timedelta(days=30)).strftime("%s")):
agents.append(agent['uuid'].replace('-', ''))
# Tackle pagination
if agent_response['pagination']['total'] - offset <= 5000:
break
else:
offset = offset + 5000
else:
logger.error('Error %d:%s', agent_request.status_code, agent_request.text)
return agents
# Fetch all gcp agents
def get_gcp_agents(scanner_id: int) -> List[str]:
'''
Fetch Agents from Groups for GCP based on Agent Group Name
'''
agents = []
logger.info("Getting all gcp servers")
agent_group_ids = get_agent_groups(scanner_id)
if agent_group_ids:
# Map based on the value to the group id and fetch agents accordingly
for group_id in agent_group_ids:
if 'GCP' in agent_group_ids[group_id]:
agents.extend(get_agents_in_agent_group(scanner_id, group_id))
else:
pass
agents = list(set(agents))
logger.info('Found %d gcp agents' % len(agents))
return agents
# Fetch all workstation agents
def get_workstation_agents(scanner_id:int) -> List[str]:
'''
Fetch Agents from Groups for Workstations based on Agent Group Name
'''
agents = []
logger.info("Getting all workstation agents")
agent_group_ids = get_agent_groups(scanner_id)
if agent_group_ids:
# Map based on the value to the group id and fetch agents accordingly
for group_id in agent_group_ids:
if 'Workstations' in agent_group_ids[group_id]:
agents.extend(get_agents_in_agent_group(scanner_id, group_id))
else:
pass
agents = list(set(agents))
logger.info('Found %d workstation agents' % len(agents))
return agents
def get_rerouted_url(link):
link_dict = {}
resp = requests.get(link)
link_dict[link] = resp.url
return link_dict
def mapping_security_zone(iplist: List[str]) -> Dict[str, str]:
"""
Fake list of ranges for GCP
"""
dmz_range = list(netaddr.IPNetwork('192.168.0.0/24'))
secure_range = list(netaddr.IPNetwork('192.168.2.0/22'))
ip_zone = {}
for ip in iplist:
if netaddr.IPAddress(ip) in dmz_range:
ip_zone[ip] = 'DMZ'
elif netaddr.IPAddress(ip) in secure_range:
ip_zone[ip] = 'Secure'
return ip_zone
def fetch_gcp_vulnerabilities() -> List[TenableVulnerability]:
agents = []
vulnerabilities = []
uuid = initiate_download_vulnerabilities(tenable_gcp_tag)
if uuid is not None:
status, chunks = check_vulnerabilities_download_status(uuid)
while status != 'FINISHED':
time.sleep(10)
status, chunks = check_vulnerabilities_download_status(uuid)
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
fs = [executor.submit(download_vulnerabilities, uuid, chunk_id, agents) for chunk_id in chunks]
for future in concurrent.futures.as_completed(fs):
if future.result() is not None:
vulnerabilities.extend(future.result())
logger.info('Mapping info links to rerouted link')
links = []
for vulnerability in vulnerabilities:
links.extend(vulnerability.additional_links)
links = list(set(links))
map_link_to_its_rerouted_url = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=1000) as executor:
for link_dict in executor.map(get_rerouted_url, links):
map_link_to_its_rerouted_url.update(link_dict)
for vulnerability in vulnerabilities:
temp_links_holder = []
for link in vulnerability.additional_links:
if link in map_link_to_its_rerouted_url:
temp_links_holder.append(map_link_to_its_rerouted_url[link])
vulnerability.additional_links = temp_links_holder
# Map zones to IPs
vulnerable_ips = list(set(vulnerability.ip for vulnerability in vulnerabilities))
    logger.info('Mapping security zone to %d IPs', len(vulnerable_ips))
ip_zone_mapping = mapping_security_zone(vulnerable_ips)
for vulnerability in vulnerabilities:
if vulnerability.ip in ip_zone_mapping:
vulnerability.zone = ip_zone_mapping[vulnerability.ip]
logger.info("Found %d IPs that are not tagged to zones" % len(set([vulnerability.ip for vulnerability in vulnerabilities if not vulnerability.zone])))
for vulnerability in vulnerabilities:
vulnerability.get_host_risk()
    logger.info('Getting actual criticality of the vulnerabilities')
for vulnerability in vulnerabilities:
vulnerability.actual_criticality_insight()
logger.info('Found %d vulnerabilities for GCP' % len(vulnerabilities))
return vulnerabilities
def fetch_workstation_vulnerabilities() -> List[TenableVulnerability]:
vulnerabilities = []
scanner_id = get_any_scanner_id()
if scanner_id > 0:
agents = get_workstation_agents(scanner_id)
uuid = initiate_download_vulnerabilities(tenable_workstations_tag)
if uuid is not None and agents:
status, chunks = check_vulnerabilities_download_status(uuid)
while status != 'FINISHED':
time.sleep(10)
status, chunks = check_vulnerabilities_download_status(uuid)
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
fs = [executor.submit(download_vulnerabilities, uuid, chunk_id, agents) for chunk_id in chunks]
for future in concurrent.futures.as_completed(fs):
if future.result():
vulnerabilities.extend(future.result())
logger.info('Mapping info links to rerouted link')
links = []
for vulnerability in vulnerabilities:
links.extend(vulnerability.additional_links)
links = list(set(links))
map_link_to_its_rerouted_url = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=1000) as executor:
for link_dict in executor.map(get_rerouted_url, links):
map_link_to_its_rerouted_url.update(link_dict)
for vulnerability in vulnerabilities:
temp_links_holder = []
for link in vulnerability.additional_links:
if link in map_link_to_its_rerouted_url:
temp_links_holder.append(map_link_to_its_rerouted_url[link])
vulnerability.additional_links = temp_links_holder
for vulnerability in vulnerabilities:
vulnerability.actual_criticality_insight()
logger.info('Found %d vulnerabilities for workstations' % len(vulnerabilities))
return vulnerabilities
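# A minimal, hypothetical driver sketch (not part of this module) showing how the
# two fetchers above could be combined by a caller:
#
#   if __name__ == '__main__':
#       all_vulnerabilities = fetch_gcp_vulnerabilities() + fetch_workstation_vulnerabilities()
#       logger.info('Collected %d vulnerabilities in total', len(all_vulnerabilities))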
| 2.25
| 2
|
migrations/versions/26dbeb68791e_.py
|
gesiscss/binder_gallery
| 4
|
12783042
|
<reponame>gesiscss/binder_gallery
"""empty message
Revision ID: 2<PASSWORD>
Revises: <PASSWORD>
Create Date: 2019-08-05 11:09:30.843900
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '2<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('repo', sa.Column('last_ref', sa.String(), server_default='master', nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('repo', 'last_ref')
# ### end Alembic commands ###
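# Hypothetical usage sketch (not part of the generated migration): with the
# project's alembic.ini pointing at the target database, the new column can be
# applied or rolled back from the command line:
#
#   alembic upgrade head
#   alembic downgrade -1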
| 1.195313
| 1
|
tests/test_modify_group.py
|
bloodes/adressbook
| 0
|
12783043
|
from models.model_group import Group
import random
def test_modify_some_group(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create_new_group(Group(group_name='a', group_header='b', group_footer='c'))
old_groups = db.get_group_list()
group = random.choice(old_groups)
new_group = Group(group_name='k', group_header='b', group_footer='y')
new_group.group_id = group.group_id
app.group.modify_group_by_id(new_group, group.group_id)
new_groups = db.get_group_list()
old_groups.remove(group)
old_groups.append(new_group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
if check_ui:
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
| 2.4375
| 2
|
main4.py
|
tamurata/E04a-Sprites
| 0
|
12783044
|
#!/usr/bin/env python3
import utils, os, random, time, open_color, arcade
utils.check_version((3,7))
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprites Example"
class MyGame(arcade.Window):
def __init__(self):
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
arcade.set_background_color(open_color.white)
self.animal_list = arcade.SpriteList()
    def setup(self):
        # Sprite image paths paired with each sprite's (center_x, center_y) placement
        sprite_specs = [
            ("assets/Side/detail_crystal.png", 400, 300),
            ("assets/Side/detail_dirt.png", 500, 300),
            ("assets/Side/detail_tree.png", 450, 150),
            ("assets/Side/detail_rocks.png", 200, 100),
            ("assets/Side/snow_tile_bump.png", 650, 50),
            ("assets/Side/snow_tile_hill.png", 600, 400),
            ("assets/Side/snow_tile_riverFall.png", 450, 100),
            ("assets/Side/snow_tile_spawn.png", 350, 350),
            ("assets/Side/towerRound_base.png", 680, 133),
            ("assets/Side/towerRound_crystals.png", 250, 550),
        ]
        for path, x, y in sprite_specs:
            self.animal_sprite = arcade.Sprite(path, 2.0)
            self.animal_sprite.center_x = x
            self.animal_sprite.center_y = y
            self.animal_list.append(self.animal_sprite)
def on_draw(self):
arcade.start_render()
self.animal_list.draw()
def update(self, delta_time):
pass
def on_mouse_motion(self, x, y, dx, dy):
self.animal_sprite.center_x = x
self.animal_sprite.center_y = y
def main():
""" Main method """
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 2.9375
| 3
|
train.py
|
dogydev/COVID-Efficientnet-Pytorch
| 5
|
12783045
|
import logging
import os
import numpy as np
from sklearn.metrics import classification_report
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.nn import CrossEntropyLoss
from data.dataset import COVIDxFolder
from data import transforms
from torch.utils.data import DataLoader
from model import architecture
import util
import config
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def save_model(model, config):
if isinstance(model, torch.nn.DataParallel):
# Save without the DataParallel module
model_dict = model.module.state_dict()
else:
model_dict = model.state_dict()
state = {
"state_dict": model_dict,
"global_step": config['global_step'],
"clf_report": config['clf_report']
}
f1_macro = config['clf_report']['macro avg']['f1-score'] * 100
name = "{}_F1_{:.2f}_step_{}.pth".format(config['name'],
f1_macro,
config['global_step'])
model_path = os.path.join(config['save_dir'], name)
torch.save(state, model_path)
log.info("Saved model to {}".format(model_path))
def validate(data_loader, model, best_score, global_step, cfg):
model.eval()
gts, predictions = [], []
log.info("Validation started...")
for data in data_loader:
imgs, labels = data
imgs = util.to_device(imgs, gpu=cfg.gpu)
with torch.no_grad():
logits = model(imgs)
            # Works whether or not the model is wrapped in nn.DataParallel
            probs = getattr(model, "module", model).probability(logits)
preds = torch.argmax(probs, dim=1).cpu().numpy()
labels = labels.cpu().detach().numpy()
predictions.extend(preds)
gts.extend(labels)
predictions = np.array(predictions, dtype=np.int32)
gts = np.array(gts, dtype=np.int32)
acc, f1, prec, rec = util.clf_metrics(predictions=predictions,
targets=gts,
average="macro")
report = classification_report(gts, predictions, output_dict=True)
log.info("VALIDATION | Accuracy {:.4f} | F1 {:.4f} | Precision {:.4f} | "
"Recall {:.4f}".format(acc, f1, prec, rec))
if acc > best_score:
save_config = {
'name': config.name,
'save_dir': config.ckpts_dir,
'global_step': global_step,
'clf_report': report
}
save_model(model=model, config=save_config)
best_score = acc
log.info("Validation end")
model.train()
return best_score
def main():
if config.gpu and not torch.cuda.is_available():
raise ValueError("GPU not supported or enabled on this system.")
use_gpu = config.gpu
log.info("Loading train dataset")
train_dataset = COVIDxFolder(config.train_imgs, config.train_labels,
transforms.train_transforms(config.width,
config.height))
train_loader = DataLoader(train_dataset,
batch_size=config.batch_size,
shuffle=True,
drop_last=True,
num_workers=config.n_threads,
pin_memory=use_gpu)
log.info("Number of training examples {}".format(len(train_dataset)))
log.info("Loading val dataset")
val_dataset = COVIDxFolder(config.val_imgs, config.val_labels,
transforms.val_transforms(config.width,
config.height))
val_loader = DataLoader(val_dataset,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.n_threads,
pin_memory=use_gpu)
log.info("Number of validation examples {}".format(len(val_dataset)))
    if config.weights:
        state = torch.load(config.weights)
        log.info("Loaded model weights from: {}".format(config.weights))
    else:
        state = None
state_dict = state["state_dict"] if state else None
model = architecture.COVIDNext50(n_classes=config.n_classes)
if state_dict:
model = util.load_model_weights(model=model, state_dict=state_dict)
if use_gpu:
model.cuda()
model = torch.nn.DataParallel(model)
optim_layers = filter(lambda p: p.requires_grad, model.parameters())
# optimizer and lr scheduler
optimizer = Adam(optim_layers,
lr=config.lr,
weight_decay=config.weight_decay)
scheduler = ReduceLROnPlateau(optimizer=optimizer,
factor=config.lr_reduce_factor,
patience=config.lr_reduce_patience,
mode='max',
min_lr=1e-7)
# Load the last global_step from the checkpoint if existing
global_step = 0 if state is None else state['global_step'] + 1
    class_weights = util.to_device(torch.FloatTensor(config.loss_weights),
                                   gpu=use_gpu)
    loss_fn = CrossEntropyLoss(weight=class_weights)
# Reset the best metric score
best_score = -1
for epoch in range(config.epochs):
log.info("Started epoch {}/{}".format(epoch + 1,
config.epochs))
for data in train_loader:
imgs, labels = data
imgs = util.to_device(imgs, gpu=use_gpu)
labels = util.to_device(labels, gpu=use_gpu)
logits = model(imgs)
loss = loss_fn(logits, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if global_step % config.log_steps == 0 and global_step > 0:
                probs = getattr(model, "module", model).probability(logits)
preds = torch.argmax(probs, dim=1).detach().cpu().numpy()
labels = labels.cpu().detach().numpy()
acc, f1, _, _ = util.clf_metrics(preds, labels)
lr = util.get_learning_rate(optimizer)
log.info("Step {} | TRAINING batch: Loss {:.4f} | F1 {:.4f} | "
"Accuracy {:.4f} | LR {:.2e}".format(global_step,
loss.item(),
f1, acc,
lr))
if global_step % config.eval_steps == 0 and global_step > 0:
best_score = validate(val_loader,
model,
best_score=best_score,
global_step=global_step,
cfg=config)
scheduler.step(best_score)
global_step += 1
if __name__ == '__main__':
seed = config.random_seed
if seed:
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
main()
| 2.03125
| 2
|
minos/auth_token/exceptions.py
|
minos-framework/minos-auth-token
| 5
|
12783046
|
class TokenConfigException(Exception):
"""Base Api Gateway Exception."""
class NoTokenException(TokenConfigException):
"""Exception to be raised when token is not available."""
class ApiGatewayConfigException(TokenConfigException):
"""Base config exception."""
| 1.992188
| 2
|
scripts/appsetup.py
|
tdude92/pengumoneymaker
| 0
|
12783047
|
<gh_stars>0
import pyautogui
import time
print("Please move your mouse to the icon of an open browser running club penguin on the task bar.")
print("Capturing in...")
for i in range(10, 0, -1):
print("", i, " ", end = "\r")
time.sleep(1)
mouse_pos = pyautogui.position()
print("Position successfully captured.")
with open("browser_pos.txt", "w") as write_file:
write_file.write(str(mouse_pos.x) + " " + str(mouse_pos.y))
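# A minimal, hypothetical companion sketch (not part of this setup script) showing
# how another script could read browser_pos.txt back and move the cursor there:
def _example_restore_position(path="browser_pos.txt"):
    with open(path) as read_file:
        x, y = (int(value) for value in read_file.read().split())
    pyautogui.moveTo(x, y)  # jump the cursor to the captured task bar position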
| 2.828125
| 3
|
waymo_kitti_converter/tools/lidar_to_image_test.py
|
anhvth/Pseudo_Lidar_V2
| 0
|
12783048
|
import cv2
import numpy as np
from calibration import get_calib_from_file
# kitti
# name = '000000'
# pc_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/kitti/velodyne/'+name+'.bin'
# img_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/kitti/image_2/'+name+'.png'
# calib_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/kitti/calib/'+name+'.txt'
# waymo-kitti
name = '00000-00001'
pc_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/velodyne/'+name+'.bin'
img_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/image_0/'+name+'.png'
calib_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/calib/'+name+'.txt'
def cart_to_homo(mat):
mat = np.vstack([mat, np.ones((1, mat.shape[1]))])
return mat
def pc_to_pt(pc, V2C, R0, P):
def cart2hom(pts_3d):
""" Input: nx3 points in Cartesian
            Output: nx4 points in homogeneous coordinates by appending 1
"""
n = pts_3d.shape[0]
pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1))))
return pts_3d_hom
def project_velo_to_ref(pts_3d_velo):
pts_3d_velo = cart2hom(pts_3d_velo) # nx4
return np.dot(pts_3d_velo, np.transpose(V2C))
def project_ref_to_rect(pts_3d_ref):
""" Input and Output are nx3 points """
return np.transpose(np.dot(R0, np.transpose(pts_3d_ref)))
def project_rect_to_image(pts_3d_rect):
""" Input: nx3 points in rect camera coord.
Output: nx2 points in image2 coord.
"""
pts_3d_rect = cart2hom(pts_3d_rect)
pts_2d = np.dot(pts_3d_rect, np.transpose(P)) # nx3
pts_2d[:, 0] /= pts_2d[:, 2]
pts_2d[:, 1] /= pts_2d[:, 2]
return pts_2d[:, 0:2]
# filter behind
ind = pc[:, 0] > 0 # lidar: x is front
pc = pc[ind, :]
print('pc', pc)
ref = project_velo_to_ref(pc)
print('ref',ref)
rect = project_ref_to_rect(ref)
print('rect', rect)
depth = rect[:, 2]
print(rect.shape, depth.shape)
image = project_rect_to_image(rect)
return image, depth
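# The chain above is the usual KITTI-style projection, in homogeneous coordinates:
#   x_img ~ P @ R0 @ Tr_velo_to_cam @ X_velo
# A minimal, hypothetical call using the calib keys read in main() below:
#
#   calib = get_calib_from_file(calib_pathname)
#   pts_2d, depth = pc_to_pt(pc[:, :3], calib['Tr_velo2cam'], calib['R0'], calib['P2'])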
def main():
calib = get_calib_from_file(calib_pathname)
v2c = calib['Tr_velo2cam']
r0 = calib['R0']
px = calib['P2']
# v2c = np.array([
# [7.533745000000e-03, -9.999714000000e-01, -6.166020000000e-04, -4.069766000000e-03],
# [1.480249000000e-02, 7.280733000000e-04, -9.998902000000e-01, -7.631618000000e-02],
# [9.998621000000e-01, 7.523790000000e-03, 1.480755000000e-02, -2.717806000000e-01]])
# r0 = np.array([
# [9.999239000000e-01, 9.837760000000e-03, -7.445048000000e-03],
# [-9.869795000000e-03, 9.999421000000e-01, -4.278459000000e-03],
# [7.402527000000e-03, 4.351614000000e-03, 9.999631000000e-01]])
# px = np.array([
# [7.215377000000e+02, 0.000000000000e+00, 6.095593000000e+02, 4.485728000000e+01],
# [0.000000000000e+00, 7.215377000000e+02, 1.728540000000e+02, 2.163791000000e-01],
# [0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 2.745884000000e-03]])
pc = np.fromfile(pc_pathname, dtype=np.float32).reshape((-1, 4))[:, :3]
# filter all behind image plane
keep = []
for i in range(pc.shape[0]):
p = pc[i, :]
if p[0] > 0:
keep.append(p)
# pc = np.vstack(keep)
#
# tmp = np.eye(4)
# tmp[:3, :3] = r0
# r0 = tmp
# pc = np.transpose(pc) # (n,3) -> (3,n)
# pc = cart_to_homo(pc) # (3,n) -> (4,n)
#
# v2c = cart_to_homo(v2c) # (3,4) -> (4,4)
#
# print(px.shape, r0.shape, v2c.shape, pc.shape)
pt, depth = pc_to_pt(pc, v2c, r0, px)
print(pt.shape, depth.shape)
# pt = px @ r0 @ v2c @ pc
# print(pt.shape)
# pt = pt[:2] / pt[2]
print(pt)
import matplotlib.pyplot as plt
cmap = plt.cm.get_cmap("hsv", 256)
cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255
# draw
img = cv2.imread(img_pathname)
for i in range(pt.shape[0]):
x = pt[i, 0]
y = pt[i, 1]
        color = cmap[np.clip(640 / depth[i], 0, 255).astype(int), :]
# if 0 < x < 1920 and 0 < y < 1080:
# print('yah')
# print(int(x), int(y))
cv2.circle(img, (int(x), int(y)), 1, tuple(color), -1)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
while True:
cv2.imshow('image', img)
key = cv2.waitKey(1)
if key == 27: # exit
break
elif key != -1:
print('Undefined key:', key)
if __name__ == '__main__':
main()
| 2.515625
| 3
|
app.py
|
DARK-art108/Cotton-Leaf-Disease-Detection
| 11
|
12783049
|
from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re
import numpy as np
import tensorflow as tf
import pathlib
import wget
# from tensorflow.compat.v1.compat import ConfigProto
# from tensorflow.compat.v1 import InteractiveSession
#from tensorflow.python.client.session import InteractiveSession
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.2
# config.gpu_options.allow_growth = True
# session = InteractiveSession(config=config)
# Keras
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
# from gevent.pywsgi import WSGIServer
# Model saved with Keras model.save()
MODEL_PATH = 'model_resnet.hdf5'
MODEL_URL = 'https://github.com/DARK-art108/Cotton-Leaf-Disease-Prediction/releases/download/v1.0/model_resnet.hdf5'
UPLOAD_FOLDER = os.path.join(os.path.dirname(__file__), 'static', 'uploads')
# Download model if not present
while not pathlib.Path(MODEL_PATH).is_file():
print(f'Model {MODEL_PATH} not found. Downloading...')
wget.download(MODEL_URL)
# Define a flask app
app = Flask(__name__)
# Define upload path
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Developing in the absence of TensorFlow :P (Python 3.9.0 x64)
# def load_model(aa):
# class a:
# @staticmethod
# def predict(*args):
# return 1
# return a()
# class image:
# @staticmethod
# def load_img(path, target_size):
# return 'a'
# @staticmethod
# def img_to_array(img):
# return 'v'
# Load your trained model
model = load_model(MODEL_PATH)
def model_predict(img_path, model):
print(img_path)
img = image.load_img(img_path, target_size=(224, 224))
# Preprocessing the image
x = image.img_to_array(img)
# x = np.true_divide(x, 255)
## Scaling
x = x / 255
x = np.expand_dims(x, axis=0)
# Be careful how your trained model deals with the input
# otherwise, it won't make correct prediction!
# x = preprocess_input(x)
preds = model.predict(x)
    preds = int(np.argmax(preds, axis=1)[0])  # collapse the (1,) array to a plain int for the comparisons below
if preds == 0:
preds = "The leaf is a diseased cotton leaf."
elif preds == 1:
preds = "The leaf is a diseased cotton plant."
elif preds == 2:
preds = "The leaf is a fresh cotton leaf."
else:
preds = "The leaf is a fresh cotton plant."
return preds
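# A minimal, hypothetical sketch of calling model_predict outside the Flask route
# (the image path is an illustrative assumption):
#
#   label = model_predict(os.path.join(UPLOAD_FOLDER, 'sample_leaf.jpg'), model)
#   print(label)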
@app.route('/', methods=['GET', 'POST'])
def index():
# Main page
if request.method == 'POST':
# Get the file from post request
print(request.files, request.form, request.args)
f = None
if 'image' in request.files: f = request.files['image']
if f:
# Save the file to ./uploads
file_path = os.path.join(
app.config['UPLOAD_FOLDER'], secure_filename(f.filename))
f.save(file_path)
# Make prediction
preds = model_predict(file_path, model)
result = preds
return render_template('index.html', result=result, img=secure_filename(f.filename))
return render_template('index.html', result=None, err='Failed to receive file')
# First time
return render_template('index.html', result=None)
if __name__ == '__main__':
app.run(port=5001, debug=True)
| 2.203125
| 2
|
cell_imaging_utils/__init__.py
|
lionben89/BGU_cell_imaging_utils
| 0
|
12783050
|
<reponame>lionben89/BGU_cell_imaging_utils
from cell_imaging_utils.datasets_metadata.table.datasetes_metadata_csv import DatasetMetadataSCV
from cell_imaging_utils.datasets_metadata.dict.datasetes_metadata_pickle import DatasetMetadataPickle
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "1.0.0"
def get_module_version():
return __version__
__all__ = ["DatasetMetadataSCV", "DatasetMetadataPickle"]
| 1.492188
| 1
|