blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a94001111da649b92a725b1d7bc961265802cb8 | c0ffe8573c627cda23840318bb22ca4bc612897e | /google-practice/Interview-Questions/oneEditOrMore.py | 0385c3199cc150292e559094c2c7951376b18795 | [] | no_license | ammarasmro/experiments-tuts | 284b37c9cce14c02e73a0bce65d36dfa9d85ee74 | 21395e64fce03c7c3d7590eb48038415f7640245 | refs/heads/master | 2021-01-22T19:35:57.891498 | 2017-09-30T07:33:57 | 2017-09-30T07:33:57 | 102,421,027 | 0 | 0 | null | 2017-09-30T07:33:58 | 2017-09-05T01:55:13 | Python | UTF-8 | Python | false | false | 804 | py | def isOneEdit(myString1, myString2):
    # Strings whose lengths differ by more than 1 can never be one edit apart.
    if abs(len(myString1)-len(myString2)) > 1:
        return False
    counter = 0   # number of mismatches (edits) observed so far
    str1Iter = 0  # cursor into myString1
    str2Iter = 0  # cursor into myString2
    # NOTE(review): the loop stops at len(...) - 1, so the final character of
    # each string is never compared -- confirm this early cutoff is intended.
    while str1Iter < len(myString1) - 1 and str2Iter < len(myString2) - 1:
        if myString1[str1Iter] != myString2[str2Iter]:
            counter += 1
            # On a mismatch, try to realign by skipping one character of
            # myString1 (treats the mismatch as an insertion in myString1).
            if myString1[str1Iter+1] == myString2[str2Iter]:
                str1Iter += 1
            # NOTE(review): this elif re-tests the condition that was just
            # found False by the enclosing `if`, so it can never be True --
            # dead branch.
            elif myString1[str1Iter] == myString2[str2Iter]:
                str2Iter += 1
        str1Iter += 1
        str2Iter += 1
        # More than one mismatch means the strings are not one edit apart.
        if counter > 1:
            return False
    return True
myString1 = "pale"
myString2 = "ple"
print isOneEdit(myString1, myString2)
myString1 = "pales"
myString2 = "pale"
print isOneEdit(myString1, myString2)
myString1 = "pale"
myString2 = "bale"
print isOneEdit(myString1, myString2)
myString1 = "pale"
myString2 = "bake"
print isOneEdit(myString1, myString2) | [
"ammarasmaro@gmail.com"
] | ammarasmaro@gmail.com |
fa546a46671df8d5e7d5f0030dbce2f35ebe48ce | 1df53d3142563ccf0e47615dc226be0d6b9475aa | /model_utils.py | 3ed7b63f1cbafd89e7921c9febda497eb12c891d | [
"MIT"
] | permissive | zjunet/KDDCUP2020_AutoEnsemble | eee94298515db54f6bbc4c03729418913c2ae4ad | 88b766eb66e33d51ca350b22485bd9839e193ab2 | refs/heads/master | 2022-10-27T05:02:11.076782 | 2020-06-15T03:49:14 | 2020-06-15T03:49:14 | 285,231,427 | 1 | 0 | null | 2020-08-05T08:43:58 | 2020-08-05T08:43:57 | null | UTF-8 | Python | false | false | 20,601 | py | import pandas as pd
import numpy as np
import lightgbm as lgb
import torch
from sklearn.model_selection import train_test_split
from torch_scatter import scatter_add
from torch_geometric.utils import add_self_loops, degree, add_remaining_self_loops, remove_self_loops, is_undirected
from torch_geometric.utils import to_undirected, sort_edge_index
import gc
from sub_models import Label_Extract
from sub_models import Feature_Extract
import model_train as mt
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
import os
def check_label(data, device):
    """Probe whether train and test nodes are separable from the label features.

    Trains a random forest to predict "is this node a training node?" from
    ``data.x_label`` alone and measures its accuracy on a held-out half.
    Returns ``True`` when that accuracy is below 0.7 (the two populations
    look alike), ``False`` otherwise.  Presumably used to decide whether the
    label-propagation features are safe to use -- confirm against callers.
    """
    features = data.x_label.numpy()
    # Binary target: 1 for training nodes, 0 for test nodes.
    target = np.ones(data.x.shape[0], dtype=int)
    target[data.train_mask] = 1
    target[data.test_mask] = 0
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.5, random_state=0)
    forest = RandomForestClassifier(n_jobs=4, n_estimators=400, random_state=123)
    print('Predicting...')
    forest.fit(X_train, y_train)
    predicted = forest.predict(X_test)
    # Fraction of held-out nodes classified correctly.
    acc = len(y_test[y_test == predicted]) / len(y_test)
    print("验证集训练集预测值", acc)
    return acc < 0.7
def get_feature(data, label_is_work, is_direct, has_feature, edge_expand, device):
    """Build ``data.feature``, the node feature matrix for downstream models.

    Runs ``Feature_Extract`` over either the raw node attributes (PCA-reduced
    to 128 components when wider than 128) or a constant placeholder when the
    graph carries no usable features, then optionally appends graphlet
    (motif) counts and the label-propagation features.

    Fix: the original duplicated the whole model-construction / device
    transfer / extraction sequence in both ``has_feature`` branches; only the
    input-matrix choice differs, so the shared part is written once.

    ``edge_expand`` is accepted for interface compatibility but unused.
    Returns ``data`` (moved back to the CPU) with ``data.feature`` set and
    the intermediate label fields removed.
    """
    # Choose the input matrix for the feature extractor.
    if has_feature:
        newX = data.x.detach().clone()
        if data.x.shape[1] > 128:
            # Too many raw attributes: compress to 128 PCA components.
            pca = PCA(n_components=128)
            newX = pca.fit_transform(data.x.detach().numpy())
            newX = torch.tensor(newX, dtype=torch.float, device=device)
    else:
        # Featureless graph: feed a constant placeholder matrix.
        newX = torch.ones((data.x.shape[0], 2), dtype=torch.float, device=device)
    model = Feature_Extract()
    data = data.to(device)
    model = model.to(device)
    feature = model(newX, data, has_feature)
    data.feature = feature.detach()
    del model
    data = data.to('cpu')
    # Optionally extend with graphlet-orbit counts; count_motifs returns
    # None when the graph is too large or the external tool fails.
    motifs = count_motifs(data, is_direct)
    if motifs is not None:
        print("增加motifs")
        data.feature = torch.cat(
            (data.feature, torch.tensor(motifs, dtype=torch.float)), dim=1)
        del motifs
    if label_is_work:
        print("特征提取 label and feature")
        data.feature = torch.cat((data.feature, data.x_label), dim=1)
    else:
        print("特征提取 feature")
    # The label intermediates are no longer needed once folded into feature.
    del data.label
    del data.x_label
    gc.collect()
    return data
def count_motifs(data, is_direct):
    """Count 4-node graphlet orbits per node via the external ``./orca`` tool.

    Writes the sorted edge list to a file (the column names double as the
    tool's "<n_nodes> <n_edges>" header line), shells out to orca, and reads
    the per-node orbit counts back.  Returns an ndarray of counts, or None
    when the graph is too large or the external step fails.
    """
    # Guard: skip very large edge sets (the external tool would be too slow).
    if (data.edge_index.shape[1] > 500000):
        return None
    edge_index = data.edge_index.clone()
    if (is_direct == True):
        # Symmetrize directed graphs first -- presumably orca expects an
        # undirected edge list; TODO confirm against the orca input format.
        edge_index = to_undirected(data.edge_index)
    edge_index, _ = sort_edge_index(edge_index)
    edge_index = edge_index.numpy()
    k = pd.DataFrame(edge_index.T).reset_index(drop=True)
    # The renamed columns become the CSV header row: "<n_nodes> <n_edges>".
    k.rename(columns={0: data.x.shape[0], 1: edge_index.shape[1]}, inplace=True)
    try:
        # File name derived from the graph's shape so runs on different
        # graphs do not clobber each other's scratch files.
        name = 'graph' + str(data.x.shape[0]) + str(data.x.shape[1]) + str(
            data.edge_index.shape[0]) + str(data.edge_index.shape[1])
        name1 = name + '.in'
        name2 = name + '.out'
        path = './' + name + '.in'
        k.to_csv(path, sep=' ', index=False)
        os.system('./orca 4 ' + name1 + ' ' + name2)
        k = pd.read_csv('./' + name2, sep=' ', header=None)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # consider narrowing to Exception.
        return None
    return k.to_numpy()
def add_direct_edge(data):
    """Run the external ``./edge`` tool over the weighted edge list.

    Writes "src dst weight" rows (column names double as the
    "<n_nodes> <n_edges>" header) to a scratch file, invokes ./edge, and
    parses its output.  Returns ``(pairs, values)`` -- the first two columns
    and the third column of the tool's output -- or ``(None, None)`` on
    failure.
    """
    data = data.to('cpu')
    edge_index = data.edge_index.detach().clone()
    edge_index, _ = sort_edge_index(edge_index)
    edge_index = edge_index.numpy()
    k = pd.DataFrame(edge_index.T).reset_index(drop=True)
    # The renamed columns become the CSV header row: "<n_nodes> <n_edges>".
    k.rename(columns={0: data.x.shape[0], 1: edge_index.shape[1]}, inplace=True)
    k['1'] = data.edge_weight.detach().numpy()
    #print(k)
    #return
    try:
        # File name derived from the graph's shape to avoid collisions.
        name = 'edge' + str(data.x.shape[0]) + str(data.x.shape[1]) + str(
            data.edge_index.shape[0]) + str(data.edge_index.shape[1])
        name1 = name + '.in'
        name2 = name + '.out'
        path = './' + name + '.in'
        k.to_csv(path, sep=' ', index=False)
        os.system('./edge 4 ' + name1 + ' ' + name2)
        k = pd.read_csv('./' + name2, sep=' ', header=None)
    except:
        # NOTE(review): bare except; consider narrowing to Exception.
        return None,None
    k = k.to_numpy()
    return k[:,0:2],k[:,2]
def gnn_test_time(data, aggr, device):
    """Dispatch to the test-time (inference) routine matching ``aggr``.

    The trailing number in a name selects the hidden size; a
    'mean'/'add' component selects the message aggregation.  Returns
    whatever the matching ``model_train`` routine returns, or ``None``
    (after a diagnostic print) for an unknown name.

    Refactor: the original 40-branch if/elif chain is replaced by a single
    lookup table.  Function names are stored as strings and resolved with
    ``getattr`` so a missing ``mt`` attribute only fails for the entry that
    actually selects it, exactly as the elif chain did.
    """
    # aggr -> (model_train function name, extra args after (data, device)).
    dispatch = {
        'ggnn': ('ggnn_test', ()),
        'ggnn_di': ('ggnn_di_test', ()),
        'gated_add_di': ('ggnn_di_test', (48, 'add')),
        'gated_mean_di': ('ggnn_di_test', (48, 'mean')),
        'appnp_gcn': ('appnp_gcn_test', ()),
        'appnp_gcn128': ('appnp_gcn_test', (128,)),
        'appnp_gcn48': ('appnp_gcn_test', (48,)),
        'appnp_gcn96': ('appnp_gcn_test', (96,)),
        'arma': ('arma_test', ()),
        'gmm': ('gmm_test', ()),
        'mean': ('mean_test', ()),
        'graphnn': ('graphnn_test', ()),
        'graphnn_di': ('graphnn_di_test', ()),
        'mf': ('mf_test', ()),
        'agnn': ('agnn_test', ()),
        'sage': ('sage_test', ()),
        'ggin': ('ggin_test', ()),
        'gat': ('gat_test', ()),
        'ggin32': ('ggin_test', (32,)),
        'ggin24': ('ggin_test', (24,)),
        'ggin48': ('ggin_test', (48,)),
        'ggin64': ('ggin_test', (64,)),
        'ggin96': ('ggin_test', (96,)),
        'ggin_di': ('ggin_di_test', (12,)),
        'ggin_di24': ('ggin_di_test', (24,)),
        'ggin_di32': ('ggin_di_test', (32,)),
        'ggin_di48': ('ggin_di_test', (48,)),
        'ggin_di56': ('ggin_di_test', (56,)),
        'ggin_di64': ('ggin_di_test', (64,)),
        'gcn16': ('gcn_test', (16,)),
        'gcn156': ('gcn_test', (156,)),
        'gcn64': ('gcn_test', (64,)),
        'gcn128': ('gcn_test', (128,)),
        'gcn48': ('gcn_test', (48,)),
        'gated_mean32': ('gated_test', (32, 'mean')),
        'gated_mean48': ('gated_test', (48, 'mean')),
        'gated_mean56': ('gated_test', (56, 'mean')),
        'gated_add56': ('gated_test', (56, 'add')),
        'gated_add48': ('gated_test', (48, 'add')),
        'gated_add32': ('gated_test', (32, 'add')),
    }
    try:
        func_name, extra = dispatch[aggr]
    except KeyError:
        print(aggr, "not exist")
        return None
    return getattr(mt, func_name)(data, device, *extra)
def gnn_train_and_predict(data, aggr, sample_mask, val_mask, node_norm, device, time_control=None):
    """Dispatch to the training routine matching ``aggr``.

    All routines share the positional prefix
    ``(data, aggr, sample_mask, val_mask, node_norm, device, time_control)``;
    the trailing number in a name selects the hidden size and a
    'mean'/'add' component the message aggregation.  Returns the routine's
    result, or ``None`` (after a diagnostic print) for an unknown name.

    Refactor: the original 40-branch if/elif chain is replaced by a lookup
    table.  Function names are resolved lazily with ``getattr`` so a missing
    ``mt`` attribute only fails for the entry that selects it, matching the
    original elif behavior.
    """
    # aggr -> (model_train function name, extra args after the shared prefix).
    dispatch = {
        'ggnn': ('ggnn_train', ()),
        'ggnn_di': ('ggnn_di_train', ()),
        'ggnn_add_di': ('ggnn_di_train', (48, 'add')),
        'ggnn_mean_di': ('ggnn_di_train', (48, 'mean')),
        'gin': ('gin_train', ()),
        'gin_di': ('gin_di_train', ()),
        'gat': ('gat_train', ()),
        'gcn': ('gcn_train', (12,)),
        'gcn16': ('gcn_train', (16,)),
        'gcn32': ('gcn_train', (32,)),
        'gcn64': ('gcn_train', (64,)),
        'gcn128': ('gcn_train', (128,)),
        'gcn48': ('gcn_train', (48,)),
        'appnp_lin': ('appnp_lin_train', ()),
        'appnp_gcn128': ('appnp_gcn_train', (128,)),
        'appnp_gcn48': ('appnp_gcn_train', (48,)),
        'appnp_gcn96': ('appnp_gcn_train', (96,)),
        'arma': ('arma_train', ()),
        'gmm': ('gmm_train', ()),
        'mean': ('mean_train', ()),
        'graphnn': ('graphnn_train', ()),
        'graphnn_di': ('graphnn_di_train', ()),
        'mf': ('mf_train', ()),
        'agnn': ('agnn_train', ()),
        'sage': ('sage_train', ()),
        'ggin': ('ggin_train', (48,)),
        'ggin32': ('ggin_train', (32,)),
        'ggin24': ('ggin_train', (24,)),
        'ggin48': ('ggin_train', (48,)),
        'ggin64': ('ggin_train', (64,)),
        'ggin96': ('ggin_train', (96,)),
        'ggin_di24': ('ggin_di_train', (24,)),
        'ggin_di32': ('ggin_di_train', (32,)),
        'ggin_di48': ('ggin_di_train', (48,)),
        'ggin_di56': ('ggin_di_train', (56,)),
        'ggin_di64': ('ggin_di_train', (64,)),
        'gated_mean32': ('gated_train', (32, 'mean')),
        'gated_mean48': ('gated_train', (48, 'mean')),
        'gated_mean56': ('gated_train', (56, 'mean')),
        'gated_add32': ('gated_train', (32, 'add')),
        'gated_add48': ('gated_train', (48, 'add')),
        'gated_add56': ('gated_train', (56, 'add')),
    }
    try:
        func_name, extra = dispatch[aggr]
    except KeyError:
        print(aggr, "not exist")
        return None
    return getattr(mt, func_name)(data, aggr, sample_mask, val_mask, node_norm,
                                  device, time_control, *extra)
def train_one_model(data, aggr, sample_mask, val_mask, node_norm, device, time_control=None):
    """Train a single model and return its predictions.

    The special name ``'feature'`` selects the plain feature-based learner;
    every other name is forwarded to the GNN dispatcher.
    """
    if aggr == 'feature':
        return mt.label_train_and_predict(data, sample_mask, val_mask,
                                          node_norm, time_control)
    return gnn_train_and_predict(data, aggr, sample_mask, val_mask,
                                 node_norm, device, time_control)
def train_two_model(data, aggr, sample_mask_1, val_mask_1, sample_mask_2, val_mask_2,
                    node_norm1=None, node_norm2=None, device='cpu', time_control=None):
    """Train the same model on two complementary train/validation splits.

    Returns ``(pred1, pred2)``; ``pred2`` is ``None`` when the time budget
    runs out after the first training run.

    Fix: ``time_control`` defaults to ``None``, so the original's
    unconditional ``time_control.isTimeToStop()`` raised AttributeError
    whenever no budget object was supplied; the call is now guarded.
    """
    pred1 = train_one_model(data, aggr, sample_mask_1, val_mask_1, node_norm1,
                            device, time_control)
    if time_control is not None and time_control.isTimeToStop():
        return pred1, None
    pred2 = train_one_model(data, aggr, sample_mask_2, val_mask_2, node_norm2,
                            device, time_control)
    return pred1, pred2
def get_sample_retio(data):
    """Return the fraction of training nodes to keep in one sampling split.

    Aims to hold out roughly 1000 training nodes for validation, but never
    keeps less than two thirds (0.6666) of the training set.
    """
    mask = data.train_mask
    train_num = mask[mask == True].shape[0]
    node_num = mask.shape[0]  # kept for parity with the original bookkeeping
    sample_fraction = (train_num - 1000) / train_num
    return np.max((sample_fraction, 0.6666))
def sample_by_label(x_mask, labels, weight=0.5, top=True, random=False, bootstamp=False):
    """Sample a subset of the nodes selected by ``x_mask``.

    Parameters
    ----------
    x_mask : torch.BoolTensor
        Mask of candidate nodes.
    labels : torch.Tensor
        Node labels, used for per-class (stratified) sampling.
    weight : float
        Fraction of each class (or of the whole pool when ``random``) to keep.
    top : bool
        For deterministic stratified sampling, keep the head (True) or the
        tail (False) of each seeded-shuffled label group.
    random : bool
        When True, ignore labels and sample uniformly at random.
    bootstamp : bool
        When True, sample with replacement (bootstrap) and also return a
        per-node multiplicity weight vector.

    Returns
    -------
    ``(sample_mask, val_mask)``, plus ``norm_weight`` when ``bootstamp``.

    Fix: in the ``random=True`` branch the original converted the sampled
    *DataFrame* (index and label columns) straight to a 2-D array and used
    it as node indices, so label values were also treated as indices; the
    sampled 'index' column is now used instead.
    """
    if random == False:
        index = torch.tensor(range(x_mask.shape[0]))[x_mask].numpy()
        label = labels[x_mask].numpy()
        df = pd.DataFrame()
        df['index'] = index
        df['label'] = label
        sample_list = []

        def typicalsamling(group, sample_list):
            # Shuffle each label group with a fixed seed so repeated calls
            # yield the same split, then keep a `weight` fraction of it.
            name = int(group.name)
            df = None
            if bootstamp == False:
                if top == True:
                    df = group.sample(frac=1, replace=False, random_state=1337).head(int(len(group) * weight))
                else:
                    df = group.sample(frac=1, replace=False, random_state=1337).tail(int(len(group) * weight))
            else:
                df = group.sample(frac=1, replace=True)
            sample_list += list(df['index'].values)
            return df

        # apply() is used for its side effect of filling sample_list.
        result = df.groupby(['label']).apply(typicalsamling, sample_list)
        if bootstamp == True:
            # Bootstrap draws can repeat a node; record the multiplicity so
            # the loss can be re-weighted accordingly.
            sample_mask = sample_list
            p = pd.DataFrame()
            p['num'] = [1] * len(sample_mask)
            p['index'] = sample_mask
            p = p.groupby('index').sum()
            sample_mask = torch.zeros(x_mask.shape[0], dtype=torch.bool)
            val_mask = x_mask.clone()
            norm_weight = torch.ones(x_mask.shape[0], dtype=torch.float)
            sample_mask[p.index] = True
            val_mask[p.index] = False
            norm_weight[p.index] = torch.tensor(p['num'].values, dtype=torch.float)
            return sample_mask, val_mask, norm_weight
        sample_list = np.array(sample_list)
        sample_mask = torch.zeros(x_mask.shape[0], dtype=torch.bool)
        sample_mask[sample_list] = True
        val_mask = x_mask.clone().detach()
        val_mask[sample_list] = False
        return sample_mask, val_mask
    else:
        index = torch.tensor(range(x_mask.shape[0]))[x_mask].numpy()
        label = labels[x_mask].numpy()
        df = pd.DataFrame()
        df['index'] = index
        df['label'] = label
        # Sample rows uniformly, then index the masks with the node ids only.
        sample_list = df.sample(frac=weight)['index'].values
        sample_mask = torch.zeros(x_mask.shape[0], dtype=torch.bool)
        sample_mask[sample_list] = True
        val_mask = x_mask.clone().detach()
        val_mask[sample_list] = False
        return sample_mask, val_mask
def get_sample(data, retio):
    """Build two complementary stratified train/validation splits.

    Split 1 keeps the head of each seeded-shuffled label group, split 2 the
    tail, so their validation sets differ.  Each split also gets a per-node
    weight vector: 1/1.2 everywhere, boosted to 1.2 on the nodes held out
    by the *other* split.
    """
    sample_mask1, val_mask1 = sample_by_label(data.train_mask, data.y, retio, True, False)
    sample_mask2, val_mask2 = sample_by_label(data.train_mask, data.y, retio, False, False)

    def _weights(boosted_mask):
        # Baseline weight 1/1.2, up-weighted on the boosted nodes.
        w = torch.ones(data.x.shape[0], dtype=torch.float) / 1.2
        w[boosted_mask] = 1.2
        return w

    node_norm1 = _weights(val_mask2)
    node_norm2 = _weights(val_mask1)
    return sample_mask1, val_mask1, sample_mask2, val_mask2, node_norm1, node_norm2
def count_label(data, is_direct, device):
    """Compute label features via ``Label_Extract``, with a CPU fallback.

    The attempt on ``device`` can fail (most commonly CUDA out-of-memory);
    in that case model and data are moved to the CPU and the extraction is
    redone there.  Returns ``data`` on the CPU with ``data.x_label`` set.

    Fixes: removed the original ``for i in range(2)`` wrapper (every path in
    the body returned or raised on the first pass, so it never looped) and
    narrowed the bare ``except:`` so KeyboardInterrupt/SystemExit propagate.
    """
    data = data.to(device)
    model = Label_Extract()
    try:
        torch.cuda.empty_cache()
        with torch.no_grad():
            model = model.to(device)
            x = model(data, is_direct)
            data.x_label = x
            data = data.to('cpu')
            return data
    except Exception:
        # Fallback: retry the whole extraction on the CPU.
        data = data.to('cpu')
        model = model.to('cpu')
        torch.cuda.empty_cache()
        with torch.no_grad():
            x = model(data, is_direct)
            data.x_label = x
            return data
def get_rank_two(result):
    """Return the argmax and runner-up argmax of each row of ``result``.

    The two index vectors are concatenated end to end (shape
    ``[2 * n_rows]``), not interleaved per row.  Entries tied with the row
    maximum are excluded from the runner-up search.
    """
    scores = torch.tensor(result)
    best = scores.max(dim=1)[1]
    # Shift each row so its maximum becomes 0, then push those zeros far
    # below everything else so the second max skips them.
    shifted = scores - scores.max(dim=1)[0].view(-1, 1)
    shifted[shifted == 0] = -10000
    second = shifted.max(dim=1)[1]
    return torch.cat((best, second), dim=-1)
def get_simiraly(best, x, y):
    """Count positions where ``x`` equals the truth ``y`` but ``best`` does not.

    Despite the name, this measures how much candidate ``x`` *complements*
    the current best prediction rather than how similar it is to it.
    """
    complements = (x == y) & (best != y)
    return int(np.count_nonzero(complements))
def choose_model(data, aggr_list, train_mask, val_mask, device, clock):
    """Train every candidate model, then rank them for ensembling.

    Stops early once the time budget (``clock``) is exhausted.  Returns the
    prediction arrays and model names, both reordered by the ranking from
    ``choose_model_by_result``; two empty arrays when nothing finished in
    time.
    """
    predictions = []
    trained_names = []
    for aggr in aggr_list:
        pred = train_one_model(data, aggr, train_mask, val_mask, None, device, clock)
        if pred is not None:
            trained_names.append(aggr)
            predictions.append(pred)
        if clock.isTimeToStop() == True:
            print("时间耗尽")
            break
    predictions = np.array(predictions)
    trained_names = np.array(trained_names)
    if len(predictions) == 0:
        return predictions, trained_names
    rank = choose_model_by_result(data, predictions, val_mask, aggr_list)
    return predictions[rank], trained_names[rank]
def choose_model_by_result(data, result1, val_mask, aggr_list):
    """Rank candidate model predictions for ensembling.

    Combines two per-model rank scores: validation accuracy (score1) and
    "complementarity" to the single most accurate model (score2) -- how
    often a model is right where the best one is wrong (see get_simiraly).
    The best model itself gets score2 = -1 so it always ranks first.
    Returns the merged ordering as ``np.argsort(score)``.
    """
    data = data.to('cpu')
    val_acc = []
    # Validation accuracy of every candidate's hard predictions.
    for e in result1:
        pred = np.argmax(e, axis=1)
        pred = torch.tensor(pred)
        correct = float(pred[val_mask].eq(data.y[val_mask]).sum().item())
        acc2 = correct / (val_mask.sum().item() + 0)
        #print('*gcn 测试集Accuracy: {:.4f}'.format(acc2))
        val_acc.append(acc2)
    val_acc = np.array(val_acc)
    best_index = np.argsort(-val_acc)
    #best_ans = get_rank_two(result1[best_index[0]])
    # Hard labels of the most accurate model.
    best_ans = np.argmax(result1[best_index[0]],axis=1)
    sim_list = []
    y = data.y[val_mask].numpy()
    # Complementarity: validation nodes each model gets right while the
    # best model gets them wrong.
    for e in result1:
        pred = np.argmax(e,axis=1)
        sim = get_simiraly(best_ans[val_mask], pred[val_mask],y)
        #print(sim)
        sim_list.append(sim)
    sim_list = np.array(sim_list)
    best_index2 = np.argsort(-sim_list)
    # Convert both orderings into rank scores (0 = best of that criterion).
    score1 = np.ones(len(best_index))
    score1[best_index] = range(len(best_index))
    score2 = np.ones(len(best_index2))
    score2[best_index2] = range(len(best_index2))
    # Pin the overall most-accurate model to the front of the merged ranking.
    score2[best_index[0]] = -1
    #print(score2)
    #score = score1 + score2 * 1.1
    score = score2+score1
    print("model selection")
    print(aggr_list)
    print("val acc:", val_acc)
    print("sim: ", sim_list)
    print("val score:", score1)
    print("sim score:", score2)
    print("merge score:", score)
    return np.argsort(score)
| [
"826804116@qq.com"
] | 826804116@qq.com |
5a0772e1a8a55625488fe06642e451fb792dad75 | b0129214b1d493bdec6fc4658727775fb4066a5e | /addons/todo_user/__manifest__.py | 373e3f23f73a060a7b0267b94f628db4cc01f954 | [] | no_license | gitstalker/docker_odoo | 9875636e4f1bf60a8e55c7a66e8c85abf5f61661 | c049d93586f1c35300563fc77685da22d9cc4e14 | refs/heads/master | 2020-05-02T01:10:45.705337 | 2018-10-20T12:03:20 | 2018-10-20T12:03:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | {
'name':'Multiuser To-Do',
'description': 'Extend the To-Do app to multiuser.',
'depends': ['website'],
'data':['views/templates.xml'],
'author': 'hdwolf'
} | [
"spacegoing@gmail.com"
] | spacegoing@gmail.com |
923934d04555b341a1c5a3b1c3b87d95a95a11b7 | 07c1bc9e8d85cecd55f9cb4ce7e8a0767064e394 | /test.py | 6077752e74e79791a9a2072deb1fb0b02ae54077 | [
"MIT"
] | permissive | dblume/wine-tasting | 2868163d3e95f39e6c338f73dfbe0318e3e73794 | 53a4ab6b518fb84b89f6752a8bc764fbf27c4b92 | refs/heads/main | 2021-11-25T10:29:50.903756 | 2021-11-19T18:13:34 | 2021-11-19T18:13:34 | 20,635,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,815 | py | #!/usr/bin/env python
import sys
import unittest
from StringIO import StringIO
import wine_allocator
class Tee(object):
    """Capture everything printed to stdout while still echoing it live.

    Constructing a Tee swaps ``sys.stdout`` for the instance; every write
    goes both to an in-memory buffer and to the real stdout.
    """

    def __init__(self):
        self.saved_stdout = sys.stdout
        self.stdout = StringIO()
        sys.stdout = self

    def close_and_get_output(self):
        """Restore the real stdout and return the captured text, stripped."""
        sys.stdout = self.saved_stdout
        captured = self.stdout.getvalue().strip()
        self.stdout.close()
        return captured

    def write(self, data):
        # Fan each write out to the capture buffer and the original stream.
        self.stdout.write(data)
        self.saved_stdout.write(data)
class TestWineAllocation(unittest.TestCase):
    """End-to-end tests: feed "person<TAB>wine" bid lines to WineAllocator
    and verify the allocation it prints (count line followed by the chosen
    bids)."""
    def setUp(self):
        # Capture stdout for the duration of each test.
        self.tee = Tee()
    def get_stdout(self):
        # Stop capturing and return the printed lines.
        return self.tee.close_and_get_output().splitlines()
    def perform_test_and_verify_result(self, lines, num_out_lines):
        """Run the allocator on `lines` and check the output shape: the
        first line is the allocation count, followed by exactly that many
        lines, each of which must be one of the input bids."""
        allocator = wine_allocator.WineAllocator()
        allocator.process(lines)
        out_lines = self.get_stdout()
        self.assertEqual(str(num_out_lines), out_lines[0])
        self.assertEqual(int(out_lines[0]), len(out_lines)-1)
        self.assertTrue(set(out_lines[1:]).issubset(set(lines)))
        # The next test asserts the results contain non-repeating wines
        # and that there are no more than 3 instances of the same person.
        wine_allocator.verify_result_file(allocator.outfile.name)
    def test_no_contention(self):
        # Every wine has a single bidder; only p1's 5 bids hit the
        # 3-per-person cap, so 8 of 10 bids can be satisfied.
        lines = ("p1\tw1",
                 "p1\tw2",
                 "p1\tw3",
                 "p1\tw4",
                 "p1\tw5",
                 "p2\tw6",
                 "p2\tw7",
                 "p3\tw8",
                 "p4\tw9",
                 "p5\tw10",
                 )
        self.perform_test_and_verify_result(lines, 8)
    def test_contention_for_one(self):
        # p1 and p2 both bid on w1; 5 allocations expected.
        lines = ("p1\tw1",
                 "p1\tw2",
                 "p1\tw3",
                 "p1\tw4",
                 "p1\tw5",
                 "p2\tw1",
                 "p2\tw6",
                 )
        self.perform_test_and_verify_result(lines, 5)
    def test_simple_contention_for_two(self):
        # Only two distinct wines, so at most two allocations.
        lines = ("p1\tw1",
                 "p2\tw1",
                 "p3\tw1",
                 "p4\tw2",
                 "p5\tw2",
                 )
        self.perform_test_and_verify_result(lines, 2)
    def test_only_quad_contention(self):
        # Four people compete for the same four wines; each wine can be
        # sold once, giving four allocations.
        lines = ("p0\tw1",
                 "p0\tw2",
                 "p0\tw3",
                 "p0\tw4",
                 "p1\tw1",
                 "p1\tw2",
                 "p1\tw3",
                 "p1\tw4",
                 "p2\tw1",
                 "p2\tw2",
                 "p2\tw3",
                 "p2\tw4",
                 "p3\tw1",
                 "p3\tw2",
                 )
        self.perform_test_and_verify_result(lines, 4)
    def test_cant_sell_all(self):
        # Seven wines but only two buyers capped at 3 each: 6 sales max.
        lines = ("p0\tw1",
                 "p0\tw2",
                 "p0\tw3",
                 "p0\tw4",
                 "p0\tw5",
                 "p0\tw6",
                 "p0\tw7",
                 "p1\tw1",
                 "p1\tw2",
                 "p1\tw3",
                 "p1\tw4",
                 "p1\tw5",
                 "p1\tw6",
                 "p1\tw7",
                 )
        self.perform_test_and_verify_result(lines, 6)
    def test_adhoc_contention(self):
        # Mixed overlapping and disjoint bids; 10 allocations expected.
        lines = ("p0\tw1",
                 "p0\tw2",
                 "p1\tw1",
                 "p1\tw2",
                 "p1\tw3",
                 "p1\tw4",
                 "p1\tw5",
                 "p2\tw0",
                 "p2\tw7",
                 "p3\tw8",
                 "p4\tw9",
                 "p5\tw10",
                 )
        self.perform_test_and_verify_result(lines, 10)
if __name__ == '__main__':
unittest.main()
| [
"david.blume@gmail.com"
] | david.blume@gmail.com |
ea8340585fbefecf22d761ebd8f68b9adb4a6aab | d2a20d4118dcfadc25ccf8214d1ecb21c3fd8e91 | /ye's model/randomForest.py | 46407bc51989dfa40404e034de8f38636e8079f7 | [] | no_license | jinfy/PopulationSpatialization | b4358e1bb1ca4121583c50736cece392cc1f8c74 | a6c468b32effa3a055bd8cfe7f0ab3eb88d3a895 | refs/heads/main | 2023-08-11T10:24:39.106679 | 2021-09-16T07:09:44 | 2021-09-16T07:09:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,543 | py | import pandas as pd
import numpy as np
# 100 200 500
# Grid resolution selector; determines the raster dimensions below.
RESOLUTION = 500
N_ROW = 0
N_COLUMN = 0
if RESOLUTION == 100:
    N_ROW = 1540
    N_COLUMN = 1250
elif RESOLUTION == 200:
    N_ROW = 770
    N_COLUMN = 625
elif RESOLUTION == 500:
    N_ROW = 308
    N_COLUMN = 250
grid_src = r"..\Data\grid" + str(RESOLUTION) + ".csv"
# Read sub-district population density, indexed by grid id.
sub_midu = pd.read_table("../Code/resource/sub_midu.txt",sep=',',index_col='gid')
sub_midu = sub_midu.sort_index()
from utils import *
import random
#
# Read grid cell attributes.
gridInfo = pd.read_table(grid_src,sep=',',index_col='id')
gridInfo = gridInfo.sort_index()
# County ids and populations.
countyInfo = pd.read_table( "../Code/resource/wuhanCountyPop.txt",sep=',',index_col='countyId')
county_dict = countyInfo.to_dict()['countyPopNum']
# Sub-district ids and populations.
subInfo = pd.read_table(r"../Code/resource/wuhanSubDistrictPop.txt",sep=',',index_col='subId')
sub_dict = subInfo.to_dict()['subPopNum']
#
index_matrix = np.array(gridInfo.index).reshape((N_ROW,N_COLUMN))[::-1]  # 2-D matrix of grid indices
county_matrix = np.array(gridInfo['county_id']).reshape((N_ROW,N_COLUMN))[::-1]  # 2-D matrix of county ids
sub_matrix = np.array(gridInfo['sub_id']).reshape((N_ROW,N_COLUMN))[::-1]  # 2-D matrix of sub-district ids
###########################
# Covariates used to predict population: nightlight, elevation, NDVI,
# distance to roads/POIs and POI counts.
sub_value = pd.read_table("sub_value.txt", sep=',' , dtype=float,
                     usecols=['light', 'dem', 'ndvi','road_dis', 'poi_dis', 'poi'])
grid_value = pd.read_table("grid_value.txt", sep=',' , dtype=float,
                     usecols=['light', 'dem', 'ndvi','road_dis', 'poi_dis', 'poi'])
##########################
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
#24
fc = 0
sum = 0  # NOTE(review): shadows the builtin `sum`
max = 100000  # NOTE(review): shadows the builtin `max`
sum_r2 = 0
sum_MAE=0
sum_RMSE = 0
max_MAE =1000000  # tracks the best (minimum) MAE despite the name
max_RMSE = 1000000000  # tracks the best (minimum) RMSE despite the name
county = {11: 0, 18: 0, 15: 0, 14: 0, 12: 0, 6: 0, 10: 0, 3: 0, 1: 0, 16: 0, 4: 0, 5: 0, 7: 0, 9: 0, 8: 0, 17: 0, 2: 0}
var=0
# Average metrics over 10 random-forest runs with different seeds.
# NOTE(review): the inner `for i in range(rows)` loops shadow this loop
# variable; random_state=i is read before the shadowing, so seeds 0..9 are
# still used, but the shadowing is fragile.
for i in range(10):
    rfc = RandomForestRegressor(max_features='sqrt',n_estimators=250,min_samples_leaf=1,
                                oob_score=True,criterion="mae",bootstrap=True,random_state=i)
    rfc.fit(sub_value, sub_midu)
    # Predicted density, dasymetrically rescaled to match county totals.
    result = rfc.predict(grid_value).reshape((N_ROW, N_COLUMN))[::-1]
    weight_matrix = normalize(result,county_matrix,county_dict)
    pop_matrix = calCountyPop(weight_matrix,county_matrix,county_dict)
    pop_dict = {}
    [rows, cols] = pop_matrix.shape
    # Group per-cell populations by county id.
    # NOTE(review): the first value seen for each county is dropped (the
    # else-branch creates an empty list without appending); pop_dict is
    # unused afterwards, so this is currently harmless.
    for i in range(rows):
        for j in range(cols):
            if (np.isnan(county_matrix[i, j])):
                pass
            else:
                if (county_matrix[i, j] in pop_dict):
                    pop_dict[county_matrix[i, j]].append(pop_matrix[i,j])
                else:
                    pop_dict[county_matrix[i, j]] = []
    var += math.sqrt(np.var(pop_matrix[np.isnan(county_matrix) == False] / 0.01))
    # print(var)
    MAE, RMSE ,r2= calSubError(pop_matrix,sub_matrix,sub_dict)
    print(MAE)
    sum_MAE += MAE
    if max_MAE > MAE:
        max_MAE = MAE
    sum_RMSE += RMSE
    if max_RMSE > RMSE:
        max_RMSE = RMSE
    sum_r2 += r2
print("MAE mean:" + str(sum_MAE / 10))
print("MAE min:" + str(max_MAE))
print("RMSE mean:" + str(sum_RMSE / 10))
print("var mean:" + str(var / 10))
print("RMSE min:" + str(max_RMSE))
print("r2 mean:" + str(sum_r2/10))
print("fc mean:" + str(fc/10))
| [
"noreply@github.com"
] | noreply@github.com |
31ef41bd0fca49eeaa48af78dd411fcbfe5cc11f | 5b52b0ea918fbf6160def50541ffbe6aeb5a01ae | /bin/uniprot.prot.py | c7d346f11b2e7773c52fcaa96bef844f247d09c7 | [
"MIT"
] | permissive | cossio/ProteoPy | 74fe3d1fc17f3b91738da90c0aef0e5eb71a311b | a3569dcef34e4d416863c812e90498d749dc288f | refs/heads/master | 2021-09-06T02:43:05.614654 | 2018-02-01T20:06:29 | 2018-02-01T20:06:29 | 111,602,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | #!/usr/bin/env python
import sys
import argparse
import ProteoPy
# CLI: read Uniprot IDs (';'-separated, one group per line, header skipped)
# from --prots and write a TSV with the requested columns to --out.
PARSER = argparse.ArgumentParser(description='Get basic information from Uniprot from a list of Uniprot IDs')
PARSER.add_argument('--prots', type=str, help='list of proteins')
PARSER.add_argument('--out', type=str, help='output file')
PARSER.add_argument('--mass', action='store_true', help='molar mass')
PARSER.add_argument('--length', action='store_true', help='sequence length')
ARGS = PARSER.parse_args()
SERV = ProteoPy.Services()
# buffering=1 line-buffers the output so partial results are on disk while
# the slow, network-bound Uniprot lookups run.
with open(ARGS.prots) as prots_file, open(ARGS.out, 'w', 1) as out_file:
    # column headers
    out_file.write('UniprotID')
    if ARGS.mass:
        out_file.write('\tmass')
    if ARGS.length:
        out_file.write('\tlength')
    out_file.write('\n')
    for (lno, names) in enumerate(prots_file):
        if lno == 0: # skip first line
            continue
        for pid in names.split(';'):
            pid = pid.rstrip()
            if ARGS.mass or ARGS.length:
                try:
                    mass, length = SERV.uniprot_data(pid)
                except KeyboardInterrupt:
                    raise
                except:
                    # Best-effort: warn and skip IDs whose lookup fails.
                    ProteoPy.util.printwarn('error retrieving mass or length of ' + pid + ' ... skipping')
                    continue
            out_file.write(pid)
            if ARGS.mass:
                out_file.write('\t' + str(mass))
            if ARGS.length:
                out_file.write('\t' + str(length))
            out_file.write('\n')
| [
"j.cossio.diaz@gmail.com"
] | j.cossio.diaz@gmail.com |
25161b9b4c4e193d64701617854874c8f308c454 | 50af049cd65bc2a421d94e2b8a1b103594053d53 | /montyhall.py | b7db06f68eaf602112702efb064e07c28869f7c6 | [] | no_license | eumesmamente/pequepy | c7daa3f24c3dfbb3a0ba2824df186c823c5d83ec | 74450b4e54fb928a86eb6304ad60e838ae1e8a4f | refs/heads/master | 2021-06-23T17:46:51.181373 | 2021-03-19T00:09:20 | 2021-03-19T00:09:20 | 73,612,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,912 | py | #!/usr/bin/python
# coding: utf-8
# XOGO: O problema de Monty Hall
import random
import os
os.system('clear')
def xogo():
    # Monty Hall game loop (Python 2, Galician UI strings): tracks per-session
    # win statistics for switching vs. not switching doors.
    partidas=0   # games played
    xogando = True
    cp=0         # wins after switching doors
    ncp=0        # wins without switching
    while xogando:
        os.system('clear')
        premio=random.randint(1,3)   # door hiding the prize
        print " ___ ___ ___"
        print " | 1 | | 2 | | 3 |"
        print " | | | | | |"
        print " --- --- ---"
        print " Escolle entre tres portas. Unha delas ten PREMIO!!"
        escolleporta = int(raw_input(" Escolle a porta número 1, a número 2 ou a 3.\n"))
        paso = True
        # Host opens a door that is neither the prize nor the player's pick.
        while paso:
            escolleoutra=random.randint(1,3)
            if escolleoutra != premio and escolleoutra != escolleporta:
                paso = False
        print " ESCOLLICHE a porta",escolleporta,",na porta",escolleoutra,"NON hai premio.\n"
        print " Queres cambiar e escoller outra porta?\n\n Teclea o número",escolleporta,"se non queres cambiar\n ou escolle outra porta que non sexa a",escolleoutra,".\n"
        cambiaporta = int(raw_input())
        # Four outcomes: switched/stayed x won/lost.
        if cambiaporta != escolleporta and cambiaporta == premio:
            print " Cambiache de porta e tes P R E M I O !! :)\n"
            cp = cp+1
        if cambiaporta == escolleporta and cambiaporta == premio:
            print " Non cambiache de porta e tes P R E M I O !! :)\n"
            ncp = ncp+1
        if cambiaporta != escolleporta and cambiaporta != premio:
            print " Cambiache de porta e NON tes premio :(\n"
        if cambiaporta == escolleporta and cambiaporta != premio:
            print " Non cambiache de porta e NON tes premio :(\n"
        partidas=partidas+1
        ganados=cp+ncp
        if ganados < 1:
            porcentaxecp=0
        else:
            # NOTE(review): Python 2 integer division -- the percentage is
            # truncated before round() is applied.
            porcentaxecp=round((cp*100/ganados) ,0)
        porcentaxencp=100-porcentaxecp
        print "Partidas xogadas:",partidas
        print " Gañache cambiando",cp,"veces,",porcentaxecp,"%, e sen cambiar",ncp,"veces.",porcentaxencp,"% \n "
        print "\n Outra partida? S/N"
        outra = raw_input()
        if outra != "s" and outra != "S":
            print "\n FIN DO XOGO..."
            xogando = False
xogo()
| [
"eumesmamente@riseup.net"
] | eumesmamente@riseup.net |
34f53a507b87e722114310cbe682154ef5b603bf | dbad872a1e1b1e03b76197c330f81dbb678372f3 | /roman_to_int.py | d4a59953c537747491a7343a35fd021f3c9e62df | [] | no_license | rohitandcode/LC_Easy | 6d1e4033af21599e0f49df2fad65239bda3674b2 | d6eb1461cbc1a445deae0126523905916d431c9d | refs/heads/master | 2020-12-03T09:44:52.398403 | 2020-06-24T18:18:49 | 2020-06-24T18:18:49 | 231,270,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | # LC13
class Solution(object):
    """LeetCode 13: convert a Roman numeral string to an integer."""

    def romanToInt(self, s):
        """
        :type s: str
        :rtype: int

        Scans left to right, adding each symbol's value; when a smaller
        symbol precedes a larger one (the I in IV), the smaller value was
        already added, so it is subtracted twice to net out the pair.
        Characters outside the Roman alphabet are skipped, as before.
        """
        values = {'I': 1, 'V': 5, 'X': 10, 'L': 50,
                  'C': 100, 'D': 500, 'M': 1000}
        chars = list(s)
        total = 0
        for pos, ch in enumerate(chars):
            if ch not in values:
                continue
            if pos > 0 and values[chars[pos - 1]] < values[ch]:
                # Subtractive pair: undo the earlier addition of the
                # previous symbol and add the larger value.
                total += values[ch] - 2 * values[chars[pos - 1]]
            else:
                total += values[ch]
        return total
"""
roman_to_int('III')
roman_to_int('VIII')
roman_to_int('IV')
roman_to_int('IX')
roman_to_int('XLIX')
"""
| [
"noreply@github.com"
] | noreply@github.com |
63631d49e64a6f1b69225c61c39177874adf39f9 | cde31c1b90800c241fd30bd143ac117594bd3f0e | /negPos/utils.py | f9012957fa01dfe219acdcdab192f0ec6e0090c6 | [] | no_license | hadarbmdev/interactionAnalysis_thesis | 59d8e6a9cddd9d4761725b67c9c0450c38dedcf7 | d2649462a521de788fcd45d3ddd2ebf885ebc6a6 | refs/heads/master | 2023-02-12T04:20:27.092651 | 2021-01-12T19:37:20 | 2021-01-12T19:37:20 | 272,697,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,913 | py | import enum
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import csv
import json
from operator import itemgetter
import os
import fileinput
from py import global_utils
import math
class BehaviorsEmotions(enum.Enum):
    """Joint coding of maternal behavior (pos/neg B) and tone (pos/neg T).

    The numeric .value of each member is written into the interaction JSON
    files (as the turn's "Code"), so the 1..6 ordering below must stay stable.
    """

    posBposT = enum.auto()     # positive behavior, positive tone (1)
    posBnegT = enum.auto()     # positive behavior, negative tone (2)
    negBposT = enum.auto()     # negative behavior, positive tone (3)
    negBnegT = enum.auto()     # negative behavior, negative tone (4)
    posBunknown = enum.auto()  # positive behavior, tone unknown  (5)
    negBunknown = enum.auto()  # negative behavior, tone unknown  (6)
def setBehaviorAndEmotionEntry(jFile, srcPath, *args):
    """Load one interaction JSON file, apply a per-turn transformation, and
    write the transformed interaction back under srcPath.

    Args:
        jFile: file name (possibly a bytes path) of the interaction JSON file.
        srcPath: destination directory; also used to locate the decoded
            source directory via global_utils.getDeocdedDir().
        *args: args[0] must be a callable that takes one interaction-turn dict
            and returns the (possibly mutated) turn.
    """
    directory = global_utils.getDeocdedDir(srcPath)
    filename = os.fsdecode(jFile)
    turnOperation = args[0]
    # Context managers ensure the files are closed (the original leaked both
    # handles and never closed the writer).
    with open(directory + filename, 'r') as jsonFile:
        currentInteraction = json.load(jsonFile)
    currentInteraction = [turnOperation(turn) for turn in currentInteraction]
    with open(srcPath + filename, 'w') as jsonFile:
        # BUG FIX: the original built a quote-fixed string (replacing ' with ")
        # but then wrote the raw str() of the list, which is not valid JSON.
        # json.dump serializes it correctly in one step.
        json.dump(currentInteraction, jsonFile)
def setBehaviorAndEmotionEntryForObj(interactionTurn):
    """Annotate a single interaction turn with its behavior/emotion 'Code'.

    Looks up the turn's 'Behavior' text in the legend to find its group
    ('Group Code'), then derives the combined behavior/tone code from that
    group plus the turn's three modifier fields. Returns the turn unchanged
    when the behavior text is not present in the legend.
    """
    legend = global_utils.getLegend()
    legendColumnX = "Behavior"
    lookupValue = interactionTurn[legendColumnX]
    # Key of the selected behavior inside the legend's Behavior sub-table.
    xKey = global_utils.getKeyForValue(legend[legendColumnX], lookupValue)
    if xKey == "-1":
        print('did not find key for lookupValue: "' +
              (lookupValue) + '" on column ' + legendColumnX)
        return interactionTurn
    # The same key indexes the 'Group Code' sub-table, giving the behavior group.
    behaviorCategory = legend["Group Code"][xKey]
    interactionTurn.setdefault("Code", -1)
    interactionTurn["Code"] = getBehaviorEmotionCodingFromBehaviorCategoryAndModifiers(
        behaviorCategory,
        interactionTurn['Modifier_1'],
        interactionTurn['Modifier_2'],
        interactionTurn['Modifier_3'],
    ).value
    return interactionTurn
def divideCurrentRowOfSubjects(bKey, legend, matrix_input, motherTurnsCount, childTurnsCount, rowNum):
    """Normalize one cell of the behavior matrix by the mother's turn count.

    Replaces matrix_input.loc[rowNum, bKey] with its value divided by
    motherTurnsCount, mapping NaN to 0. Mutates and returns matrix_input.
    (legend and childTurnsCount are accepted for interface compatibility but
    unused here -- this path normalizes mother counts only.)
    """
    value = matrix_input.loc[rowNum, bKey]
    normalized = 0 if math.isnan(value) else value / motherTurnsCount
    matrix_input.loc[rowNum, bKey] = normalized
    return matrix_input
def getBehaviorEmotionCodingFromBehaviorCategoryAndModifiers(behaviorCategory, mod1, mod2, mod3):
    """Map a behavior group plus its modifiers to a BehaviorsEmotions code.

    Behavior counts as negative when the group is Control / Inadequate
    boundaries setting / Hostility, or when any modifier flags the turn as
    'control'; otherwise it is positive. Tone comes from the modifiers and
    may be positive, negative, or unknown (positive is checked first, as in
    the original dispatch).
    """
    negative_groups = ("Control", "Inadequate boundaries setting", "Hostility")
    is_negative = behaviorCategory in negative_groups or isControlBehavior(mod1, mod2, mod3)
    if isPostivieTone(mod1, mod2, mod3):
        return BehaviorsEmotions.negBposT if is_negative else BehaviorsEmotions.posBposT
    if isNegativeTone(mod1, mod2, mod3):
        return BehaviorsEmotions.negBnegT if is_negative else BehaviorsEmotions.posBnegT
    return BehaviorsEmotions.negBunknown if is_negative else BehaviorsEmotions.posBunknown
def isPostivieTone(mod1, mod2, mod3):
    """Return True if any of the three modifiers is 'mother positive tone'.

    (The 'Postivie' spelling is kept because callers in this module use it.)
    """
    # Idiomatic membership test instead of an if/else returning True/False.
    return "mother positive tone" in (mod1, mod2, mod3)
def isControlBehavior(mod1, mod2, mod3):
    """Return True if any of the three modifiers flags the turn as 'control'."""
    return "control" in (mod1, mod2, mod3)
def isNegativeTone(mod1, mod2, mod3):
    """Return True if any of the three modifiers is 'mother negative tone'."""
    # Idiomatic membership test instead of an if/else returning True/False.
    return "mother negative tone" in (mod1, mod2, mod3)
| [
"hadarbmdev@gmail.com"
] | hadarbmdev@gmail.com |
a94191f3db4e9969babddfa2a95463af5918d353 | b1bd44b26db3e81368549bb3b2fddf541630649e | /IMCcalc.py | 4645a74ef8a82e1c1a3761d8f76864c13203ab58 | [] | no_license | josh231101/RGBColorPicker | c8d9e08d1165dad1ac8d6ffbee887491c05356b0 | d9f6251b168a686450604512b493baf33f7ac6e5 | refs/heads/master | 2023-01-16T04:46:08.762524 | 2020-11-23T00:18:07 | 2020-11-23T00:18:07 | 311,843,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | import PySimpleGUI as sg
def calc_imc(p,h):
return p /(h**2)
sg.theme('Topanga')
layout = [
[sg.T('Peso (kh): '),sg.InputText(),],
[sg.T('Altura (m): '),sg.InputText()],
[sg.Button("Clear"), sg.Button("Calc"), sg.Button("Salir")]
]
window = sg.Window('IMC CALCULATOR',layout)
while True:
event, values = window.read()
if event == sg.WINDOW_CLOSED or event == "Salir":
break
elif event == "Clear":
window[0].update("")
window[1].update("")
elif event == "Calc":
try:
p = float(values[0])
h = float(values[1])
imc = calc_imc(p,h)
sg.popup(f"IMC={imc}")
except:
sg.popup_ok("No ingresaste numeros o te falta un campo")
window.close() | [
"wiijosue333@gmail.com"
] | wiijosue333@gmail.com |
ed0fe0703252d2b5008aa85f54c93ebe6c1e81fb | ef662f6e004f29f0c4d9623c8655e7562c9994d1 | /miniblp/market.py | f6946decb698b9cacf743da5bd8ca9f3977e60e3 | [] | no_license | james-atkins/blp-assignment | faf477ebfc56071b295d6d29b134bff368287673 | 5e511420e062765e26a5e0c9e0a42f2f103ce9cf | refs/heads/master | 2023-01-05T08:04:05.261243 | 2020-11-05T10:53:06 | 2020-11-05T10:53:06 | 227,912,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,706 | py | from typing import Optional, Tuple
import numpy as np
from numba import njit
from scipy import linalg
from .common import Vector, Theta2, Matrix
from .data import Individuals, Products
from .iteration import Iteration, IterationResult
class Market:
    """ A market underlying the BLP model.

    Couples the individuals (simulation draws / demographics) and products
    observed in one market, and provides the share-inversion and Jacobian
    computations used by the estimator.
    """
    # Market identifier shared by all rows of `individuals` and `products`.
    name: str
    individuals: Individuals
    products: Products
    # Log of observed product market shares, cached for the contraction mapping.
    log_market_shares: Vector
    # Closed-form (plain logit) mean utilities: log(s_j) - log(s_0).
    logit_delta: Vector
    def __init__(self, individuals: Individuals, products: Products):
        """Pair the individuals and products of a single market.

        Both inputs must carry exactly one (and the same) market id.
        """
        market_ids = np.unique(individuals.market_ids)
        assert market_ids == np.unique(products.market_ids)
        assert len(market_ids) == 1
        self.name = market_ids[0]
        self.individuals = individuals
        self.products = products
        # Compute the closed form (logit) mean utility
        self.log_market_shares = np.log(self.products.market_shares)
        # The outside option's share is whatever is left after all products.
        log_share_outside_option = np.log(1 - self.products.market_shares.sum())
        self.logit_delta = self.log_market_shares - log_share_outside_option
    def __repr__(self):
        return f"<Market: {self.name}>"
    def compute_mu(self, theta2: Theta2) -> Matrix:
        """Individual-specific utility deviations mu.

        mu = X2 @ (sigma @ nodes' + pi @ demographics'); the demographics
        term is added only when demographics are present (D > 0).
        Presumably shaped (products, individuals) -- see the J x I note in
        _compute_choice_probabilities.
        """
        random_coefficients = theta2.sigma @ self.individuals.nodes.T
        if self.individuals.D > 0:
            random_coefficients += theta2.pi @ self.individuals.demographics.T
        return self.products.X2 @ random_coefficients
    def compute_choice_probabilities(self, delta: Vector, theta2: Theta2) -> Matrix:
        """Choice probabilities for every product/individual pair at (delta, theta2)."""
        mu = self.compute_mu(theta2)
        return _compute_choice_probabilities(delta, mu)
    def compute_market_shares(self, delta: Vector, theta2: Theta2) -> Vector:
        """Predicted market shares implied by (delta, theta2)."""
        mu = self.compute_mu(theta2)
        return _compute_market_shares(delta, mu, self.individuals.weights)
    def compute_delta(self, mu: Matrix, iteration: Iteration, initial_delta: Vector) -> IterationResult:
        """ Compute the mean utility for this market that equates observed and predicted market shares. """
        # Use closed form solution if no heterogeneity
        if self.products.K2 == 0:
            return IterationResult(self.logit_delta)
        else:
            log_market_shares = self.log_market_shares
            individual_weights = self.individuals.weights
            def contraction(delta: Vector) -> Vector:
                # BLP contraction: delta' = delta + log(s_observed) - log(s(delta)).
                computed_market_shares = _compute_market_shares(delta, mu, individual_weights)
                return delta + log_market_shares - np.log(computed_market_shares)
            return iteration.iterate(initial_delta, contraction)
    def solve_demand(self, initial_delta: Vector, theta2: Theta2, iteration: Iteration, compute_jacobian: bool) -> Tuple[IterationResult, Optional[Matrix]]:
        """Invert market shares for delta and optionally compute d(delta)/d(theta2).

        Returns the iteration result and the Jacobian (None when the
        iteration failed or compute_jacobian is False).
        """
        # Solve the contraction mapping
        mu = self.compute_mu(theta2)
        result = self.compute_delta(mu, iteration, initial_delta)
        # Compute the Jacobian
        if result.success and compute_jacobian:
            jacobian = self._compute_delta_by_theta_jacobian(theta2, result.final_delta, mu)
        else:
            jacobian = None
        return result, jacobian
    def jacobian(self, theta2: Theta2, delta: Vector) -> Matrix:
        """d(delta)/d(theta2) evaluated at a given delta."""
        mu = self.compute_mu(theta2)
        return self._compute_delta_by_theta_jacobian(theta2, delta, mu)
    def _compute_delta_by_theta_jacobian(self, theta2: Theta2, delta: Vector, mu: Matrix) -> Matrix:
        """Implicit-function Jacobian: solve (ds/ddelta) x = -(ds/dtheta)."""
        choice_probabilities = _compute_choice_probabilities(delta, mu)
        shares_by_delta_jacobian = self._compute_share_by_delta_jacobian(choice_probabilities)
        shares_by_theta_jacobian = self._compute_share_by_theta_jacobian(choice_probabilities, theta2)
        return linalg.solve(shares_by_delta_jacobian, -shares_by_theta_jacobian)
    def _compute_share_by_delta_jacobian(self, choice_probabilities: Matrix):
        """ Compute the Jacobian of market shares with respect to delta. """
        diagonal_shares = np.diagflat(self.products.market_shares)
        weighted_probabilities = self.individuals.weights[:, np.newaxis] * choice_probabilities.T
        return diagonal_shares - choice_probabilities @ weighted_probabilities
    def _compute_share_by_theta_jacobian(self, choice_probabilities: Matrix, theta2: Theta2):
        """ Compute the Jacobian of market shares with respect to theta.

        (Annotation corrected: choice_probabilities is the product-by-individual
        matrix produced by _compute_choice_probabilities, not a vector.)
        """
        jacobian = np.empty(shape=(self.products.J, theta2.P))
        for p, parameter in enumerate(theta2.unfixed):
            v = parameter.agent_characteristic(self)
            x = parameter.product_characteristic(self)
            jacobian[:, p] = (choice_probabilities * v.T * (x - x.T @ choice_probabilities)) @ self.individuals.weights
        return jacobian
return jacobian
@njit
def _compute_choice_probabilities(delta: Vector, mu: Matrix) -> Matrix:
    """
    Compute choice probabilities for every product/individual pair.
    Uses the log-sum-exp trick, which is inspired from the pyblp code, translated to numba.
    """
    # J x I array
    utilities = np.expand_dims(delta, axis=1) + mu
    # Loop is equivalent to np.clip(utilities.max(axis=0, keepdims=True), 0, None)
    # NOTE(review): utilities.shape is (products, individuals); the names I and J
    # are swapped in the unpacking below, but the loop correctly takes, for each
    # individual (column), the maximum utility over products, clipped below at 0.
    I, J = utilities.shape
    utility_reduction = np.zeros(J)
    for j in range(J):
        for i in range(I):
            if utilities[i, j] > utility_reduction[j]:
                utility_reduction[j] = utilities[i, j]
    # Subtract each individual's maximum for numerical stability.
    utilities -= utility_reduction
    exp_utilities = np.exp(utilities)
    # The outside option contributes exp(0 - reduction) to the denominator.
    scale = np.exp(-utility_reduction)
    return exp_utilities / (scale + np.sum(exp_utilities, axis=0))
@njit
def _compute_market_shares(delta: Vector, mu: Matrix, individual_weights: Vector) -> Vector:
    """Predicted market shares: choice probabilities integrated over individuals."""
    # Weighted sum over individuals of each product's choice probability.
    return _compute_choice_probabilities(delta, mu) @ individual_weights
| [
"hello@jamesatkins.net"
] | hello@jamesatkins.net |
2f8373e82d8f6770def3f2102776d72c8c6a834f | 6871c745f82121a26812502f320f0c21741fe42b | /config.py | b802d2b6f4a520a1bf1353db9ec84656c92ee378 | [] | no_license | ericdonnelly/Brick-House | 7fc8a8eadcfdf826963d98cc14825a4bd64b9966 | ad36a35954b37ed211200bb31f3abf402129d599 | refs/heads/main | 2023-02-12T08:50:11.220865 | 2021-01-06T02:35:39 | 2021-01-06T02:35:39 | 320,123,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | # GMAPS API Key
gkey = "AIzaSyA7syV4Z2LAYZ64QqKCcmUrGGN18-PmqX4" | [
"ericdonnelly601@gmail.com"
] | ericdonnelly601@gmail.com |
05ccf6e2d5d1a9e66261f6829dcff9f2468cbea3 | 124bdbf417117fe23168f043dd265f88b3bd6e70 | /lib/datasets/__init__.py | e62bcd2b434d9f62ee2b19a9875c4c64db1d00e6 | [] | no_license | tonyonifo/anytime | 943f56ebd4759f0f5181607d8030d50eabb8d38b | 86bba7a334fc65899da01b30d925437163c1dede | refs/heads/master | 2023-08-02T21:32:42.977184 | 2021-10-05T16:58:35 | 2021-10-05T16:58:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .cityscapes import Cityscapes as cityscapes | [
"email@email.com"
] | email@email.com |
d2c3e39fff7a8b828b1238645aaf46ce75152fe1 | d15b4a64e3ac1a488a3eae0e0cebe0f215247f09 | /build_image_data.py | 8bc1aa659af8f5805a924b99a7be4ff6f607a385 | [
"MIT"
] | permissive | AlexanderSoroka/CNN-oregon-wildlife-classifier | adbb3d67d2ff4eb46b5e2a9c1d9a0e66388f9e60 | a57bf84322f1fa153e44b6e4209beb09663f4d9e | refs/heads/main | 2023-03-26T09:24:52.989527 | 2021-03-08T16:25:26 | 2021-03-08T16:25:26 | 343,571,381 | 2 | 6 | MIT | 2021-03-09T07:53:41 | 2021-03-01T22:11:28 | Python | UTF-8 | Python | false | false | 12,210 | py | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module implements TFRecords creation based on filetree and train/val ratio as a lab example for BSU students.
The image data set is expected to reside in JPEG files located in the following directory structure:
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into a sharded data set consisting of TFRecord files
output_directory/train-00000-of-01024
...
output_directory/train-01023-of-01024
and
output_directory/validation-00000-of-00128
...
output_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set.
The labels file contains a list of valid labels where each line corresponds to a label.
We map each label contained in the file to an integer corresponding to the line number starting from 0.
Each record within the TFRecord file is a serialized
Example proto. The Example proto contains many fields, the most important are:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
# NOTE(review): eager execution is disabled here, presumably for graph-era TF
# compatibility -- confirm it is still required by the APIs used below.
tf.compat.v1.disable_eager_execution()
# Command-line flags: where to read images, where to write TFRecords, and how
# to shard/parallelize the conversion.
flags.DEFINE_string('input', default=None, help='Data directory')
flags.DEFINE_string('output', default=None, help='Output directory')
flags.DEFINE_integer('shards', 10, 'Number of shards per split of TFRecord files.')
flags.DEFINE_integer('num_threads', 2, 'Number of threads to preprocess the images.')
flags.DEFINE_string('labels_file', 'labels', 'Labels file')
flags.DEFINE_integer('shuffle', default=10, help='How many times apply shuffle procedure')
# Parsed flag values, populated by absl when app.run() is invoked.
FLAGS = flags.FLAGS
def _int64_feature(value):
    """Wrapper for inserting int64 features into Example proto."""
    # Accept either a single int or a list of ints.
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def _convert_to_example(filename, image_buffer, label, text):
    """Build an Example proto for an example.
    Args:
      filename: string, path to an image file, e.g., '/path/to/example.JPG'.
        (Not serialized into the record; kept for interface stability.)
      image_buffer: string, JPEG encoding of RGB image
      label: integer, identifier for the ground truth for the network
      text: string, unique human-readable, e.g. 'dog'
    Returns:
      Example proto
    """
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/label': _int64_feature(label),
        'image/text': _bytes_feature(tf.compat.as_bytes(text)),
        'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
    return example
def _process_image(filename, coder):
    """Process a single image file.
    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: unused; kept for interface compatibility with callers.
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
    """
    # BUG FIX: image_data was never assigned, so this function always raised
    # NameError. Read the file the same way _process_image_files_batch does.
    with tf.io.gfile.GFile(filename, 'rb') as f:
        image_data = f.read()
    return image_data
def _process_image_files_batch(
    thread_index, ranges, name, filenames, texts, labels, num_shards
):
    """Processes and saves list of images as TFRecord in 1 thread.
    Args:
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      texts: list of strings; each string is human readable, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    # Evenly split this thread's file range into per-shard sub-ranges.
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output, output_filename)
        writer = tf.io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            filename = filenames[i]
            label = labels[i]
            text = texts[i]
            try:
                with tf.io.gfile.GFile(filename, 'rb') as f:
                    image_buffer = f.read()
            # NOTE(review): this broad catch also hides I/O errors unrelated to
            # bad image files -- consider narrowing the exception type.
            except Exception as e:
                print(e)
                print('SKIPPED: Unexpected error while decoding %s.' % filename)
                continue
            example = _convert_to_example(filename, image_buffer, label, text)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1
            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()
        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
    """Process and save list of images as TFRecord of Example protos.
    Args:
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      texts: list of strings; each string is human readable, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    assert len(filenames) == len(texts)
    assert len(filenames) == len(labels)
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    threads = []
    for thread_index in range(len(ranges)):
        args = (thread_index, ranges, name, filenames, texts, labels, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(filenames)))
    sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
    """Build a list of all images files and labels in the data set.
    Args:
      data_dir: string, root directory of images, laid out as
        data_dir/<label>/<image>.jpg where <label> is the class name.
      labels_file: string, path to a text file holding one valid label per
        line; labels are mapped to integer ids by line order, starting at 1
        (0 is reserved for the background class).
    Returns:
      filenames: list of strings; each string is a path to an image file.
      texts: list of strings; each string is the class, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth.
    """
    print('Determining list of input files and labels from %s.' % data_dir)
    unique_labels = [l.strip() for l in tf.io.gfile.GFile(labels_file, 'r').readlines()]
    print(f'Unique labels: {unique_labels}')
    filenames = []
    texts = []
    labels = []
    # Label index 0 is left empty as a background class, so enumerate from 1.
    for label_index, text in enumerate(unique_labels, start=1):
        pattern = '%s/%s/*.jpg' % (data_dir, text)
        matching_files = tf.io.gfile.glob(pattern)
        filenames += matching_files
        texts += [text] * len(matching_files)
        labels += [label_index] * len(matching_files)
    print('Found %d JPEG files across %d labels inside %s.' % (len(filenames), len(unique_labels), data_dir))
    return filenames, texts, labels
def _shuffle(filenames, texts, labels):
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.shuffle(shuffled_index)
return [filenames[i] for i in shuffled_index], \
[texts[i] for i in shuffled_index], \
[labels[i] for i in shuffled_index]
def main(_):
    """Entry point: validate flags, then convert the image tree to TFRecords."""
    assert FLAGS.input, ('Specify data root directory with --input flag')
    assert FLAGS.output, ('Specify destination directory with --output flag')
    assert not FLAGS.shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.shards')
    print('Saving results to %s' % FLAGS.output)
    if not os.path.exists(FLAGS.output):
        os.makedirs(FLAGS.output)
    # Collect every (filename, label text, label id) triple under --input.
    # Note: no train/validation split is performed -- everything is written
    # to the 'train' shards below.
    names, texts, labels = _find_image_files(os.path.join(FLAGS.input), FLAGS.labels_file)
    # Shuffle repeatedly, as requested by --shuffle.
    for _ in range(FLAGS.shuffle):
        names, texts, labels = _shuffle(names, texts, labels)
    _process_image_files('train', names, texts, labels, FLAGS.shards)
    print(f'Dataset size: {len(names)}')
if __name__ == '__main__':
app.run(main)
| [
"soroka.a.m@gmail.com"
] | soroka.a.m@gmail.com |
fa4a0e62bf2e2b590e0d76319c06be7d8fa5f3c2 | 8713f974dd88fddd0079315f8b420794953ce91d | /src/utils/crypto_key.py | b6042654c76f9dd954c56dfa99f44d030bb768e5 | [
"Apache-2.0"
] | permissive | dhxie/cosc-learning-labs | 99ec8f190f4d25e6ac8134e807d609d7731b68d8 | 1ae30212e679012be4ac9e72dce7808777e69f9c | refs/heads/master | 2021-01-18T17:16:55.942687 | 2015-07-17T02:42:41 | 2015-07-17T02:42:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,007 | py | #!/usr/bin/env python
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# This code uses https://github.com/pexpect/pexpect. The pexpect licence is below:
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
This script will use telnet to login to a Cisco XRv device and set the cryptographic keys
to support SSH connectivity. This version of the script uses the arguments encoded in the
script itself.
To test for whether the crypto key is set, and whether the netconf agent is running, we can
use this command:
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 830 cisco@172.16.1.11 -s netconf
Guidance from: See http://linuxcommando.blogspot.com/2008/10/how-to-disable-ssh-host-key-checking.html
By configuring the null device file as the host key database, SSH is fooled into thinking that the
SSH client has never connected to any SSH server before, and so will never run into a mismatched host key.
The parameter StrictHostKeyChecking specifies if SSH will automatically add new host keys to the
host key database file. By setting it to no, the host key is automatically added, without user
confirmation, for all first-time connection. Because of the null key database file, all
connection is viewed as the first-time for any SSH server host. Therefore, the host key is
automatically added to the host key database with no user confirmation. Writing the key to
the /dev/null file discards the key and reports success.
'''
from __future__ import print_function
from __future__ import absolute_import
import pexpect
import sys
def add_crypto_key (devices=[], username = 'cisco', password = 'cisco'):
if len(devices) != 0:
network_devices = devices
else:
network_devices = ['172.16.1.11']
for network_device in network_devices:
telnet_command = "telnet %s" % network_device
child = pexpect.spawn (telnet_command)
child.logfile = sys.stdout
child.expect ('Username:')
child.sendline (username)
child.expect ('Password:')
child.sendline (password)
child.expect ('#')
child.sendline ('crypto key generate dsa')
index = child.expect (['[yes/no]','1024]'])
if index == 0:
child.sendline ('yes')
child.expect ('1024]')
child.sendline ('')
child.sendline ('')
elif index == 1:
child.sendline ('')
child.sendline ('')
if __name__ == '__main__':
add_crypto_key ()
| [
"anamatute@hotmail.com"
] | anamatute@hotmail.com |
4c72cb0146c753049d6f053bd18d47b06596e152 | 2aad3dc790f288c9ff178b2fb1bd7f86fbc4a47e | /cnbolgs/users/serializers.py | 1994c05602a1b24b8cc84665508834d3e132f1c9 | [] | no_license | Daisy-Yjy/blog | 06e0a616e16736cd3301d295179245afe5530df0 | e107221ebdcd90c2bd93fd3f4ea6ce0e647652b2 | refs/heads/main | 2023-04-06T20:22:34.389756 | 2021-04-17T02:45:43 | 2021-04-17T02:45:43 | 355,381,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,274 | py | import re
from django_redis import get_redis_connection
from rest_framework import serializers
from rest_framework_jwt.settings import api_settings
from itsdangerous import TimedJSONWebSignatureSerializer as TJWSSerializer, BadData
from .models import User, GithubUser
from cnbolgs import settings
class RegisterViewSerializer(serializers.ModelSerializer):
    """User registration serializer.

    Validates the registration form (password confirmation, SMS code from
    Redis, email code from Redis, terms-of-service agreement), then creates
    the user and attaches a JWT as `token` for the response.
    """
    # Write-only inputs that are not stored on the User model.
    password2 = serializers.CharField(label='确认密码', write_only=True)
    email_code = serializers.CharField(label='邮箱验证码', write_only=True)
    sms_code = serializers.CharField(label='短信验证码', write_only=True)
    allow = serializers.CharField(label='同意协议', write_only=True)
    # Read-only output: JWT issued in create().
    token = serializers.CharField(label='token', read_only=True)
    class Meta:
        model = User
        fields = ['id', 'username', 'mobile', 'password', 'password2', 'sms_code', 'allow', 'token', 'email_code', 'email']
        extra_kwargs = {
            'username': {
                'min_length': 5,
                'max_length': 20,
                'error_messages': {
                    'min_length': '仅允许5-20字符的用户名',
                    'max_length': '仅允许5-20字符的用户名'
                }
            },
            'password': {
                'write_only': True,
                'min_length': 8,
                'max_length': 20,
                'error_messages': {
                    'min_length': '仅允许8-20字符的密码',
                    'max_length': '仅允许8-20字符的密码'
                }
            },
        }
    def validate_mobile(self, value):
        """Field-level check: mobile number must be a valid CN number."""
        if not re.match(r'1[3-9]\d{9}$', value):
            raise serializers.ValidationError('手机号格式错误')
        return value
    def validate_allow(self, value):
        """Field-level check: user must have agreed to the terms ('1')."""
        if value != '1':
            raise serializers.ValidationError('请同意用户协议')
        return value
    def validate(self, attrs):
        """Object-level checks: password confirmation, SMS and email codes."""
        if attrs['password2'] != attrs['password']:
            raise serializers.ValidationError('两个密码不一致')
        # Compare the submitted SMS code with the one cached in Redis.
        redis_conn_sms = get_redis_connection('sms_codes')
        real_sms_code = redis_conn_sms.get('sms_%s' % attrs['mobile'])
        if real_sms_code is None or attrs['sms_code'] != real_sms_code.decode():
            raise serializers.ValidationError('短信验证码错误')
        # Compare the submitted email code with the one cached in Redis.
        redis_conn_email = get_redis_connection('email_codes')
        real_email_code = redis_conn_email.get('email_%s' % attrs['email'])
        if real_email_code is None or attrs['email_code'] != real_email_code.decode():
            raise serializers.ValidationError('邮箱验证码错误')
        return attrs
    def create(self, validated_data):
        """Create the user (hashed password) and attach a JWT as .token."""
        # Strip fields that are not columns on the User model.
        del validated_data['password2']
        del validated_data['allow']
        del validated_data['sms_code']
        del validated_data['email_code']
        password = validated_data.pop('password')
        user = User.objects.create(**validated_data)
        user.set_password(password)
        user.save()
        # Generate a JWT for the newly registered user.
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER  # builds the payload from the user
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER  # encodes the payload into a JWT
        payload = jwt_payload_handler(user)  # user-specific claims
        token = jwt_encode_handler(payload)  # complete signed JWT
        user.token = token
        return user
class LoginViewSerializer(serializers.ModelSerializer):
    """User login serializer.

    Declares the login fields only: `sms_code` is accepted as input and
    `token` is returned to the client. No custom validation is defined here.
    """
    sms_code = serializers.CharField(label='验证码', write_only=True)
    token = serializers.CharField(label='token', read_only=True)
    class Meta:
        model = User
        fields = ['username', 'mobile', 'password', 'sms_code', 'token']
class GithubUserViewSerializer(serializers.Serializer):
    """Serializer binding a GitHub openid to a local user account.

    Decrypts the signed openid token, verifies SMS/email codes, then either
    links an existing user (matched by mobile) or creates a new one.
    """
    openid_token = serializers.CharField(label='openid')
    email = serializers.CharField(label='邮箱')
    mobile = serializers.CharField(label='手机号')
    password = serializers.CharField(label='密码', max_length=20, min_length=8)
    sms_code = serializers.CharField(label='短信验证码')
    email_code = serializers.CharField(label='邮箱验证码')
    def validate_mobile(self, value):
        """Field-level check: mobile number must be a valid CN number."""
        if not re.match(r'1[3-9]\d{9}$', value):
            raise serializers.ValidationError('手机号格式错误')
        return value
    def validate_email(self, value):
        """Field-level check of the email format.

        NOTE(review): `[com,cn,net]{1,3}` is a character class matching 1-3
        letters from {c,o,m,n,e,t,','}, not the literal TLDs -- the intent
        was presumably the alternation `(com|cn|net)`. Confirm and fix.
        """
        if not re.match(r'[0-9a-zA-Z_]{0,19}@[0-9a-zA-Z]{1,13}\.[com,cn,net]{1,3}$', value):
            raise serializers.ValidationError('邮箱格式错误')
        return value
    def validate(self, attrs):
        # Decrypt the openid from its signed, time-limited token (600 s TTL).
        openid_token = attrs.pop('openid_token')
        serializer = TJWSSerializer(settings.SECRET_KEY, 600)
        try:
            data = serializer.loads(openid_token)
            openid = data.get('openid')
        except BadData:
            openid = None
        if openid is None:
            raise serializers.ValidationError('openid无效')
        # Stash the decoded openid for create().
        attrs['openid'] = openid
        mobile = attrs['mobile']
        sms_code = attrs['sms_code']
        email_code = attrs['email_code']
        # Compare the submitted SMS code with the one cached in Redis.
        redis_conn_sms = get_redis_connection('sms_codes')
        real_sms_code = redis_conn_sms.get('sms_%s' % attrs['mobile'])
        if real_sms_code is None or sms_code != real_sms_code.decode():
            raise serializers.ValidationError('短信验证码错误')
        # Compare the submitted email code with the one cached in Redis.
        redis_conn_email = get_redis_connection('email_codes')
        real_email_code = redis_conn_email.get('email_%s' % attrs['email'])
        if real_email_code is None or email_code != real_email_code.decode():
            raise serializers.ValidationError('邮箱验证码错误')
        # If a user with this mobile already exists, bind to that account.
        try:
            user = User.objects.get(mobile=mobile)
        except User.DoesNotExist:
            pass
        else:
            attrs['user'] = user
        return attrs
    def create(self, validated_data):
        user = validated_data.get('user')
        if user is None:  # no existing account for this mobile: create one
            mobile = validated_data.get('mobile')
            password = validated_data.get('password')
            email = validated_data.get('email')
            user = User.objects.create(username=mobile, mobile=mobile, email=email)
            user.set_password(password)
            user.save()
        # Bind the openid to the (existing or new) user.
        openid = validated_data.get('openid')
        GithubUser.objects.create(openid=openid, user=user)
        return user
class ForgetPasswordSerializer(serializers.Serializer):
    """Reset a forgotten password using an emailed verification code."""
    email = serializers.CharField(label='邮箱')
    email_code = serializers.CharField(label='邮箱验证码')
    password = serializers.CharField(label='输入密码', write_only=True)
    password2 = serializers.CharField(label='确认密码', write_only=True)

    def validate_email(self, value):
        """Field-level check: local-part@host.(com|cn|net).

        Bug fix: the original ``[com,cn,net]{1,3}`` is a character class
        matching any 1-3 of the letters c/o/m/n/e/t (or commas), so invalid
        TLDs such as ".moc" passed.  Use an alternation group instead.
        """
        if not re.match(r'[0-9a-zA-Z_]{0,19}@[0-9a-zA-Z]{1,13}\.(?:com|cn|net)$', value):
            raise serializers.ValidationError('邮箱格式错误')
        return value

    def validate(self, attrs):
        """Check the two passwords match and the email code is correct."""
        if attrs['password2'] != attrs['password']:
            raise serializers.ValidationError('两个密码不一致')
        # Compare with the code cached in Redis under 'email_<email>'.
        redis_conn_email = get_redis_connection('email_codes')
        real_email_code = redis_conn_email.get('email_%s' % attrs['email'])
        if real_email_code is None or attrs['email_code'] != real_email_code.decode():
            raise serializers.ValidationError('邮箱验证码错误')
        return attrs

    def create(self, validated_data):
        """Set the new (hashed) password on the account matching *email*."""
        del validated_data['password2']
        del validated_data['email_code']
        email = validated_data.get('email')
        password = validated_data.get('password')
        user = User.objects.get(email=email)
        user.set_password(password)
        user.save()
        return user
| [
"1971141290@qq.com"
] | 1971141290@qq.com |
e899dce720bec0bd2b1c3742bee19db16187e09a | b9121dcc859563d1e148ab539a47e471f467ede2 | /cvfyblogproj/blog/models.py | e4450dddf449f17ff8e3466dfa8b0818ff5a941d | [] | no_license | VinitK/blog-application | 2a12ce97b794f23a8c6e9a3b810c2756029453e2 | e9cc19ddb9e98f5515ae557fe2b345e907bb9d2a | refs/heads/master | 2020-04-28T01:14:09.706591 | 2019-03-11T03:33:47 | 2019-03-11T03:33:47 | 174,847,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | from django.db import models
from django.utils import timezone
from django.urls import reverse
# Create your models here.
class Post(models.Model):
    """A blog post; deleted together with its author."""
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=256)
    text = models.TextField()
    create_date = models.DateTimeField(default=timezone.now)
    # Null until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)

    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()

    def approve_comments(self):
        """Return only approved comments (reverse FK related_name 'comments')."""
        return self.comments.filter(approved_comment=True)

    def get_absolute_url(self):
        """Canonical URL of this post's detail page."""
        return reverse("post_detail", kwargs={'pk': self.pk})

    def __str__(self):
        return self.title
class Comment(models.Model):
    """A comment on a Post; hidden until approved."""
    post = models.ForeignKey('blog.Post', related_name='comments', on_delete=models.CASCADE)
    author = models.CharField(max_length=256)
    text = models.TextField()
    create_date = models.DateTimeField(default=timezone.now)
    # Moderation flag; flipped by approve().
    approved_comment = models.BooleanField(default=False)

    def approve(self):
        """Mark the comment as approved and persist it."""
        self.approved_comment = True
        self.save()

    def get_absolute_url(self):
        """After acting on a comment, redirect back to the post list."""
        return reverse('post_list')

    def __str__(self):
        return self.text
| [
"vinit@cutshort.io"
] | vinit@cutshort.io |
483a613632f81bdf62554edbb2080815498078b6 | 750db6d8fb836a361e7b0d1060e52efd2f2c4c36 | /inference_app/migrations/0016_auto_20200330_1705.py | c58edaa13aa88d37ce24c41cd9d8986030b2893a | [] | no_license | dragosavac/vehicle_detection_app | b1aa2df97b78c4a3b63a1863d1d275e2881ac838 | 394b64161ec40f3389c46839311489b85b3d87bd | refs/heads/master | 2023-01-24T22:47:59.900112 | 2020-11-28T12:46:52 | 2020-11-28T12:46:52 | 251,379,121 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # Generated by Django 2.2.11 on 2020-03-30 17:05
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drop the InferenceModel table and the FK pointing at it."""

    dependencies = [
        ('inference_app', '0015_auto_20200330_1626'),
    ]

    operations = [
        # Remove the FK first so the model can be deleted cleanly.
        migrations.RemoveField(
            model_name='inferenceinstance',
            name='calculation_model',
        ),
        migrations.DeleteModel(
            name='InferenceModel',
        ),
    ]
| [
"rade.dragosavac@symphony.is"
] | rade.dragosavac@symphony.is |
c73ca1e3ae1851e55b801e24ff6219b8ff872295 | 59eccb126e4efd0f39bab0b00683a9fbdd9b5e69 | /tests/test_core.py | a1a01024c77cd89be8debe19e4246d704d0f3971 | [
"BSD-3-Clause"
] | permissive | Deepomatic/channels_redis | c694ca7ab711937e2c3c245c5f817cdf06549640 | 54d935beb6842333ba4f3e6c29cbe86c4307cf16 | refs/heads/master | 2020-03-09T20:07:54.566703 | 2018-04-03T16:05:44 | 2018-04-03T16:05:44 | 128,975,901 | 0 | 0 | null | 2018-04-10T18:08:14 | 2018-04-10T18:08:13 | null | UTF-8 | Python | false | false | 9,624 | py | import asyncio
import async_timeout
import pytest
from async_generator import async_generator, yield_
from asgiref.sync import async_to_sync
from channels_redis.core import ChannelFull, RedisChannelLayer
# Redis endpoints the integration tests connect to (a local server must be running).
TEST_HOSTS = [("localhost", 6379)]
# Ten logical databases on the same server, used to exercise multi-host sharding.
MULTIPLE_TEST_HOSTS = [
    "redis://localhost:6379/0",
    "redis://localhost:6379/1",
    "redis://localhost:6379/2",
    "redis://localhost:6379/3",
    "redis://localhost:6379/4",
    "redis://localhost:6379/5",
    "redis://localhost:6379/6",
    "redis://localhost:6379/7",
    "redis://localhost:6379/8",
    "redis://localhost:6379/9",
]
@pytest.fixture()
@async_generator
async def channel_layer():
    """
    Channel layer fixture that flushes automatically.
    """
    channel_layer = RedisChannelLayer(hosts=TEST_HOSTS, capacity=3)
    await yield_(channel_layer)
    # Teardown: wipe all channels/groups so tests stay independent.
    await channel_layer.flush()
@pytest.fixture()
@async_generator
async def channel_layer_multiple_hosts():
    """
    Channel layer fixture that flushes automatically.
    """
    # Same as channel_layer, but sharded across ten Redis databases.
    channel_layer = RedisChannelLayer(hosts=MULTIPLE_TEST_HOSTS, capacity=3)
    await yield_(channel_layer)
    await channel_layer.flush()
@pytest.mark.asyncio
async def test_send_receive(channel_layer):
    """
    Makes sure we can send a message to a normal channel then receive it.
    """
    await channel_layer.send(
        "test-channel-1",
        {
            "type": "test.message",
            "text": "Ahoy-hoy!",
        },
    )
    # The message must come back intact on the same channel.
    message = await channel_layer.receive("test-channel-1")
    assert message["type"] == "test.message"
    assert message["text"] == "Ahoy-hoy!"
@pytest.mark.parametrize("channel_layer", [None])  # Fixture can't handle sync
def test_double_receive(channel_layer):
    """
    Makes sure we can receive from two different event loops using
    process-local channel names.
    """
    # Build the layer synchronously; each async_to_sync call spins its own loop.
    channel_layer = RedisChannelLayer(hosts=TEST_HOSTS, capacity=3)
    channel_name_1 = async_to_sync(channel_layer.new_channel)()
    channel_name_2 = async_to_sync(channel_layer.new_channel)()
    async_to_sync(channel_layer.send)(channel_name_1, {"type": "test.message.1"})
    async_to_sync(channel_layer.send)(channel_name_2, {"type": "test.message.2"})

    # Make things to listen on the loops
    async def listen1():
        message = await channel_layer.receive(channel_name_1)
        assert message["type"] == "test.message.1"

    async def listen2():
        message = await channel_layer.receive(channel_name_2)
        assert message["type"] == "test.message.2"

    # Run them inside threads
    async_to_sync(listen2)()
    async_to_sync(listen1)()
    # Clean up
    async_to_sync(channel_layer.flush)()
@pytest.mark.asyncio
async def test_send_capacity(channel_layer):
    """
    Makes sure we get ChannelFull when we hit the send capacity
    """
    # The fixture layer is built with capacity=3, so the 4th send must fail.
    await channel_layer.send("test-channel-1", {"type": "test.message"})
    await channel_layer.send("test-channel-1", {"type": "test.message"})
    await channel_layer.send("test-channel-1", {"type": "test.message"})
    with pytest.raises(ChannelFull):
        await channel_layer.send("test-channel-1", {"type": "test.message"})
@pytest.mark.asyncio
async def test_send_specific_capacity(channel_layer):
    """
    Makes sure we get ChannelFull when we hit the send capacity on a specific channel
    """
    # Per-channel capacity (1 for "one") overrides the layer default of 3.
    custom_channel_layer = RedisChannelLayer(hosts=TEST_HOSTS, capacity=3, channel_capacity={"one": 1})
    await custom_channel_layer.send("one", {"type": "test.message"})
    with pytest.raises(ChannelFull):
        await custom_channel_layer.send("one", {"type": "test.message"})
    await custom_channel_layer.flush()
@pytest.mark.asyncio
async def test_process_local_send_receive(channel_layer):
    """
    Makes sure we can send a message to a process-local channel then receive it.
    """
    # new_channel() returns a process-local (client-prefixed) channel name.
    channel_name = await channel_layer.new_channel()
    await channel_layer.send(
        channel_name,
        {
            "type": "test.message",
            "text": "Local only please",
        },
    )
    message = await channel_layer.receive(channel_name)
    assert message["type"] == "test.message"
    assert message["text"] == "Local only please"
@pytest.mark.asyncio
async def test_multi_send_receive(channel_layer):
    """
    Tests overlapping sends and receives, and ordering.
    """
    # NOTE(review): this builds its own layer (ignoring the fixture argument)
    # and never flushes it — confirm that is intentional.
    channel_layer = RedisChannelLayer(hosts=TEST_HOSTS)
    await channel_layer.send("test-channel-3", {"type": "message.1"})
    await channel_layer.send("test-channel-3", {"type": "message.2"})
    await channel_layer.send("test-channel-3", {"type": "message.3"})
    # Messages must come back in FIFO order.
    assert (await channel_layer.receive("test-channel-3"))["type"] == "message.1"
    assert (await channel_layer.receive("test-channel-3"))["type"] == "message.2"
    assert (await channel_layer.receive("test-channel-3"))["type"] == "message.3"
@pytest.mark.asyncio
async def test_reject_bad_channel(channel_layer):
    """
    Makes sure sending/receiving on an invalid channel name fails.
    """
    with pytest.raises(TypeError):
        await channel_layer.send("=+135!", {"type": "foom"})
    with pytest.raises(TypeError):
        await channel_layer.receive("=+135!")
@pytest.mark.asyncio
async def test_reject_bad_client_prefix(channel_layer):
    """
    Makes sure receiving on a non-prefixed local channel is not allowed.
    """
    # Local channel names must start with this layer's client prefix.
    with pytest.raises(AssertionError):
        await channel_layer.receive("not-client-prefix!local_part")
@pytest.mark.asyncio
async def test_groups_basic(channel_layer):
    """
    Tests basic group operation.
    """
    channel_layer = RedisChannelLayer(hosts=TEST_HOSTS)
    channel_name1 = await channel_layer.new_channel(prefix="test-gr-chan-1")
    channel_name2 = await channel_layer.new_channel(prefix="test-gr-chan-2")
    channel_name3 = await channel_layer.new_channel(prefix="test-gr-chan-3")
    await channel_layer.group_add("test-group", channel_name1)
    await channel_layer.group_add("test-group", channel_name2)
    await channel_layer.group_add("test-group", channel_name3)
    # Channel 2 leaves before the broadcast.
    await channel_layer.group_discard("test-group", channel_name2)
    await channel_layer.group_send("test-group", {"type": "message.1"})
    # Make sure we get the message on the two channels that were in
    async with async_timeout.timeout(1):
        assert (await channel_layer.receive(channel_name1))["type"] == "message.1"
        assert (await channel_layer.receive(channel_name3))["type"] == "message.1"
    # Make sure the removed channel did not get the message
    with pytest.raises(asyncio.TimeoutError):
        async with async_timeout.timeout(1):
            await channel_layer.receive(channel_name2)
@pytest.mark.asyncio
async def test_groups_channel_full(channel_layer):
    """
    Tests that group_send ignores ChannelFull
    """
    channel_layer = RedisChannelLayer(hosts=TEST_HOSTS)
    await channel_layer.group_add("test-group", "test-gr-chan-1")
    # Five sends with nobody receiving: group_send must not raise ChannelFull.
    await channel_layer.group_send("test-group", {"type": "message.1"})
    await channel_layer.group_send("test-group", {"type": "message.1"})
    await channel_layer.group_send("test-group", {"type": "message.1"})
    await channel_layer.group_send("test-group", {"type": "message.1"})
    await channel_layer.group_send("test-group", {"type": "message.1"})
@pytest.mark.asyncio
async def test_groups_multiple_hosts(channel_layer_multiple_hosts):
    """
    Tests advanced group operation with multiple hosts.
    """
    # NOTE(review): a fresh layer is built here instead of using the fixture
    # argument — confirm that is intentional.
    channel_layer = RedisChannelLayer(hosts=MULTIPLE_TEST_HOSTS, capacity=100)
    channel_name1 = await channel_layer.new_channel(prefix="channel1")
    channel_name2 = await channel_layer.new_channel(prefix="channel2")
    channel_name3 = await channel_layer.new_channel(prefix="channel3")
    await channel_layer.group_add("test-group", channel_name1)
    await channel_layer.group_add("test-group", channel_name2)
    await channel_layer.group_add("test-group", channel_name3)
    await channel_layer.group_discard("test-group", channel_name2)
    await channel_layer.group_send("test-group", {"type": "message.1"})
    await channel_layer.group_send("test-group", {"type": "message.1"})
    # Make sure we get the message on the two channels that were in
    async with async_timeout.timeout(1):
        assert (await channel_layer.receive(channel_name1))["type"] == "message.1"
        assert (await channel_layer.receive(channel_name3))["type"] == "message.1"
    # The discarded channel must not receive anything.
    with pytest.raises(asyncio.TimeoutError):
        async with async_timeout.timeout(1):
            await channel_layer.receive(channel_name2)
@pytest.mark.parametrize("num_channels,timeout", [
    (1, 1),  # Edge cases - make sure we can send to a single channel
    (10, 1),
    (100, 10),
])
@pytest.mark.asyncio
async def test_groups_multiple_hosts_performance(
    channel_layer_multiple_hosts, num_channels, timeout
):
    """
    Tests advanced group operation: can send efficiently to multiple channels
    with multiple hosts within a certain timeout
    """
    channel_layer = RedisChannelLayer(hosts=MULTIPLE_TEST_HOSTS, capacity=100)
    channels = []
    for i in range(0, num_channels):
        channel = await channel_layer.new_channel(prefix="channel%s" % i)
        await channel_layer.group_add("test-group", channel)
        channels.append(channel)
    # The broadcast itself must finish within the timeout budget.
    async with async_timeout.timeout(timeout):
        await channel_layer.group_send("test-group", {"type": "message.1"})
    # Make sure we get the message all the channels
    async with async_timeout.timeout(timeout):
        for channel in channels:
            assert (await channel_layer.receive(channel))["type"] == "message.1"
| [
"andrew@aeracode.org"
] | andrew@aeracode.org |
0a2f8a83695f5c8c2c2ea926b7914ee08ee6cbdd | 50ab563ae9d4506048a54b99f4f80b5b276c8f3b | /2pointer/16_threeSumClosest.py | 8f4963a35261a37b05078f03b3f5a9a533381172 | [] | no_license | chenpengcode/Leetcode | b403d05bd67b6661d1595136922f77bcb23947cf | 4f92911896c8b92c51650413b998e0bb1edfa4c0 | refs/heads/master | 2021-04-08T13:17:41.482766 | 2020-11-26T17:21:23 | 2020-11-26T17:21:23 | 248,779,182 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | from typing import List
class Solution:
    def threeSumClosest(self, nums: List[int], target: int) -> int:
        """Return the sum of three elements of *nums* closest to *target*.

        Sorts *nums* in place, then runs the classic two-pointer sweep for
        each fixed first element. Returns *target* immediately on an exact hit.
        """
        nums.sort()
        best = float('inf')
        for first in range(len(nums) - 1):
            lo, hi = first + 1, len(nums) - 1
            while lo < hi:
                total = nums[first] + nums[lo] + nums[hi]
                if total == target:
                    return target
                # Keep the sum whose distance to target is strictly smaller.
                if abs(total - target) < abs(best - target):
                    best = total
                if total > target:
                    hi -= 1
                else:
                    lo += 1
        return best
if __name__ == '__main__':
    # Ad-hoc smoke test: sorted input is [0, 1, 1, 1]; closest sum to -100 is 2.
    nums = [1, 1, 1, 0]
    target = -100
    print(Solution().threeSumClosest(nums, target))
| [
"cpcoder@aliyun.com"
] | cpcoder@aliyun.com |
32ae94af39dd74b8af3e10ed20e6aa9da09144eb | c70804bf9679944e8718438d86b08c5e296545bc | /extractFotNote.py | 0955b31396b1087c83fef51b5357a85bf2071c36 | [] | no_license | Kamel773/Delve_-Search_Engine- | f74c5b47f896854f3d13bf1a6ab66a8a40bfcfdd | aa0abde34546170a9a9186f895e087d1b60cb614 | refs/heads/master | 2020-05-18T01:08:09.226478 | 2019-09-03T08:09:16 | 2019-09-03T08:09:16 | 184,081,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,676 | py | from bs4 import BeautifulSoup
import re
import nltk.data
import os
import glob
import pickle
import csv
import glob, multiprocessing
from threading import Lock, Thread
import time
'''
"Beautiful code must be short", Adam Kolawa.
'''
# Ids of XML files already processed in a previous run (resume support).
pickleFile = open("/home/rashedka/Desktop/FinalExtracting/DelveXML/listXMLfile_Delve_XML.p",'rb')
fileAlreadyExtracted = pickle.load(pickleFile)

# Regex building blocks for the sentence splitter below.
alphabets= "([A-Za-z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|et al.|\sWherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
def split_into_sentences(text):
    """Split raw text into sentences while protecting periods that are not
    sentence terminators (URLs, percentages, honorifics, acronyms, initials).

    Strategy: temporarily rewrite protected '.' as '<prd>' and true sentence
    ends as '<stop>', then split on '<stop>'.  NOTE(review): the final
    ``sentences[:-1]`` drops the trailing segment, so text not ending with a
    period loses its last sentence — confirm this is intended.
    """
    text = " " + str(text) + " "
    text = text.replace("\n"," ")
    # Protect dots inside URL-like tokens and inside percentage figures.
    for checkingHttp in text.split():
        if 'http' in checkingHttp:
            text = text.replace(checkingHttp,checkingHttp.replace(".","<prd>"))
        #if checkingHttp[-5:].endswith('<prd>'):
            #print(checkingHttp)
            #text = text.replace(checkingHttp,checkingHttp.replace("<prd>",".")) + '.'
        if bool(re.match('^[0-9.%]+%', checkingHttp)):
            text = text.replace(checkingHttp,checkingHttp.replace(".","<prd>"))
        #if bool(re.match('^[0-9.]', checkingHttp)):
            #print(checkingHttp)
    # Order matters below: broader protections run before '.' -> '<stop>'.
    text = text.replace("et al.","et al")
    text = re.sub(prefixes,"\\1<prd>",text)
    text = re.sub(websites,"<prd>\\1",text)
    if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
    text = re.sub("\s" + alphabets + "[.] "," \\1<prd> ",text)
    text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
    text = re.sub(alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
    text = re.sub(alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>",text)
    text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
    text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
    text = re.sub(" " + alphabets + "[.]"," \\1<prd>",text)
    # Move sentence punctuation outside closing quotes before marking stops.
    if "\"" in text: text = text.replace(".\"","\".")
    if "!" in text: text = text.replace("!\"","\"!")
    if "?" in text: text = text.replace("?\"","\"?")
    text = text.replace(".",".<stop>")
    #text = text.replace("?","?<stop>")
    #text = text.replace("!","!<stop>")
    text = text.replace("<prd>",".")
    sentences = text.split("<stop>")
    sentences = sentences[:-1]
    sentences = [s.strip() for s in sentences]
    return sentences
#// Reading the XML file
# For every Delve TEI XML not yet processed, collect footnote number -> URL
# pairs and append them to the CSV. (Reconstructed indentation — the original
# nesting of the bookkeeping lines inside the try block is a best guess.)
outputCSV = open('extractingFootNoteSentence.csv', mode='w')
outputCSV_writer = csv.writer(outputCSV, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
listXMLfile = []
count = 0
path = '/home/rashedka/Desktop/Delve_XML/'
for filename in glob.glob(os.path.join(path,'*.xml')):
    #Extracting the file name
    start = '/home/rashedka/Desktop/Delve_XML/'
    end = '.tei.xml'
    filenameXML = (filename.split(start))[1].split(end)[0]
    if filenameXML in fileAlreadyExtracted:
        #print(filenameXML)
        continue
    #print(filenameXML)
    xmlfile = open(filename,'r')
    contents = xmlfile.read()
    t2_soup = None
    count = count + 1
    dic = {}
    soup = BeautifulSoup(contents, "html5lib")
    data = ''
    for note in soup.find_all('note'):
        # Only footnotes; notes without a 'place' attribute are skipped.
        try:
            if note.attrs['place'] != 'foot':
                continue
        except:
            continue
        try:
            if (len(note.get_text().split())) == 1:
                footNote = note.attrs['n'] + ' ' + note.get_text()
                dic[note.attrs['n']] = note.get_text()
            if (len(note.get_text().split())) > 1:
                footNote = note.attrs['n'] + ' ' + note.get_text()
                # Keep only URL-ish tokens and footnote numbers.
                for word in footNote.split():
                    #if word[:5] == 'https':
                    if 'htt' in word:
                        data = data + word + ' '
                    if word.isdigit():
                        data = data + word + ' '
                # Pair each number with the token that follows it.
                for multi in data.split():
                    if multi.isdigit():
                        key = multi
                        continue
                    dic[key] = multi
                listXMLfile.append(filenameXML)
                outputCSV_writer.writerow([filenameXML, dic[key]])
                print(count)
        except:
            continue
    #print(dic)
'''
#// extracting the sentance
cleanSentence = None
sentences = split_into_sentences(contents)
for sentence in sentences:
for key in dic:
query = '<ref type="bibr" target="#b0">' + key +'</ref>'
if query in sentence:
print('---')
cleanSentence = BeautifulSoup(sentence, "html5lib")
print(cleanSentence.get_text())
cleanSentence = cleanSentence.get_text()
print(dic[key])
print(count)
print(dic)
listXMLfile.append(filenameXML)
outputCSV_writer.writerow([filenameXML, dic[key], cleanSentence])
'''
#print(listXMLfile)
# Persist the processed-file list so later runs can skip these documents.
pickle.dump(listXMLfile, open( "listXMLfile.p", "wb" ) )
'''
from bs4 import BeautifulSoup
import re
import nltk.data
import os
import glob
import pickle
import csv
alphabets= "([A-Za-z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|et al.|\sWherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
def split_into_sentences(text):
text = " " + str(text) + " "
text = text.replace("\n"," ")
for checkingHttp in text.split():
if 'http' in checkingHttp:
text = text.replace(checkingHttp,checkingHttp.replace(".","<prd>"))
#if checkingHttp[-5:].endswith('<prd>'):
#print(checkingHttp)
#text = text.replace(checkingHttp,checkingHttp.replace("<prd>",".")) + '.'
if bool(re.match('^[0-9.%]+%', checkingHttp)):
text = text.replace(checkingHttp,checkingHttp.replace(".","<prd>"))
#if bool(re.match('^[0-9.]', checkingHttp)):
#print(checkingHttp)
text = text.replace("et al.","et al")
text = re.sub(prefixes,"\\1<prd>",text)
text = re.sub(websites,"<prd>\\1",text)
if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
text = re.sub("\s" + alphabets + "[.] "," \\1<prd> ",text)
text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
text = re.sub(alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
text = re.sub(alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>",text)
text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
text = re.sub(" " + alphabets + "[.]"," \\1<prd>",text)
if "\"" in text: text = text.replace(".\"","\".")
if "!" in text: text = text.replace("!\"","\"!")
if "?" in text: text = text.replace("?\"","\"?")
text = text.replace(".",".<stop>")
#text = text.replace("?","?<stop>")
#text = text.replace("!","!<stop>")
text = text.replace("<prd>",".")
sentences = text.split("<stop>")
sentences = sentences[:-1]
sentences = [s.strip() for s in sentences]
return sentences
#// Reading the XML file
outputCSV = open('extractingFootNoteSentence.csv', mode='w')
outputCSV_writer = csv.writer(outputCSV, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
listXMLfile = []
count = 0
path = '/home/rashedka/Desktop/Delve_XML/'
for filename in glob.glob(os.path.join(path,'*.xml')):
#Extracting the file name
start = '/home/rashedka/Desktop/Delve_XML/'
end = '.tei.xml'
filenameXML = (filename.split(start))[1].split(end)[0]
#print(filenameXML)
xmlfile = open(filename,'r')
contents = xmlfile.read()
t2_soup = None
count = count + 1
dic = {}
soup = BeautifulSoup(contents, "html5lib")
data = ''
for note in soup.find_all('note'):
try:
if note.attrs['place'] != 'foot':
continue
except:
continue
try:
if (len(note.get_text().split())) == 1:
footNote = note.attrs['n'] + ' ' + note.get_text()
dic[note.attrs['n']] = note.get_text()
if (len(note.get_text().split())) > 1:
footNote = note.attrs['n'] + ' ' + note.get_text()
for word in footNote.split():
#if word[:5] == 'https':
if 'htt' in word:
data = data + word + ' '
if word.isdigit():
data = data + word + ' '
for multi in data.split():
if multi.isdigit():
key = multi
continue
dic[key] = multi
except:
continue
#print(dic)
#// extracting the sentance
cleanSentence = None
sentences = split_into_sentences(contents)
for sentence in sentences:
for key in dic:
query = '<ref type="bibr" target="#b0">' + key +'</ref>'
if query in sentence:
print('---')
cleanSentence = BeautifulSoup(sentence, "html5lib")
print(cleanSentence.get_text())
cleanSentence = cleanSentence.get_text()
print(dic[key])
print(count)
print(dic)
listXMLfile.append(filenameXML)
outputCSV_writer.writerow([filenameXML, dic[key], cleanSentence])
print(listXMLfile)
pickle.dump(listXMLfile, open( "listXMLfile.p", "wb" ) )
''' | [
"noreply@github.com"
] | noreply@github.com |
98e69f23ae346f2016310590e40cc654fdf7d26b | 90072552491d0eab6c611286d55a1f5155bdcf29 | /config.py | 42ebf335b0adf231867b97b956dedce3e792dbec | [] | no_license | Xerber/stalker | 8d8287e1296922d61404866f0f7476dd57f0fbed | d097792aa3b4bca4f3a869660e61bcc9e7b05423 | refs/heads/master | 2021-01-04T18:01:34.489084 | 2020-02-15T11:51:56 | 2020-02-15T11:51:56 | 240,700,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | host=''
# NOTE(review): credentials live in source control and are blank here —
# consider loading them from environment variables or a secrets store.
user=''
password=''
# Working directory for the films script.
home='/home/snowman/films_script/'
| [
"cherobuk.91@gmail.com"
] | cherobuk.91@gmail.com |
80e9e4574ed8693b8553db2e3c96d497209dcf92 | 80b54d0762516cffa9b9c86086a337c004bb4b18 | /main.py | e39ccacf333f0f6e24c2253869e91588e9e07bda | [
"MIT"
] | permissive | Lol3rrr/TicTacToe-AI-NN | d3d3629ba3252ce85473ce4231066764101f45bf | 4cfe30531233e7ffb4411923876244a32fda23cb | refs/heads/master | 2020-05-26T00:56:51.709771 | 2019-05-25T21:33:11 | 2019-05-25T21:33:11 | 188,056,704 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | from gameNN import Game
from gameQ import Game  # NOTE(review): shadows the gameNN Game import above — only gameQ's Game runs
from qLearning import QTable

if __name__ == "__main__":
    #runningGame = Game(3)
    runningGame = Game(3)  # Game(3): presumably a 3x3 board — confirm in gameQ
    runningGame.run()
"34582309+Lol3rrr@users.noreply.github.com"
] | 34582309+Lol3rrr@users.noreply.github.com |
277f5c2929da8f67793af05b2f29f8923a5f57b6 | d1a6270f3752dee9c7cc5e9ab490d2dc1ec264ba | /Code/Preprocessing/utils.py | a1dbb1df4ce086d2d707928e98b4c6ee63f6a021 | [] | no_license | Jasonti0120/Bitcoin-NLP-Project | bc1d567da43907e687920cfcc676080f605fe079 | 17c4beb2bf53b70ce9dc8ef0d3d8863a6230657e | refs/heads/main | 2023-08-21T11:46:10.305022 | 2021-10-14T15:49:36 | 2021-10-14T15:49:36 | 387,865,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,263 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 20 13:38:06 2021
@author: jasonti
"""
# sentiment score
def s_score(df, target, output):
    """Attach a VADER compound-sentiment column to *df*.

    df     : target dataframe.
    target : column name holding the text.
    output : column name to write the compound score into.
    """
    compounds = []
    for done, text in enumerate(df[target], start=1):
        if done % 1000 == 0:
            print(done)  # coarse progress indicator
        compounds.append(sentiment_scores(text)["compound"])
    df[output] = compounds
def sentiment_scores(sentence):
    """Return the VADER polarity dict (pos/neg/neu/compound) for *sentence*."""
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
    analyzer = SentimentIntensityAnalyzer()
    return analyzer.polarity_scores(sentence)
def merge_and_csv(var1, var2, temp, path, name, out_path):
    """Inner-merge two frames on column *temp*, pickle and CSV the result.

    Returns the merged DataFrame.
    NOTE(review): the pickle filename is name + "pkl" (no dot) — looks like a
    missing ".", but callers may rely on it, so it is kept as-is.
    """
    import pandas as pd
    merged = pd.merge(left=var1, right=var2, on=temp)
    write_pickle(path, name + "pkl", merged)
    merged.to_csv(out_path + "/" + name + ".csv")
    return merged
#Vector count & td-idf
def create_tf_idf(df_in, min_n, max_n):
    """Return a DataFrame of TF-IDF features over n-grams in [min_n, max_n]."""
    from sklearn.feature_extraction.text import TfidfVectorizer
    import pandas as pd
    vectorizer = TfidfVectorizer(ngram_range=(min_n, max_n))
    matrix = vectorizer.fit_transform(df_in)
    features = pd.DataFrame(matrix.toarray())
    # NOTE(review): get_feature_names() was renamed get_feature_names_out()
    # in newer scikit-learn — confirm the pinned version.
    features.columns = vectorizer.get_feature_names()
    return features
def create_vec(df_in, min_n, max_n):
    """Return a DataFrame of raw n-gram counts for n in [min_n, max_n]."""
    from sklearn.feature_extraction.text import CountVectorizer
    import pandas as pd
    vectorizer = CountVectorizer(ngram_range=(min_n, max_n))
    counts = pd.DataFrame(vectorizer.fit_transform(df_in).toarray())
    counts.columns = vectorizer.get_feature_names()
    return counts
#cleaning the text
def dictionary_check(var_in):
    """Keep only the whitespace tokens of *var_in* found in the en_US dictionary."""
    import enchant
    checker = enchant.Dict("en_US")
    kept = [word for word in var_in.split() if checker.check(word)]
    return ' '.join(kept)
def rem_sw(var_in):
    """Drop English stop words from *var_in*, preserving token order."""
    from nltk.corpus import stopwords
    stop_set = set(stopwords.words('english'))
    kept = [word for word in var_in.split() if word not in stop_set]
    return ' '.join(kept)
def unique_words(var_in):
    """Count the distinct whitespace-separated tokens in *var_in*."""
    return len(set(var_in.split()))
#write and open pickle
def open_pickle(path_in, file_name):
    """Load and return the pickled object stored at path_in + file_name.

    Fix: the original left the file handle unclosed (pickle.load(open(...)));
    a context manager closes it deterministically.
    """
    import pickle
    with open(path_in + file_name, "rb") as fh:
        return pickle.load(fh)
def write_pickle(path_in, file_name, var_in):
    """Pickle *var_in* to path_in + file_name.

    Fix: the original never closed the handle (pickle.dump(var_in, open(...)));
    a context manager guarantees the file is flushed and closed.
    """
    import pickle
    with open(path_in + file_name, "wb") as fh:
        pickle.dump(var_in, fh)
def find_row(rowname, head):
    """Return True if *rowname* occurs in the iterable *head*, else False.

    Idiom fix: the manual loop re-implemented the membership operator.
    """
    return rowname in head
#clean the specific row in a file
def seek_and_clean(path_in, filename, rowname, columnname):
    """Read a CSV; if *rowname* is one of its columns, write the cleaned text
    of that column into *columnname*. Returns the DataFrame either way."""
    import pandas as pd
    frame = pd.read_csv(path_in + filename)
    # Iterating a DataFrame yields column labels, so find_row(rowname,
    # frame.head()) is effectively a column-name membership test.
    if find_row(rowname, frame.head()):
        frame[columnname] = frame[rowname].apply(clean_text)
    return frame
#Clean the text
def clean_text(var_in):
    """Lower-case *var_in* and collapse every run of non-letters to one space.

    Fix: the original pattern [^A-z]+ spans the ASCII range from 'A' to 'z',
    which also covers the characters [ \\ ] ^ _ ` — so underscores and
    brackets survived "cleaning". After lower(), [^a-z]+ matches exactly
    the non-letter runs we want to replace.
    """
    import re
    return re.sub(r"[^a-z]+", " ", var_in.lower())
| [
"cti@drew.edu"
] | cti@drew.edu |
82dd4f49df2007a396a712276cb962ee9c2f9c29 | f62b604587b78733c42e49145d2b79ea06d23374 | /src/main/resources/versionone/getStories.py | 979803ba897e6147b72c44fe8f93f84cecb8f613 | [] | no_license | zvercodebender/xlr-versionone-plugin | e7b41ebee6445325c07be978580d7086825d8873 | 5ca798167fe83f17663ac47c40bd577ea515d47c | refs/heads/master | 2021-01-21T10:45:49.923887 | 2017-10-25T19:14:32 | 2017-10-25T19:14:32 | 83,480,048 | 0 | 0 | null | 2017-02-28T21:17:01 | 2017-02-28T21:17:01 | null | UTF-8 | Python | false | false | 1,064 | py | #
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS
# FOR A PARTICULAR PURPOSE. THIS CODE AND INFORMATION ARE NOT SUPPORTED BY XEBIALABS.
#
import sys, traceback
from versionone.VersionOneClient import VersionOneClient
try:
    # Connect to VersionOne and pull stories matching the XL Release inputs
    # (versionOneServer and whereClause are injected by the task runner).
    v1Client = VersionOneClient.create_v1Client( versionOneServer )
    results = v1Client.getStories( whereClause )
    assets = results['Assets']
    data = {}
    for asset in assets:
        print( asset )
        print( asset['Attributes']['Name'] )
        assetName = asset['Attributes']['Name']['value']
        print( asset['Attributes']['Number'] )
        assetNumber = asset['Attributes']['Number']['value']
        print( asset['Attributes']['Status.Name'] )
        assetStatus = asset['Attributes']['Status.Name']['value']
        # Map "S-01234" -> "Name | Status".
        data[assetNumber] = "%s | %s " % (assetName, assetStatus )
    # End for
    print( data )
except :
    # Any failure: dump the traceback to stdout and fail the task.
    traceback.print_exc(file=sys.stdout)
    sys.exit(1)
# End try
"rbroker@RainSong.local"
] | rbroker@RainSong.local |
6aa5472a81eb982be6841fecca028c9450d0bc71 | 64653a5a2a64cd0a18643ea3c537dd21c3122167 | /ohmyeye/urls.py | 5b9c0c4129965443f6f886b43d7f8ad8bd26d616 | [] | no_license | baidoosik/ohmyeye | 52662341701c3905efe5c7cf329fbe6e400022de | 1619f981f9f4e4e84b4577df28e769e9340ba06a | refs/heads/master | 2021-04-27T02:47:45.497702 | 2018-02-24T22:25:37 | 2018-02-24T22:25:37 | 122,702,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | """ohmyeye URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^accounts/', include('allauth.urls')),  # django-allauth auth flows
    url(r'^admin/', admin.site.urls),
    url(r'^', include('home.urls', namespace='home')),
    # NOTE(review): 'accounts/' is also claimed by allauth above; Django uses
    # the first match, so these routes only get unmatched paths — confirm the
    # overlap is intended.
    url(r'^accounts/', include('accounts.urls', namespace='accounts'))
]
| [
"qoentlr37@naver.com"
] | qoentlr37@naver.com |
192513b2ebb9f2f9c07d84b4cdb0e2c0f10f8099 | 2db7597686f33a0d700f7082e15fa41f830a45f0 | /Python/coding/longestPalindromicSubstring.py | 5b5cc4e5f9239e189fda40d29fb79084b668ae13 | [] | no_license | Leahxuliu/Data-Structure-And-Algorithm | 04e0fc80cd3bb742348fd521a62bc2126879a70e | 56047a5058c6a20b356ab20e52eacb425ad45762 | refs/heads/master | 2021-07-12T23:54:17.785533 | 2021-05-17T02:04:41 | 2021-05-17T02:04:41 | 246,514,421 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | '''
5. Longest Palindromic Substring
注意题目的return是什么
'''
def longestPalindrome(self, s: str) -> str:
    """Return the longest palindromic substring of s.

    Bottom-up dynamic programming: dp[i][j] is True when s[i..j] is a
    palindrome.  i is scanned from the end of the string so dp[i + 1][j - 1]
    is always filled in before it is read; (start, max_len) keep the first
    longest match encountered.
    """
    if s == '':
        return ''
    n = len(s)
    dp = [[False] * n for _ in range(n)]
    max_len = 1  # any non-empty string contains a length-1 palindrome
    start = 0
    for i in range(n - 1, -1, -1):
        for j in range(i, n):
            if i == j:
                # A single character is trivially a palindrome.
                dp[i][j] = True
            elif j - i == 1:
                # Two-character window: palindrome iff the pair matches.
                if s[i] == s[j]:
                    dp[i][j] = True
                    if max_len < 2:
                        max_len = 2
                        start = i
            else:
                # Longer window: the ends must match and the interior must
                # already be a palindrome.
                if s[i] == s[j] and dp[i + 1][j - 1] == True:
                    dp[i][j] = True
                    if max_len < j - i + 1:
                        max_len = j - i + 1
                        start = i
return s[start:start + max_len] | [
"leahxuliu@gmail.com"
] | leahxuliu@gmail.com |
bbde362c350b48413d134b1fce47ff145dce03bf | 63b3ab7d84f2a2c974da0bb4643b7a3bcaffa23a | /lab3/src/main.py | 5b28478a9c1e505ad65c28ef69604cc9ab2d2918 | [] | no_license | Lukasz1928/NLP | 1506a4062839231f5403a8becde9d7ba41a758b5 | 4f54fad491d75f3a25c69c4b39bfa777d34ceaa7 | refs/heads/master | 2020-04-28T06:24:07.733077 | 2019-06-03T13:40:03 | 2019-06-03T13:40:38 | 175,056,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,150 | py | from elasticsearch import Elasticsearch
from src.es_utils import create_index, load_data
from src.file_utils import get_all_filenames, read_polimorfologik, save_results
import matplotlib.pyplot as plt
import Levenshtein
def aggregate_terms(terms):
    """Merge per-document Elasticsearch term vectors into corpus-wide counts.

    Returns a pair: (list of (word, count) pairs sorted by descending count,
    dict mapping word -> count).  Only purely alphabetic words longer than
    one character are kept.
    """
    counts = {}
    for doc in terms:
        for term, info in doc['term_vectors']['text']['terms'].items():
            counts[term] = counts.get(term, 0) + info['term_freq']
    ranked = []
    kept = {}
    for term, total in counts.items():
        if len(term) > 1 and term.isalpha():
            ranked.append((term, total))
            kept[term] = total
    ranked.sort(key=lambda pair: pair[1], reverse=True)
    return ranked, kept
def get_term_vectors(es, index_name):
    """Fetch the 'text' field term vector for every known document id."""
    return [
        es.termvectors(index_name, doc_type='doc', fields=['text'], id=name)
        for name in get_all_filenames()
    ]
def plot_frequencies(freqs):
    """Save a log-log rank/frequency (Zipf-style) scatter plot of *freqs*."""
    ranks = list(range(1, len(freqs) + 1))
    counts = [count for _word, count in freqs]
    plt.plot(ranks, counts, '.')
    plt.xlabel('word rank')
    plt.ylabel('word appearances')
    plt.xscale('log')
    plt.yscale('log')
    plt.savefig('results/frequencies.png')
def find_words_not_in_dictionary(words, dictionary):
    """Return the (word, count) entries whose word is absent from *dictionary*."""
    return [entry for entry in words if entry[0] not in dictionary]
def find_top_words_not_in_dictionary(words):
    """Return up to 30 entries with the highest counts (ties keep input order)."""
    ranked = sorted(words, key=lambda entry: -entry[1])
    return ranked[:30]
def find_top_words_not_in_dictionary_with_3_occurences(words):
    """Return the first 30 entries (in input order) that occur exactly 3 times."""
    picked = []
    for entry in words:
        if entry[1] == 3:
            picked.append(entry)
            if len(picked) == 30:
                break
    return picked
def find_most_probable_corrections(words, frequencies, dictionary):
    """Suggest one dictionary word for each (word, count) entry.

    Candidates are the dictionary words at the minimum Levenshtein distance;
    ties are broken by the highest corpus frequency (unknown words score -1,
    first maximum wins).
    """
    corrections = []
    for entry in words:
        word = entry[0]
        best_distance = 99999
        tied = []
        for candidate in dictionary:
            score = Levenshtein.distance(word, candidate)
            if score < best_distance:
                best_distance = score
                tied = [candidate]
            elif score == best_distance:
                tied.append(candidate)
        chosen = max(tied, key=lambda c: frequencies.get(c, -1))
        corrections.append((word, chosen))
    return corrections
def main():
    """End-to-end pipeline: index the corpus in Elasticsearch, aggregate term
    frequencies, plot the rank/frequency curve, detect out-of-dictionary
    words, propose spelling corrections, and save all results.
    """
    index_name = 'idx'
    es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
    # Reference word list (Polimorfologik) used as the spelling dictionary.
    pm = read_polimorfologik()
    create_index(es, index_name)
    load_data(es, index_name)
    tv = get_term_vectors(es, index_name)
    agg, agg_dict = aggregate_terms(tv)
    plot_frequencies(agg)
    words_not_in_dict = find_words_not_in_dictionary(agg, pm)
    top_words_not_in_dict = find_top_words_not_in_dictionary(words_not_in_dict)
    # Words seen exactly 3 times are the spell-correction candidates.
    words_not_in_dict_with_3_occurrences = find_top_words_not_in_dictionary_with_3_occurences(words_not_in_dict)
    corrections = find_most_probable_corrections(words_not_in_dict_with_3_occurrences, agg_dict, pm)
    save_results(words_not_in_dict, top_words_not_in_dict, words_not_in_dict_with_3_occurrences, corrections)
if __name__ == "__main__":
    main()
| [
"Lukasz19281@gmail.com"
] | Lukasz19281@gmail.com |
60edc9040518713f172c1e188db2a0857b8fbf2a | eb431b6e9b4ea2263bc174a7758663abc17c04e1 | /mysite/mysite/settings.py | a57f88cd8effd894e1f39f08035adf7e57696b3f | [] | no_license | dh1p0em/my-first-blog | 13d410fa3b540cfe1eef832caf756d3e4fa93ff3 | 144f0c19233cf749e0a94b35c18a2ea46debffd8 | refs/heads/master | 2020-04-19T18:09:18.746393 | 2019-01-31T09:26:23 | 2019-01-31T09:26:23 | 168,355,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,089 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment (or a secrets store) before any real deployment.
SECRET_KEY = 'z)ziwxx46=onym%i@d!6lm(j#k9hende^$_(=4^_m2!mp+&67('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# An empty list is only acceptable while DEBUG is True; production needs
# explicit hostnames here.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"anilefecoban5@gmail.com"
] | anilefecoban5@gmail.com |
72d4ac2a4b4667b29a2e230fe8492dccbfc820b2 | d730acc4921b91ed67d52c31b6a6e12c78397941 | /blog/migrations/0001_initial.py | ac6e4f9594814e457ab4b7e03bcfb9fd27dd0453 | [] | no_license | An-dy1/my-first-blog | 8dbf1b0ee0d0f6175a6b9a9b98e2621e9c5e5bad | ac10cc832cc08e27561ed75fe008a83d25ccb201 | refs/heads/master | 2020-03-23T16:54:11.380012 | 2018-07-21T22:18:12 | 2018-07-21T22:18:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.0.6 on 2018-07-21 16:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the blog Post table."""
    initial = True
    dependencies = [
        # Post.author references the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"andrea_sue2@icloud.com"
] | andrea_sue2@icloud.com |
ea1b01621f36c02bc3965ee91415f198ac0d47b2 | ffc3ebe790a6cb4667c777ca4840358c58723463 | /improveo/settings.py | 4fd85f93142e885d98e43f320af7703ed928b2a5 | [] | no_license | odedahay/django-company-crm | ec480efed790f8a58068fd14b73dd459c88ae3d6 | 97ef246d78b1e25fe61dc28dc2633b94dd51acd7 | refs/heads/master | 2021-01-09T18:52:21.258002 | 2020-03-02T00:51:58 | 2020-03-02T00:51:58 | 242,417,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,572 | py | """
Django settings for improveo project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment (or a secrets store) before any real deployment.
SECRET_KEY = '!y*z#h^zc=%#mz-!u@3#4q$a)vqgf6g31r877-+ac_j=s35v*9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# An empty list is only acceptable while DEBUG is True; production needs
# explicit hostnames here.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#3rd party apps:
'crispy_forms',
#our apps:
'profiles',
'reports',
'areas',
'products',
'categories',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'improveo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'improveo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CRISPY_TEMPLATE_PACK = "bootstrap4"
LOGIN_URL = '/admin/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static_proj')]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_cdn', 'static_root')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_cdn', 'media_root') | [
"odedahay@gmail.com"
] | odedahay@gmail.com |
a3022d74803f2215703054399c88df7a6f9ff4c5 | 79a27d368f117885f072bff493d8fb712faa4479 | /orders/models.py | 8f0f26477215f812060e25cd9914b47f2face847 | [] | no_license | Sultanbek9899/onlineshop | 2e03b4abad39bfbf4ce4c7e9cc5ea2385352812c | 082546cc4ba3bb2ab0f63d293c2521c53fdde651 | refs/heads/master | 2022-12-20T18:44:45.803307 | 2020-09-25T22:00:02 | 2020-09-25T22:00:02 | 291,037,882 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from django.db import models
# Order model for storing order data
from products.models import Product
class Order(models.Model):
    """A customer order: contact/shipping details plus payment state."""
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    email = models.EmailField()
    address = models.CharField(max_length=250)
    postal_code = models.CharField(max_length=20)
    city = models.CharField(max_length=100)
    created = models.DateTimeField(auto_now_add=True)  # set once at creation
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    paid = models.BooleanField(default=False)
    class Meta:
        ordering = ('-created',)  # newest orders first
    def __str__(self):
        return 'Order № {}'.format(self.id)
class OrderItem(models.Model):
    """One product line inside an Order."""
    order = models.ForeignKey(Order, related_name='items', on_delete=models.CASCADE)
    product = models.ForeignKey(Product, related_name='order_items', on_delete=models.CASCADE)
    price = models.DecimalField(max_digits=10, decimal_places=2)  # per-unit price (see get_cost)
    quantity = models.PositiveIntegerField(default=1)
    def __str__(self):
        return '{}'.format(self.id)
    def get_cost(self):
        # Line total = unit price * quantity.
        return self.price * self.quantity
| [
"sultanbek9899@gmail.com"
] | sultanbek9899@gmail.com |
a6fbf4d86b6573df70d0ccec4fbe0d3916817e21 | cdd228f719a7db73d710cc6ad638e47740bc93ad | /6_ValidateIP.py | a3235deb4e072fdbac7563b9a21b4842c78f3af4 | [] | no_license | DKanyana/PythonProblems | 3f94a75c00f134e93322eeecdd815ae9ef4ae49e | 602fe53af1268a6fd62913bc3c96ec096f3bce2c | refs/heads/master | 2020-03-27T23:04:01.557524 | 2018-09-04T04:18:51 | 2018-09-04T04:18:51 | 147,287,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | def validate_ip(ip):
len_ip =len(ip.strip())
octet =[]
if len_ip <=0 or len_ip>15:
return False
elif ip.strip().count('.') !=3:
return False
else:
octets = ip.strip().split('.')
for octet in octets:
if not octet.isdigit() or int(octet) <0 or int(octet) >255:
return False
return True
ip='192:45.12.00a00'
print(validate_ip(ip))
#length check
#delimiter check
#space check
#count of delimiter check
#octet check
#Time complexity - O(1)
#Space complexity - O(n) | [
"noreply@github.com"
] | noreply@github.com |
a5bbfe1cd9b7a9bfcb2c0917f391fc5edc38b94c | fc3bed6b30a1ec5040d01ef8b428f7a34da4f571 | /monitoring/readtemps.py | 33030e796917675f01c589cdee3966404dbd4b3e | [] | no_license | rickroty/igatemonitor | 156f73dbe69d34dc6a89c5cc4538ddad02d88733 | c801becf37010f0968c69fef70aa624618f83bb3 | refs/heads/master | 2021-08-22T02:44:22.414655 | 2017-11-29T04:13:19 | 2017-11-29T04:13:19 | 106,628,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,071 | py | import os
import sys
import time
import datetime
import string
import requests
import json
# Kernel modules for the 1-Wire temperature bus; assumed already loaded at boot.
#os.system('modprobe w1-gpio')
#os.system('modprobe w1-therm')
# Each list holds two candidate sensor ids — presumably a primary and a
# fallback (the script below tries [0] first, then [1]); confirm wiring.
temp_sensor_inside = ['28-03146d1044ff','28-0115527003ff']
temp_sensor_outside = ['28-0215155c4aff','28-0215525b5bff']
# sysfs path template for reading a sensor by its 1-Wire id.
w1path = '/sys/bus/w1/devices/{0}/w1_slave'
# read raw data
def temp_raw(temp_sensor):
    """Return all lines of the 1-Wire sysfs file at *temp_sensor*.

    Fix: uses a context manager so the file handle is closed even when the
    read raises (the original leaked the handle on error).
    """
    with open(temp_sensor, 'r') as f:
        return f.readlines()
# parse raw data
def read_temp(temp_sensor):
    """Poll the sensor until its CRC line ends in 'YES', then return the
    temperature in degrees Celsius rounded to one decimal place, or -1 when
    the 't=' field cannot be found.
    """
    lines = temp_raw(temp_sensor)
    # The kernel driver marks a good read by ending line 0 with 'YES';
    # retry until the CRC check passes.
    while lines[0].strip()[-3:] != 'YES':
        time.sleep(0.2)
        lines = temp_raw(temp_sensor)
    # Example payload: '74 01 4b 46 7f ff 0c 10 55 t=23250' -> 23.2
    data = lines[1].strip()
    temp_output = data.find('t=')
    if temp_output == -1:
        return -1
    # Fix: search and slice the SAME (stripped) string.  The original
    # searched the raw line but sliced the stripped one, which would
    # misparse if the line ever carried leading whitespace.
    temp_string = data[temp_output + 2:]
    temp_c = round(float(temp_string) / 1000.0, 1)  # Celsius
    #temp_f = temp_c * 9.0 / 5.0 + 32.0 # Fahrenheit
    return temp_c
# Read each probe, trying the primary sensor id first and falling back to
# the spare on any failure (deliberate broad except: any read error on the
# first id triggers the fallback).
try:
    inside_temp = read_temp(w1path.format(temp_sensor_inside[0]))
except:
    inside_temp = read_temp(w1path.format(temp_sensor_inside[1]))
try:
    outside_temp = read_temp(w1path.format(temp_sensor_outside[0]))
except:
    outside_temp = read_temp(w1path.format(temp_sensor_outside[1]))
# Build the reading payload and POST it to the collector on the host
# named by $HOSTIP (port 24224 — presumably a Fluentd HTTP input; confirm).
timestamp = int(time.time())
hostname = os.environ['HOSTIP']
uri='http://' + hostname + ':24224/temperature'
data={"APRS_station": "KG7TMT-10", "inside_temp": inside_temp, "outside_temp": outside_temp, "date": timestamp}
print "Writing stats to: " + uri
print json.dumps(data, indent=4, sort_keys=True)
r = requests.post(uri, json=data)
print "response status=" + str(r.status_code)
| [
"noreply@github.com"
] | noreply@github.com |
9ab5ef5a853915ae349e5ac9687669ca151241a2 | c6499f46ff5f9685f3d4e09a0e548a1d81fa4dec | /.upython/stubs/random.py | 281de1a6b0a61b95096d9b32cccac7a8ad6aaa90 | [
"MIT"
] | permissive | vtt-info/upython-samples | 7ffaf77817b2ce1c46d632850528060ac8df545c | c28e9667a22621dd4b62cc64927dfb051f8a84d2 | refs/heads/main | 2023-01-13T11:47:11.018041 | 2020-11-15T06:44:30 | 2020-11-15T06:44:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
Module: 'random' on pySBC 1.13.0 with FW1.0-171
"""
# MCU: (sysname='pySBC', nodename='pySBC', release='1.13.0 with FW1.0', version='v1.13-171-g7720a5aa6-dirty on 2020-10-20', machine='simpleRTK-SBC with STM32F745')
# Stubber: 1.3.4
def choice():
    """Generated stub for random.choice; no implementation captured."""
    pass
def getrandbits():
    """Generated stub for random.getrandbits; no implementation captured."""
    pass
def randint():
    """Generated stub for random.randint; no implementation captured."""
    pass
def random():
    """Generated stub for random.random; no implementation captured."""
    pass
def randrange():
    """Generated stub for random.randrange; no implementation captured."""
    pass
def seed():
    """Generated stub for random.seed; no implementation captured."""
    pass
def uniform():
    """Generated stub for random.uniform; no implementation captured."""
    pass
| [
"tayfunkaran@gmtcontrol.com"
] | tayfunkaran@gmtcontrol.com |
9280ac79f4a7935fd8a74c47669555a1ff5abb41 | 058ab1a93f8213f93810c088d65e958a4ec50de7 | /Assignment_2_SourceCode/Unused_FIles/ModelTest.py | b90101f96e4ede829cd0d48817e38771d3cd4a0e | [] | no_license | urube/adv_programming_python_ara_assignment2_2019-2 | 222bef94ca22a0e033bdc437d6f4c3e81918ed93 | e58966c35311a00ec82880d12ea6a07dd6bb1c57 | refs/heads/master | 2020-08-14T12:19:58.097314 | 2019-10-15T04:35:16 | 2019-10-15T04:35:16 | 215,167,222 | 0 | 0 | null | 2019-10-15T00:49:49 | 2019-10-15T00:04:22 | HTML | UTF-8 | Python | false | false | 380 | py | import unittest
class ModelTest(unittest.TestCase):
    """Smoke test: parsing 'test.txt' through ParserDang must yield 'pen down'."""
    def test(self):
        # Project-local modules imported inside the test body.
        from drawer_kieran import DrawerKieran
        from parser_dang import ParserDang
        to_draw = open('test.txt', "r+").read()  # NOTE(review): handle is never closed
        parser = ParserDang(DrawerKieran())
        s = parser.parse(to_draw)
        self.assertEqual(s, 'pen down')
if __name__ == '__main__':
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
74f848ad711384a3273598f2f4b68e0141b9e2e9 | 78afda4cc334ea78043fbcc73169bc93ec63392c | /Py_Alergies/settings.py | 9c4758792260dbb1bea51314592acfe896a2e4ee | [] | no_license | GisDJordje/Gis_Projekat | e2debbc3e0e5af9035133fc19df89fa0642238cf | 752a80e494797c6975d878cda608d1a5e5b671a6 | refs/heads/master | 2021-06-27T11:59:21.302590 | 2017-09-13T04:07:20 | 2017-09-13T04:07:20 | 103,349,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,614 | py | """
Django settings for Py_Alergies project.
Generated by 'django-admin startproject' using Django 1.9.12.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment (or a secrets store) before any real deployment.
SECRET_KEY = 'df2x=_=^s((oxbjr%*r6r-s&t7btsdq#-ltcqhi05imlwkvqqh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# An empty list is only acceptable while DEBUG is True; production needs
# explicit hostnames here.
ALLOWED_HOSTS = []
''' Login Logout '''
from django.core.urlresolvers import reverse_lazy
LOGIN_REDIRECT_URL = reverse_lazy('alergies_maper:dashboard')
LOGIN_URL = reverse_lazy('account:login')
LOGOUT_URL = reverse_lazy('account:logout')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Alergies_Blog',
'account',
'alergies_maper',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Py_Alergies.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Py_Alergies.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"djolesub@gmail.com"
] | djolesub@gmail.com |
d4df58f2c24bda82a67205c1ec1ae96f54522fc9 | 1817aca734cda258cbbfd9e13fbf040d76824621 | /aliyun-python-sdk-domain/aliyunsdkdomain/__init__.py | 03e753e73534f3d3d1530c2b89ec0fb5458f2aaa | [
"Apache-2.0"
] | permissive | sdk-team/aliyun-openapi-python-sdk | 4bd770718e70e31f19e1e322727c27ba74d9fb80 | 996cb07bfcf010fe3ab65daa73d26df2f3b6e97f | refs/heads/master | 2022-08-04T13:11:56.729215 | 2022-07-25T10:01:10 | 2022-07-25T10:01:10 | 183,356,741 | 0 | 0 | null | 2019-04-25T04:33:24 | 2019-04-25T04:33:24 | null | UTF-8 | Python | false | false | 22 | py | __version__ = "3.13.0" | [
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
498098e3792ce0f2613ffa5458596fc7c7d2fd88 | 5027bd2ff21141b6ffd576ec4009eed389acbacb | /audino/backend/routes/users.py | b555f2abc5caa9a063b58f9258f0d767da97cca1 | [
"MIT"
] | permissive | JacobGlennAyers/Audio_Labeling_System_AID | a19dda2b01491359e7fe10357ffa18e4cffff398 | 5ea3ed57d556b7d3a55df74b61909ea8f6c3127a | refs/heads/main | 2023-03-10T10:11:06.317063 | 2021-02-19T23:58:04 | 2021-02-19T23:58:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,003 | py | import sqlalchemy as sa
from flask import jsonify, flash, redirect, url_for, request
from flask_jwt_extended import jwt_required, get_jwt_identity
from werkzeug.urls import url_parse
from backend import app, db
from backend.models import User
from . import api
@api.route("/users", methods=["POST"])
@jwt_required
def create_user():
# TODO: Make jwt user id based to expire user session if permissions are changed
identity = get_jwt_identity()
request_user = User.query.filter_by(username=identity["username"]).first()
is_admin = True if request_user.role.role == "admin" else False
if is_admin == False:
return jsonify(message="Unauthorized access!"), 401
if not request.is_json:
return jsonify(message="Missing JSON in request"), 400
username = request.json.get("username", None)
password = request.json.get("password", None)
role_id = request.json.get("role", None)
if not username:
return (
jsonify(message="Please provide your username!", type="USERNAME_MISSING"),
400,
)
if not password:
return (
jsonify(message="Please provide your password!", type="PASSWORD_MISSING"),
400,
)
if not role_id:
return (jsonify(message="Please provide your role!", type="ROLE_MISSING"), 400)
if role_id not in ["1", "2"]:
return (
jsonify(message="Please assign correct role!", type="ROLE_INCORRECT"),
400,
)
try:
user = User(username=username, role_id=role_id)
user.set_password(password)
db.session.add(user)
db.session.commit()
db.session.refresh(user)
except Exception as e:
if type(e) == sa.exc.IntegrityError:
app.logger.info(f"User {username} already exists!")
return (jsonify(message="User already exists!", type="DUPLICATE_USER"), 409)
app.logger.error("Error creating user")
app.logger.error(e)
return jsonify(message="Error creating user!"), 500
return jsonify(user_id=user.id, message="User has been created!"), 201
@api.route("/users/no_auth", methods=["POST"])
def create_user_no_auth():
authNeeded = request.json.get("authNeeded", None)
dont_make_admin = False
if (not authNeeded):
dont_make_admin = True;
app.logger.info("this far")
print("hello?")
# TODO: Make jwt user id based to expire user session if permissions are changed
identity = get_jwt_identity()
app.logger.info(identity)
if (identity == None):
if (authNeeded):
return jsonify(message="Unauthorized access!"), 401
else:
request_user = User.query.filter_by(username=identity["username"]).first()
is_admin = True if request_user.role.role == "admin" else False
authNeeded = not(is_admin)
print("hello?")
app.logger.info(authNeeded)
app.logger.info("this far")
if authNeeded: #is_admin == False and authNeeded) or
return jsonify(message="Unauthorized access!"), 401
if not request.is_json:
return jsonify(message="Missing JSON in request"), 400
app.logger.info("this far")
username = request.json.get("username", None)
password = request.json.get("password", None)
role_id = "2"
app.logger.info(role_id)
if not username:
return (
jsonify(message="Please provide your username!", type="USERNAME_MISSING"),
400,
)
if not password:
return (
jsonify(message="Please provide your password!", type="PASSWORD_MISSING"),
400,
)
app.logger.info("this far")
if not role_id:
return (jsonify(message="Please provide your role!", type="ROLE_MISSING"), 400)
if role_id not in ["1", "2"]:
return (
jsonify(message="Please assign correct role!", type="ROLE_INCORRECT"),
400,
)
app.logger.info("this far")
try:
user = User(username=username, role_id=role_id)
user.set_password(password)
db.session.add(user)
db.session.commit()
db.session.refresh(user)
except Exception as e:
if type(e) == sa.exc.IntegrityError:
app.logger.info(f"User {username} already exists!")
return (jsonify(message="User already exists!", type="DUPLICATE_USER"), 409)
app.logger.error("Error creating user")
app.logger.error(e)
return jsonify(message="Error creating user!"), 500
return jsonify(user_id=user.id, message="User has been created!"), 201
@api.route("/users/<int:user_id>", methods=["GET"])
@jwt_required
def fetch_user(user_id):
identity = get_jwt_identity()
request_user = User.query.filter_by(username=identity["username"]).first()
is_admin = True if request_user.role.role == "admin" else False
if is_admin == False:
return jsonify(message="Unauthorized access!"), 401
try:
user = User.query.get(user_id)
except Exception as e:
app.logger.error(f"No user exists with user_id: {user_id}")
app.logger.error(e)
return (
jsonify(message="No user exists with given user_id", user_id=user_id),
404,
)
return (
jsonify(
user_id=user.id,
username=user.username,
role_id=user.role.id,
role=user.role.role,
),
200,
)
@api.route("/users/<int:user_id>", methods=["PATCH"])
@jwt_required
def update_user(user_id):
identity = get_jwt_identity()
request_user = User.query.filter_by(username=identity["username"]).first()
is_admin = True if request_user.role.role == "admin" else False
if is_admin == False:
return jsonify(message="Unauthorized access!"), 401
if not request.is_json:
return jsonify(message="Missing JSON in request"), 400
role_id = request.json.get("role", None)
if not role_id:
return (jsonify(message="Please provide your role!", type="ROLE_MISSING"), 400)
role_id = int(role_id)
# TODO: Make sure these ids exist in database ie. fetch them from database and check
if role_id not in [1, 2]:
return (
jsonify(message="Please assign correct role!", type="ROLE_INCORRECT"),
400,
)
try:
users = db.session.query(User).filter_by(role_id=1).all()
if len(users) == 1 and users[0].id == user_id and role_id == 2:
return jsonify(message="Atleast one admin should exist"), 400
user = User.query.get(user_id)
user.set_role(role_id)
db.session.commit()
except Exception as e:
app.logger.error("No user found")
app.logger.error(e)
return jsonify(message="No user found!"), 404
return (
jsonify(
username=user.username,
role=user.role.role,
role_id=user.role.id,
message="User has been updated!",
),
200,
)
@api.route("/users", methods=["GET"])
@jwt_required
def fetch_all_users():
identity = get_jwt_identity()
request_user = User.query.filter_by(username=identity["username"]).first()
is_admin = True if request_user.role.role == "admin" else False
if is_admin == False:
return jsonify(message="Unauthorized access"), 401
try:
users = User.query.all()
response = list(
[
{
"user_id": user.id,
"username": user.username,
"role": user.role.role.title(),
"created_on": user.created_at.strftime("%B %d, %Y"),
}
for user in users
]
)
except Exception as e:
message = "Error fetching all users"
app.logger.error(message)
app.logger.error(e)
return jsonify(message=message), 500
return jsonify(users=response), 200
| [
"sean.hyatt.perry@gmail.com"
] | sean.hyatt.perry@gmail.com |
0a96109443526a53b04bd4318fbadcab27e6ffa7 | 598c5f1f2d2ec805e2f5c9bad107367be56329dc | /SoshikiProject/Soshiki/urls.py | ce714b0b9b42fabb593f7bcc5ae21becc7968dc3 | [] | no_license | HE-Arc/Soshiki | f84c9f4e4fb7294ca00c3056f08e53d5a2a2b2e0 | d0ebca543bd647599090725806d86e4ecf665ecc | refs/heads/master | 2021-03-19T12:26:57.049066 | 2018-04-07T12:46:42 | 2018-04-07T12:46:42 | 122,045,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | """Soshiki URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('SoshikiApp.urls')),
path('admin/', admin.site.urls),
] | [
"jules007.cesar@hotmail.com"
] | jules007.cesar@hotmail.com |
e1994321314f87f666539e6bd8eeacaca4baae6a | 1348fc770c52446b0659cab143fb610641962812 | /Python/Bootcamp_seleksi/2b.3.py | 3c864f7db65aa2fdbf6e7fa060242cfe0afc3a76 | [] | no_license | bryanbernigen/ITBSem1 | 075d41b008741312b3f17ab1c73dd54d65da5be5 | ef10221599f625058cebb706233798010ba3c945 | refs/heads/main | 2023-04-17T19:38:04.215994 | 2021-05-05T04:48:03 | 2021-05-05T04:48:03 | 353,978,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | #2b.3 Lakukan OneHotEncoding pada kolom bentuk. Kolom bentuk yang
# the unprocessed 'bentuk' column can then be dropped (Hint: Sci-Kit Learn library)
# starter template
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder,OneHotEncoder,LabelBinarizer
from sklearn import preprocessing
# Load the school dataset; `bentuk` is the categorical school-type column.
# NOTE(review): hard-coded Windows path — assumes the CSV exists locally.
df = pd.read_csv("D:\\data_pandas\\data_sekolah.csv")
# Step 1: label-encode `bentuk` into integer codes (alphabetical order,
# so SD=0, SMA=1, SMK=2, SMP=3 — see the legend printed below).
df['bentuk_encoded'] = LabelEncoder().fit_transform(df.bentuk)
# Step 2: one-hot encode the integer codes; reshape(-1, 1) because
# OneHotEncoder expects a 2-D column of samples.
X = OneHotEncoder().fit_transform(df.bentuk_encoded.values.reshape(-1,1)).toarray()
# Wrap the dense one-hot matrix in a DataFrame with Bentuk_1..Bentuk_N names.
dfOneHot = pd.DataFrame(X, columns = ["Bentuk_"+str(int(i)+1) for i in range(X.shape[1])])
# Append the one-hot columns to the original frame (column-wise concat).
df = pd.concat([df, dfOneHot], axis=1)
print(df.loc[:,'Bentuk_1':'Bentuk_4'])
# Legend mapping each one-hot column back to its school type.
print('Bentuk_1: SD')
print('Bentuk_2: SMA')
print('Bentuk_3: SMK')
print('Bentuk_4: SMP')
"bryanbernigen@gmail.com"
] | bryanbernigen@gmail.com |
cb5f5ea3a6bd11f98432fda3ede902747c891678 | abf29eca06e33fd9a758c41b6f7990f6c2d15970 | /DirProject/DirProject/asgi.py | f0a9708181c3ba14a418b1175fc657cf816c026b | [] | no_license | amireppel/toga | 89c018536d4501f59a880e50b0218d56602d88c8 | 679687ad5d2f0c49d3b03d688bc002095c8fc60f | refs/heads/master | 2023-03-02T05:04:47.376339 | 2021-02-01T13:49:43 | 2021-02-01T13:49:43 | 334,486,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for DirProject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DirProject.settings')

# Module-level ASGI callable picked up by ASGI servers.
application = get_asgi_application()
| [
"amireppel@gmail.com"
] | amireppel@gmail.com |
c8e82f2c45977b37051e34e6e452577eb1467a83 | 19da2e0fdea22b5f6db5ac33d5d5e1d9882ab0a6 | /learning_templates/basic_app/urls.py | 001cbb4350dbe92b2c4661bf1acd8c0eb095aeb3 | [] | no_license | Ravi95KB/django-learning-projects | d49d53f1b2e8a892ddd1426e68a23c2061554845 | 19ddba76a0b56af051255a1ccfbca20f289c8208 | refs/heads/master | 2023-02-03T13:45:44.938715 | 2020-12-24T07:34:37 | 2020-12-24T07:34:37 | 324,081,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | from django.urls import path
from basic_app import views
#TEMPLATE TAGGING
app_name = 'basic_app' #The variable name needs to be 'app_name'
urlpatterns = [
path('relative/',views.relative,name='relative'),
path('other/',views.other,name='other'),
]
| [
"ravi25ju@gmail.com"
] | ravi25ju@gmail.com |
79926eb4ed7b1cb24b624dd9df42ddf2c75ac463 | 6188f8ef474da80c9e407e8040de877273f6ce20 | /examples/docs_snippets/docs_snippets/concepts/assets/asset_input_managers_numpy.py | 41c6a5aa55fde72067e0e687ff7d0816f51b530f | [
"Apache-2.0"
] | permissive | iKintosh/dagster | 99f2a1211de1f3b52f8bcf895dafaf832b999de2 | 932a5ba35263deb7d223750f211c2ddfa71e6f48 | refs/heads/master | 2023-01-24T15:58:28.497042 | 2023-01-20T21:51:35 | 2023-01-20T21:51:35 | 276,410,978 | 1 | 0 | Apache-2.0 | 2020-07-01T15:19:47 | 2020-07-01T15:13:56 | null | UTF-8 | Python | false | false | 1,511 | py | import os
import pandas as pd
from dagster import AssetIn, Definitions, IOManager, asset, io_manager
from .asset_input_managers import (
load_numpy_array,
load_pandas_dataframe,
store_pandas_dataframe,
)
# start_numpy_example
class PandasAssetIOManager(IOManager):
    """IOManager that stores/loads assets as CSV files under ./storage,
    using the pandas helpers imported from .asset_input_managers."""

    def handle_output(self, context, obj):
        # Persist the produced DataFrame to <storage>/<asset name>.csv.
        file_path = self._get_path(context)
        store_pandas_dataframe(name=file_path, table=obj)

    def _get_path(self, context):
        # File name is derived from the last component of the asset key.
        return os.path.join(
            "storage",
            f"{context.asset_key.path[-1]}.csv",
        )

    def load_input(self, context):
        # Read the asset back as a pandas DataFrame.
        file_path = self._get_path(context)
        return load_pandas_dataframe(name=file_path)
@io_manager
def pandas_asset_io_manager():
    # Resource factory exposing PandasAssetIOManager to Definitions.
    return PandasAssetIOManager()
class NumpyAssetIOManager(PandasAssetIOManager):
    """Same storage layout as PandasAssetIOManager, but loads inputs as
    numpy arrays instead of DataFrames (writes are inherited unchanged)."""

    def load_input(self, context):
        file_path = self._get_path(context)
        return load_numpy_array(name=file_path)
@io_manager
def numpy_asset_io_manager():
    # Resource factory exposing NumpyAssetIOManager to Definitions.
    return NumpyAssetIOManager()
@asset(io_manager_key="pandas_manager")
def upstream_asset():
    # Stored via the pandas IO manager (see `defs` resource mapping below).
    return pd.DataFrame([1, 2, 3])
@asset(
    ins={"upstream": AssetIn(key_prefix="public", input_manager_key="numpy_manager")}
)
def downstream_asset(upstream):
    # `upstream` is loaded through the numpy input manager, so this is the
    # shape tuple of the loaded array.
    return upstream.shape
# Wire both assets and map the resource keys used above to their factories.
defs = Definitions(
    assets=[upstream_asset, downstream_asset],
    resources={
        "pandas_manager": pandas_asset_io_manager,
        "numpy_manager": numpy_asset_io_manager,
    },
)
# end_numpy_example
"noreply@github.com"
] | noreply@github.com |
0d0ea308d017f4b52fbf4a8bf89f384d26247131 | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/Cura/tests/TestBuildVolume.py | 6ccb3d0fb7e1ed0702e8179aad7653553b069d72 | [
"LGPL-3.0-only",
"GPL-3.0-only"
] | permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 18,454 | py | from unittest.mock import MagicMock, patch
from UM.Math.AxisAlignedBox import AxisAlignedBox
import pytest
from UM.Math.Polygon import Polygon
from UM.Math.Vector import Vector
from cura.BuildVolume import BuildVolume, PRIME_CLEARANCE
import numpy
@pytest.fixture
def build_volume() -> BuildVolume:
    """Fixture: a BuildVolume built against a mocked application, with the
    Platform class patched out so no real scene/platform is created."""
    mocked_application = MagicMock()
    mocked_platform = MagicMock(name="platform")
    with patch("cura.BuildVolume.Platform", mocked_platform):
        return BuildVolume(mocked_application)
def test_buildVolumeSetSizes(build_volume):
    """Diagonal size should track whichever single dimension is non-zero."""
    build_volume.setWidth(10)
    assert build_volume.getDiagonalSize() == 10
    build_volume.setWidth(0)
    build_volume.setHeight(100)
    assert build_volume.getDiagonalSize() == 100
    build_volume.setHeight(0)
    build_volume.setDepth(200)
    assert build_volume.getDiagonalSize() == 200
def test_buildMesh(build_volume):
    """The outline mesh of a 100³ volume is the 24 vertices of its 12 edges."""
    mesh = build_volume._buildMesh(0, 100, 0, 100, 0, 100, 1)
    result_vertices = numpy.array([[0., 0., 0.], [100., 0., 0.], [0., 0., 0.], [0., 100., 0.], [0., 100., 0.], [100., 100., 0.], [100., 0., 0.], [100., 100., 0.], [0., 0., 100.], [100., 0., 100.], [0., 0., 100.], [0., 100., 100.], [0., 100., 100.], [100., 100., 100.], [100., 0., 100.], [100., 100., 100.], [0., 0., 0.], [0., 0., 100.], [100., 0., 0.], [100., 0., 100.], [0., 100., 0.], [0., 100., 100.], [100., 100., 0.], [100., 100., 100.]], dtype=numpy.float32)
    assert numpy.array_equal(result_vertices, mesh.getVertices())
def test_buildGridMesh(build_volume):
    """The grid (build plate) mesh is two triangles at y == -1."""
    mesh = build_volume._buildGridMesh(0, 100, 0, 100, 0, 100, 1)
    result_vertices = numpy.array([[0., -1., 0.], [100., -1., 100.], [100., -1., 0.], [0., -1., 0.], [0., -1., 100.], [100., -1., 100.]])
    assert numpy.array_equal(result_vertices, mesh.getVertices())
def test_clamp(build_volume):
    """_clamp limits a value to [min, max]: inside passes through, above clips."""
    assert build_volume._clamp(0, 0, 200) == 0
    assert build_volume._clamp(0, -200, 200) == 0
    assert build_volume._clamp(300, -200, 200) == 200
class TestCalculateBedAdhesionSize:
    """Tests for BuildVolume._calculateBedAdhesionSize with the container
    stack replaced by mocks whose getProperty answers come from
    `setting_property_dict` (patched per-case via patch.dict)."""

    # Baseline setting values; individual tests overlay these.
    setting_property_dict = {"adhesion_type": {"value": "brim"},
                             "skirt_brim_line_width": {"value": 0},
                             "initial_layer_line_width_factor": {"value": 0},
                             "brim_line_count": {"value": 0},
                             "machine_width": {"value": 200},
                             "machine_depth": {"value": 200},
                             "skirt_line_count": {"value": 0},
                             "skirt_gap": {"value": 0},
                             "raft_margin": {"value": 0}
                             }

    # Used as a MagicMock side_effect, so it is called without `self`:
    # args[1] is the setting key, args[2] the property name (e.g. "value").
    def getPropertySideEffect(*args, **kwargs):
        properties = TestCalculateBedAdhesionSize.setting_property_dict.get(args[1])
        if properties:
            return properties.get(args[2])

    def createAndSetGlobalStack(self, build_volume):
        # Install a mocked global container stack backed by the dict above.
        mocked_stack = MagicMock()
        mocked_stack.getProperty = MagicMock(side_effect=self.getPropertySideEffect)

        build_volume._global_container_stack = mocked_stack

    def test_noGlobalStack(self, build_volume: BuildVolume):
        # Without a global stack there is nothing to compute.
        assert build_volume._calculateBedAdhesionSize([]) is None

    @pytest.mark.parametrize("setting_dict, result", [
        ({}, 0),
        ({"adhesion_type": {"value": "skirt"}}, 0),
        ({"adhesion_type": {"value": "raft"}}, 0),
        ({"adhesion_type": {"value": "none"}}, 0),
        ({"adhesion_type": {"value": "skirt"}, "skirt_line_count": {"value": 2}, "initial_layer_line_width_factor": {"value": 1}, "skirt_brim_line_width": {"value": 2}}, 0.02),
        # Even though it's marked as skirt, it should behave as a brim as the prime tower has a brim (skirt line count is still at 0!)
        ({"adhesion_type": {"value": "skirt"}, "prime_tower_brim_enable": {"value": True}, "skirt_brim_line_width": {"value": 2}, "initial_layer_line_width_factor": {"value": 3}}, -0.06),
        ({"brim_line_count": {"value": 1}, "skirt_brim_line_width": {"value": 2}, "initial_layer_line_width_factor": {"value": 3}}, 0),
        ({"brim_line_count": {"value": 2}, "skirt_brim_line_width": {"value": 2}, "initial_layer_line_width_factor": {"value": 3}}, 0.06),
        ({"brim_line_count": {"value": 9000000}, "skirt_brim_line_width": {"value": 90000}, "initial_layer_line_width_factor": {"value": 9000}}, 100), # Clamped at half the max size of buildplate
    ])
    def test_singleExtruder(self, build_volume: BuildVolume, setting_dict, result):
        self.createAndSetGlobalStack(build_volume)
        patched_dictionary = self.setting_property_dict.copy()
        patched_dictionary.update(setting_dict)
        with patch.dict(self.setting_property_dict, patched_dictionary):
            assert build_volume._calculateBedAdhesionSize([]) == result

    def test_unknownBedAdhesion(self, build_volume: BuildVolume):
        # An adhesion_type the implementation does not know must raise.
        self.createAndSetGlobalStack(build_volume)
        patched_dictionary = self.setting_property_dict.copy()
        patched_dictionary.update({"adhesion_type": {"value": "OMGZOMGBBQ"}})
        with patch.dict(self.setting_property_dict, patched_dictionary):
            with pytest.raises(Exception):
                build_volume._calculateBedAdhesionSize([])
class TestComputeDisallowedAreasStatic:
    """Tests for BuildVolume._computeDisallowedAreasStatic: the machine's
    configured disallowed areas should come back as Polygons keyed by
    extruder id."""

    setting_property_dict = {"machine_disallowed_areas": {"value": [[[-200, 112.5], [ -82, 112.5], [ -84, 102.5], [-115, 102.5]]]},
                             "machine_width": {"value": 200},
                             "machine_depth": {"value": 200},
                             }

    # MagicMock side_effect — called without `self`; args[1] is the setting
    # key, args[2] the property name.
    def getPropertySideEffect(*args, **kwargs):
        properties = TestComputeDisallowedAreasStatic.setting_property_dict.get(args[1])
        if properties:
            return properties.get(args[2])

    def test_computeDisallowedAreasStaticNoExtruder(self, build_volume: BuildVolume):
        # With no extruders there is no key to attach areas to.
        mocked_stack = MagicMock()
        mocked_stack.getProperty = MagicMock(side_effect=self.getPropertySideEffect)

        build_volume._global_container_stack = mocked_stack
        assert build_volume._computeDisallowedAreasStatic(0, []) == {}

    def test_computeDisalowedAreasStaticSingleExtruder(self, build_volume: BuildVolume):
        mocked_stack = MagicMock()
        mocked_stack.getProperty = MagicMock(side_effect=self.getPropertySideEffect)

        mocked_extruder = MagicMock()
        mocked_extruder.getProperty = MagicMock(side_effect=self.getPropertySideEffect)
        mocked_extruder.getId = MagicMock(return_value = "zomg")

        build_volume._global_container_stack = mocked_stack
        with patch("cura.Settings.ExtruderManager.ExtruderManager.getInstance"):
            result = build_volume._computeDisallowedAreasStatic(0, [mocked_extruder])
            # Note that the polygon origin is flipped relative to the input.
            assert result == {"zomg": [Polygon([[-84.0, 102.5], [-115.0, 102.5], [-200.0, 112.5], [-82.0, 112.5]])]}

    def test_computeDisalowedAreasMutliExtruder(self, build_volume):
        mocked_stack = MagicMock()
        mocked_stack.getProperty = MagicMock(side_effect=self.getPropertySideEffect)

        mocked_extruder = MagicMock()
        mocked_extruder.getProperty = MagicMock(side_effect=self.getPropertySideEffect)
        mocked_extruder.getId = MagicMock(return_value="zomg")
        extruder_manager = MagicMock()
        extruder_manager.getActiveExtruderStacks = MagicMock(return_value = [mocked_stack])
        build_volume._global_container_stack = mocked_stack
        with patch("cura.Settings.ExtruderManager.ExtruderManager.getInstance", MagicMock(return_value = extruder_manager)):
            result = build_volume._computeDisallowedAreasStatic(0, [mocked_extruder])
            # Same areas as the single-extruder case, keyed by extruder id.
            assert result == {"zomg": [Polygon([[-84.0, 102.5], [-115.0, 102.5], [-200.0, 112.5], [-82.0, 112.5]])]}
class TestUpdateRaftThickness:
    """Tests for BuildVolume._updateRaftThickness. With every raft layer set
    to thickness 1, base + interface + surface = 3 (airgap/overlap settings
    are present but the expected total here is 3)."""

    setting_property_dict = {"raft_base_thickness": {"value": 1},
                             "raft_interface_thickness": {"value": 1},
                             "raft_surface_layers": {"value": 1},
                             "raft_surface_thickness": {"value": 1},
                             "raft_airgap": {"value": 1},
                             "layer_0_z_overlap": {"value": 1},
                             "adhesion_type": {"value": "raft"}}

    # MagicMock side_effect — called without `self`.
    def getPropertySideEffect(*args, **kwargs):
        properties = TestUpdateRaftThickness.setting_property_dict.get(args[1])
        if properties:
            return properties.get(args[2])

    def createMockedStack(self):
        # Global stack mock with one mocked extruder under key "0".
        mocked_global_stack = MagicMock(name="mocked_global_stack")
        mocked_global_stack.getProperty = MagicMock(side_effect=self.getPropertySideEffect)
        extruder_stack = MagicMock()
        mocked_global_stack.extruders = {"0": extruder_stack}
        return mocked_global_stack

    def test_simple(self, build_volume: BuildVolume):
        build_volume.raftThicknessChanged = MagicMock()
        mocked_global_stack = self.createMockedStack()
        build_volume._global_container_stack = mocked_global_stack

        assert build_volume.getRaftThickness() == 0
        build_volume._updateRaftThickness()
        assert build_volume.getRaftThickness() == 3
        # The change signal must fire exactly once.
        assert build_volume.raftThicknessChanged.emit.call_count == 1

    def test_adhesionIsNotRaft(self, build_volume: BuildVolume):
        # With a non-raft adhesion type the thickness must stay 0.
        patched_dictionary = self.setting_property_dict.copy()
        patched_dictionary["adhesion_type"] = {"value": "not_raft"}
        mocked_global_stack = self.createMockedStack()
        build_volume._global_container_stack = mocked_global_stack

        assert build_volume.getRaftThickness() == 0
        with patch.dict(self.setting_property_dict, patched_dictionary):
            build_volume._updateRaftThickness()
        assert build_volume.getRaftThickness() == 0

    def test_noGlobalStack(self, build_volume: BuildVolume):
        # No stack: nothing changes and the signal does not fire.
        build_volume.raftThicknessChanged = MagicMock()
        assert build_volume.getRaftThickness() == 0
        build_volume._updateRaftThickness()
        assert build_volume.getRaftThickness() == 0
        assert build_volume.raftThicknessChanged.emit.call_count == 0
class TestComputeDisallowedAreasPrimeBlob:
    """Tests for BuildVolume._computeDisallowedAreasPrimeBlob: the prime
    blob area is a circle of PRIME_CLEARANCE grown by the border size and
    translated to the extruder prime position."""

    setting_property_dict = {"machine_width": {"value": 50},
                             "machine_depth": {"value": 100},
                             "prime_blob_enable": {"value": True},
                             "extruder_prime_pos_x": {"value": 25},
                             "extruder_prime_pos_y": {"value": 50},
                             "machine_center_is_zero": {"value": True},
                             }

    # MagicMock side_effect — called without `self`.
    def getPropertySideEffect(*args, **kwargs):
        properties = TestComputeDisallowedAreasPrimeBlob.setting_property_dict.get(args[1])
        if properties:
            return properties.get(args[2])

    def test_noGlobalContainer(self, build_volume: BuildVolume):
        # No global container and no extruders, so we expect no blob areas
        assert build_volume._computeDisallowedAreasPrimeBlob(12, []) == {}

    def test_noExtruders(self, build_volume: BuildVolume):
        mocked_stack = MagicMock()
        mocked_stack.getProperty = MagicMock(side_effect=self.getPropertySideEffect)

        build_volume._global_container_stack = mocked_stack
        # No extruders, so still expect that we get no area
        assert build_volume._computeDisallowedAreasPrimeBlob(12, []) == {}

    def test_singleExtruder(self, build_volume: BuildVolume):
        mocked_global_stack = MagicMock(name = "mocked_global_stack")
        mocked_global_stack.getProperty = MagicMock(side_effect=self.getPropertySideEffect)

        mocked_extruder_stack = MagicMock(name = "mocked_extruder_stack")
        mocked_extruder_stack.getId = MagicMock(return_value = "0")
        mocked_extruder_stack.getProperty = MagicMock(side_effect=self.getPropertySideEffect)

        build_volume._global_container_stack = mocked_global_stack

        # Create a polygon that should be the result
        resulting_polygon = Polygon.approximatedCircle(PRIME_CLEARANCE)
        # Since we want a blob of size 12;
        resulting_polygon = resulting_polygon.getMinkowskiHull(Polygon.approximatedCircle(12))
        # In the The translation result is 25, -50 (due to the settings used)
        resulting_polygon = resulting_polygon.translate(25, -50)
        assert build_volume._computeDisallowedAreasPrimeBlob(12, [mocked_extruder_stack]) == {"0": [resulting_polygon]}
class TestCalculateExtraZClearance:
    """Tests for BuildVolume._calculateExtraZClearance: with retraction hop
    enabled the configured hop distance becomes extra Z clearance."""

    setting_property_dict = {"retraction_hop": {"value": 12},
                             "retraction_hop_enabled": {"value": True}}

    # Used as a MagicMock side_effect, so it is called without `self`:
    # args[1] is the setting key, args[2] the property name (e.g. "value").
    def getPropertySideEffect(*args, **kwargs):
        properties = TestCalculateExtraZClearance.setting_property_dict.get(args[1])
        if properties:
            return properties.get(args[2])

    def test_noContainerStack(self, build_volume: BuildVolume):
        # BUGFIX: was `is 0` — identity comparison with an int literal is
        # implementation-defined and a SyntaxWarning since Python 3.8.
        assert build_volume._calculateExtraZClearance([]) == 0

    def test_withRetractionHop(self, build_volume: BuildVolume):
        mocked_global_stack = MagicMock(name="mocked_global_stack")

        mocked_extruder = MagicMock()
        mocked_extruder.getProperty = MagicMock(side_effect=self.getPropertySideEffect)

        build_volume._global_container_stack = mocked_global_stack

        # It should be 12 because we have the hop enabled and the hop distance is set to 12
        assert build_volume._calculateExtraZClearance([mocked_extruder]) == 12

    def test_withoutRetractionHop(self, build_volume: BuildVolume):
        mocked_global_stack = MagicMock(name="mocked_global_stack")

        mocked_extruder = MagicMock()
        mocked_extruder.getProperty = MagicMock(side_effect=self.getPropertySideEffect)

        build_volume._global_container_stack = mocked_global_stack

        patched_dictionary = self.setting_property_dict.copy()
        patched_dictionary["retraction_hop_enabled"] = {"value": False}
        with patch.dict(self.setting_property_dict, patched_dictionary):
            # Hop disabled, so no extra clearance regardless of hop distance.
            assert build_volume._calculateExtraZClearance([mocked_extruder]) == 0
class TestRebuild:
    """Tests for BuildVolume.rebuild(): no mesh is produced until the volume
    has non-zero dimensions, the engine-created callback has fired, and a
    global stack exists."""

    def test_zeroWidthHeightDepth(self, build_volume: BuildVolume):
        build_volume.rebuild()
        assert build_volume.getMeshData() is None

    def test_engineIsNotRead(self, build_volume: BuildVolume):
        build_volume.setWidth(10)
        build_volume.setHeight(10)
        build_volume.setDepth(10)
        build_volume.rebuild()
        assert build_volume.getMeshData() is None

    def test_noGlobalStack(self, build_volume: BuildVolume):
        build_volume.setWidth(10)
        build_volume.setHeight(10)
        build_volume.setDepth(10)
        # Fake the the "engine is created callback"
        build_volume._onEngineCreated()
        build_volume.rebuild()
        assert build_volume.getMeshData() is None

    def test_updateBoundingBox(self, build_volume: BuildVolume):
        build_volume.setWidth(10)
        build_volume.setHeight(10)
        build_volume.setDepth(10)

        mocked_global_stack = MagicMock()
        build_volume._global_container_stack = mocked_global_stack
        build_volume.getEdgeDisallowedSize = MagicMock(return_value = 0)
        build_volume.updateNodeBoundaryCheck = MagicMock()

        # Fake the the "engine is created callback"
        build_volume._onEngineCreated()
        build_volume.rebuild()

        # A 10x10x10 volume is centered around the origin, one unit deep below it.
        bounding_box = build_volume.getBoundingBox()
        assert bounding_box.minimum == Vector(-5.0, -1.0, -5.0)
        assert bounding_box.maximum == Vector(5.0, 10.0, 5.0)
class TestUpdateMachineSizeProperties:
    """Tests for BuildVolume._updateMachineSizeProperties: copies the
    machine_* settings from the global stack into the volume's fields."""

    setting_property_dict = {"machine_width": {"value": 50},
                             "machine_depth": {"value": 100},
                             "machine_height": {"value": 200},
                             "machine_shape": {"value": "DERP!"}}

    # MagicMock side_effect — called without `self`.
    def getPropertySideEffect(*args, **kwargs):
        properties = TestUpdateMachineSizeProperties.setting_property_dict.get(args[1])
        if properties:
            return properties.get(args[2])

    def test_noGlobalStack(self, build_volume: BuildVolume):
        # Without a stack all dimensions stay at their defaults.
        build_volume._updateMachineSizeProperties()
        assert build_volume._width == 0
        assert build_volume._height == 0
        assert build_volume._depth == 0
        assert build_volume._shape == ""

    def test_happy(self, build_volume: BuildVolume):
        mocked_global_stack = MagicMock(name="mocked_global_stack")
        mocked_global_stack.getProperty = MagicMock(side_effect=self.getPropertySideEffect)
        build_volume._global_container_stack = mocked_global_stack
        build_volume._updateMachineSizeProperties()
        assert build_volume._width == 50
        assert build_volume._height == 200
        assert build_volume._depth == 100
        assert build_volume._shape == "DERP!"
class TestGetEdgeDisallowedSize:
    """Tests for BuildVolume.getEdgeDisallowedSize with the bed-adhesion
    size calculation stubbed to return 1 (see the overriding fixture)."""

    setting_property_dict = {}
    bed_adhesion_size = 1

    @pytest.fixture()
    def build_volume(self, build_volume):
        # Shadows the module-level fixture: force the adhesion size to 1.
        build_volume._calculateBedAdhesionSize = MagicMock(return_value = 1)
        return build_volume

    # MagicMock side_effect — called without `self`.
    def getPropertySideEffect(*args, **kwargs):
        properties = TestGetEdgeDisallowedSize.setting_property_dict.get(args[1])
        if properties:
            return properties.get(args[2])

    def createMockedStack(self):
        mocked_global_stack = MagicMock(name="mocked_global_stack")
        mocked_global_stack.getProperty = MagicMock(side_effect=self.getPropertySideEffect)
        return mocked_global_stack

    def test_noGlobalContainer(self, build_volume: BuildVolume):
        assert build_volume.getEdgeDisallowedSize() == 0

    def test_unknownAdhesion(self, build_volume: BuildVolume):
        # NOTE(review): the pytest.raises below is commented out, so this
        # test no longer asserts that an unknown adhesion type raises —
        # it only checks the call does not crash. Confirm intent upstream.
        build_volume._global_container_stack = self.createMockedStack()
        with patch("cura.Settings.ExtruderManager.ExtruderManager.getInstance"):
            #with pytest.raises(Exception):
            # Since we don't have any adhesion set, this should break.
            build_volume.getEdgeDisallowedSize()

    def test_oneAtATime(self, build_volume: BuildVolume):
        build_volume._global_container_stack = self.createMockedStack()
        with patch("cura.Settings.ExtruderManager.ExtruderManager.getInstance"):
            with patch.dict(self.setting_property_dict, {"print_sequence": {"value": "one_at_a_time"}}):
                assert build_volume.getEdgeDisallowedSize() == 0.1
| [
"t106360212@ntut.org.tw"
] | t106360212@ntut.org.tw |
fc5145f0837ea6447b109a85447c1d90938c40d6 | 8f420e1d63d69b21a1d964ae4771fbfe54b2b997 | /Python Session 6.1.1 conditional statements and loops.py | 616d674dc98133e4218c3543328435268f63cd80 | [] | no_license | Jyoti-27/Python-6 | 03eb49037c627442fe3a633f92c1ae4b4e14d801 | dc4a8d2cd65a9e4614ac6fe5c914b8cec6498f2c | refs/heads/main | 2022-12-24T16:42:23.780614 | 2020-10-01T20:01:30 | 2020-10-01T20:01:30 | 300,408,932 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,152 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# create a variable salary and assign a value to it
salary = 15000
# Simple loan-eligibility check: the threshold is 35000, so this example
# prints the "not eligible" branch.
if salary >= 35000:
    print('you are eligible for loan')
else:
    print('you are not eligible')
# In[1]:
# lets assume that there is a criteria to unlock the certifications
# BUGFIX: the lines below were raw notebook-markdown left uncommented in
# the script export, which made the whole file a SyntaxError when run.
# - rules
# - they should have 70% of attendance(crtieria 1)
# - score >300(criteria 2)
# - if they meet both the criteria then they get merit certificate
# - otherwise they get participation certificate

# In[2]:

# create if else block just in terms of the attendance
attendance = int(input('enter the attendance:'))
score = int(input('enter the score:'))
# Merit certificate requires BOTH criteria; otherwise participation only.
if attendance >= 70 and score > 300:
    print(' congrats!! you fullfill both the criteria to unlock the merit certification')
else:
    print('your attendance and score is low for merit ceritificate, please collect your participation certificate')
# In[3]:
# if blocks only
attendance = 70
if attendance < 75:
    print("attendance does not fullfill one of the criteria for merit certificate")
    print("you are eligible for participation certificate")


# In[7]:


# NOTE(review): with attendance = 80 this condition is False, so nothing is
# printed — and the "Great!!" message paired with `< 75` looks inverted.
# Presumably `attendance >= 75` was intended; confirm against the notebook.
attendance = 80
if attendance < 75:
    print("Great!! you fullfilled one of the criteria for merit certificate")
    print("you are eligible for participation certificate")
# In[8]:
# check the number is even or odd
num = int(input('enter the number:'))
# Parity via the modulo operator: divisible by 2 means even.
if num % 2 == 0:
    print( num ,'is an even number')
else:
    print( num , 'is an odd number')
# # else if ladder
#
# ## if test expression:
#
# Body of if
#
# ## elif test expression:
#
# Body of elif
#
# ## else:
#
# Body of else
#
# # using membership test
# - in and not in
# In[9]:
# Demo data for the `in` / `not in` membership operators.
list1 = ['a','b','c']
list2 = ['abc','def','d','hello']


# In[10]:


# 'c' is an element of list1, so the `in` test is True.
if 'c' in list1:
    print('great')
else:
    print('not great')


# In[11]:


# Negated membership: 'c' IS in list1, so this falls to the else branch.
if 'c' not in list1:
    print('great')
else:
    print('inside else')
# ##### More on blocks
# * blocks start below a line ending in :
# * all lines in a block are indented (by 4 spaces)
# * Nested blocks --> nested indenting
# * End of block --> unindent
#
#
# In[14]:
# Grade a mark: below 40 fails with 'F'; passing marks map to ascending
# letter-grade bands.
marks = int(input('enter the marks:'))
if marks < 40:
    status = 'fail'
    lettergrade = 'F'
else:
    status = 'pass'
    if marks < 50:
        lettergrade = 'D'
    elif marks < 60:
        lettergrade = 'C'
    elif marks < 75:
        lettergrade = 'B'
    elif marks < 80:
        lettergrade = 'A'
    elif marks < 90:
        lettergrade = 'A+'
    else:
        # BUGFIX: marks >= 90 previously left `lettergrade` unassigned and
        # the print below raised NameError. Treat 90+ as the top band too
        # (assumed intent — no higher grade appears anywhere in the file).
        lettergrade = 'A+'
print(status,lettergrade)
# # Nested if
# In[15]:
# Classify a number as zero / positive / negative using a nested if:
# the outer branch splits non-negative vs negative, the inner one
# separates exact zero from strictly positive.
number = round(float(input('enter the number:')),2)
if number >= 0:
    if number == 0:
        print('number', number ,'is zero')
    else:
        print('number', number ,'is positive number')
else:
    print('number', number , 'is negative number')
# * Write a code to select a batsman. Below given is the criteria
# * The player should belong to Hyderabad,Bengaluru or Mumbai
# * Player type should be a batsman
# * If he is a batsman he should have scored atleast 5 centuries
#
# In[16]:
# Allowed home cities for selection.
# NOTE(review): 'begaluru' is misspelled (the exercise says Bengaluru), so
# a player entering the correct spelling will never match — confirm intent
# before changing the runtime string.
location = ['hyderabad','mumbai', 'begaluru']
player_location = input('enter the location:')
player_type = input('enter the player type :')
no_of_hundreds = int(input('enter the number of hundreds: '))
# Selection rules: city must be allowed, type must be 'batsmen'
# (case-insensitive), and at least 5 centuries are required.
if player_location in location:
    if player_type.lower() == 'batsmen':
        if no_of_hundreds >= 5:
            print("player is in the squad")
        else:
            print('not selected')
    else:
        print('we need the batsmen')
else:
    print('location not in the list')
# # write a program to find the grades of a person
# - If the scores is greater than equal to 90 , print grade A
# - If the score is between 80 to 90 then B
# - 80 to 89
#
# In[17]:
# Print the grade of a mark: >=90 A, 80-89 B, 70-79 C, 60-69 D, else F.
marks = int(float(input('enter the marks obtained: ')))
if marks >= 90:
    print('Grade A')
elif marks >= 80:
    # BUGFIX: was `marks > 80 and marks <= 89`, which wrongly gave exactly
    # 80 a 'Grade F'. Earlier branches already exclude >= 90, so a simple
    # lower bound is sufficient (same fix for the bands below).
    print('Grade B')
elif marks >= 70:
    # was `marks > 70 and marks <= 79` — exactly 70 fell through to F
    print('Grade C')
elif marks >= 60:
    # was `marks > 60 and marks <= 69` — exactly 60 fell through to F
    print('Grade D')
else:
    print('Grade F')
# In[ ]:
# In[ ]:
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
1e5810523ea93878d26b6ef00317399d8e25aa25 | 1005a4290bca16dcf4c6b3415662e134044305bd | /python/Sources/gensource_Z2Jets_muhad_cfi.py | 1d71fe0e0d4998bc6ab78e9176740d7f2eb3bcf3 | [] | no_license | cms-analysis/TauAnalysis-GenSimTools | 2652bb713107bb1d459882581237662d229d3906 | b787b784ee3598c4428c4883c04cdc525eb54eb6 | refs/heads/master | 2020-12-24T15:58:14.392883 | 2013-06-28T20:10:42 | 2013-06-28T20:10:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | import FWCore.ParameterSet.Config as cms
# The Alpgen Source. It reads unweighted alpgen files
source = cms.Source("AlpgenSource",
    # use an input file name without extension unw
    fileNames = cms.untracked.vstring(
        'file:/home/cbernet/ALPGEN/v213/zjetwork/z2j'
    )
)

# The Alpgen Producer.
from GeneratorInterface.AlpgenInterface.generator_cfi import *
# Centre-of-mass energy for the hadronization step (presumably GeV,
# i.e. 14 TeV — confirm against the generator_cfi defaults).
generator.comEnergy = 14000.0
generator.pythiaHepMCVerbosity = False
generator.maxEventsToPrint = 0

# Set the jet matching parameters as you see fit.
generator.jetMatching.applyMatching = True
generator.jetMatching.exclusive = True
generator.jetMatching.etMin = 20.0
generator.jetMatching.drMin = 0.5

# for every process including tau should be use TAUOLA
from GeneratorInterface.ExternalDecays.TauolaSettings_cff import *
generator.ExternalDecays = cms.PSet(
    Tauola = cms.untracked.PSet(
        TauolaPolar,
        InputCards = cms.PSet(
            pjak1 = cms.int32(0),
            pjak2 = cms.int32(0),
            #mdtau = cms.int32(116) #mdtau = 0 all decays
            mdtau = cms.int32(116) #mdtau = 116 - ONE mu+-, other taus -> all channels
        )
    ),
    parameterSets = cms.vstring('Tauola')
)

# Sequence exported for use in the generation path.
ProductionFilterSequence = cms.Sequence(generator)
| [
"sha1-5c72da6f595cce9b6b48aff6d56f01e9beb4aad1@cern.ch"
] | sha1-5c72da6f595cce9b6b48aff6d56f01e9beb4aad1@cern.ch |
96f097ae2cef0885482cea0cfb0b9c9056bdbf45 | ab4f2ec4b595aec2a0133740858f1e15bc833b19 | /3_Using_Python_to_access_web_data/week5_web_services_XML/week5_assignment_parse_XML.py | 4d25a4650fce83813995908eb8743b95d21a19a8 | [] | no_license | dexterka/coursera_files | 4bcfc838affe499be6ff3fa8d1ae3f9b9cb7f4df | c550db6161c833c6c47f0214530129ffe50ae8d8 | refs/heads/master | 2021-07-14T10:59:58.528414 | 2021-06-23T08:59:56 | 2021-06-23T08:59:56 | 143,457,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # Env setup
import urllib.request
import xml.etree.ElementTree as ET
# Temp vars
total = 0

# User's input
url_name = input('Enter location: ')
# Browser-like User-Agent header (presumably to avoid the server rejecting
# the default Python UA).
html_request = urllib.request.Request(url_name, headers={'User-Agent': 'Mozilla/5.0'})
html_read = urllib.request.urlopen(html_request).read()

# Parse XML data: sum the <count> value of every <comments>/<comment> node.
xml_tree = ET.fromstring(html_read)
list_of_tags = xml_tree.findall('comments/comment')
for tag in list_of_tags:
    extract = tag.find('count').text
    number = int(extract)
    total = total + number

# Print results
print('Retrieving:', url_name)
print('Retrieved %d characters' % len(html_read))
print('Count of comments:', len(list_of_tags))
print('Sum of numbers:', total)
"martina.chlebcova@gmail.com"
] | martina.chlebcova@gmail.com |
a4f40a48fe3950b604589a17e731eb39f8c0f317 | 14e434036166c14364f7c6eb5477a225037c4a5b | /src/services/csv_reader.py | 76ae89736a66abbee1267a50aaed0774fbeda0f1 | [] | no_license | kristaloverbeer/ssp | c55bad4982576c7464515a9bb87f34080e6c7d73 | 3f475ae4448f5573540323e185a4ad1b508ed868 | refs/heads/master | 2021-06-13T06:45:04.790045 | 2019-05-13T18:13:41 | 2019-07-29T06:58:37 | 186,472,786 | 0 | 0 | null | 2021-05-06T19:34:19 | 2019-05-13T18:16:24 | Python | UTF-8 | Python | false | false | 6,699 | py | """
Parse CSV to extract addresses
How to use the `csv_reader` script:
1) Save the xls data file as a csv in UTF8 encoding
2) Call the script by specifying the type of source (hotel or people)
```
$ python src/services/csv_reader.py \
-t "hotel" \
-s "/Users/fpaupier/projects/samu_social/data/hotels-generalites.csv"
```
3) A json file with the list of addresses is generated.
"""
import csv
import argparse
import json
class CsvReader(object):
    """Reads the ;-separated CSV exports (hotels / volunteers) into dicts."""

    def parse(self, source, csv_type):
        """Parse a raw CSV export.

        Args:
            source (str): path to the CSV file (``;``-separated, UTF-8).
            csv_type (str): ``"people"`` or ``"hotel"``.

        Returns:
            list[dict]: one dict per data row; row 0 (the header) is skipped.
        """
        results = []
        with open(source, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=";")
            data = {i: j for i, j in enumerate(reader)}
            for i, line in data.items():
                if csv_type == "people" and i > 0:
                    formatted_address = '{} {} {} {}'.format(line[2], line[3], line[4], line[5])
                    people = {
                        'name': line[0],
                        'surname': line[1],
                        # split/join collapses duplicate spaces from empty columns
                        'address': ' '.join(formatted_address.split()),
                        'postcode': line[6],
                        'license': line[7],
                        'availability': line[8],
                        'time_of_day': line[9],
                        'area1': line[10],
                        'area2': line[11],
                        'area3': line[12],
                        'area4': line[13],
                    }
                    results.append(people)
                if csv_type == "hotel" and i > 0:
                    if line[2] == "0":  # Only consider non removed hotel
                        formatted_address = "{} {}".format(line[7], line[9])
                        hotel = {
                            'hotel_status': line[2],
                            'nom': line[6],
                            'address': ' '.join(formatted_address.split()),
                            'postcode': line[8],
                            'capacity': line[41],
                            'bedroom_number': line[43],
                            # BUGFIX: the old code re-iterated the already
                            # exhausted `reader` (and shadowed `line`), so
                            # `features` was always 0. Sum the feature-flag
                            # columns 62..149 of the *current* row instead.
                            'features': sum(int(value) for value in line[62:150]),
                        }
                        results.append(hotel)
        return results

    def parse_enriched(self, source, csv_type):
        """Parse a geocoding-enriched export (same types, extra lat/lon columns).

        Rows missing either coordinate get ``point=None``.
        """
        results = []
        with open(source, 'r', encoding='utf-8') as f:
            reader = csv.reader(f, delimiter=';')
            data = {i: j for i, j in enumerate(reader)}
            for i, line in data.items():
                if csv_type == 'people' and i > 0:
                    point = {'latitude': float(line[6]), 'longitude': float(line[8])} if (line[6] and line[8]) else None
                    people = {
                        'address': line[0],
                        'area1': line[1],
                        'area2': line[2],
                        'area3': line[3],
                        'area4': line[4],
                        'availability': line[5],
                        'license': line[7],
                        'name': line[9],
                        'point': point,
                        'postcode': line[11],
                        'surname': line[12],
                        'time_of_day': line[13],
                    }
                    results.append(people)
                if csv_type == 'hotel' and i > 0:
                    point = {'latitude': float(line[5]), 'longitude': float(line[6])} if (line[5] and line[6]) else None
                    hotel = {
                        'address': line[0],
                        'bedroom_number': line[1],
                        'capacity': line[2],
                        'features': line[3],
                        'hotel_status': line[4],
                        'nom': line[7],
                        'point': point,
                        'postcode': line[9],
                    }
                    results.append(hotel)
        return results
def parse_csv(source, csv_type, write=False):
    """
    Args:
        source(string): absolute path to the file to process
        csv_type(string): type of csv file, hotel data or volunteer data
        write(bool): when True, dump the results to a sibling JSON file and
            return its path instead of the parsed list.
    Return:
        json_path: path to the file containing the adress as json format
            (only when ``write=True``); otherwise the list of row dicts.
    """
    # NOTE(review): split(".") breaks if a directory name contains a dot;
    # os.path.splitext would be safer, but the output path would change.
    f_name = source.split(".")[0]
    json_path = "{0}-{1}.json".format(f_name, csv_type)
    results = []
    with open(source, "r", encoding="utf-8") as f:
        reader = csv.reader(f, delimiter=";")
        data = {i: j for i, j in enumerate(reader)}
        for i, line in data.items():
            if csv_type == "people" and i > 0:
                formatted_address = '{} {} {} {}'.format(line[2], line[3], line[4], line[5])
                people = {
                    'name': line[0],
                    'surname': line[1],
                    # split/join collapses duplicate spaces from empty columns
                    'address': ' '.join(formatted_address.split()),
                    'postcode': line[6],
                    'license': line[7],
                    'availability': line[8],
                    'time_of_day': line[9],
                    'area1': line[10],
                    'area2': line[11],
                    'area3': line[12],
                    'area4': line[13],
                }
                results.append(people)
            if csv_type == "hotel" and i > 0:
                if line[2] == "0":  # Only consider non removed hotel
                    formatted_address = "{} {}".format(line[7], line[9])
                    hotel = {
                        'hotel_status': line[2],
                        'nom': line[6],
                        'address': ' '.join(formatted_address.split()),
                        'postcode': line[8],
                        'capacity': line[41],
                        'bedroom_number': line[43],
                        # BUGFIX: the old code re-iterated the already
                        # exhausted `reader` (and shadowed `line`), so
                        # `features` was always 0. Sum the feature-flag
                        # columns 62..149 of the *current* row instead.
                        'features': sum(int(value) for value in line[62:150]),
                    }
                    results.append(hotel)
    if write:
        with open(json_path, "w", encoding="utf8") as outfile:
            json.dump(results, outfile, ensure_ascii=False)
        return json_path
    return results
if __name__ == "__main__":
    # CLI entry point: parse a hotel or people CSV and print the rows.
    parser = argparse.ArgumentParser(
        description="A scraper to obtain addresses from a csv"
    )
    parser.add_argument(
        "-t",
        "--csv_type",
        help="type of csv file to parse: `hotel` or `people`",
        type=str,
        default="hotel",
    )
    parser.add_argument("-s", "--source", help="path to the source csv file", type=str)
    args = parser.parse_args()

    # write=False: results are printed, not dumped to JSON.
    data = parse_csv(args.source, args.csv_type, write=False)
    print(data)
| [
"brisemeric@gmail.com"
] | brisemeric@gmail.com |
650652c44475815a610083d2dc1789b0fcc454a7 | 8e5ed733e258f585bec13044cc3381e32a079a47 | /manage.py | 5f0046d2c88b2f70c35f9d0dd9fb53bbcf07644a | [] | no_license | DNA5769/MyAnimeChecklist | dc797eb206e2bf655b668707cd16070a3c862a6f | b523230c84e2f3bde5c9091c91c161aa3e58e22e | refs/heads/main | 2023-04-15T21:32:30.421023 | 2021-05-03T20:09:29 | 2021-05-03T20:09:29 | 325,802,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Default the settings module to this project's settings; an already
    # exported DJANGO_SETTINGS_MODULE takes precedence (setdefault).
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MAC.settings')
    try:
        # Imported lazily so a missing Django produces the friendly error below
        # instead of an opaque import failure at module load time.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Hand the raw command line (e.g. runserver, migrate) to Django.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"dennisthomas2002@gmail.com"
] | dennisthomas2002@gmail.com |
f69d5a2c855b376f97d582465cd8c179d5452fa9 | ad570312a736a84e96c5178bc1af91c6c0b898fb | /pyth/linkedListCoppy.py | 4c1a565b380f1db25973401fb73c746e6d86da3f | [] | no_license | pritamSarkar123/Compitative20_21 | ad813d189b7388ea2179bb96f64eaa88ba75db32 | d5474b02487dc759c47e3da1047154533dc7b641 | refs/heads/master | 2023-01-14T12:04:51.756085 | 2020-11-26T09:31:02 | 2020-11-26T09:31:02 | 296,670,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | #problem description
#https://www.youtube.com/watch?v=xbpUHSKoALg&t=784s
# algorithm:
# #create intermediate nodes
# p=head
# q=NULL
# while p!=NULL:
# q=p
# p=p->next
# t=copy(q)
# q->next=t
# t->next=p
# #connecting new linked list
# p=head
# q=NULL
# while p!=NULL:
# q=p
# p=p->next->next
# q->next->random=q->random->next
# q=q->next
# if p!=NULL:
# q->next=p->next
# else:
#         q->next=NULL
# #changing head pointer
# head=head->next
class Node:
    """A linked-list node carrying a value, a successor and a random pointer."""

    def __init__(self, value):
        # Payload stored in this node.
        self.value = value
        # Successor node; None until the node is linked into a list.
        self.next = None
        # Extra pointer to an arbitrary node in the same list.
        self.random = None
        # Tag that distinguishes original nodes from their copies.
        self.message = "Original"
class LinkedList:
    """Hard-coded 3-node list (1 -> 2 -> 3) whose nodes also carry random
    pointers: node1 -> node3, node2 -> node1, node3 -> itself."""
    def __init__(self):
        # Build the chain 1 -> 2 -> 3.
        self.head=Node(1)
        temp=self.head
        temp.next=Node(2)
        temp=temp.next
        temp.next=Node(3)
        temp=temp.next
        # Wire the random pointers.
        temp=self.head
        temp.random=temp.next.next #1->3
        temp=temp.next
        temp.random=self.head #2->1
        temp=temp.next
        temp.random=temp #3->3
    def show_list(self):
        # Print "value message random.value" for every node starting at head.
        temp=self.head
        while temp:
            print(temp.value,temp.message,temp.random.value)
            temp=temp.next
    def copy_list(self):
        """Copy the list (including random pointers) without an auxiliary map
        by interleaving copies with originals, then print the copied list.

        NOTE(review): the original nodes' `next` pointers are left pointing at
        their copies afterwards (the interleaving is never undone), so the
        original list is no longer traversable on its own - confirm intended.
        """
        #create intermediate nodes: O1 -> C1 -> O2 -> C2 -> ...
        p=self.head
        q=None
        while p:
            q=p
            p=p.next
            temp=Node(q.value);temp.message="Coppied"
            q.next=temp
            temp.next=p
        #connecting new linked list: each copy's random = copy of original's random
        p=self.head
        q=None
        while p:
            q=p
            p=p.next.next
            q.next.random=q.random.next
            q=q.next
            if p:
                q.next=p.next
            else:
                q.next=None
        #changing head pointer
        self.head=self.head.next
        self.show_list()
if __name__=="__main__":
    # Demo: print the original list, then copy it (copy_list prints the copy).
    demo_list = LinkedList()
    demo_list.show_list()
    demo_list.copy_list()
| [
"pritamsarkar84208220@gmail.com"
] | pritamsarkar84208220@gmail.com |
e43990df097eed0fd0d9bf0fd0fbd95e1335b29e | f6dca66d7035845869158c617cad4c3ca74210d1 | /disentangle/topic_disc.py | 2b98dfe6fb67de42c4ea2d83337c29ab10ff5531 | [
"MIT"
] | permissive | anshiquanshu66/SAVED | 6c320d8da7c2ef83e47da48dfb907941fd858491 | 130d3401a0532b6102fe4c9fcba04a85a8169b7c | refs/heads/main | 2023-06-24T22:42:33.211993 | 2021-07-20T11:39:57 | 2021-07-20T11:39:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,125 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import json
import logging
import os
import torch
import engine
from dataset import corpora
from dataset import data_loaders
from models import conv_models
from utils import str2bool, prepare_dirs_loggers, get_time
import gen_utils
# Registry of argument groups plus the shared CLI parser used by the
# add_argument_group()/get_config() helpers below.
arg_lists = []
parser = argparse.ArgumentParser()
def add_argument_group(name):
    """Create a named argument group on the shared parser and register it."""
    group = parser.add_argument_group(name)
    arg_lists.append(group)
    return group
def get_config():
    """Parse the known CLI arguments, returning (namespace, leftover argv)."""
    parsed, leftovers = parser.parse_known_args()
    return parsed, leftovers
# Command-line option declarations, grouped by concern.
data_arg = add_argument_group('Data')
net_arg = add_argument_group('Network')
train_arg = add_argument_group('Training')
misc_arg = add_argument_group('Misc')
# Data
# NOTE(review): type=list makes argparse turn a command-line value into a list
# of single characters; only the default behaves as a list of file paths.
data_arg.add_argument('--data_dir', type=list, default=["data/output.json"])
#'data/veracity_en_c.json',
#"data/veracity_en_f.json",
#"data/veracity_en_g.json",
#"data/veracity_en_o.json",
#"data/veracity_en_s.json"])
data_arg.add_argument('--log_dir', type=str, default='logs')
# commonly changed
net_arg.add_argument('--d', type=int, default=6) #default 50
net_arg.add_argument('--k', type=int, default=10) #default 50
train_arg.add_argument('--window_size', type=int, default=20) #default 20
train_arg.add_argument('--max_epoch', type=int, default=25) #default 100
train_arg.add_argument('--ctx_head', type=str, default="none") #stance_pred / stance_adv / veracity_pred / veracity_adv
train_arg.add_argument('--tar_head', type=str, default="none") #stance_pred / stance_adv
net_arg.add_argument('--loss_mult', type=float, default=1/200) #todo: go back to 1/200
#data_arg.add_argument('--load_sess', type=str, default="t50_d50_w20")
# Network
net_arg.add_argument('--embed_size', type=int, default=200)
net_arg.add_argument('--hidden_size', type=int, default=500)
net_arg.add_argument('--max_vocab_cnt', type=int, default=50000)
# Training / test parameters
train_arg.add_argument('--op', type=str, default='adam')
train_arg.add_argument('--step_size', type=int, default=1)
train_arg.add_argument('--init_w', type=float, default=0.1)
train_arg.add_argument('--init_lr', type=float, default=0.0003) #0.001 is default
train_arg.add_argument('--momentum', type=float, default=0.0)
train_arg.add_argument('--lr_hold', type=int, default=1)
train_arg.add_argument('--lr_decay', type=float, default=0.6)
train_arg.add_argument('--use_l1_reg', type=str2bool, default=False)
train_arg.add_argument('--improve_threshold', type=float, default=0.996)
train_arg.add_argument('--patient_increase', type=float, default=4.0)
train_arg.add_argument('--early_stop', type=str2bool, default=False)
# MISC
misc_arg.add_argument('--output_vis', type=str2bool, default=False)
misc_arg.add_argument('--save_model', type=str2bool, default=True)
misc_arg.add_argument('--use_gpu', type=str2bool, default=True)
misc_arg.add_argument('--fix_batch', type=str2bool, default=False)
misc_arg.add_argument('--print_step', type=int, default=100)
misc_arg.add_argument('--ckpt_step', type=int, default=1000)
misc_arg.add_argument('--freeze_step', type=int, default=14500)
misc_arg.add_argument('--batch_size', type=int, default=32)
data_arg.add_argument('--token', type=str, default="")
# Module-level logger (root logger; configured by prepare_dirs_loggers in main).
logger = logging.getLogger()
def main(config):
    """Train the topic-discovery model on every configured event and report
    final stance metrics (confusion matrix and macro F1).

    :param config: argparse namespace from get_config(); must provide k, d,
        window_size, max_epoch, log_dir, data_dir, use_gpu, etc.
    """
    # Session directory name encodes the main hyper-parameters.
    config.session_dir = os.path.join(config.log_dir, "t"+str(config.k)+"_d"+str(config.d)+"_w"+str(config.window_size))
    # Create the session directory if needed. Unlike the previous bare
    # `try: os.mkdir(...) except: pass`, real failures (e.g. permissions)
    # are no longer silently swallowed.
    os.makedirs(config.session_dir, exist_ok=True)
    stance = []
    true_stance = []
    print("TRAINING (NOT FINETUNING)")
    print("Events: " + str(len(config.data_dir)))
    print("t" + str(config.k) + " d" + str(config.d) + " w" + str(config.window_size) + " e" + str(config.max_epoch))
    prepare_dirs_loggers(config, os.path.basename(__file__))
    # Train on each event/data file independently, accumulating predictions.
    for i in range(len(config.data_dir)):
        corpus_client = corpora.TwitterCorpus(config, i)
        conv_corpus = corpus_client.get_corpus_bow() #stance is included here!
        train_conv, valid_conv, test_conv, vocab_size = conv_corpus['train'],\
                                                        conv_corpus['valid'],\
                                                        conv_corpus['test'],\
                                                        conv_corpus['vocab_size']
        # create data loaders that feed the deep models
        train_feed = data_loaders.TCDataLoader("Train", train_conv, vocab_size, config) #stance should be loaded in correctly
        valid_feed = data_loaders.TCDataLoader("Valid", valid_conv, vocab_size, config)
        test_feed = data_loaders.TCDataLoader("Test", test_conv, vocab_size, config)
        # sequence-form corpus, kept around for generation/visualisation use
        conv_corpus_seq = corpus_client.get_corpus_seq()
        train_conv_seq, valid_conv_seq, test_conv_seq = conv_corpus_seq['train'], conv_corpus_seq['valid'], conv_corpus_seq['test'] #stance is here too in case needed, but i doubt it
        model = conv_models.TDM(corpus_client, config)
        if config.use_gpu:
            model.cuda()
        # NOTE(review): config.data_dir[i][-6] looks like the event letter from
        # a path such as "data/veracity_en_c.json" - confirm for other layouts.
        s, ts, v, tv = engine.train(model, train_feed, valid_feed, test_feed, config, config.data_dir[i][-6])
        stance += s
        true_stance += ts
    # Imported here because sklearn is only needed for the final report.
    from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
    a = confusion_matrix(true_stance, stance)
    print("FINAL RESULTS:")
    print("= Predicted F|T|U =")
    print(a)
    print("Stance Macro F1:", round(f1_score(true_stance, stance, average="macro"), 4))
if __name__ == "__main__":
    # Parse the known CLI options and launch training/evaluation.
    parsed_config, _ = get_config()
    main(parsed_config)
| [
"noreply@github.com"
] | noreply@github.com |
357153da0f198a8a44507ece863aaf188e86fb44 | 1b046f4f959fe9841c788a0b4e1016f17e267018 | /game_stats.py | 800f4fc0c7da4ee3b205d32c8a56559648208c70 | [] | no_license | belieffsy/practice-alien_invasion | 7973480d588fbd6ae4c6065da0b7869e1baa5025 | 0846fea2b65395742cb9de6a54639f5131085372 | refs/heads/master | 2020-04-06T21:11:09.281912 | 2018-11-30T03:39:20 | 2018-11-30T03:39:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py |
class GameStats():
    """Track statistics for the game."""

    def __init__(self, ai_settings):
        """Initialize statistics.

        :param ai_settings: settings object providing ship_limit
        """
        self.ai_settings = ai_settings
        self.reset_stats()
        # Start the game in an inactive state.
        self.game_active = False
        # The all-time high score should never be reset under any circumstances.
        self.read_high_score()

    def reset_stats(self):
        """Initialize statistics that can change during the game."""
        self.ships_left = self.ai_settings.ship_limit
        self.score = 0
        self.level = 1

    def read_high_score(self):
        """Load the persisted high score from high_score.txt.

        Falls back to 0 when the file is missing, empty, or corrupt instead
        of crashing on first run (fixes the unhandled FileNotFoundError /
        ValueError of the original implementation).
        """
        try:
            with open("high_score.txt", 'r') as f:
                lines = f.readlines()
        except FileNotFoundError:
            # No score has been saved yet (e.g. first launch).
            self.high_score = 0
            return
        try:
            self.high_score = int(lines[-1]) if lines else 0
        except ValueError:
            # Corrupt file contents: treat as no recorded high score.
            self.high_score = 0
| [
"noreply@github.com"
] | noreply@github.com |
68e264f1175e4500758f875b6b021e66b4625bc8 | 9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7 | /submission - Homework1/HW1 - Alex/index_nested_list.py | bbb999230c523e6e8d132b64459550cc015955c5 | [] | no_license | sendurr/spring-grading | 90dfdced6327ddfb5c311ae8f42ae1a582768b63 | 2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54 | refs/heads/master | 2020-04-15T17:42:10.781884 | 2016-08-29T20:38:17 | 2016-08-29T20:38:17 | 50,084,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # Alexis Thompson-Klemish
#
# Indexing exercises on a nested list. Converted to Python 3: the original
# used Python 2 print statements, which are a SyntaxError on Python 3.
q = [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h']]
# print the letter a
print(q[0][0])
# print the list ['d', 'e', 'f']
print(q[1])
# print the last element h
print(q[-1][-1])
# print the d element
print(q[1][0])
# explain why q[-1][-2] has the value g
print("negative indexes count from the right, not the left so q[-1] produces the rightmost list and q[-1][-2] produces the second to last element in the last list")
| [
"sendurr@hotmail.com"
] | sendurr@hotmail.com |
d5fa428f28b5f72ac1b246b16f8a93091fd13a8b | d2ead5ce523bb5a6c983fe4a4312ad43fe754ac0 | /evaluate_models.py | 6b1fa044133ee5742fbaa16b314e3cd7aeff7ee2 | [] | no_license | SereV94/MasterThesis | b7a0c5f5b40369df378496ce5a6527a4a4991970 | ee7c7b7b3c72d6badec71c617fab8ab6ebe9beca | refs/heads/master | 2022-11-28T09:31:50.985913 | 2020-08-09T17:25:20 | 2020-08-09T17:25:20 | 235,092,436 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,026 | py | #!/usr/bin/python
from helper import parse_dot, run_traces_on_model, dict2list, reduce_data_by_label, parse_symbolic_dot, run_traces_on_symbolic_model
from statistics import median
import pandas as pd
import numpy as np
import pickle
import re
import glob
from collections import defaultdict
from operator import add
import os
# Set debugging flag (if set to 0 the interactive version is used)
DEBUGGING = 1
def train_model(traces_filepath, indices_filepath, model, method, clustering_method=None, transformer=None):
    """
    Replay the training traces on the model and fit a detector in every visited state.

    The traces are first run on the model so that each state accumulates the
    records passing through it; the selected training method is then fitted on
    every state's observations. The root and states with fewer than three
    observations are skipped.
    :param traces_filepath: the filepath to the traces' file
    :param indices_filepath: the filepath to the traces' indices limits - used later for prediction
    :param model: the given model
    :param method: the training method (probabilistic | multivariate gaussian | clustering | baseline)
    :param clustering_method: the clustering algorithm when method is 'clustering', otherwise None
    :param transformer: flag showing if RobustScaler should be used to normalize and scale the data
    :return: the trained model
    """
    model = run_traces_on_model(traces_filepath, indices_filepath, model)
    # Initialise every state's weight from the model's (Laplace-smoothed) maximum.
    model.set_all_weights(model.get_maximum_weight(with_laplace=True), with_laplace=True)
    for label in model.nodes_dict:
        node = model.nodes_dict[label]
        # Skip the root and any state without enough observations to fit on.
        if label == 'root' or len(node.observed_indices) <= 2:
            continue
        if method == 'clustering':
            node.training_vars['clusterer'], node.training_vars['transformer'] = \
                node.fit_clusters_on_observed(clustering_method, transformer)
        elif method == "multivariate gaussian":
            node.training_vars['kernel'], node.training_vars['transformer'] = \
                node.fit_multivariate_gaussian(transformer)
        elif method == "probabilistic":
            node.training_vars['quantile_values'] = node.fit_quantiles_on_observed()
        else:
            node.training_vars['mi'], node.training_vars['si'], node.training_vars['transformer'] = \
                node.fit_baseline(transformer)
    return model
def predict_on_model(model, method, weighted=True):
    """
    Produce an aggregated prediction for every flow index seen during testing.

    Every state with test records predicts on them using the detector fitted
    during training (states with fewer than three training observations
    predict anomaly for all their test records). The per-state predictions of
    each flow index are then aggregated: a weight-normalised sum when
    `weighted` is True, the median otherwise.
    :param model: the given model
    :param method: the method used for training (selects the per-state predictor)
    :param weighted: flag for weighting each state's prediction by its weight
    :return: dict mapping flow index -> aggregated prediction
    """
    predictions = {}
    weights = {}
    for label in model.nodes_dict:
        node = model.nodes_dict[label]
        # Only non-root states holding test records can contribute predictions.
        if label == 'root' or len(node.testing_indices) == 0:
            continue
        if len(node.observed_indices) > 2:
            if method == 'clustering':
                pred = node.predict_on_clusters(node.training_vars['clusterer'],
                                                transformer=node.training_vars['transformer'])
            elif method == "multivariate gaussian":
                pred = node.predict_on_gaussian(node.training_vars['kernel'],
                                                node.training_vars['transformer'])
            elif method == "probabilistic":
                pred = node.predict_on_probabilities(node.training_vars['quantile_values'])
            else:
                pred = node.predict_on_baseline(node.training_vars['mi'], node.training_vars['si'],
                                                node.training_vars['transformer'])
        else:
            # if this state is unseen in training predict anomaly -> this shouldn't happen though
            print('State ' + label + ' has less than 3 observations!!!')
            pred = len(node.testing_indices) * [1]
        assert (len(pred) == len(node.testing_indices)), "Dimension mismatch!!"
        for score, ind in zip(pred, node.testing_indices):
            if weighted:
                predictions.setdefault(ind, []).append(score * node.weight)
                weights[ind] = weights.get(ind, 0) + node.weight
            else:
                predictions.setdefault(ind, []).append(score)
    # Aggregate the per-state predictions gathered for each flow index.
    if weighted:
        return dict((k, sum(v) / weights[k]) for k, v in predictions.items())
    return dict((k, median(v)) for k, v in predictions.items())
def produce_evaluation_metrics(predicted_labels, true_labels, detailed_labels, dst_ips, printing=True):
    """
    Compute evaluation metrics for the whole pipeline.

    Predicted labels are rounded (medians may be floats) and compared with the
    true labels to produce confusion counts, both in total and broken down by
    detailed label and - when destination IPs are given - by destination IP.
    :param predicted_labels: the predicted labels as a list
    :param true_labels: the true labels as a list
    :param detailed_labels: the detailed labels of the flows as a list
    :param dst_ips: the destination ips of each flow as a list (or None in case of connection level analysis)
    :param printing: a boolean flag that specifies if the results shall be printed too
    :return: TP, TN, FP, FN, accuracy, precision, recall, per-detailed-label
    counts, per-destination-IP counts (None when dst_ips is falsy)
    """
    # round is applied for rounding in cases of float medians
    rounded = [round(p) for p in predicted_labels]
    counts = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}
    detailed_results = {}
    conn_results = {} if dst_ips else None
    for i, truth in enumerate(true_labels):
        det_entry = detailed_results.setdefault(detailed_labels[i], {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0})
        # Classify this flow's outcome against the ground truth.
        if truth == 1:
            outcome = 'TP' if rounded[i] == truth else 'FN'
        else:
            outcome = 'TN' if rounded[i] == truth else 'FP'
        counts[outcome] += 1
        det_entry[outcome] += 1
        if conn_results is not None:
            conn_results.setdefault(dst_ips[i], {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0})[outcome] += 1
    TP, TN, FP, FN = counts['TP'], counts['TN'], counts['FP'], counts['FN']
    accuracy = (TP + TN) / (TP + TN + FP + FN)
    # -1 signals an undefined metric (no positive predictions / positives).
    precision = -1 if TP + FP == 0 else TP / (TP + FP)
    recall = -1 if TP + FN == 0 else TP / (TP + FN)
    if printing:
        print('TP: ' + str(TP) + ' TN: ' + str(TN) + ' FP: ' + str(FP) + ' FN:' + str(FN))
        print('Accuracy: ' + str(accuracy))
        print('Precision: ' + str(precision))
        print('Recall: ' + str(recall))
    return TP, TN, FP, FN, accuracy, precision, recall, detailed_results, conn_results
def print_total_results(results):
    """
    Print the aggregated results accumulated per connection for every tested scenario.

    Only entries whose key contains 'total' are reported. Models whose name
    contains 'symbolic' carry a single ratio; all other models carry
    [TP, TN, FP, FN] counts from which accuracy/precision/recall are derived.
    :param results: dict of test-set name -> {model name -> accumulated values}
    :return:
    """
    total_sets = [name for name in results.keys() if 'total' in name]
    for set_name in total_sets:
        print('-------------------- Total results for ' + set_name + ' --------------------')
        for model_name, stats in results[set_name].items():
            print('---- Model ' + model_name + ' ----')
            if 'symbolic' in model_name:
                # Symbolic models only accumulate a single ratio value.
                print('Ratio: ' + str(stats[0]))
            else:
                tp, tn, fp, fn = stats[0], stats[1], stats[2], stats[3]
                acc = (tp + tn) / (tp + tn + fp + fn)
                # -1 signals an undefined metric (zero denominator).
                prec = -1 if tp + fp == 0 else tp / (tp + fp)
                rec = -1 if tp + fn == 0 else tp / (tp + fn)
                print('TP: ' + str(tp) + ' TN: ' + str(tn) + ' FP: ' + str(fp) + ' FN:' + str(fn))
                print('Accuracy: ' + str(acc))
                print('Precision: ' + str(prec))
                print('Recall: ' + str(rec))
if __name__ == '__main__':
    # Entry point: build (model, trace, method) parameter combinations, train
    # one model per combination, then evaluate every trained model on every
    # testing trace and persist the collected results.
    # set flag for baseline results (for labelling of the results file only)
    baseline_only = False
    # set flag for static window (for labelling of the results file only)
    static = True
    # flag for modifications in model evaluation for state-of-the-art experiments
    sota = True
    if DEBUGGING:
        # for debugging purposes the following structures can be used
        debug_model_filepaths = sorted(glob.glob(
            'outputs/CTU13/host_level/src_port_dst_port_protocol_num_src_bytes_dst_bytes/scenario3-*_resampled_reduced_static_dfa.dot'),
            key=lambda item: re.search('-(.+?)_', item.split('/')[-1]).group(1))
        debug_train_trace_filepaths = sorted(glob.glob(
            'Datasets/CTU13/training/host_level/src_port_dst_port_protocol_num_src_bytes_dst_bytes/scenario3-*-traces_resampled_reduced_static.txt'),
            key=lambda item: re.search('-(.+)-', item.split('/')[-1]).group(1))
        debug_methods = [
            # 'clustering'
            # , 'multivariate gaussian'
            # , 'probabilistic'
            'baseline multivariate'
            # 'baseline symbolic'
        ]
        debug_clustering_methods = [
            'LOF'
            , 'isolation forest'
        ]
        parameters = []
        for model_filepath, trace_filepath in zip(debug_model_filepaths, debug_train_trace_filepaths):
            # check that the right model and trace files are used
            assert (re.search('-(.+?)_', model_filepath.split('/')[-1]).group(1) ==
                    re.search('-(.+)-', trace_filepath.split('/')[-1]).group(1)), "Model-trace mismatch!!"
            for method in debug_methods:
                if method == 'clustering':
                    for clutering_method in debug_clustering_methods:
                        parameters += [(model_filepath, trace_filepath, method, clutering_method)]
                else:
                    parameters += [(model_filepath, trace_filepath, method)]
        flag = 'CTU-bi'
        n = len(parameters)
    else:
        flag = int(input('Provide the type of dataset to be used: '))
        n = int(input('Provide the number of models to be trained: '))
    models = []
    methods = []
    models_info = []
    # Train (or, for the symbolic baseline, just parse) each configured model.
    for i in range(n):
        if DEBUGGING:
            model_filepath = parameters[i][0]
            traces_filepath = parameters[i][1]
            method = parameters[i][2]
        else:
            model_filepath = input('Give the relative path of the model to be used for training: ')
            traces_filepath = input('Give the relative path of the trace to be used for training on the given model: ')
            method = input('Give the name of the training method to be used (clustering | multivariate gaussian | '
                           'probabilistic): ')
        # parse the model from the model dot file
        if method != 'baseline symbolic':
            model = parse_dot(model_filepath)
            indices_filepath = '.'.join(traces_filepath.split('.')[:-1]) + '_indices.pkl'
        else:
            model = parse_symbolic_dot(model_filepath)
            indices_filepath = ''
        clustering_method = None
        if method == 'clustering':
            if DEBUGGING:
                clustering_method = parameters[i][3]
            else:
                clustering_method = input('Provide the specific clustering method to be used (hdbscan | isolation forest '
                                          '| LOF | kmeans): ')
        # train the model
        print('Training on ' + '.'.join(model_filepath.split('/')[-1].split('.')[0:-1]) + '_' + method + '-' +
              (clustering_method if clustering_method is not None else '') + '...')
        # in case the baseline symbolic method is used then there is no need of replaying the training data on the model
        if method != 'baseline symbolic':
            models += [train_model(traces_filepath, indices_filepath, model, method, clustering_method=clustering_method)]
        else:
            models += [model]
        methods += [method + '-' + (clustering_method if clustering_method is not None else '')]
        # list used for better presentation of the results later on
        models_info += ['.'.join(model_filepath.split('/')[-1].split('.')[0:-1]) + '_' + methods[-1]]
    # start testing on each trained model
    if DEBUGGING:
        # get the testing traces filepath pattern
        debug_test_trace_filepaths = sorted(glob.glob('Datasets/CTU13/test/host_level/src_port_dst_port_protocol_num_src_bytes_dst_bytes/scenario9-*-traces_static.txt'))
        debug_test_set_filepaths = list(map(lambda x: '/'.join(x.split('/')[0:2]) + '/'
                                                      + '-'.join(x.split('/')[-1].split('-')[:(-3 if 'connection' in x
                                                                                               else -2)]),
                                            debug_test_trace_filepaths))
        debug_test_filepaths = list(zip(debug_test_trace_filepaths, debug_test_set_filepaths))
        m = len(debug_test_filepaths)
    else:
        m = int(input('Provide the number of testing sets: '))
    # results: test-trace name -> {model info string -> metrics tuple}
    results = defaultdict(dict)
    # keep a value showing the last test set tested so that the accumulation of the aggregated results can be refreshed
    prev_test_path = ''
    accumulated_results = defaultdict(list)
    for j in range(m):
        if DEBUGGING:
            test_traces_filepath = debug_test_filepaths[j][0]
        else:
            test_traces_filepath = input('Give the relative path of the testing traces to be used for evaluation: ')
        if 'encoding' not in test_traces_filepath:
            indices_filepath = '.'.join(test_traces_filepath.split('.')[:-1]) + '_indices.pkl'
        else:
            indices_filepath = ''
        # initialize the entry in the results dictionary for the current testing trace file
        test_trace_name = '.'.join(test_traces_filepath.split('/')[-1].split('.')[0:-1])
        print('-------------------------------- Evaluating on ' + test_trace_name + ' --------------------------------')
        results[test_trace_name] = dict()
        # and retrieve the IPs to use for true label extraction
        ips = []
        for ip_tuple in re.findall("-(\d+\.\d+\.\d+\.\d+)|-([^-]+::[^-]+:[^-]+:[^-]+:[^-]+)", test_traces_filepath):
            ips += [ip_tuple[0] if ip_tuple[0] != '' else ip_tuple[1]]
        # retrieve the actual dataset so that the true labels can be extracted
        if DEBUGGING:
            test_data_filepath = debug_test_filepaths[j][1]
        else:
            test_data_filepath = input('Give the relative path of the testing dataframe to be used for evaluation: ')
        if flag == 'CTU-bi':
            normal = pd.read_pickle(test_data_filepath + '/binetflow_normal.pkl') if not sota else \
                pd.read_pickle(test_data_filepath + '/binetflow_normal_sota.pkl')
            anomalous = pd.read_pickle(test_data_filepath + '/binetflow_anomalous.pkl')
        else:
            normal = pd.read_pickle(test_data_filepath + '/normal.pkl')
            anomalous = pd.read_pickle(test_data_filepath + '/anomalous.pkl')
        all_data = pd.concat([normal, anomalous], ignore_index=True).reset_index(drop=True)
        # keep only the flows currently under evaluation based on the ips extracted from the testing traces' filepath
        # and sort values by date
        if len(ips) == 1:
            # host level analysis
            if 'bdr' in test_traces_filepath:
                all_data = all_data[(all_data['src_ip'] == ips[0]) | (all_data['dst_ip'] == ips[0])]\
                    .sort_values(by='date').reset_index(drop=True)
            else:
                all_data = all_data[all_data['src_ip'] == ips[0]].sort_values(by='date').reset_index(drop=True)
        else:
            # connection level analysis
            if 'bdr' in test_traces_filepath:
                all_data = all_data[((all_data['src_ip'] == ips[0]) & (all_data['dst_ip'] == ips[1])) |
                                    ((all_data['dst_ip'] == ips[0]) & (all_data['src_ip'] == ips[1]))] \
                    .sort_values(by='date').reset_index(drop=True)
            else:
                all_data = all_data[(all_data['src_ip'] == ips[0]) & (all_data['dst_ip'] == ips[1])]\
                    .sort_values(by='date').reset_index(drop=True)
        true_labels = all_data['label'].values
        # keep also the detailed labels for analysis reasons
        if flag == 'UNSW':
            detailed_labels = all_data['detailed_label'].values.tolist()
        else:
            detailed_labels = all_data['label'].values.tolist()
        # keep also the destination IPs in case we are on host level analysis -> again for analysis reasons
        dst_ips = None
        if len(ips) == 1:
            dst_ips = all_data['dst_ip'].values.tolist()
        # keep one dictionary to aggregate the results of each model over all flows on the test set
        if prev_test_path != test_data_filepath:
            if len(prev_test_path):
                results[prev_test_path + '-total'] = accumulated_results
            accumulated_results = defaultdict(list)
            prev_test_path = test_data_filepath
        # Evaluate every trained model on the current testing trace.
        for i in range(n):
            print("Let's use model " + models_info[i] + '!!!')
            if methods[i].split('-')[0] != 'baseline symbolic':
                models[i].reset_attributes(attribute_type='test')
                models[i].reset_indices(attribute_type='test')
                models[i] = run_traces_on_model(test_traces_filepath, indices_filepath, models[i], 'test')
                predictions = predict_on_model(models[i], methods[i].split('-')[0])
                assert (len(predictions.keys()) == np.size(true_labels, 0)), \
                    "Dimension mismatch between true and predicted labels!!"
                # Save the results as a dictionary of dictionaries with the first level keys being the test set name,
                # the second level keys being the training model information, and the values being the results
                if flag == 'CTU-bi':
                    results[test_trace_name][models_info[i]] = produce_evaluation_metrics(dict2list(predictions),
                                                                                          list(map(lambda x: 1
                                                                                          if 'Botnet' in x
                                                                                          else 0, true_labels.tolist())),
                                                                                          detailed_labels, dst_ips)
                    # early stop for state-of-the-art experiments models
                    if sota:
                        temp_TP, temp_TN, temp_FP, temp_FN = results[test_trace_name][models_info[i]][0:4]
                        if (temp_TN + temp_FN) / (temp_TP + temp_TN + temp_FP + temp_FN) > 0.15:
                            break
                elif flag == 'UNSW':
                    results[test_trace_name][models_info[i]] = produce_evaluation_metrics(dict2list(predictions),
                                                                                          true_labels.tolist(),
                                                                                          detailed_labels, dst_ips)
                else:
                    results[test_trace_name][models_info[i]] = produce_evaluation_metrics(dict2list(predictions),
                                                                                          list(map(lambda x: 1
                                                                                          if x != 'BENIGN'
                                                                                          else 0, true_labels.tolist())),
                                                                                          detailed_labels, dst_ips)
                # update also the accumulated results | only TP, TN, FP, FN are passed
                if len(accumulated_results[models_info[i]]):
                    accumulated_results[models_info[i]] = list(map(add, accumulated_results[models_info[i]],
                                                                   results[test_trace_name][models_info[i]][0:4]))
                else:
                    accumulated_results[models_info[i]] = list(results[test_trace_name][models_info[i]][0:4])
            else:
                assert (re.search('-(.+?)_', models_info[i].split('/')[-1]).group(1) ==
                        re.search('-(.+)-', parameters[i][1].split('/')[-1]).group(1)), \
                    "Model-trace mismatch in symbolic version!!"
                ratio = run_traces_on_symbolic_model(test_traces_filepath, models[i], eval_method='error',
                                                     train_path=parameters[i][1])  # the training trace filepath
                # in the symbolic case the label of the aggregation entity is given at this point
                if flag == 'CTU-bi':
                    agg_label = any(map(lambda x: 'Botnet' in x, true_labels.tolist()))
                elif flag == 'UNSW':
                    agg_label = any(map(lambda x: x == 1, true_labels.tolist()))
                else:
                    agg_label = any(map(lambda x: x != 'BENIGN', true_labels.tolist()))
                print('Ratio: ' + str(ratio))
                print('Is Malicious: ' + str(agg_label))
                results[test_trace_name][models_info[i]] = ratio, agg_label
                if len(accumulated_results[models_info[i]]):
                    accumulated_results[models_info[i]] = list(map(add, accumulated_results[models_info[i]],
                                                                   results[test_trace_name][models_info[i]][0:1]))
                else:
                    accumulated_results[models_info[i]] = list(results[test_trace_name][models_info[i]][0:1])
    # one last addition of the accumulated results in the results dict
    results[prev_test_path + '-total'] = accumulated_results
    # print the aggregated results for each test set
    print('------------------------- Aggregated results per test set -------------------------')
    print_total_results(results)
    # finally save all the results for each testing trace
    if DEBUGGING:
        results_filename = '/'.join(debug_test_trace_filepaths[0].split('/')[0:2]) + '/' + 'results/' + \
                           debug_test_trace_filepaths[0].split('/')[4] + '/' + \
                           '-'.join(set(map(lambda x: x.split('/')[-1], debug_test_set_filepaths))) + \
                           ('_baseline' if baseline_only else '') + ('_static' if static else '') + '_dfa_results.pkl'
        # create the directory if it does not exist
        os.makedirs(os.path.dirname(results_filename), exist_ok=True)
    else:
        results_filename = input('Provide the relative path for the filename of the results: ')
    with open(results_filename, 'wb') as f:
        pickle.dump(results, f, protocol=pickle.HIGHEST_PROTOCOL)
| [
"seredellos@gmail.com"
] | seredellos@gmail.com |
e9b8cede519e8eecea7841170591be6130caa3c8 | 7032fd0d1652cc1bec1bff053af4f486a5704cd5 | /old/ptex_2.3.2/conanfile.py | e984816bc3004d486cfd3d4515007495bbaaa826 | [] | no_license | MercenariesEngineering/conan_recipes | c8f11ddb3bd3eee048dfd476cdba1ef84b85af5e | 514007facbd1777799d17d041fc34dffef61eff8 | refs/heads/master | 2023-07-09T08:10:35.941112 | 2023-04-19T13:36:38 | 2023-04-19T13:36:38 | 169,575,224 | 7 | 1 | null | 2023-04-19T14:11:35 | 2019-02-07T13:23:02 | C++ | UTF-8 | Python | false | false | 3,008 | py | from conans import ConanFile, CMake, tools
import os
class Ptex(ConanFile):
    """Conan recipe building Ptex, Disney's per-face texture mapping library."""
    name = "ptex"
    version = "2.3.2"
    license = "Apache 2.0"
    description = "Per-Face Texture Mapping for Production Rendering"
    url = "https://github.com/wdas/ptex"
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False], "fPIC": [True, False]}
    requires = "zlib/1.2.11@conan/stable"
    default_options = "shared=False", "fPIC=True"
    generators = "cmake"
    def configure(self):
        """Drop the fPIC option on Windows, where it does not apply."""
        if self.settings.os == "Windows":
            self.options.remove("fPIC")
    def source(self):
        """Fetch and unpack the release tarball, then patch the upstream
        CMake files so zlib is resolved through Conan instead of pkg-config."""
        # https://github.com/wdas/ptex/archive/v2.3.2.tar.gz
        filename = "v%s.tar.gz" % self.version
        tools.download("https://github.com/wdas/ptex/archive/%s" % filename, filename)
        tools.untargz(filename)
        os.unlink(filename)
        # The search strings below (including the 'pkg_checK_modules' spelling)
        # must match the upstream files byte for byte for replace_in_file to work.
        tools.replace_in_file("ptex-%s/CMakeLists.txt" % self.version,
            """# Use pkg-config to create a PkgConfig::Ptex_ZLIB imported target
find_package(PkgConfig REQUIRED)
pkg_checK_modules(Ptex_ZLIB REQUIRED zlib IMPORTED_TARGET)""",
            """include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
find_package(ZLIB)""")
        tools.replace_in_file("ptex-%s/src/build/ptex-config.cmake" % self.version,
            """# Provide PkgConfig::ZLIB to downstream dependents
find_package(PkgConfig REQUIRED)
pkg_checK_modules(Ptex_ZLIB REQUIRED zlib IMPORTED_TARGET)""",
            "")
        tools.replace_in_file("ptex-%s/src/ptex/CMakeLists.txt" % self.version,
            """PkgConfig::Ptex_ZLIB""",
            """${ZLIB_LIBRARIES}""")
        tools.replace_in_file("ptex-%s/src/utils/CMakeLists.txt" % self.version,
            """PkgConfig::Ptex_ZLIB""",
            """${ZLIB_LIBRARIES}""")
    def build(self):
        """Configure and run the CMake build with Conan-driven settings."""
        cmake = CMake(self)
        # Version metadata passed into Ptex's CMake build.
        cmake.definitions["PTEX_SHA"] = "1b8bc985a71143317ae9e4969fa08e164da7c2e5"
        cmake.definitions["PTEX_VER"] = self.version
        cmake.definitions["PTEX_BUILD_SHARED_LIBS"] = self.options.shared
        cmake.definitions["PTEX_BUILD_STATIC_LIBS"] = not self.options.shared
        cmake.definitions["ZLIB_ROOT"] = os.path.join( self.deps_cpp_info[ "zlib" ].libdirs[ 0 ], "../" )
        if ("fPIC" in self.options.fields and self.options.fPIC == True):
            cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = True
        #if self.settings.os == "Linux" and find_executable( "lld" ) is not None:
        #    cmake.definitions[ "CMAKE_SHARED_LINKER_FLAGS" ] = "-fuse-ld=lld"
        #    cmake.definitions[ "CMAKE_EXE_LINKER_FLAGS" ] = "-fuse-ld=lld"
        cmake.configure(source_dir="ptex-%s" % self.version)
        cmake.build()
    def package(self):
        """Copy headers and static/import libraries into the package folder."""
        self.copy("*.h", dst="include/", keep_path=False)
        self.copy("lib/Ptex.lib", dst="lib", keep_path=False)
        self.copy("*.a", dst="lib", keep_path=False)
    def package_info(self):
        """Expose every packaged library to consumers of this package."""
        self.cpp_info.libs = tools.collect_libs(self)
| [
"tdelame@gmail.com"
] | tdelame@gmail.com |
8b5fa9d7d5f9d64a8aa54a63c57b1dbe5ddb945a | 0c0ba361a75cfb57b0325291b52256e83c470708 | /ex_cliente_encaps.py | fac2fb54519886995dac34730baade44fb3a00ce | [] | no_license | Anna-Beatriz/lp2 | 3c598b76e76e0b7f0256d6658bb9d22f69b9a417 | 30404fc6418b92d45e866c938ddebde110fd74d3 | refs/heads/master | 2021-02-19T00:10:33.039071 | 2020-04-24T20:36:30 | 2020-04-24T20:36:30 | 245,255,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | class Cliente:
    def __init__(self, nome, cpf, senha):
        """Store the client's name plus private CPF and password fields."""
        self.nome = nome
        # Name-mangled ("private") attributes, exposed only through accessors.
        self.__cpf = cpf
        self.__senha = senha
    def get_cpf(self):
        """Return the client's CPF."""
        return self.__cpf
    def get_senha(self):
        """Return the client's password (senha)."""
        return self.__senha
    def set_cpf(self, cpf):
        """Replace the stored CPF."""
        self.__cpf = cpf
class ContaBancaria:
    """A bank account owned by a Cliente, guarded by the owner's password.

    The balance lives in a name-mangled attribute and changes only through
    depositar()/sacar(), both of which validate the password first.
    """
    def __init__(self, numero, cliente):
        self.numero = numero
        self.cliente = cliente
        self.__saldo = 0
    def get_saldo(self):
        """Return the current balance."""
        return self.__saldo
    def _senha_valida(self, senha):
        # Bug fix: the original compared passwords with `is` (object identity),
        # which only succeeds for interned string literals; `==` compares values.
        return self.cliente.get_senha() == senha
    def depositar(self, valor, senha):
        """Add `valor` to the balance if `senha` matches the owner's password."""
        if self._senha_valida(senha):
            self.__saldo += valor
        else:
            print("Senha inválida!")
    def sacar(self, valor, senha):
        """Subtract `valor` from the balance if `senha` matches the owner's password."""
        if self._senha_valida(senha):
            self.__saldo -= valor
        else:
            print("Senha inválida!")
# Demo: exercise the account with the correct and an incorrect password.
cliente1 = Cliente("João", "111111111", "123")
conta = ContaBancaria(1111, cliente1)
conta.depositar(200, "123")
print(conta.get_saldo()) # Prints 200
conta.sacar(50, "123")
print(conta.get_saldo()) # Prints 150
conta.depositar(100, "111") # invalid password
print(conta.get_saldo()) # Prints 150
conta.sacar(50, "111") # invalid password
print(conta.get_saldo()) # Prints 150
| [
"annabeatriz.ms1@gmail.com"
] | annabeatriz.ms1@gmail.com |
f81d5965412de401884bc6ee5031ef9a0b341a68 | fbb1550dc5437d672ed0137bd7711eba3290dee3 | /students/thomas_sulgrove/lesson08/assignment/test_inventory.py | b6e6b03a82d2e4a30955d31ed0914aa95134953a | [] | no_license | JavaRod/SP_Python220B_2019 | 2cc379daf5290f366cf92dc317b9cf68e450c1b3 | 5dac60f39e3909ff05b26721d602ed20f14d6be3 | refs/heads/master | 2022-12-27T00:14:03.097659 | 2020-09-27T19:31:12 | 2020-09-27T19:31:12 | 272,602,608 | 1 | 0 | null | 2020-06-16T03:41:14 | 2020-06-16T03:41:13 | null | UTF-8 | Python | false | false | 1,769 | py | """unit tests for inventory.py"""
import os
from unittest import TestCase
from inventory import add_furniture, single_customer
def scrub_test_file(file_name):
    """Delete file_name if present; a missing file is not an error."""
    # OSError covers the file-not-found case on a fresh test run.
    try:
        os.remove(file_name)
    except OSError:
        pass
class TestBasicOps(TestCase):
    """Unit tests for the csv-based rental helpers in inventory.py."""
    def test_add_furniture(self):
        """Each add_furniture call appends one customer/item row to the csv."""
        scrub_test_file("rented_items.csv")
        add_furniture("rented_items.csv", "Elisa Miles", "LR04", "Leather Sofa", 25)
        add_furniture("rented_items.csv", "Edward Data", "KT78", "Kitchen Table", 10)
        add_furniture("rented_items.csv", "Alex Gonzales", "BR02", "Queen Mattress", 17)
        with open("rented_items.csv", newline="") as file:
            rows = list(file)
        self.assertEqual(rows[0], 'Elisa Miles,LR04,Leather Sofa,25.0\r\n')
        self.assertEqual(rows[1], 'Edward Data,KT78,Kitchen Table,10.0\r\n')
        self.assertEqual(rows[2], 'Alex Gonzales,BR02,Queen Mattress,17.0\r\n')
    def test_single_customer(self):
        """single_customer returns a closure that rents every listed item to one name."""
        scrub_test_file("rented_items.csv")
        rent_to_susan = single_customer("rented_items.csv", "Susan Wong")
        rent_to_susan("data.csv")
        with open("rented_items.csv", newline="") as file:
            rows = list(file)
        self.assertEqual(rows[0], 'Susan Wong,LR04,Leather Sofa,25.0\r\n')
        self.assertEqual(rows[1], 'Susan Wong,KT78,Kitchen Table,10.0\r\n')
        self.assertEqual(rows[2], 'Susan Wong,BR02,Queen Mattress,17.0\r\n')
| [
"tsulgrove@gmail.com"
] | tsulgrove@gmail.com |
17182f5cae79f76332304a2abd4a7f9acf5a1442 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/layout/ternary/aaxis/_showticksuffix.py | 2a1b061ea73eecf6d8c074e3d8662b7cb67f3748 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 533 | py | import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.ternary.aaxis.showticksuffix``."""

    def __init__(self, plotly_name='showticksuffix',
                 parent_name='layout.ternary.aaxis', **kwargs):
        # Accepted values mirror the plotly.js schema for this attribute.
        super(ShowticksuffixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='plot',
            role='style',
            values=['all', 'first', 'last', 'none'],
            **kwargs
        )
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
43769e07cfb176ffe2561227bd4f5b708fadbad3 | e30c23f90c37a45730aaf5366d30ff521d91a28a | /Leetcode 101/深入浅出动态规划/子序列问题/1143-medium-最长公共子序列.py | 08bf11a5a4ed57d1273e53032c75e7aeaaa1ac57 | [] | no_license | MaiziXiao/Algorithms | 0acf8ca5150deb730d32a14ac68dd78ea70d202c | f6a883b4c0d1fe06609f92f7ad8ea5317a567795 | refs/heads/master | 2023-02-13T00:29:01.634488 | 2021-01-14T14:06:45 | 2021-01-14T14:06:45 | 97,946,136 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,990 | py | # 1.1. 最长公共子序列(Longest-Common-Subsequences,LCS)
# The longest-common-subsequence (LCS) problem asks, for a set of sequences
# (usually two), for the longest subsequence common to all of them. Unlike the
# longest-common-substring problem, a subsequence need not occupy consecutive
# positions in the original sequence. LCS is a classic computer-science problem;
# it underlies data-comparison tools such as diff and bioinformatics
# applications, and is widely used in version control (e.g. Git) to reconcile
# changes between files.
# 1.2 Longest common substring (Longest-Common-Substring, LCS)
# The longest-common-substring problem looks for the longest string that is a
# substring of two or more known strings; it differs from the subsequence
# problem in that a substring must be contiguous.
class Solution:
    def longestCommonSubsequence(self, text1: str, text2: str) -> int:
        """Return the length of the longest common subsequence of the inputs.

        A subsequence keeps the relative order of characters but need not be
        contiguous ("ace" is a subsequence of "abcde", "aec" is not). Classic
        O(m*n) dynamic programming: dp[i][j] is the LCS length of text1[:i]
        and text2[:j]; the answer is dp[m][n], and 0 when there is no common
        subsequence.

        Examples:
            text1="abcde", text2="ace" -> 3  ("ace")
            text1="abc",   text2="abc" -> 3  ("abc")
        """
        rows, cols = len(text1), len(text2)
        dp = [[0] * (cols + 1) for _ in range(rows + 1)]
        for i, ch1 in enumerate(text1):
            for j, ch2 in enumerate(text2):
                if ch1 == ch2:
                    # Matching characters extend the LCS of both prefixes.
                    dp[i + 1][j + 1] = dp[i][j] + 1
                else:
                    # Otherwise drop one character from either string.
                    dp[i + 1][j + 1] = max(dp[i + 1][j], dp[i][j + 1])
        return dp[rows][cols]
# Smoke check: the LCS of "abcde" and "ace" is "ace" (length 3).
Solution().longestCommonSubsequence(text1="abcde", text2="ace")
| [
"linchen.xiao@metronom.com"
] | linchen.xiao@metronom.com |
5fc5d33ab5a3e9925b7cd21a3f6b46e5b4456f00 | 501a6115aaad277bbf42fd782965b6387c981b1a | /Chapter_10/01_A_Tale_of_Two_ifs.py | 512ecb8c7597a1bfb3f34a89fce0562304524deb | [] | no_license | dackour/python | 371a12efaeca7e4f1888b12d181c88f010691766 | 82e469a40fc73ed7cc6742e6bb86aea22378ee5d | refs/heads/master | 2020-07-27T02:59:02.945729 | 2020-04-05T10:35:07 | 2020-04-05T10:35:07 | 208,845,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # Statements
x = 2
y = 1
# Basic if: the suite runs only while x holds the larger value.
if x > y:
    x = 1
    y = 2
print(x)
print(y)
# Indentation alone decides which 'if' an 'else' belongs to:
# this unindented else pairs with the outer if.
if x:
    if y:
        print()
else:
    print()
# Semicolons allow several simple statements on one physical line.
a = 1; b = 2; print(a + b) # Three statements on one line
# An open bracket lets a literal span multiple lines without continuations.
mylist = [1111,
          2222,
          3333]
A, B, C, D = 1, 2, 3, 4
# Parentheses likewise continue an expression across lines.
X = (A + B +
     C + D)
if (A == 1 and
    B == 2 and
    C == 3):
    print('spam' * 3)
# Backslash continuation still works, but parentheses are preferred.
X = A + B + \
    C + D # An error prone old alternative
# A simple statement may share the header line...
if x > y: print(x)
# ...or sit indented on the following line.
if x > y:
    print(x)
| [
"bartosz.smela@gmail.com"
] | bartosz.smela@gmail.com |
6db33e398073ce4c522be21e5b50608e08cd4dc3 | 3b59a41f57045c585f8f1de5db7febcde1ce2525 | /Instancias/serializers.py | 5420db2007b93896fb153aa6940e3948b2fe1d4f | [] | no_license | elioclimaco/SIJWS | 1a82d2f00a309cdce0722dd3c3b2669d6466cec9 | f035416325eef8b5b00b3ce2a81517700b39a635 | refs/heads/master | 2020-05-26T07:22:10.721399 | 2014-10-20T20:48:24 | 2014-10-20T20:48:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | # -*- coding: utf-8 -*-
__author__ = 'Elio Clímaco'
from rest_framework import serializers
from .models import *
#
# Órganos Jurisdiccionales
#
class JuzgadoSerializer(serializers.ModelSerializer):
    """Serializes an ``instancia`` row as a flat {id, text} node."""
    # Rename the model's DB-style columns to generic node attributes.
    id = serializers.Field(source='c_instancia')
    text = serializers.Field(source='x_nom_instancia')
    class Meta:
        model = instancia
        fields = ('id', 'text')
class SedeSerializer(serializers.ModelSerializer):
    """Serializes a ``sede`` row as {id, text, children} with nested nodes."""
    id = serializers.Field(source='c_sede')
    text = serializers.Field(source='x_desc_sede')
    # 'sede' is presumably the reverse relation from sede to its instancias
    # -- TODO confirm against the model definitions.
    children = JuzgadoSerializer(many=True, source='sede')
    class Meta:
        model = sede
        fields = ('id', 'text', 'children')
        #fields = ('id', 'text')
class DistritoSerializer(serializers.ModelSerializer):
    """Serializes a ``distrito_judicial`` row with its sedes nested below it."""
    id = serializers.Field(source='c_distrito')
    text = serializers.Field(source='x_nom_distrito')
    children = SedeSerializer(many=True, source='distrito')
    class Meta:
        model = distrito_judicial
        fields = ('id', 'text', 'children')
#fields = ('id', 'text') | [
"elioclimaco@gmail.com"
] | elioclimaco@gmail.com |
cf4b67c14d7a1b9856437ecb6e313e98a2c15a74 | 30c23852ae41a7808e2a202280e973ff1a4bbe2b | /OP/op.py | 9217ca651fa7b0366b9ae90cc341da5a83482f7b | [] | no_license | rohe/oidc-oob-federation | 050ce05a1bd373795bc74c63287edeccbf1c3129 | 53517decc43f4d58aa7b825feb8c97704de8822f | refs/heads/master | 2020-03-18T04:04:10.383031 | 2018-06-07T12:15:55 | 2018-06-07T12:15:55 | 134,267,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,153 | py | import logging
import cherrypy
from cryptojwt import as_bytes
from oidcmsg.oauth2 import is_error_message
from oidcmsg.oauth2 import AuthorizationRequest
from oidcendpoint.sdb import AuthnEvent
logger = logging.getLogger(__name__)
class OpenIDProvider(object):
    """CherryPy application object exposing the OpenID Provider endpoints.

    Incoming request paths are mapped onto the endpoints registered in
    ``endpoint_context`` by the ``_cp_dispatch`` hook.
    """
    def __init__(self, config, endpoint_context):
        self.config = config
        self.endpoint_context = endpoint_context
    def do_response(self, endpoint, req_args, **args):
        """Build the HTTP response for *endpoint* and deliver it.

        Applies the HTTP headers produced by the endpoint, then either
        returns the response body or raises a redirect, depending on the
        endpoint's response placement.
        """
        info = endpoint.do_response(request=req_args, **args)
        for key, value in info['http_headers']:
            cherrypy.response.headers[key] = value
        try:
            _response_placement = info['response_placement']
        except KeyError:
            # Fall back to the endpoint's static default placement.
            _response_placement = endpoint.response_placement
        if _response_placement == 'body':
            logger.info('Response: {}'.format(info['response']))
            return as_bytes(info['response'])
        elif _response_placement == 'url':
            logger.info('Redirect to: {}'.format(info['response']))
            # In CherryPy a redirect is delivered by raising HTTPRedirect.
            raise cherrypy.HTTPRedirect(info['response'])
    @cherrypy.expose
    def service_endpoint(self, name, **kwargs):
        """Generic handler for the protocol endpoint registered as *name*."""
        logger.info(kwargs)
        logger.info('At the {} endpoint'.format(name))
        endpoint = self.endpoint_context.endpoint[name]
        try:
            authn = cherrypy.request.headers['Authorization']
        except KeyError:
            pr_args = {}
        else:
            pr_args = {'auth': authn}
        if endpoint.request_placement == 'body':
            if cherrypy.request.process_request_body is True:
                _request = cherrypy.request.body.read()
            else:
                raise cherrypy.HTTPError(400, 'Missing HTTP body')
            if not _request:
                # Empty body: fall back to the query parameters.
                _request = kwargs
            req_args = endpoint.parse_request(_request, **pr_args)
        else:
            req_args = endpoint.parse_request(kwargs, **pr_args)
        logger.info('request: {}'.format(req_args))
        if is_error_message(req_args):
            # Parsing produced a protocol error message; return it verbatim.
            return as_bytes(req_args.to_json())
        args = endpoint.process_request(req_args)
        return self.do_response(endpoint, req_args, **args)
    @cherrypy.expose
    def authn_verify(self, url_endpoint, **kwargs):
        """
        Authentication verification.

        :param url_endpoint: Endpoint path mapped to the authn method used
        :param kwargs: Response arguments (must contain 'token')
        :return: HTTP redirect carrying the authorization response
        """
        authn_method = self.endpoint_context.endpoint_to_authn_method[url_endpoint]
        username = authn_method.verify(**kwargs)
        if not username:
            # Bug fix: the original constructed the HTTPError without raising
            # it, so a failed verification silently continued processing.
            raise cherrypy.HTTPError(403, message='Authentication failed')
        auth_args = authn_method.unpack_token(kwargs['token'])
        request = AuthorizationRequest().from_urlencoded(auth_args['query'])
        # uid, salt, valid=3600, authn_info=None, time_stamp=0, authn_time=None,
        # valid_until=None
        authn_event = AuthnEvent(username, 'salt',
                                 authn_info=auth_args['authn_class_ref'],
                                 authn_time=auth_args['iat'])
        endpoint = self.endpoint_context.endpoint['authorization']
        args = endpoint.post_authentication(request,
                                            user=username,
                                            authn_event=authn_event)
        return self.do_response(endpoint, request, **args)
    def _cp_dispatch(self, vpath):
        """Route a request path to the matching handler (CherryPy hook)."""
        # Only get here if vpath != None
        ent = cherrypy.request.remote.ip
        logger.info('ent:{}, vpath: {}'.format(ent, vpath))
        if len(vpath) == 2 and vpath[0] == 'verify':
            a = vpath.pop(0)
            b = vpath.pop(0)
            cherrypy.request.params['url_endpoint'] = '/'.join(['', a, b])
            return self.authn_verify
        for name, instance in self.endpoint_context.endpoint.items():
            if vpath == instance.vpath:
                cherrypy.request.params['name'] = name
                # Consume the whole path so CherryPy dispatches to the handler.
                for n in range(len(vpath)):
                    vpath.pop()
                return self.service_endpoint
        return self
| [
"roland@catalogix.se"
] | roland@catalogix.se |
9bf714a6abbcdb0385038e4cdee96b601cece13d | 06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b | /2013_adni/MMSE-AD-CTL/01_build_dataset.py | f9049d7b28f1fc1da3d70f12b740317ca04ad28d | [] | no_license | neurospin/scripts | 6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458 | f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb | refs/heads/master | 2021-07-11T22:55:46.567791 | 2021-07-02T13:08:02 | 2021-07-02T13:08:02 | 10,549,286 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,376 | py | # -*- coding: utf-8 -*-
"""
@author: edouard.Duchesnay@cea.fr
Compute mask, concatenate masked non-smoothed images for all the subjects.
Build X, y, and mask
INPUT:
- subject_list.txt:
- population.csv
OUTPUT:
- mask.nii.gz
- y.npy
- X.npy = intercept + Age + Gender + Voxel
"""
import os
import numpy as np
import glob
import pandas as pd
import nibabel
import brainomics.image_atlas
import shutil
#import proj_classif_config
# Map the population file's gender labels onto the numeric design-matrix coding.
GENDER_MAP = {'Female': 0, 'Male': 1}
BASE_PATH = "/neurospin/brainomics/2013_adni"
#INPUT_CLINIC_FILENAME = os.path.join(BASE_PATH, "clinic", "adnimerge_baseline.csv")
INPUT_SUBJECTS_LIST_FILENAME = os.path.join(BASE_PATH,
                                            "templates",
                                            "template_FinalQC",
                                            "subject_list.txt")
INPUT_IMAGEFILE_FORMAT = os.path.join(BASE_PATH,
                                      "templates",
                                      "template_FinalQC",
                                      "registered_images",
                                      "mw{PTID}*_Nat_dartel_greyProba.nii")
INPUT_CSV = os.path.join(BASE_PATH, "MMSE-AD-CTL", "population.csv")
OUTPUT = os.path.join(BASE_PATH, "MMSE-AD-CTL")
OUTPUT_CS = os.path.join(BASE_PATH, "MMSE-AD-CTL_cs")
#OUTPUT_ATLAS = os.path.join(BASE_PATH, "MMSE-AD-CTL_gtvenet")
#OUTPUT_CS_ATLAS = os.path.join(BASE_PATH, "MMSE-AD-CTL_cs_gtvenet")
if not os.path.exists(OUTPUT): os.makedirs(OUTPUT)
if not os.path.exists(OUTPUT_CS): os.makedirs(OUTPUT_CS)
#os.makedirs(OUTPUT_ATLAS)
#os.makedirs(OUTPUT_CS_ATLAS)
# Read input subjects
input_subjects = pd.read_table(INPUT_SUBJECTS_LIST_FILENAME, sep=" ",
                               header=None)
# Keep only the 10-character PTID prefix of each entry.
input_subjects = [x[:10] for x in input_subjects[1]]
# Read pop csv
pop = pd.read_csv(INPUT_CSV)
pop['PTGENDER.num'] = pop["PTGENDER"].map(GENDER_MAP)
#############################################################################
# Read images
# NOTE: this is a Python 2 script (see the `print cur` statement below).
n = len(pop)
assert n == 242
Z = np.zeros((n, 3)) # Intercept + Age + Gender
Z[:, 0] = 1 # Intercept
y = np.zeros((n, 1)) # MMSE score (regression target)
images = list()
for i, PTID in enumerate(pop['PTID']):
    cur = pop[pop.PTID == PTID]
    print cur
    imagefile_pattern = INPUT_IMAGEFILE_FORMAT.format(PTID=PTID)
    imagefile_name = glob.glob(imagefile_pattern)
    # Exactly one grey-matter probability map is expected per subject.
    if len(imagefile_name) != 1:
        raise ValueError("Found %i files" % len(imagefile_name))
    babel_image = nibabel.load(imagefile_name[0])
    images.append(babel_image.get_data().ravel())
    Z[i, 1:] = np.asarray(cur[["AGE", "PTGENDER.num"]]).ravel()
    y[i, 0] = cur["MMSE"]
shape = babel_image.get_data().shape
#############################################################################
# Compute mask
# Implicit Masking involves assuming that a lower than a givent threshold
# at some voxel, in any of the images, indicates an unknown and is
# excluded from the analysis.
Xtot = np.vstack(images)
mask = (np.min(Xtot, axis=0) > 0.01) & (np.std(Xtot, axis=0) > 1e-6)
mask = mask.reshape(shape)
assert mask.sum() == 313734
#############################################################################
# Compute atlas mask
babel_mask_atlas = brainomics.image_atlas.resample_atlas_harvard_oxford(
    ref=imagefile_name[0],
    output=os.path.join("/tmp", "mask.nii.gz"))
mask_atlas = babel_mask_atlas.get_data()
assert np.sum(mask_atlas != 0) == 638715
mask_atlas[np.logical_not(mask)] = 0  # apply implicit mask
# smooth
mask_atlas = brainomics.image_atlas.smooth_labels(mask_atlas, size=(3, 3, 3))
assert np.sum(mask_atlas != 0) == 285983
out_im = nibabel.Nifti1Image(mask_atlas,
                             affine=babel_image.get_affine())
out_im.to_filename(os.path.join("/tmp", "mask.nii.gz"))
# Round-trip check: what was written can be read back unchanged.
im = nibabel.load(os.path.join("/tmp", "mask.nii.gz"))
assert np.all(mask_atlas == im.get_data())
#shutil.copyfile(os.path.join(OUTPUT_ATLAS, "mask.nii.gz"), os.path.join(OUTPUT_CS_ATLAS, "mask.nii.gz"))
#############################################################################
# Compute mask with atlas but binarized (not group tv)
mask_bool = mask_atlas != 0
assert mask_bool.sum() == 285983
out_im = nibabel.Nifti1Image(mask_bool.astype("int16"),
                             affine=babel_image.get_affine())
out_im.to_filename(os.path.join(OUTPUT, "mask.nii.gz"))
babel_mask = nibabel.load(os.path.join(OUTPUT, "mask.nii.gz"))
assert np.all(mask_bool == (babel_mask.get_data() != 0))
shutil.copyfile(os.path.join(OUTPUT, "mask.nii.gz"), os.path.join(OUTPUT_CS, "mask.nii.gz"))
#############################################################################
# X
X = Xtot[:, mask_bool.ravel()]
X = np.hstack([Z, X])
assert X.shape == (242, 285986)
n, p = X.shape
np.save(os.path.join(OUTPUT, "X.npy"), X)
fh = open(os.path.join(OUTPUT, "X.npy").replace("npy", "txt"), "w")
fh.write('shape = (%i, %i): Intercept + Age + Gender + %i voxels' % \
    (n, p, mask_bool.sum()))
fh.close()
# Xcs
X = Xtot[:, mask_bool.ravel()]
X = np.hstack([Z[:, 1:], X])
assert X.shape == (242, 285985)
X -= X.mean(axis=0)
X /= X.std(axis=0)
n, p = X.shape
np.save(os.path.join(OUTPUT_CS, "X.npy"), X)
fh = open(os.path.join(OUTPUT_CS, "X.npy").replace("npy", "txt"), "w")
fh.write('Centered and scaled data. Shape = (%i, %i): Age + Gender + %i voxels' % \
    (n, p, mask_bool.sum()))
fh.close()
## atlas
#X = Xtot[:, (mask_atlas.ravel() != 0)]
#X = np.hstack([Z, X])
#assert X.shape == (242, 285986)
#n, p = X.shape
#np.save(os.path.join(OUTPUT_ATLAS, "X.npy"), X)
#fh = open(os.path.join(OUTPUT_ATLAS, "X.npy").replace("npy", "txt"), "w")
#fh.write('shape = (%i, %i): Intercept + Age + Gender + %i voxels' % \
#         (n, p, (mask_atlas.ravel() != 0).sum()))
#fh.close()
#
## atlas cs
#X = Xtot[:, (mask_atlas.ravel() != 0)]
#X = np.hstack([Z[:, 1:], X])
#assert X.shape == (242, 285985)
#X -= X.mean(axis=0)
#X /= X.std(axis=0)
#n, p = X.shape
#np.save(os.path.join(OUTPUT_CS_ATLAS, "X.npy"), X)
#fh = open(os.path.join(OUTPUT_CS_ATLAS, "X.npy").replace("npy", "txt"), "w")
#fh.write('Centered and scaled data. Shape = (%i, %i): Age + Gender + %i voxels' % \
#         (n, p, (mask_atlas.ravel() != 0).sum()))
#fh.close()
np.save(os.path.join(OUTPUT, "y.npy"), y)
# Save a centered/scaled copy of the target for the *_cs dataset.
y -= y.mean()
y /= y.std()
np.save(os.path.join(OUTPUT_CS, "y.npy"), y)
#np.save(os.path.join(OUTPUT_ATLAS, "y.npy"), y)
#np.save(os.path.join(OUTPUT_CS_ATLAS, "y.npy"), y)
| [
"edouard.duchesnay@gmail.com"
] | edouard.duchesnay@gmail.com |
e2886ddba4caf4503f5d0cf9cf97f91e5c76cd44 | 3d0bb8d94a69237bf3c6ba6b2ccfdd0bc9cc162c | /addons/asterisk/agi-bin/states/get_fast_dial_destination_from_ibs.py | 4521e2804bce47ffccda4b3c9723ff47c347a06f | [] | no_license | ha8sh/IBSng | 69727a7c5476ecb8efa45b7393ffe51de37a8a10 | 596aa468f8264ab0129431e3ede6cc1282b1ebbd | refs/heads/main | 2023-08-25T18:21:28.081153 | 2021-10-02T05:03:52 | 2021-10-02T05:03:52 | 412,687,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | import xmlrpclib
import ibs_agi
from lib import request
from lib.error import *
def init():
    """Register this module's state with the AGI state machine."""
    ibs_agi.getStateMachine().registerState("GET_FAST_DIAL_DESTINATION_FROM_IBS",getFastDialIndexFromIBS)
def getFastDialIndexFromIBS(_index):
    """
    Ask the IBS server for the destination mapped to fast-dial index _index.

    On an XML-RPC fault the "unknown_problem" prompt is played to the caller
    and an IBSException is raised; otherwise the destination reported by IBS
    is returned.
    """
    _index=int(_index)
    req=request.Request()
    try:
        destination=req.send("getFastDialDestination",True,index=_index)
    except xmlrpclib.Fault,e:
        # Log the server-side failure and inform the caller before propagating.
        logException()
        ibs_agi.getSelectedLanguage().sayPrompt("unknown_problem")
        raise IBSException(e.faultString)
    else:
        if ibs_agi.getConfig().getValue("debug"):
            toLog("getFastDialIndexFromIBS: %s"%destination)
        return destination
| [
"hassanshaikhi@gmail.com"
] | hassanshaikhi@gmail.com |
555a9901a0b0cebf2a2cc73a7838cc735101a711 | 9b371bf5710042b09f5f3745b1289821d9f01448 | /Sequential_Sealed_Auction/strategies.py | 2715aa38c71228015a546e1c41983360ceb05e9a | [] | no_license | 15rsirvin/Computational-Economics | 4b505f4ddead78cb3007ce958cb2d9faa08a5d25 | d292ad627eea2ce595b8d934ee40609da338c3a1 | refs/heads/master | 2021-02-06T05:09:08.173130 | 2020-04-30T02:59:57 | 2020-04-30T02:59:57 | 243,880,497 | 0 | 0 | null | 2020-04-25T01:31:00 | 2020-02-29T00:46:43 | Python | UTF-8 | Python | false | false | 335 | py | from random import uniform, seed
from math import sqrt
import numpy
import math
class Strategy:
    """Base auction strategy: a bid is valued at exactly its face amount."""
    def get_valuation(self, bid):
        """Identity valuation -- subclasses may refine this mapping."""
        return bid
class Percent_V_Strategy(Strategy):
    """Strategy that always bids a fixed fraction of the bidder's valuation."""
    def __init__(self, percent):
        # Fraction of the valuation to bid, e.g. 0.9 bids 90%.
        self.percent = percent
    def get_bid(self, valuation):
        """Return the bid for *valuation*, scaled by the configured fraction."""
        return valuation * self.percent
| [
"irvinr@reed.edu"
] | irvinr@reed.edu |
f50fbf295e7c63db3184c8adcae01d3500afaf12 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /tools/grit/grit/format/policy_templates/writers/ios_plist_writer_unittest.py | 0fdecb1d1ef33336fc052bf6c32b3b97d6f1800a | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"MIT"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 6,923 | py | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.policy_templates.writers.ios_plist_writer'''
import base64
import functools
import os
import plistlib
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
import unittest
try:
import Cocoa
except:
Cocoa = None
from grit.format.policy_templates.writers import writer_unittest_common
class IOSPListWriterUnittest(writer_unittest_common.WriterUnittestCommon):
'''Unit tests for IOSPListWriter.'''
def _ParseWithPython(self, decode, text):
'''Parses a serialized Plist, using Python's plistlib.
If |decode| is true then |text| is decoded as Base64 before being
deserialized as a Plist.'''
if decode:
text = base64.b64decode(text)
return plistlib.readPlistFromString(text)
def _ParseWithCocoa(self, decode, text):
'''Parses a serialized Plist, using Cocoa's python bindings.
If |decode| is true then |text| is decoded as Base64 before being
deserialized as a Plist.'''
if decode:
data = Cocoa.NSData.alloc().initWithBase64EncodedString_options_(text, 0)
else:
data = Cocoa.NSData.alloc().initWithBytes_length_(text, len(text))
result = Cocoa.NSPropertyListSerialization. \
propertyListFromData_mutabilityOption_format_errorDescription_(
data, Cocoa.NSPropertyListImmutable, None, None)
return result[0]
def _VerifyGeneratedOutputWithParsers(self,
templates,
expected_output,
parse,
decode_and_parse):
_defines = { '_chromium': '1',
'mac_bundle_id': 'com.example.Test',
'version': '39.0.0.0' }
# Generate the grit output for |templates|.
output = self.GetOutput(
self.PrepareTest(templates),
'fr',
_defines,
'ios_plist',
'en')
# Parse it as a Plist.
plist = parse(output)
self.assertEquals(len(plist), 2)
self.assertTrue('ChromePolicy' in plist)
self.assertTrue('EncodedChromePolicy' in plist)
# Get the 2 expected fields.
chrome_policy = plist['ChromePolicy']
encoded_chrome_policy = plist['EncodedChromePolicy']
# Verify the ChromePolicy.
self.assertEquals(chrome_policy, expected_output)
# Decode the EncodedChromePolicy and verify it.
decoded_chrome_policy = decode_and_parse(encoded_chrome_policy)
self.assertEquals(decoded_chrome_policy, expected_output)
def _VerifyGeneratedOutput(self, templates, expected):
# plistlib is available on all Python platforms.
parse = functools.partial(self._ParseWithPython, False)
decode_and_parse = functools.partial(self._ParseWithPython, True)
self._VerifyGeneratedOutputWithParsers(
templates, expected, parse, decode_and_parse)
# The Cocoa bindings are available on Mac OS X only.
if Cocoa:
parse = functools.partial(self._ParseWithCocoa, False)
decode_and_parse = functools.partial(self._ParseWithCocoa, True)
self._VerifyGeneratedOutputWithParsers(
templates, expected, parse, decode_and_parse)
def _MakeTemplate(self, name, type, example, extra=''):
return '''
{
'policy_definitions': [
{
'name': '%s',
'type': '%s',
'desc': '',
'caption': '',
'supported_on': ['ios:35-'],
'example_value': %s,
%s
},
],
'placeholders': [],
'messages': {},
}
''' % (name, type, example, extra)
def testEmpty(self):
templates = '''
{
'policy_definitions': [],
'placeholders': [],
'messages': {},
}
'''
expected = {}
self._VerifyGeneratedOutput(templates, expected)
def testEmptyVersion(self):
templates = '''
{
'policy_definitions': [],
'placeholders': [],
'messages': {},
}
'''
expected = {}
self._VerifyGeneratedOutput(templates, expected)
def testBoolean(self):
templates = self._MakeTemplate('BooleanPolicy', 'main', 'True')
expected = {
'BooleanPolicy': True,
}
self._VerifyGeneratedOutput(templates, expected)
def testString(self):
templates = self._MakeTemplate('StringPolicy', 'string', '"Foo"')
expected = {
'StringPolicy': 'Foo',
}
self._VerifyGeneratedOutput(templates, expected)
def testStringEnum(self):
templates = self._MakeTemplate(
'StringEnumPolicy', 'string-enum', '"Foo"',
'''
'items': [
{ 'name': 'Foo', 'value': 'Foo', 'caption': '' },
{ 'name': 'Bar', 'value': 'Bar', 'caption': '' },
],
''')
expected = {
'StringEnumPolicy': 'Foo',
}
self._VerifyGeneratedOutput(templates, expected)
def testInt(self):
templates = self._MakeTemplate('IntPolicy', 'int', '42')
expected = {
'IntPolicy': 42,
}
self._VerifyGeneratedOutput(templates, expected)
def testIntEnum(self):
templates = self._MakeTemplate(
'IntEnumPolicy', 'int-enum', '42',
'''
'items': [
{ 'name': 'Foo', 'value': 100, 'caption': '' },
{ 'name': 'Bar', 'value': 42, 'caption': '' },
],
''')
expected = {
'IntEnumPolicy': 42,
}
self._VerifyGeneratedOutput(templates, expected)
def testStringList(self):
templates = self._MakeTemplate('StringListPolicy', 'list', '["a", "b"]')
expected = {
'StringListPolicy': [ "a", "b" ],
}
self._VerifyGeneratedOutput(templates, expected)
def testStringEnumList(self):
templates = self._MakeTemplate('StringEnumListPolicy',
'string-enum-list', '["a", "b"]',
'''
'items': [
{ 'name': 'Foo', 'value': 'a', 'caption': '' },
{ 'name': 'Bar', 'value': 'b', 'caption': '' },
],
''')
expected = {
'StringEnumListPolicy': [ "a", "b" ],
}
self._VerifyGeneratedOutput(templates, expected)
def testListOfDictionary(self):
templates = self._MakeTemplate(
'ManagedBookmarks', 'dict',
'''
[
{
"name": "Google Search",
"url": "www.google.com",
},
{
"name": "Youtube",
"url": "www.youtube.com",
}
]
''')
expected = {
'ManagedBookmarks': [
{ "name": "Google Search", "url": "www.google.com" },
{ "name": "Youtube", "url": "www.youtube.com" },
],
}
self._VerifyGeneratedOutput(templates, expected)
# Allow running this test file directly.
if __name__ == '__main__':
  unittest.main()
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
fbf6f96f0e5b073a6b0de12bbcf2ef221caed4b3 | a7341c7e161e3fa60e6bb8c0351b13b2a155a680 | /PythonFiles/ucf_msd.py | ccfe6737bf7c51de09678163c9ff0b5650b6d35d | [] | no_license | vksh224/RecommendationSystem | fd483e353a1de288129b6fa767e4e734a8fab7ad | 5653f213e72071dd0c046f39161abcad8451b4d6 | refs/heads/master | 2021-05-14T12:37:49.369592 | 2018-01-05T18:58:30 | 2018-01-05T18:58:30 | 116,415,025 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from surprise import KNNBasic
from surprise import Dataset
from surprise import evaluate, print_perf
from surprise import Reader
import os
# Load the ratings from a tab-separated "user item rating timestamp" file.
file_path = os.path.expanduser('restaurant_ratings.txt')
reader = Reader(line_format='user item rating timestamp', sep='\t')
data = Dataset.load_from_file(file_path, reader=reader)
data.split(n_folds=3)
# User-based collaborative filtering with Mean-Squared-Difference similarity.
# Bug fix: the original wrapped MSD in typographic quotes (’MSD’), which is a
# syntax error in Python; plain ASCII quotes are required.
algo = KNNBasic(sim_options={
    'name': 'MSD',
    'user_based': True
})
# 3-fold cross-validated RMSE/MAE for the configured algorithm.
perf = evaluate(algo, data, measures=['RMSE', 'MAE'])
print_perf(perf) | [
"vjsah27@gmail.com"
] | vjsah27@gmail.com |
156797f1ef94e9e97124eebc01ce7344852072e0 | 8aa6d88176833b67633fb3aa9818433ec7d2802f | /app/views.py | c55b111ae896dbfd308c5ac2337e19acce1e6666 | [] | no_license | pasupulatitheja/scikey1 | 29da300c79e498654dfeb4eda30c6bc90d2c5343 | b41ed25256be0517cfb5efe6ba99076069e11376 | refs/heads/main | 2023-08-04T20:36:27.738668 | 2021-09-15T11:43:04 | 2021-09-15T11:43:04 | 406,736,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,913 | py | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from pymongo.auth import authenticate
from django.contrib.auth import authenticate, login, logout
from .forms import LoginForm,CreateUserForm,DocumentForm
from .decorators import allowed_users, unauthenticated_user
from .resources import resourcessci1
from .resources import reuser
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from tablib import Dataset
from .models import sci1,Document
from django.conf import settings
from django.core.files.storage import FileSystemStorage
# @login_required(login_url='login')
def dashboard(request):
    """Render the landing dashboard page."""
    return render(request, 'dashboard.html')
def login_page(request):
    """Validate the login form, authenticate the user and redirect home.

    On GET, or whenever authentication fails, re-render the login page
    with the (possibly bound) form.
    """
    forms = LoginForm()
    if request.method == 'POST':
        forms = LoginForm(request.POST)
        if forms.is_valid():
            credentials = forms.cleaned_data
            user = authenticate(username=credentials['username'],
                                password=credentials['password'])
            if user:
                login(request, user)
                return redirect('dashboard')
    return render(request, 'login.html', {'form': forms})
@unauthenticated_user
def registerpage(request):
    """Create a new user account from the sign-up form."""
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            new_name = form.cleaned_data.get('username')
            messages.success(request, 'Account was created for ' + new_name)
            return redirect('login')
    return render(request, 'basic_sign_page.html', {'form': form})
def upload_csv_file(request):
    """Handle upload of a document via DocumentForm.

    GET renders an empty form; POST validates and saves the uploaded
    file.  BUGFIX: the original POST branch re-rendered the template
    without a 'form' in the context, so the upload widget disappeared
    and validation errors were never shown.  The form is now always
    passed back to the template (an empty one after a successful save).
    """
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            messages.success(request, "file saved")
            # Offer a fresh form after a successful upload.
            form = DocumentForm()
    else:
        form = DocumentForm()
    return render(request, 'all_adminhtmlpages/upload_csv.html', {
        'form': form
    })
def show_all_files_list(request):
    """List every uploaded Document.

    BUGFIX: the original only returned a response when the queryset was
    non-empty; with no documents it fell off the end and returned None,
    which Django turns into a 500 error.  Always render, passing the
    (possibly empty) queryset to the template.
    """
    documents = Document.objects.all()
    return render(request, 'all_adminhtmlpages/show_csv_file_list.html',
                  {'form': documents})
def admin_add_persons(request):
    """Admin view for registering a new person/user account."""
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            created_name = form.cleaned_data.get('username')
            messages.success(request, 'Account was created for ' + created_name)
            return redirect('addpersons')
    return render(request, 'all_adminhtmlpages/add_person.html', {'form': form})
def show_view_contact_list(request):
    """Render the static contact-list page."""
    return render(request,'all_adminhtmlpages/view_contact_list.html')
"theja.pasupulati@gmail.com"
] | theja.pasupulati@gmail.com |
bbacf865691cc8c816c5b21413555b0e174a45fc | 82c657c6d55241969147d3dc6efce18b6272d6f8 | /AdaBoost/adboost.py | 31ed31f27df12625c160fd0c5534db0890069f24 | [] | no_license | wuyanzhang/Machine-learning | 73893582fd84dcc4ebe8adedb1214cfa9f8b9ff7 | 6a8abf4762cdda4c3b1a5f579151155c5b6a8828 | refs/heads/master | 2020-05-25T05:13:26.666934 | 2019-06-16T02:41:26 | 2019-06-16T02:41:26 | 187,644,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,947 | py | import numpy as np
import matplotlib.pyplot as plt
def loadSimpleData():
    """Build the tiny 5-sample toy data set used to demo the decision stump.

    Returns (feature matrix of shape (5, 2), list of +1/-1 labels).
    """
    features = np.mat([[1., 2.1],
                       [1.5, 1.6],
                       [1.3, 1.],
                       [1., 1.],
                       [2., 1.]])
    labels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return features, labels
def showDataSet(dataMat, labelMat):
    """Scatter-plot the data set, one colour per class (+1 first, then -1)."""
    positives = [dataMat[i] for i in range(len(dataMat)) if labelMat[i] > 0]
    negatives = [dataMat[i] for i in range(len(dataMat)) if not labelMat[i] > 0]
    for subset in (positives, negatives):
        arr = np.array(subset)
        plt.scatter(np.transpose(arr)[0], np.transpose(arr)[1])
    plt.show()
def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):
    """Classify samples with a one-dimensional threshold ('decision stump').

    Samples on the `threshIneq` side of `threshVal` along feature `dimen`
    ('lt' = at-or-below, anything else = above) are labelled -1; all other
    samples get +1.  Returns an (m, 1) array of labels.
    """
    predictions = np.ones((np.shape(dataMatrix)[0], 1))
    column = dataMatrix[:, dimen]
    if threshIneq == 'lt':
        predictions[column <= threshVal] = -1.0
    else:
        predictions[column > threshVal] = -1.0
    return predictions
# Exhaustive search for the best single-feature decision stump.
def buildStump(dataArr,classLabels,D):
    """Find the decision stump with the lowest weighted error under D.

    For every feature, every candidate threshold (numSteps evenly spaced
    steps across the feature's range, plus one step beyond each end) and
    both inequality directions, evaluate the D-weighted 0/1 error and keep
    the best stump.

    dataArr     -- (m x n) sample matrix
    classLabels -- +1/-1 class labels
    D           -- (m x 1) sample-weight column vector (sums to 1)

    Returns (bestStump dict with 'dim'/'thresh'/'ineq', minimum weighted
    error, best class estimates).
    """
    dataMatrix = np.mat(dataArr)
    labelMat = np.mat(classLabels).T
    m,n = np.shape(dataMatrix)
    numSteps = 10.0
    bestStump = {}
    minError = float('inf')
    bestClasEst = np.mat(np.zeros((m,1)))
    for i in range(n):  # every feature
        rangeMin = dataMatrix[:,i].min()
        rangeMax = dataMatrix[:,i].max()
        stepSize = (rangeMax - rangeMin) / numSteps
        for j in range(-1,int(numSteps)+1):  # every threshold step
            for inequal in ['lt','gt']:  # both inequality directions
                threshVal = (rangeMin + float(j)*stepSize)
                predictedVals = stumpClassify(dataMatrix,i,threshVal,inequal)
                errArr = np.mat(np.ones((m,1)))
                errArr[predictedVals == labelMat] = 0  # 0 where prediction is correct
                weightedError = D.T * errArr  # weighted 0/1 error
                print("split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (
                    i, threshVal, inequal, weightedError))
                if weightedError < minError:
                    minError = weightedError
                    bestClasEst = predictedVals.copy()
                    bestStump['dim'] = i
                    bestStump['thresh'] = threshVal
                    bestStump['ineq'] = inequal
    return bestStump, minError, bestClasEst
# Boost a sequence of decision stumps into a strong classifier.
def adaBoost(dataArr,classLabels,numIt = 40):
    """Train an AdaBoost ensemble of decision stumps.

    dataArr     -- training samples
    classLabels -- +1/-1 labels
    numIt       -- maximum number of boosting rounds

    Returns (list of weak-classifier dicts incl. their 'alpha' weight,
    aggregated class estimates for the training set).
    """
    weakClassArr = []
    m = np.shape(dataArr)[0]
    D = np.mat(np.ones((m, 1)) / m)  # start with uniform sample weights
    aggClassEst = np.mat(np.zeros((m,1)))
    for i in range(numIt):
        bestStump, error, classEst = buildStump(dataArr, classLabels, D)
        print("D:", D.T)
        # Classifier weight; max(error, 1e-16) guards against division by zero.
        alpha = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))
        bestStump['alpha'] = alpha
        weakClassArr.append(bestStump)
        print("classEst: ", classEst.T)
        expon = np.multiply(-1 * alpha * np.mat(classLabels).T, classEst)  # exponent term of the weight update
        D = np.multiply(D, np.exp(expon))
        D = D / D.sum()  # renormalise sample weights to sum to 1
        # Track the running ensemble output; stop early once the training error is 0.
        aggClassEst += alpha * classEst
        print("aggClassEst: ", aggClassEst.T)
        aggErrors = np.multiply(np.sign(aggClassEst) != np.mat(classLabels).T, np.ones((m, 1)))  # misclassification indicator
        errorRate = aggErrors.sum() / m
        print("total error: ", errorRate)
        if errorRate == 0.0: break  # perfect fit on the training set; stop boosting
    return weakClassArr, aggClassEst
def adaClassify(datToClass, classifierArr):
    """Classify samples with a trained ensemble of weighted decision stumps.

    datToClass    -- samples to classify
    classifierArr -- weak-classifier dicts produced by adaBoost

    Returns the sign of the alpha-weighted sum of the stump votes.
    """
    samples = np.mat(datToClass)
    aggregate = np.mat(np.zeros((np.shape(samples)[0], 1)))
    for stump in classifierArr:
        vote = stumpClassify(samples, stump['dim'], stump['thresh'], stump['ineq'])
        aggregate += stump['alpha'] * vote
    return np.sign(aggregate)
if __name__ == '__main__':
    # Demo: train AdaBoost on the toy data and classify two new points.
    dataArr,classLabels = loadSimpleData()
    weakClassArr,aggClassEst = adaBoost(dataArr,classLabels)
    print(adaClassify([[0, 0], [5, 5]], weakClassArr))
    # print(weakClassArr)  # per-weak-classifier info
    # print(aggClassEst)   # final aggregated class estimates
    # D = np.mat(np.ones((5,1))/5)
    # bestStump,minError,bestClasEst = buildStump(dataArr,classLabels,D)
    # print('bestStump:\n', bestStump)
    # print('minError:\n', minError)
    # print('bestClasEst:\n', bestClasEst)
    # showDataSet(dataArr,classLabels)
"178172535@qq.com"
] | 178172535@qq.com |
dcbfd84e1b63c8cda9bfef935a96be991c62e73f | 4540049184beed3da8a88b56726d3cc2d56ed283 | /ppo_baseline_v0.06/map_and_plan_agent/slam.py | 96defb1fae970c4d5873887bc8fd4716bd829c20 | [
"MIT"
] | permissive | Jiankai-Sun/habitat-challenge | 30c4ed46053892e3700d1970795ed287d54fe60e | 83a325eebb0632596c9d6c25bbc5f13bb464ed53 | refs/heads/master | 2022-05-05T04:53:41.521735 | 2019-05-17T13:47:56 | 2019-05-17T13:47:56 | 179,702,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,085 | py | import numpy as np, imageio
import os
import depth_utils as du
import rotation_utils as ru
import skimage
import matplotlib.pyplot as plt
import subprocess as sp
from astar_planner import ASTAR_Planner
def subplot(plt, Y_X, sz_y_sz_x=(10, 10)):
    """Create a Y-by-X grid of matplotlib subplots with per-cell sizing.

    plt        -- the matplotlib.pyplot module (passed in by the caller)
    Y_X        -- (rows, cols) of the grid
    sz_y_sz_x  -- (height, width) of each cell in inches
    """
    n_rows, n_cols = Y_X
    cell_h, cell_w = sz_y_sz_x
    plt.rcParams['figure.figsize'] = (n_cols * cell_w, n_rows * cell_h)
    fig, axes = plt.subplots(n_rows, n_cols)
    plt.subplots_adjust(wspace=0.1, hspace=0.1)
    return fig, axes
class DepthMapperAndPlanner(object):
    """Builds a 2D occupancy map from depth frames and plans with A*.

    Maintains a metric map (cm units, grid resolution set in _reset),
    dead-reckons the agent pose from the discrete actions it emits, and
    plans towards a point-goal.  Includes heuristics to recover from pose
    drift (collisions), from left/right thrashing, and to close small map
    openings the agent cannot fit through.

    Action encoding: 0 = forward, 1 = rotate left, 2 = rotate right,
    3 = stop.
    """
    def __init__(self, dt=10, camera_height=125., upper_lim=150., map_size_cm=6000, out_dir=None,
                 mark_locs=False, reset_if_drift=False, count=-1, close_small_openings=False,
                 recover_on_collision=False, fix_thrashing=False, goal_f=1.1, point_cnt=2, thrashing_threshold=12,
                 success_distance=-1):
        # dt: rotation step in degrees; map_size_cm: minimum map side length.
        self.map_size_cm = map_size_cm
        self.dt = dt
        self.count = count
        self.out_dir = out_dir
        self.mark_locs = mark_locs
        self.reset_if_drift = reset_if_drift
        self.elevation = 0.  # np.rad2deg(env_config.SIMULATOR.DEPTH_SENSOR.ORIENTATION[0])
        self.camera_height = camera_height
        self.upper_lim = upper_lim
        self.lower_lim = 20
        self.close_small_openings = close_small_openings
        self.num_erosions = 2
        self.recover_on_collision = recover_on_collision
        self.fix_thrashing = fix_thrashing
        self.goal_f = goal_f
        self.point_cnt = point_cnt
        self.thrashing_threshold = thrashing_threshold
        self.success_distance = success_distance
        self.num_rotation = 0
        self.action_counter = 0
        print('self.elevation: {0}, self.camera_height: {1}, self.upper_lim: {2}, self.lower_lim: {3}.'
              .format(self.elevation, self.camera_height, self.upper_lim, self.lower_lim))
        self.step = 0
    def reset(self):
        # Flag the next act() call to re-initialise the episode.
        self.RESET = True
    def _reset(self, goal_dist, soft=False):
        """(Re)initialise map, pose and bookkeeping.

        goal_dist -- goal distance in cm, used to size the map.
        soft      -- True when recovering from drift (keeps action_counter).
        """
        if not soft:
            self.action_counter = 0
        # Create an empty map of some size
        resolution = self.resolution = 5
        self.selem = skimage.morphology.disk(10 / resolution)
        self.selem_small = skimage.morphology.disk(1)
        # 0 agent moves forward. Agent moves in the direction of +x
        # 1 rotates left
        # 2 rotates right
        # 3 agent stop
        self.z_bins = [self.lower_lim, self.upper_lim]
        self.camera = du.get_camera_matrix(256, 256, 90)
        # Map side is the larger of the configured size and twice the goal
        # distance scaled by goal_f, rounded to the grid resolution.
        map_size_cm = np.maximum(self.map_size_cm, goal_dist * 2 * self.goal_f) // resolution
        map_size_cm = int(map_size_cm * resolution)
        self.loc_on_map = np.zeros((map_size_cm // resolution + 1, map_size_cm // resolution + 1), dtype=np.float32)
        self.map = np.zeros((map_size_cm // resolution + 1, map_size_cm // resolution + 1, len(self.z_bins) + 1),
                            dtype=np.float32)
        # Start in the middle of the map; heading 0 is along +x.
        self.current_loc = np.array([(self.map.shape[0] - 1) / 2, (self.map.shape[0] - 1) / 2, 0], np.float32)
        self.current_loc[:2] = self.current_loc[:2] * resolution
        self.goal_loc = None
        self.last_act = 3
        self.last_pointgoal = None
        self.locs = []
        self.acts = []
        self.num_resets = 0
        self.count = self.count + 1
        self.trials = 0
        self.rgbs_depths_fmms_maps = []
        self.recovery_actions = []
        self.thrashing_actions = []
    def add_observation(self, depth):
        """Project a depth frame into the map and accumulate cell counts."""
        # depth is in cm
        d = depth[:, :, 0] * 1
        # Invalidate missing (0) and far (> 990 cm) readings.
        d[d == 0] = np.NaN
        d[d > 990] = np.NaN
        XYZ1 = du.get_point_cloud_from_z(d, self.camera)
        XYZ2 = du.make_geocentric(XYZ1 * 1, self.camera_height, self.elevation)
        # Transform pose
        # Rotate and then translate by agent center
        XYZ3 = self.transform_to_current_frame(XYZ2)
        counts, is_valids = du.bin_points(XYZ3, self.map.shape[0], self.z_bins, self.resolution)
        self.map = self.map + counts
    def plan_path(self, goal_loc_orig):
        """Plan towards goal_loc_orig (map coordinates, cm); return next action."""
        state = self.current_loc * 1.
        state[:2] = state[:2] / self.resolution
        goal_loc = goal_loc_orig / self.resolution
        # Cells seen at least point_cnt times in the obstacle height band
        # are obstacles; dilate them by the agent radius.
        obstacle = self.map[:, :, 1] >= self.point_cnt
        traversible = skimage.morphology.binary_dilation(obstacle, self.selem) != True
        if self.mark_locs:
            # Cells the agent has actually visited are always traversible.
            traversible_locs = skimage.morphology.binary_dilation(self.loc_on_map, self.selem) == True
            traversible = np.logical_or(traversible_locs, traversible)
        if self.close_small_openings:
            # Erode then re-dilate to close narrow gaps; re-plans for each
            # erosion level, keeping the last (n=0) planner.
            n = self.num_erosions
            while n >= 0:
                traversible_open = traversible.copy()
                for i in range(n):
                    traversible_open = skimage.morphology.binary_erosion(traversible_open, self.selem_small)
                for i in range(n):
                    traversible_open = skimage.morphology.binary_dilation(traversible_open, self.selem_small)
                planner = ASTAR_Planner(traversible_open, 360 // self.dt, state, goal_loc)
                n = n - 1
        else:
            planner = ASTAR_Planner(traversible, 360 // self.dt, state, goal_loc)
        self.fmm_dist = planner.traversible
        action = planner.get_action(self.step)
        self.step += 1
        return action
    def get_best_action(self):
        # NOTE(review): placeholder; body is a bare expression, always
        # returns None — confirm whether this is still needed.
        None
    def transform_to_current_frame(self, XYZ):
        """Rotate the point cloud by the agent heading and translate to its position."""
        R = ru.get_r_matrix([0., 0., 1.], angle=self.current_loc[2] - np.pi / 2.)
        XYZ = np.matmul(XYZ.reshape(-1, 3), R.T).reshape(XYZ.shape)
        XYZ[:, :, 0] = XYZ[:, :, 0] + self.current_loc[0]
        XYZ[:, :, 1] = XYZ[:, :, 1] + self.current_loc[1]
        return XYZ
    def update_loc(self, last_act, pointgoal=None):
        """Dead-reckon the pose from the last executed action."""
        # Currently ignores goal_loc.
        if last_act == 1:
            self.current_loc[2] = self.current_loc[2] + self.dt * np.pi / 180.
        elif last_act == 2:
            self.current_loc[2] = self.current_loc[2] - self.dt * np.pi / 180.
        elif last_act == 0:
            # Forward step of 25 cm, clamped to the map bounds.
            self.current_loc[0] = np.clip(self.current_loc[0] + 25 * np.cos(self.current_loc[2]), 0, self.loc_on_map.shape[1] * self.resolution - 1)
            self.current_loc[1] = np.clip(self.current_loc[1] + 25 * np.sin(self.current_loc[2]), 0, self.loc_on_map.shape[0] * self.resolution - 1)
        self.locs.append(self.current_loc + 0)
        self.mark_on_map(self.current_loc)
    def mark_on_map(self, loc):
        # Record a visited cell (cm -> grid index).
        x = int(loc[0] // self.resolution)
        y = int(loc[1] // self.resolution)
        self.loc_on_map[y, x] = 1
    def save_vis(self):
        """Save a 3-panel debug figure (map, planner distance, actions)."""
        if self.trials < 20:
            fig, axes = subplot(plt, (1, 3))
            axes = axes.ravel()[::-1].tolist()
            ax = axes.pop()
            locs = np.array(self.locs).reshape([-1, 3])
            acts = np.array(self.acts).reshape([-1])
            ax.imshow(self.map[:, :, 1] > 0, origin='lower')
            ax.plot(locs[:, 0] / 5, locs[:, 1] / 5, 'm.', ms=3)
            if locs.shape[0] > 0:
                ax.plot(locs[0, 0] / 5, locs[0, 1] / 5, 'bx')
                ax.plot(self.current_loc[0] / 5, self.current_loc[1] / 5, 'b.')
                ax.plot(self.goal_loc[0] / 5, self.goal_loc[1] / 5, 'y*')
            ax = axes.pop()
            ax.imshow(self.fmm_dist, origin='lower')
            ax.plot(locs[:, 0] / 5, locs[:, 1] / 5, 'm.', ms=3)
            if locs.shape[0] > 0:
                ax.plot(locs[0, 0] / 5, locs[0, 1] / 5, 'bx')
                ax.plot(self.current_loc[0] / 5, self.current_loc[1] / 5, 'b.')
                ax.plot(self.goal_loc[0] / 5, self.goal_loc[1] / 5, 'y*')
            ax = axes.pop()
            ax.plot(acts)
            plt.savefig(os.path.join(self.out_dir, '{0:04d}.png'.format(self.count)),
                        bbox_inches='tight')
            plt.close()
    def soft_reset(self, pointgoal):
        """Re-anchor the pose relative to the remembered goal after drift."""
        # This reset is called if there is drift in the position of the goal
        # location, indicating that there had been collisions.
        if self.out_dir is not None:
            self.save_vis()
        self._reset(pointgoal[0] * 100., soft=True)
        self.trials = self.trials + 1
        self.num_resets = self.num_resets + 1
        xy = self.compute_xy_from_pointnav(pointgoal)
        # self.current_loc has been set inside reset
        goal_loc = xy * 1
        # NOTE(review): _reset() unconditionally sets self.goal_loc = None
        # and zeroes num_resets/trials even when soft=True, which would make
        # the goal_loc arithmetic below fail and the == 6 branch unreachable
        # — confirm the intended soft-reset semantics.
        self.current_loc[0] = np.clip(self.goal_loc[0] - goal_loc[0], 0, self.loc_on_map.shape[1] * self.resolution - 1)
        self.current_loc[1] = np.clip(self.goal_loc[1] - goal_loc[1], 0, self.loc_on_map.shape[0] * self.resolution - 1)
        self.mark_on_map(self.goal_loc)
        self.mark_on_map(self.current_loc)
        if self.num_resets == 6:
            # We don't want to keep resetting. First few resets fix themselves,
            # so do it for later resets.
            num_rots = int(np.round(180 / self.dt))
            self.recovery_actions = [1] * num_rots + [0] * 6
            self.num_resets = 0
        else:
            self.recovery_actions = []
    def check_drift(self, pointgoal):
        """True when the sensed goal disagrees with the mapped goal by > 5 cm."""
        xy = self.compute_xy_from_pointnav(pointgoal)
        goal_loc = xy * 1
        goal_loc[0] = goal_loc[0] + self.current_loc[0]
        goal_loc[1] = goal_loc[1] + self.current_loc[1]
        return np.linalg.norm(goal_loc - self.goal_loc) > 5
    def check_thrashing(self, n, acts):
        """True when the last n actions strictly alternate between left/right."""
        thrashing = False
        if len(acts) > n:
            last_act = acts[-1]
            thrashing = last_act == 1 or last_act == 2
            for i in range(2, n + 1):
                if thrashing:
                    # 3 - last_act maps 1 <-> 2 (the opposite rotation).
                    thrashing = acts[-i] == 3 - last_act
                    last_act = acts[-i]
                else:
                    break
        return thrashing
    def compute_xy_from_pointnav(self, pointgoal):
        """Convert a (distance m, angle rad) pointgoal into map-frame cm offsets."""
        xy = np.array([np.cos(pointgoal[1] + self.current_loc[2]),
                       np.sin(pointgoal[1] + self.current_loc[2])], dtype=np.float32)
        xy = xy * pointgoal[0] * 100
        return xy
    def act(self, observations):
        """Return the next action; honours a pending reset() request."""
        if self.RESET:
            self.RESET = False
            return self._act(0, observations, True)
        else:
            return self._act(0, observations, False)
    def _act(self, i, obs, done):
        """One control step: update map/pose, plan, apply recovery heuristics.

        obs is expected to provide 'rgb', 'depth' and 'pointgoal' entries;
        done=True re-initialises the episode first.
        """
        rgb = obs['rgb'].astype(np.uint8)
        depth = obs['depth']
        pointgoal = obs['pointgoal']
        if done:
            if self.out_dir is not None and hasattr(self, 'locs'):
                self.save_vis()
                if self.last_pointgoal is not None and self.last_pointgoal[0] > self.success_distance:
                    self.write_mp4_imageio()
            self._reset(pointgoal[0] * 100.)
            # self.current_loc has been set inside reset
            xy = self.compute_xy_from_pointnav(pointgoal)
            self.goal_loc = xy * 1
            self.goal_loc[0] = self.goal_loc[0] + self.current_loc[0]
            self.goal_loc[1] = self.goal_loc[1] + self.current_loc[1]
            self.mark_on_map(self.goal_loc)
            self.mark_on_map(self.current_loc)
        self.update_loc(self.last_act)
        drift = self.check_drift(pointgoal)
        if self.reset_if_drift and drift:
            self.soft_reset(pointgoal)
        # depth arrives in metres; the mapper works in cm... NOTE(review):
        # the factor here is 1000, not 100 — confirm the sensor units.
        self.add_observation(depth * 1000)
        act = self.plan_path(self.goal_loc)
        if self.recover_on_collision:
            # Scripted recovery actions take precedence over the planner.
            if len(self.recovery_actions) > 0:
                act = self.recovery_actions[0]
                self.recovery_actions = self.recovery_actions[1:]
        thrashing = self.check_thrashing(self.thrashing_threshold, self.acts)
        if thrashing and len(self.thrashing_actions) == 0:
            # Break oscillation: turn left 18 times, then go forward 3 times.
            self.thrashing_actions = [1] * 18 + [0] * 3
        if self.fix_thrashing:
            if len(self.thrashing_actions) > 0:
                act = self.thrashing_actions[0]
                self.thrashing_actions = self.thrashing_actions[1:]
        self.acts.append(act)
        self.last_act = act
        locs = np.array(self.locs).reshape([-1, 3])
        map = self.map.astype(np.uint8)
        # Burn the trajectory into the map copy (for visualisation).
        map[(locs[:, 1] / 5).astype(int), (locs[:, 0] / 5).astype(int)] = 255
        self.last_pointgoal = pointgoal + 0
        self.action_counter += 1
        return act
    def write_mp4_imageio(self):
        """Dump the collected debug frames as an animated gif via imageio."""
        out_file_name = os.path.join(self.out_dir, '{0:04d}_rgbs_depths_fmms_maps.gif'.format(self.count))
        imageio.mimsave(out_file_name, self.rgbs_depths_fmms_maps)
    def write_mp4_cv2(self):
        # NOTE(review): cv2 is not imported in this file, and self.rgbs is
        # never populated by the active code path — confirm before use.
        sz = self.rgbs[0].shape[0]
        out_file_name = os.path.join(self.out_dir, '{0:04d}.mp4'.format(self.count))
        video = cv2.VideoWriter(out_file_name, -1, 10, (sz, sz))
        for rgb in self.rgbs:
            video.write(rgb[:, :, ::-1])
        video.release()
    def write_mp4(self):
        # NOTE(review): self.depths/self.rgbs are never populated by the
        # active code path, and Popen exposes .stdin directly, so
        # pipe.proc.stdin below looks like it would raise AttributeError
        # — confirm before use.
        sz = self.depths[0].shape[0]
        out_file_name = os.path.join(self.out_dir, '{0:04d}.mp4'.format(self.count))
        ffmpeg_bin = 'ffmpeg'
        command = [ffmpeg_bin,
                   '-y',  # (optional) overwrite output file if it exists
                   '-f', 'rawvideo',
                   '-vcodec', 'rawvideo',
                   '-s', '{:d}x{:d}'.format(sz, sz),  # size of one frame
                   '-pix_fmt', 'rgb24',
                   '-r', '4',  # frames per second
                   '-i', '-',  # The input comes from a pipe
                   '-an',  # Tells FFMPEG not to expect any audio
                   '-vcodec', 'mpeg', out_file_name]
        pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.PIPE)
        for rgb in self.rgbs:
            pipe.proc.stdin.write(rgb.tostring())
| [
"sjkai1@126.com"
] | sjkai1@126.com |
2bb5e9136817920f0a118765a28bf286b13b41be | c86277d74266b90b64774bc924b041009d697b2e | /source/nextdoor/wsgi.py | 2ae744a10344445edcd3ffe9adf052710f84605a | [] | no_license | rakeshsukla53/facebook-for-neighbours | dcd0c564530404e5415fa08b184398d10b1170ba | 3d6c1430ab4f7ac8f668626c82705552da9f6566 | refs/heads/master | 2021-01-10T04:54:00.962831 | 2015-12-25T17:32:45 | 2015-12-25T17:32:45 | 46,942,279 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for nextdoor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nextdoor.settings")
application = get_wsgi_application()
| [
"rakesh.sukla53@gmail.com"
] | rakesh.sukla53@gmail.com |
7ce5908f731cec9669227dd9adcc97767bac29df | 990a225852967395a164fd4ac9b55381b5e2f08c | /renu/wsgi.py | 04d2bab4841bc5e573212a9df240bea76532b4dd | [] | no_license | ueslialmeida/renu | aadc144548256c44fe7925b9015dce20e462a9bb | 9a8c8c3bf777dacf3a5efa6585d0065d11067313 | refs/heads/master | 2022-07-22T23:39:44.276708 | 2020-05-20T01:42:25 | 2020-05-20T01:42:25 | 265,418,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
WSGI config for renu project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'renu.settings')
application = get_wsgi_application()
| [
"uesli.ti@gmail.com"
] | uesli.ti@gmail.com |
73c1cb0d4aa4a86afefeb3fd74e8241edec6456a | 7620893c7d253a4d8c6f5aef2cfda6c72b777d49 | /src/Camera/DisplayImage.py | 8d1b1ee14891406071b40da9d5bf7f304a4aa7ad | [] | no_license | garridoH/cameraGUI | cacc549a9da0bcb6c3b9be04ef9783c653300118 | cb6ac1d54dd8651da974ed058990c8212d145415 | refs/heads/master | 2021-01-17T22:38:53.398306 | 2012-06-12T20:41:44 | 2012-06-12T20:41:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | '''
Adapted from online sources, including http://www.blog.pythonlibrary.org/2010/03/26/creating-a-simple-photo-viewer-with-wxpython/
'''
import wx
class displayImage(wx.App):
    """Minimal wxPython (Python 2) viewer: one frame holding a StaticBitmap."""
    def __init__(self, redirect=False):
        """Build the frame, panel and an empty 1360x1024 bitmap, then show it."""
        wx.App.__init__(self, redirect)
        self.frame = wx.Frame(None, title='Prosilica Viewer', pos=(100,300), size=(1360,1024))
        self.panel = wx.Panel(self.frame)
        self.Image = wx.StaticBitmap(self.frame, bitmap=wx.EmptyBitmap(1360,1024))
        #self.panel.Layout()
        self.frame.Show()
    def showImage(self, bmpImg):
        """Display *bmpImg* in the frame and print its dimensions."""
        h=bmpImg.GetHeight()
        w=bmpImg.GetWidth()
        print "Image is " + str(h) + " x " + str(w)
        self.Image.SetBitmap(bmpImg)
        self.Image.Refresh()
    def OnClose(self, event):
        # NOTE(review): never bound to a close event in this file — confirm
        # a caller wires it up, otherwise it is dead code.
        self.Destroy()
| [
"raedwards@gmail.com"
] | raedwards@gmail.com |
6b51914d0ededa34e2f7f91d51b1d1e0903779f5 | 66383f31f2c2a2c976add955f743c81a35d5e457 | /django_petproject/blog/utils.py | 30bf0f910c5840995af015196613e2f08741c34e | [] | no_license | KrutP/petproject | 30dcca922513fcf6aecb88ca1230cb4314bddb60 | a4c44807b6bd748039b238cb5cd5f539bef07ecc | refs/heads/main | 2023-08-15T03:51:58.101520 | 2021-09-23T05:10:19 | 2021-09-23T05:10:19 | 408,832,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | from django.db.models import Count
from .models import *
# Site-wide navigation entries; 'Add page' (index 1) is removed for
# anonymous users in DataMixin.get_user_context below.
menu = [{'title': "About", 'url_name': 'about'},
        {'title': "Add page", 'url_name': 'add_page'},
        {'title': "Contact", 'url_name': 'contact'},
        ]
class DataMixin:
    """Shared view mixin: pagination plus the common template context."""
    paginate_by = 10

    def get_user_context(self, **kwargs):
        """Return kwargs extended with the menu, categories and selection state."""
        context = kwargs
        cats = Category.objects.annotate(Count('blog'))
        visible_menu = list(menu)
        if not self.request.user.is_authenticated:
            # Anonymous users cannot add pages; hide that entry.
            visible_menu.pop(1)
        context['menu'] = visible_menu
        context['cats'] = cats
        context.setdefault('cat_selected', 0)
        return context
| [
"pavel565123@gmail.com"
] | pavel565123@gmail.com |
a3638dc9c57862fe4f551550ed3bfb9f97e9d20f | 32821d668a3d3d8cbcbaea355fb1cead4acb4253 | /apps/users/migrations/0002_banner_emailverifyrecord.py | 846e6a4b09698eb7f6623fbfbdf9e5171d2044e2 | [] | no_license | FigGG16/P2PLoan | 7d6c09534da26126bbf7d490cc4accce3d0184c3 | 8ed1bac0102aca9b979dfa4ae9e14c49fb022799 | refs/heads/master | 2021-01-02T15:55:50.540243 | 2020-05-14T09:27:32 | 2020-05-14T09:27:32 | 239,690,998 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,892 | py | # Generated by Django 2.2.6 on 2019-10-26 13:26
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the Banner and EmailVerifyRecord
    models to the users app.  Do not edit by hand; generate a new migration
    for schema changes instead."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Banner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='标题')),
                ('image', models.ImageField(upload_to='banner/%Y/%m', verbose_name='轮播图')),
                ('url', models.URLField(verbose_name='访问地址')),
                ('index', models.IntegerField(default=100, verbose_name='顺序')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '轮播图',
                'verbose_name_plural': '轮播图',
            },
        ),
        migrations.CreateModel(
            name='EmailVerifyRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=20, verbose_name='验证码')),
                ('email', models.EmailField(max_length=50, verbose_name='邮箱')),
                ('send_type', models.CharField(choices=[('register', '注册'), ('forget', '找回密码'), ('update_email', '修改邮箱')], max_length=30, verbose_name='验证码类型')),
                ('send_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='发送时间')),
            ],
            options={
                'verbose_name': '邮箱验证码',
                'verbose_name_plural': '邮箱验证码',
            },
        ),
    ]
| [
"pub_fei_xiang2017@163.com"
] | pub_fei_xiang2017@163.com |
36fc09b70887ca5efb8fd3fbe506ff6a7780b731 | 4fb8f91b5d276fb9c9eb4e8a902fc1b89c99d318 | /les_1_task_1.py | 3acb2c8140e11264d46d8f66970f7a9ca8f86253 | [] | no_license | YakovBoiev/AlgPython | 876be85eb0d127176a118292bb0d6eada662b15b | a6001b540600c7eae19a35e4d45c06e67f50b3b4 | refs/heads/main | 2023-04-10T17:59:46.526245 | 2021-04-15T19:13:40 | 2021-04-15T19:13:40 | 358,362,152 | 0 | 0 | null | 2021-04-22T17:47:18 | 2021-04-15T18:54:22 | Python | UTF-8 | Python | false | false | 523 | py | """
Найти сумму и произведение цифр трехзначного числа, которое вводит пользователь.
https://drive.google.com/file/d/1pFXRM_lzv1wZ8hM0hx0sCNVnDCsogzZa/view?usp=sharing
"""
def sum_and_product_of_digits(number):
    """Return (sum, product) of the digits of a three-digit integer.

    The sign of *number* is ignored, matching the original script's
    use of abs().
    """
    number = abs(number)
    hundreds = number // 100
    tens = number % 100 // 10
    units = number % 10
    return hundreds + tens + units, hundreds * tens * units


if __name__ == '__main__':
    print('Введите трехзначное число')
    a = int(input())
    s, m = sum_and_product_of_digits(a)
    print(f'Сумма цифр числа = {s}')
    print(f'Произведение цифр числа = {m}')
| [
"noreply@github.com"
] | noreply@github.com |
7a78bab1a7c668a9b26dfe834a4c903b5273b3e3 | d833e1643f799d8979ae385992be9f3012af23a5 | /examples/c60_find_submit.py | 848dd0dd9378059f4770fd68b09de99329f047ff | [
"BSD-3-Clause"
] | permissive | ZhouHUB/simdb | 05906505d549cbf584dcdcc91c9cebe95c2d349b | 33fa21ddcc683e1618dfb337f5f928363c902a1e | refs/heads/master | 2020-04-01T22:32:36.665260 | 2016-04-15T19:20:46 | 2016-04-15T19:20:46 | 36,950,426 | 0 | 0 | null | 2018-07-24T19:56:44 | 2015-06-05T19:06:15 | Python | UTF-8 | Python | false | false | 1,051 | py | __author__ = 'christopher'
import ase
from simdb.insert import *
from simdb.search import *
from pyiid.utils import build_sphere_np
from copy import deepcopy as dc
target_config, = find_atomic_config_document(name='C60 DFT')
parent_atoms = target_config.file_payload[-1]
# find the combined Potential Energy Surface (PES)
pes, = find_pes_document(name='C60 PDF Spring')
# find the simulation parameters
params, = find_simulation_parameter_document(name='T=1, iter=100, accept=.65')
rattles = [.05, .07, .08, .1]
for rattle in rattles:
# find starting_config
try:
start_config, = find_atomic_config_document(name='C60' + str(rattle))
except ValueError:
starting_atoms = dc(parent_atoms)
starting_atoms.rattle(rattle, 42)
# Add the atoms to the DB
start_config = insert_atom_document('C60 ' + str(rattle), starting_atoms)
# Finally create the simulation
sim = insert_simulation('C60 rattle->DFT ' + str(rattle), params, start_config, pes)
print 'simulation added, number ', sim.id
| [
"cjwright4242@gmail.com"
] | cjwright4242@gmail.com |
44c07e30ca5523015f24e844a3fdda7a79daddbf | 9fbb987c920cafaaf3a0c5aa8b8ac2a8abfaa887 | /Eat/eat_mt/yts/html_get.py | c990abd4ce4b9781e8ecd83e6c00ceb34b94560d | [] | no_license | lwd1132438569/Data_baby | 11f82a488bad86d46780b4a9df3de6b7c325ea27 | 94a068302555635325b43608b0e3e955e0196864 | refs/heads/master | 2021-01-23T07:37:51.833744 | 2017-09-18T09:47:37 | 2017-09-18T09:47:37 | 86,433,465 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,858 | py | # -*- coding: utf-8 -*-
import requests
import time
import random
import sys
# from imp import reload
#
# reload(sys)
# sys.setdefaultencoding('utf-8')
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'Accept':'*/*',
'Accept-Language':'zh-CN,zh;q=0.8',
'Connection':'keep-alive',
'Referer':'https://item.jd.com/3219817.html'
}
cookie = {
'unpl':'V2_ZzNtbRBeQhciCxUAKE5YBmIARQ0SAkMccVgUVisQWQYzUBtdclRCFXMUR1FnGFoUZwIZXkRcQhdFCHZXchBYAWcCGllyBBNNIEwHDCRSBUE3XHxcFVUWF3RaTwEoSVoAYwtBDkZUFBYhW0IAKElVVTUFR21yVEMldQl2VH0RVQZvABRdQ2dzEkU4dlJ4Gl4MYzMTbUNnAUEpC0RRexxcSGcFGlRBX0ATdQl2VUsa',
'__jdv':'122270672|baidu-pinzhuan|t_288551095_baidupinzhuan|cpc|0f3d30c8dba7459bb52f2eb5eba8ac7d_0_c803f2bebf5242faad185ac3a842eb81|1491532024032',
'ipLoc-djd':'1-72-2799-0',
'ipLocation':'%u5317%u4EAC',
'user-key':'e0dd75a9-a9a1-4b83-a107-cafa54043b74',
'cn':'0',
'__jda':'122270672.1798292710.1491532023.1491532023.1491532024.1',
'__jdb':'122270672.14.1798292710|1.1491532024',
'__jdc':'122270672',
'__jdu':'1798292710'
}
url1 = "http://i.meituan.com/poi/6374048/feedbacks/page_"
ran_num = random.sample(range(214), 214)
for i in ran_num:
a = ran_num[0]
if i == a:
i = str(i)
url = (url1 + i)
r = requests.get(url = url,headers = headers,cookies=cookie)
# r = requests.get(url=url, headers=headers, cookies=cookie)
html = r.content
else:
i = str(i)
url = (url1 + i)
r = requests.get(url=url, headers=headers,cookies=cookie)
html2 = r.content
html = html + html2
time.sleep(5)
print("当前抓取页面" + url + "状态" + str(r))
html = str(html)
file = open("D:\\yts.txt", "w")
file.write(html)
file.close() | [
"1132438569@qq.com"
] | 1132438569@qq.com |
051d9853c1a80138b3692ffc5620491001b13b3e | 84166b6d374529f01532194ec7729ab2c7f24ae8 | /login.py | 87d15a93df8e5bc11707c833d1aebca1ebd8baba | [] | no_license | sbalagudas/unicorn | c592a908220300b5e7fbc2a602dab56984664e0c | 2c3208b70e6275cbbaf54a1d37f39abf99195950 | refs/heads/master | 2020-04-16T01:45:12.258498 | 2017-12-26T14:19:02 | 2017-12-26T14:19:02 | 83,402,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,848 | py | #!/usr/bin/python
import wx
from DBOperation import DBOperation as dbo
import time
import common as cmm
import fonts
import Main
import enDecryption as ed
class logInPanel(wx.Panel):
def __init__(self,
parent,
ID,
pos=wx.DefaultPosition,
size=(600,400)):
wx.Panel.__init__(self,parent,ID,pos,size)
self.frame = parent
#self.backgroundImage()
self.fontBold = wx.Font(18,wx.ROMAN,wx.ITALIC,wx.BOLD)
self.bgColor = self.GetBackgroundColour()
self.createLoginButton()
self.createPromptText()
(textSizer,self.textList) = cmm.createStaticTextControl(self,self.textInfo(),fonts.Fonts.romanBold16())
for item in self.textList:
item.SetForegroundColour('Blue')
self.SetBackgroundColour('#CCFFCC')
self.layout(textSizer)
def createLoginButton(self):
self.loginBtn = wx.Button(self,label="Log In",size=(100,50))
self.loginBtn.SetFont(fonts.Fonts.romanBold18())
self.loginBtn.SetForegroundColour('Blue')
self.loginBtn.Bind(wx.EVT_BUTTON,self.authentication,self.loginBtn)
def createPromptText(self):
self.pmt = wx.StaticText(self,id=-1,label="",size=(400,50))
self.pmt.SetFont(self.fontBold)
self.pmt.SetForegroundColour('RED')
def textInfo(self):
return [("User Name : ",wx.ROMAN,'static'),
(wx.TE_NOHIDESEL,'ctrl'),
("Password : ",wx.ROMAN,'static'),
(wx.TE_PASSWORD|wx.TE_PROCESS_ENTER,'ctrl')]
def layout(self,textSizer):
boxSizer = wx.BoxSizer(wx.VERTICAL)
boxSizer.Add((0,20))
boxSizer.Add(textSizer,1,wx.EXPAND|wx.ALL,5)
boxSizer.Add(self.pmt,0,wx.ALIGN_CENTER|wx.EXPAND,5)
boxSizer.Add(self.loginBtn,0,wx.ALIGN_CENTER|wx.ALL,5)
boxSizer.Add((0,20))
self.SetSizer(boxSizer)
self.Layout()
def authentication(self,event):
db = dbo()
#db.initialization()
###print "self.textList : ",self.textList
#self.userName = self.textList[1].GetLabelText()
self.userName = self.textList[1].GetValue()
#print "user name : ",self.userName
password = self.textList[3].GetValue()
#print "password : ",password
enUserName = ed.enDecryption.encryption(self.userName)
enUserName.strip()
dbPwd = db.getBanana(enUserName)
#need decryption process, will add later.
if dbPwd :
#print "ed.enDecryption.decryption(dbPwd[0][0])",ed.enDecryption.decryption(dbPwd[0][0])
if ed.enDecryption.decryption(dbPwd[0][0]) == password:
self.pmt.SetLabel("log in success!")
time.sleep(1)
self.frame.Destroy()
self.mainFrame = Main.mainFrame()
self.mainFrame.Show()
else :
self.pmt.SetLabel("invalid username or password.")
else :
self.pmt.SetLabel("invalid username or password.")
class loginApp(wx.App):
def __init__(self,redirect=False,filename=None):
wx.App.__init__(self,redirect,filename)
def OnInit(self):
self.loginFrame = logInFrame()
self.SetTopWindow(self.loginFrame)
self.loginFrame.Show()
return True
class logInFrame(wx.Frame) :
def __init__(self,
parent=None,
id=-1,
title="log in window",
pos=wx.DefaultPosition,
size=(400,300),
style=wx.DEFAULT_FRAME_STYLE^(wx.RESIZE_BORDER | wx.MINIMIZE_BOX |wx.MAXIMIZE_BOX)):
wx.Frame.__init__(self,parent,id,title,pos,size,style)
self.panel = logInPanel(self,-1)
if __name__ == "__main__":
app = loginApp()
app.MainLoop()
| [
"371962715@qq.com"
] | 371962715@qq.com |
8988be20a22fb57d4719c31b7d397fab4a3163b1 | 7adecd15af359d19a611ce24b3ae8765b7af2a46 | /reset_db.py | b3e0e8d8e3eae2acf91da93a40e1f8e255bd9183 | [] | no_license | jkrovitz/MovieRatingSite | 83996ec6856a31a04d59cadc89196d9819d022ed | bc9f1930595d578268e754078d4d374a0fbcedc5 | refs/heads/master | 2020-04-05T14:22:50.341159 | 2019-09-13T22:55:48 | 2019-09-13T22:55:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | print('Resetting database...')
from server import db
# Reset the database
db.db.drop_all()
# Create the tables
db.db.create_all()
print('Database reset: success!')
| [
"jkrovitz@macalester.edu"
] | jkrovitz@macalester.edu |
5adc121c249783a3723d7534db002449e0dd665f | 48bdf7214235a2d60787fa8c5312f0461f688dde | /src/test_01.py | 7f4e802f6ec4b9f4f09ebb16784db702f558eb91 | [] | no_license | hechangfei1123/FaceRecognition | 148b16a21b781bc915c131f92341012e54f4f0d3 | 6010f5c68aa7d4043dc256f19f3d53e192683e8d | refs/heads/master | 2020-03-28T12:43:48.807655 | 2018-11-11T03:48:47 | 2018-11-11T03:48:47 | 148,328,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,780 | py | from PIL import Image
from PIL import ImageDraw
# im = Image.open(r"E:\BaiduNetdiskDownload\CelebA\Img\img_align_celeba\000011.jpg")
# imDraw = ImageDraw.Draw(im)
# imDraw.ellipse((68,111,69,112),fill="red")
# im.show(im)
import utils
import numpy as np
import os
import numpy.random as npr
import cv2
anno_file = r"E:\save_path\landmarks\0\landark.txt"
data_file = r"E:\save_path\landmarks\0\images"
save_path = r'E:\save_path\landmarks'
for size in [48]:
# size = 48
image_id = 0
landmark_imgs_save_dir = os.path.join(save_path,str(size))
if not os.path.exists(landmark_imgs_save_dir):
os.makedirs(landmark_imgs_save_dir)
landmark_imgs_save_dir_img = os.path.join(landmark_imgs_save_dir,"images")
if not os.path.exists(landmark_imgs_save_dir_img):
os.makedirs(landmark_imgs_save_dir_img)
save_landmark_anno = os.path.join(landmark_imgs_save_dir,"landark.txt")
f = open(save_landmark_anno, 'w')
# dstdir = "train_landmark_few"
with open(anno_file, 'r') as f2:
annotations = f2.readlines()
num = len(annotations)
print("%d total images" % num)
l_idx =0
idx = 0
# image_path bbox landmark(5*2)
for annotation in annotations:
# print imgPath
annotation = annotation.strip().split(' ')
assert len(annotation)==15,"each line should have 15 element"
im_path = os.path.join(data_file,annotation[0])
# gt_box = map(float, annotation[1:5])
# gt_box = [gt_box[0], gt_box[2], gt_box[1], gt_box[3]]
# gt_box = np.array(gt_box, dtype=np.int32)
gt_box = np.array(annotation[1:5], dtype=np.int32)
# landmark = map(float, annotation[5:])
landmark = np.array(annotation[5:], dtype=np.float)
img = cv2.imread(im_path)
assert (img is not None)
height, width, channel = img.shape
# crop_face = img[gt_box[1]:gt_box[3]+1, gt_box[0]:gt_box[2]+1]
# crop_face = cv2.resize(crop_face,(size,size))
idx = idx + 1
if idx % 100 == 0:
print("%d images done, landmark images: %d"%(idx,l_idx))
x1, y1, x2, y2 = gt_box
# gt's width
w = x2 - x1 + 1
# gt's height
h = y2 - y1 + 1
if max(w, h) < 40 or x1 < 0 or y1 < 0:
continue
# random shift
for i in range(10):
bbox_size = npr.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h)))
delta_x = npr.randint(-w * 0.2, w * 0.2)
delta_y = npr.randint(-h * 0.2, h * 0.2)
nx1 = max(x1 + w / 2 - bbox_size / 2 + delta_x, 0)
ny1 = max(y1 + h / 2 - bbox_size / 2 + delta_y, 0)
nx2 = nx1 + bbox_size
ny2 = ny1 + bbox_size
if nx2 > width or ny2 > height:
continue
crop_box = np.array([nx1, ny1, nx2, ny2])
cropped_im = img[int(ny1):int(ny2 + 1), int(nx1):int(nx2 + 1), :]
resized_im = cv2.resize(cropped_im, (size, size),interpolation=cv2.INTER_LINEAR)
offset_x1 = (x1 - nx1) / float(bbox_size)
offset_y1 = (y1 - ny1) / float(bbox_size)
offset_x2 = (x2 - nx2) / float(bbox_size)
offset_y2 = (y2 - ny2) / float(bbox_size)
offset_left_eye_x = (landmark[0] - nx1) / float(bbox_size)
offset_left_eye_y = (landmark[1] - ny1) / float(bbox_size)
offset_right_eye_x = (landmark[2] - nx1) / float(bbox_size)
offset_right_eye_y = (landmark[3] - ny1) / float(bbox_size)
offset_nose_x = (landmark[4] - nx1) / float(bbox_size)
offset_nose_y = (landmark[5] - ny1) / float(bbox_size)
offset_left_mouth_x = (landmark[6] - nx1) / float(bbox_size)
offset_left_mouth_y = (landmark[7] - ny1) / float(bbox_size)
offset_right_mouth_x = (landmark[8] - nx1) / float(bbox_size)
offset_right_mouth_y = (landmark[9] - ny1) / float(bbox_size)
# cal iou
# iou = utils.IoU(crop_box.astype(np.float), np.expand_dims(gt_box.astype(np.float), 0))
box_ = gt_box.reshape(1, -1)
iou = utils.iou(crop_box,box_)
if iou > 0.65:
save_file = os.path.join(landmark_imgs_save_dir_img, "%s.jpg" % l_idx)
cv2.imwrite(save_file, resized_im)
f.write(save_file + ' 1 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f \n' % \
(offset_x1, offset_y1, offset_x2, offset_y2, \
offset_left_eye_x,offset_left_eye_y,offset_right_eye_x,offset_right_eye_y,offset_nose_x,offset_nose_y,offset_left_mouth_x,offset_left_mouth_y,offset_right_mouth_x,offset_right_mouth_y))
l_idx += 1
f.close() | [
"569594060@qq.com"
] | 569594060@qq.com |
f50f764b1502409cb63f656caee642bd0a491879 | dc9650ad04552a1fae325209868ad60fa8301abb | /BotTelegram/migrations/0002_auto_20161010_1545.py | 7ab3390bcaf122e887383cf99ef75d136f3ca619 | [
"Apache-2.0"
] | permissive | manuggz/memes_telegram_bot | a284ae11f99650ea3feb61c58aeccaa47d25b53b | 2ed73aac099923d08c89616ec35c965204cac119 | refs/heads/master | 2021-03-27T20:27:25.624851 | 2017-02-23T01:23:03 | 2017-02-23T01:23:03 | 70,193,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-10 19:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('BotTelegram', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DatosImagenBorrador',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('upper_text', models.CharField(default=b'Upper TEXT', max_length=200, null=True)),
('lower_text', models.CharField(default=b'Lower TEXT', max_length=200, null=True)),
('color', models.CharField(default=b'Red', max_length=200, null=True)),
],
),
migrations.AddField(
model_name='usuario',
name='datos_imagen_borrador',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='BotTelegram.DatosImagenBorrador'),
),
]
| [
"manuelggonzalezm@gmail.com"
] | manuelggonzalezm@gmail.com |
ec36939630b71f42dfc1ceb9a3c56d293db45181 | 95c1ee50b12ba1735eaddfecf39861a1b43f3e66 | /maoyan_spider.py | 74680c748e820a4530cc2afe93032dc12013ad92 | [] | no_license | JonLuGitHub/spider_maoyan | 988232f27e64c18dff0eb112bd7ce835a0b2e2c7 | 538474818b1349cc0f547f563874231c784dbbc8 | refs/heads/master | 2020-03-24T10:00:21.496508 | 2018-08-22T08:22:35 | 2018-08-22T08:22:35 | 142,644,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | # -*- coding: utf-8 -*-
import json
import requests
from requests.exceptions import RequestException
import re
import time
def get_one_page(url):
"""下载数据"""
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.text
return None
except RequestException:
return None
def parse_one_page(html):
"""解析数据"""
pattern = re.compile('<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?).*?name"><a'
+ '.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
+ '.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
items = re.findall(pattern, html)
for item in items:
yield {
'index': item[0],
'image': item[1],
'title': item[2],
'actor': item[3].strip()[3:],
'time': item[4].strip()[5:],
'score': item[5] + item[6]
}
def write_to_file(content):
"""存储数据"""
with open('result.txt', 'a', encoding='utf-8') as f:
f.write(json.dumps(content, ensure_ascii=False) + '\n')
def main(offset):
url = 'http://maoyan.com/board/4?offset=' + str(offset)
html = get_one_page(url)
for item in parse_one_page(html):
print(item)
write_to_file(item)
if __name__ == '__main__':
for i in range(10):
main(offset=i * 10)
time.sleep(1)
| [
"lq_hut@126.com"
] | lq_hut@126.com |
d155f26c15d744cdfd16276623f1b50bcc247c91 | ab9151dc5f98feaff5ad8971f3ace43f4b1feb09 | /buildbad.py | 3523be2fb5182eb8d1ddd4d4dca1bc48403b6821 | [] | no_license | evangambit/Codex | c889d5d9dd601d09d9941e28ae6a08ab33205451 | 004609f0bf31add72d3101efe7fd47544342cf83 | refs/heads/master | 2022-12-31T19:21:03.545226 | 2020-10-24T20:03:46 | 2020-10-24T20:03:46 | 257,159,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | import json, sqlite3
conn = sqlite3.connect('new.db')
c = conn.cursor()
c.execute('SELECT json FROM comments')
F = {}
n = 100000
for it in range(n):
j = json.loads(c.fetchone()[0])
for token in j['tokens'].split(' '):
F[token] = F.get(token, 0) + 1
badkeys = []
for token in F:
F[token] /= n
if F[token] < 0.01:
badkeys.append(token)
for key in badkeys:
del F[key]
A = list(zip(F.values(), F.keys()))
A.sort(key=lambda x:-x[0])
for a in A[:64]:
print(a) | [
"morganfredding@gmail.com"
] | morganfredding@gmail.com |
048dd77d7a67072915ca12a4be0d9bb4cc71a927 | 5d68003304258314eab41444bc27be45921787cb | /aws_token_refresh.py | 14da8c89abbbf2428c9cb7f308107eb5abe85798 | [] | no_license | manas86/aws-auto-token-refresh | b921b19070e921cb058bcc0b28dffd2081ab0e13 | 9e04b35de3c531fa60fa3126b7ea1041c1cb781b | refs/heads/master | 2022-11-12T22:24:49.755310 | 2020-07-05T21:40:37 | 2020-07-05T21:40:37 | 277,386,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | import boto3
from botocore.credentials import RefreshableCredentials
from botocore.session import get_session
from boto3 import Session
aws_region="eu-west-1"
sts_client = boto3.client("sts", region_name=aws_region)
role_name="dummy-role-1"
session_name="Session_name"
def _refresh():
" Refresh tokens by calling assume_role again "
params = {
"RoleArn": role_name,
"RoleSessionName": session_name,
"DurationSeconds": 3600,
}
response = sts_client.assume_role(**params).get("Credentials")
credentials = {
"access_key": response.get("AccessKeyId"),
"secret_key": response.get("SecretAccessKey"),
"token": response.get("SessionToken"),
"expiry_time": response.get("Expiration").isoformat(),
}
return credentials
session_credentials = RefreshableCredentials.create_from_metadata(
metadata=_refresh(),
refresh_using=_refresh,
method="sts-assume-role",
)
# Now we can use this as long as possible
session = get_session()
session._credentials = session_credentials
session.set_config_variable("region", aws_region)
autorefresh_session = Session(botocore_session=session)
ec2_client = autorefresh_session.client("ec2", region_name=aws_region)
| [
"manassamantaray@Manass-MacBook-Air.local"
] | manassamantaray@Manass-MacBook-Air.local |
92c1bf03bc0279955a5ec39a71b9ec7617f7296f | 27e8bb104c3b102d433f910561ffa80461c4887a | /spamNN.py | 5aee9f09dc3c692da0f5d70cb3b67adcea0a473a | [] | no_license | entradajuan/spam2 | fdcaa3aa2f2ddeff31ef7b9bc7ae64e5bb10ffd5 | 993f4042397910c75506e1c4fc318cb4ce98a70e | refs/heads/master | 2023-05-31T03:32:31.820147 | 2021-06-06T18:59:55 | 2021-06-06T18:59:55 | 373,096,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,559 | py | %tensorflow_version 2.x
import tensorflow as tf
#from tf.keras.models import Sequential
#from tf.keras.layers import Dense
import os
import io
import numpy as np
tf.__version__
#path_to_zip = tf.keras.utils.get_file("smsspamcollection.zip",
# origin="https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip",
# extract=True)
#print(path_to_zip)
#print(type(path_to_zip))
#!unzip $path_to_zip -d data
#lines = io.open('/content/spam2/data/SMSSpamCollection').read().strip().split('\n')
lines = io.open('data/SMSSpamCollection').read().strip().split('\n')
print(lines)
print(type(lines))
print(lines[0])
data = []
count = 0
for e in lines:
label, text = e.split('\t')
if (label.lower().strip() =='spam'):
data.append((1, text.strip()))
else:
data.append((0, text.strip()))
print(data)
print(len(data))
print(data[0][1])
print(type(data[0]))
import pandas as pd
df = pd.DataFrame(data, columns=['spam', 'text'])
print(df.head())
print(df.shape)
import re
def message_length(x):
return len(x)
def num_capitals(x):
_, count = re.subn(r'[A-Z]' , '', x)
return count
cap_count = num_capitals('Adsd Aggggg')
print(cap_count)
def num_punctuation(x):
_, num = re.subn(r'\W', '', x)
return num
df['long'] = df['text'].apply(message_length)
df['caps'] = df['text'].apply(num_capitals)
df['punct'] = df['text'].apply(num_punctuation)
print(df.head().to_string)
train = df.sample(frac=0.8, random_state=42)
test = df.drop(train.index)
print()
print(train.describe())
def make_model(inputs=3, num_units=12):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(num_units, input_dim=inputs, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='relu'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
x_train = train[['long', 'punct', 'caps']]
y_train = train[['spam']]
print(y_train)
print(y_train.shape)
print(y_train['spam'].sum())
print(type(y_train))
# Pay attention, the 2 DataFrames do NOT share memo!!
ser1 = train[['spam']]
print(ser1.head)
#print(ser1.iloc[0:3].index)
print(ser1.loc[ser1.iloc[0:3].index, 'spam'])
ser1.loc[ser1.iloc[0:3].index, 'spam'] = np.nan
print('train spam shape', train[['spam']].isna().sum())
print('serie spam shape', ser1[['spam']].isna().sum())
x_test = test[['long', 'punct', 'caps']]
y_test = test[['spam']]
model = make_model(inputs=3, num_units=12)
print(type(model))
model.fit(x_train, y_train, epochs=10, batch_size=10)
model.evaluate(x_test, y_test)
y_train_pred = model.predict(x_train)
print(y_train_pred)
print(tf.math.confusion_matrix(tf.constant(y_train.spam), y_train_pred))
y_test_pred = model.predict_classes(x_test)
print(y_test_pred)
print(tf.math.confusion_matrix(tf.constant(y_test.spam), y_test_pred))
!pip install stanza
import stanza
snlp = stanza.download('en')
en = stanza.Pipeline(lang='en', processors='tokenize')
def word_counts(x, pipeline=en):
docu = pipeline(x)
count = sum([len(sen.tokens) for sen in docu.sentences])
return count
df['words'] = df['text'].apply(word_counts)
train['words'] = train['text'].apply(word_counts)
test['words'] = test['text'].apply(word_counts)
x_train = train[[ 'long', 'caps', 'punct', 'words']]
y_train = train[['spam']]
x_test = test[[ 'long', 'caps', 'punct', 'words']]
y_test = test[['spam']]
#model = make_model(inputs=4)
model = make_model(inputs=4, num_units=1200)
model.fit(x_train, y_train, epochs=40, batch_size=100)
model.evaluate(x_test, y_test)
| [
"entradajuan@yahoo.es"
] | entradajuan@yahoo.es |
dee08669b1d02ebe27c6c89f6953a3892a7f80ac | bd8a9afcf75a0a4048bffdfd37089d4e44299301 | /node_modules/mongoose/node_modules/mongodb/node_modules/bson/build/config.gypi | d5e44d8de6462d315d8f2caf33ad03d6f3716250 | [
"Apache-2.0",
"MIT"
] | permissive | deanvlue/meantest | f0c836692903e83c205bc3934c9f81170ff6f5c3 | 5c4a0e039085cd8216202f18f49fba64b93dbc0d | refs/heads/master | 2016-09-05T23:52:23.269296 | 2015-04-24T04:44:27 | 2015-04-24T04:44:27 | 34,538,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,385 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 49,
"host_arch": "ia32",
"icu_small": "false",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "ia32",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/deploy/.node-gyp/0.12.2",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/2.7.4 node/v0.12.2 linux ia32",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "1000",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/deploy/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/home/deploy/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "",
"node_version": "0.12.2",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/deploy/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": "",
"spin": "true"
}
}
| [
"munoz.josecarlos@gmail.com"
] | munoz.josecarlos@gmail.com |
3199220b5d3f26c2b9848fbeaf6d987c1f2e0c37 | f6f2665598646c35f63aed9ae77bf3b8158bb81b | /DecoratorPattern/simple_func_decorator.py | 4c8495494a0a305a51d3f3f32dfaf66426ff2c8e | [] | no_license | Stuming/designpattern | 30a83185d1b04a2e67c9e7243a2e21ff5f9810a6 | 5a58e99e8a2ff1173e608edd26b1b41254940424 | refs/heads/master | 2020-04-01T20:11:50.751189 | 2016-11-23T14:55:14 | 2016-11-23T14:55:14 | 68,522,482 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
def deco(func):
def _deco():
print("-------Start-------")
func()
print("-------Done-------")
return 0
return _deco()
@deco
def process1():
print("This is processs1.")
@deco
def process2():
print("This is processs2.")
if __name__ == '__main__':
process1()
process2()
| [
"1361046649@qq.com"
] | 1361046649@qq.com |
5c373349176db66ba2f7617dfca9fa2c18ee4d78 | 94724578994ab1438dcefb51b7ef4d8570da5d4c | /calibre/draveness.recipe | 361ce64106650ddf8643557d49369ebfec882386 | [] | no_license | PegasusWang/collection_python | 6648d83203634abf44fd42c0b37b0bf7cc406d8f | 9ef019a737a0817860d3184924c67a0833bd1252 | refs/heads/master | 2023-09-01T23:15:39.813635 | 2023-08-24T06:46:12 | 2023-08-24T06:46:12 | 43,693,872 | 130 | 90 | null | 2021-04-26T15:12:55 | 2015-10-05T15:28:15 | JavaScript | UTF-8 | Python | false | false | 2,028 | recipe | #!/usr/bin/python
# encoding: utf-8
from calibre.web.feeds.recipes import BasicNewsRecipe # 引入 Recipe 基础类
"""
教程:
- https://bookfere.com/tools#calibre
- https://www.jianshu.com/p/0bcb92509309
- https://snowdreams1006.github.io/myGitbook/advance/export.html
命令:
ebook-convert draveness.recipe draveness.mobi --output-profile=kindle
"""
class DravenessBlog(BasicNewsRecipe): # 继承 BasicNewsRecipe 类的新类名
# ///////////////////
# 设置电子书元数据
# ///////////////////
title = "draveness" # 电子书名
description = u"draveness的博客" # 电子书简介
# cover_url = '' # 电子书封面
# masthead_url = '' # 页头图片
__author__ = "draveness" # 作者
language = "zh" # 语言
encoding = "utf-8" # 编码
# ///////////////////
# 抓取页面内容设置
# ///////////////////
# keep_only_tags = [{ 'class': 'example' }] # 仅保留指定选择器包含的内容
no_stylesheets = True # 去除 CSS 样式
remove_javascript = True # 去除 JavaScript 脚本
auto_cleanup = True # 自动清理 HTML 代码
delay = 5 # 抓取页面间隔秒数
max_articles_per_feed = 100 # 抓取文章数量
timeout = 10
# ///////////////////
# 页面内容解析方法
# ///////////////////
def parse_index(self):
site = "https://draveness.me/whys-the-design/"
soup = self.index_to_soup(site) # 解析列表页返回 BeautifulSoup 对象
articles = [] # 定义空文章资源数组
ultag = soup.findAll("ul")[6]
urls = ultag.findAll("li")
urls.reverse()
for link in urls:
title = link.a.contents[0].strip() # 提取文章标题
url = link.a.get("href") # 提取文章链接
print(title, url)
articles.append({"title": title, "url": url})
ans = [(self.title, articles)] # 组成最终的数据结构
return ans # 返回可供 Calibre 转换的数据结构
| [
"291374108@qq.com"
] | 291374108@qq.com |
0ce8d2ab48fab5b95445e205d1b406f8dbfdb76e | 0ba1393b6c472000ccd16297915843377fb16338 | /venv/space_ship.py | 85bc8ee70126af92a544cee64f742a2a749dc44c | [] | no_license | Gautham116006/Alien_Invasion1 | 5fa8d6916b2a119d736b4cdfe48983f85dced728 | e3f47db90b1e99e2f1b69b3c948a992ede1f756d | refs/heads/master | 2022-12-11T14:15:07.358962 | 2020-09-11T18:06:53 | 2020-09-11T18:06:53 | 294,343,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | import pygame
class Ship():
def __init__(self,game_settings,screen):
# initialize the ship and set it's starting position
self.screen = screen
self.game_settings = game_settings
#load ship image and get its rectangle
self.image = pygame.image.load("Include\images\space_ship.jpeg")
self.ship_rect = self.image.get_rect()
self.screen_rect = self.screen.get_rect()
#start each ship at bottom centre of screen
self.ship_rect.centerx = self.screen_rect.centerx
self.ship_rect.bottom = self.screen_rect.bottom
#store a decimal value for the ships center
self.center = float(self.ship_rect.centerx)
self.moving_right = False
self.moving_left = False
def update(self):
if self.moving_right == True:
if self.ship_rect.centerx<=1050:
self.center+= self.game_settings.ship_speed
elif self.moving_left == True:
if self.ship_rect.centerx >= 50 :
self.center -= self.game_settings.ship_speed
# update rectangle from self.center
self.ship_rect.centerx = self.center
def blitme(self):
#draw ship at current location
self.screen.blit(self.image,self.ship_rect)
| [
"63862992+Gautham116006@users.noreply.github.com"
] | 63862992+Gautham116006@users.noreply.github.com |
1fdebe6ed9f7b1888ae11d36cdcefdc246e740f5 | 543b0bcb81b16674c81be082824adca4d8ac509a | /apps/urls.py | a36a6454ca4fd7f22b2812aa610b1b12488eb2e9 | [] | no_license | alxpolyakov/test-apps | a85d2a3828cdde056c3320be6f4a9c0fb52bb170 | 254a535c278ae34ec0e1d177eec2fac84b12daef | refs/heads/master | 2020-09-15T12:24:59.963107 | 2019-11-25T04:39:03 | 2019-11-25T04:39:03 | 223,444,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | """apps URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from api.views import AppsListView, CreateAPIKeyView
urlpatterns = [
path('admin/', admin.site.urls),
path('apps/', AppsListView.as_view(), name='apps'),
path('api-key/', CreateAPIKeyView.as_view(), name='create-api-key')
]
| [
"alx.polyakov@gmail.com"
] | alx.polyakov@gmail.com |
eec8efa198fdd6ce3ad3070fc8265762caf05d1c | 58141d7fc37854efad4ad64c74891a12908192ed | /tests/test_storage2.py | b09918d1388d18d6fb7fb62fa653799306d5de22 | [] | no_license | stanleylio/fishie | b028a93b2093f59a8ceee4f78b55a91bb1f69506 | 0685045c07e4105934d713a0fd58c4bc28821ed6 | refs/heads/master | 2022-08-14T13:08:55.548830 | 2022-07-29T01:32:28 | 2022-07-29T01:32:28 | 30,433,819 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | import unittest,sys
from os.path import expanduser
sys.path.append(expanduser('~'))
from node.storage.storage2 import storage
class TestStorage2(unittest.TestCase):
def test_read_latest_non_null(self):
s = storage()
self.assertTrue(s.read_latest_non_null('node-008', 'ReceptionTime', 'idx'))
#self.assertTrue(parse_SeaFET(m) is not None)
def test_read_last_N_minutes(self):
s = storage()
self.assertTrue(s.read_last_N_minutes('node-007', 'ReceptionTime', 1, 'T_280'))
# Allow running this test module directly with `python test_storage2.py`.
if __name__ == '__main__':
    unittest.main()
| [
"stanleylio@gmail.com"
] | stanleylio@gmail.com |
f8de786a0f3c8b0ba99882fe7407050b11316930 | 55b57d64ec547869835334318f3059fbb507558c | /Fred2/Data/pssms/tepitopepan/mat/DRB5_0111_9.py | fd2bb78b87e863070732f2fef744ff47908e3877 | [
"BSD-3-Clause"
] | permissive | FRED-2/Fred2 | 9845f6678d4011cb746c7a5a6f283eea68077a02 | b3e54c8c4ed12b780b61f74672e9667245a7bb78 | refs/heads/master | 2021-07-12T05:05:54.515427 | 2020-05-25T06:56:25 | 2020-05-25T06:56:25 | 16,275,425 | 42 | 35 | null | 2021-07-07T12:05:11 | 2014-01-27T10:08:11 | Python | UTF-8 | Python | false | false | 2,095 | py | DRB5_0111_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.004754, 'I': -0.99525, 'H': -999.0, 'K': -999.0, 'M': -0.99525, 'L': -0.99525, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.004754, 'V': -0.99525, 'Y': -0.004754}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -1.2876, 'D': -1.8782, 'G': -1.5864, 'F': -0.56917, 'I': 1.2917, 'H': -1.3705, 'K': -1.69, 'M': 1.6918, 'L': 0.60019, 'N': -1.6785, 'Q': -0.69644, 'P': -1.4887, 'S': -0.4961, 'R': -1.6837, 'T': 0.29244, 'W': -1.3798, 'V': 1.0861, 'Y': -0.57262}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -2.0, 'D': -2.0, 'G': -0.30001, 'F': -1.7, 'I': -1.4, 'H': -1.2, 'K': -1.5, 'M': -1.5, 'L': -1.0, 'N': -1.3, 'Q': -1.4, 'P': 0.19998, 'S': -0.49997, 'R': -1.3, 'T': -0.79998, 'W': -1.7, 'V': -1.3, 'Y': -1.0}, 6: {'A': 0.0, 'E': -0.89606, 'D': -1.4918, 'G': 0.58996, 'F': 1.4949, 'I': 1.2005, 'H': 1.1907, 'K': 0.88503, 'M': 0.41266, 'L': 0.61161, 'N': 0.50177, 'Q': 0.68943, 'P': -0.59131, 'S': -0.1948, 'R': 1.2786, 'T': 0.29867, 'W': 0.39933, 'V': -0.29253, 'Y': 1.1918}, 7: {'A': 0.0, 'E': 0.0, 
'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.60069, 'D': -1.4932, 'G': 0.39612, 'F': 1.1915, 'I': 1.1912, 'H': 0.98443, 'K': 2.6667, 'M': 0.50264, 'L': 1.283, 'N': -0.0088368, 'Q': 0.69103, 'P': -0.79719, 'S': 0.70285, 'R': 2.4734, 'T': -0.20486, 'W': -0.70351, 'V': -0.19682, 'Y': 1.2852}} | [
"schubert@informatik.uni-tuebingen.de"
] | schubert@informatik.uni-tuebingen.de |
3490b1bb4e1c131229f6c34d0a5be01c481e3222 | f7dd967f82902ecfcd4825a579bbd9d1f05d8fbd | /TwitterTrends/urls.py | 93e0ef3a2eec347d7870b478c1cf38b448105789 | [] | no_license | Pravin-Rathod/TwitterTrends | 1f55d7134a400dd9b2c137a4af4b43b93d2681d1 | 54112039b0ff23cc19820ae3bce5940aaf135d17 | refs/heads/master | 2022-09-15T20:23:07.908981 | 2022-07-24T18:31:02 | 2022-07-24T18:31:02 | 517,396,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | """TwitterTrends URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# URL routing table for the project.
# NOTE(review): 'TwitterTrends/' is routed to both Index.urls and
# Dashboard.urls -- Django tries them in order, so Dashboard patterns are
# only reachable if they don't collide with Index patterns. Confirm intended.
urlpatterns = [
    path('TwitterTrends/',include('Index.urls')),
    path('TwitterTrends/',include('Dashboard.urls')),
    path('admin/', admin.site.urls),
]
| [
"43312928+Pravin-Rathod@users.noreply.github.com"
] | 43312928+Pravin-Rathod@users.noreply.github.com |
ba2b047f942cd1f395b5a28b8a0a65fd974e1b9b | 87de2ede5daf6138100e87a817d1625545a384ac | /selfdrive/thermald/thermald.py | 04f21911a42b9fdb1e3fb5c1b6f2033b90aa3557 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | DS1SQM/OPKR084test_20210505 | 26acbe9d381753cbe72cb0271c163b588343918a | 76fc12bff1472b8bbe62206cb8ae014f4c2fb969 | refs/heads/main | 2023-04-12T18:19:39.391330 | 2021-05-02T07:51:00 | 2021-05-02T07:51:00 | 364,593,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,482 | py | #!/usr/bin/env python3
import datetime
import os
import time
from pathlib import Path
from typing import Dict, Optional, Tuple
import psutil
from smbus2 import SMBus
import cereal.messaging as messaging
from cereal import log
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import clip, interp
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from common.dict_helpers import strip_deprecated_keys
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.hardware import EON, TICI, HARDWARE
from selfdrive.loggerd.config import get_available_percent
from selfdrive.pandad import get_expected_signature
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.version import get_git_branch, terms_version, training_version
import re
import subprocess
# Expected panda firmware signature; compared against the reported one below.
FW_SIGNATURE = get_expected_signature()
# LTE is stopped while onroad when this marker file exists (always on TICI).
DISABLE_LTE_ONROAD = os.path.exists("/persist/disable_lte_onroad") or TICI

# Short aliases for the capnp enums used throughout this module.
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15.   # 15s time constant
CPU_TEMP_TAU = 5.   # 5s time constant
DAYS_NO_CONNECTIVITY_MAX = 7  # do not allow to engage after a week without internet
DAYS_NO_CONNECTIVITY_PROMPT = 4  # send an offroad prompt after 4 days with no internet
DISCONNECT_TIMEOUT = 3.  # wait 3 seconds before going offroad after disconnect so you get an alert

# Last (show_alert, extra_text) pushed per offroad-alert name; used to avoid
# re-sending an unchanged alert (see set_offroad_alert_if_changed).
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}

# Cache of the last fan speed index written over i2c (see set_eon_fan).
last_eon_fan_val = None

# Filesystem locations used by the fork-specific (opkr) features below.
mediaplayer = '/data/openpilot/selfdrive/assets/addon/mediaplayer/'
prebuiltfile = '/data/openpilot/prebuilt'
sshkeyfile = '/data/public_key'
pandaflash_ongoing = '/data/openpilot/pandaflash_ongoing'
def read_tz(x):
  """Read thermal zone `x` from sysfs and return its raw integer value.

  Returns 0 when `x` is None or the zone file does not exist.
  """
  if x is None:
    return 0

  zone_path = f"/sys/devices/virtual/thermal/thermal_zone{x}/temp"
  try:
    with open(zone_path) as zone_file:
      return int(zone_file.read())
  except FileNotFoundError:
    return 0
def read_thermal(thermal_config):
  """Build a new 'deviceState' message populated with current zone temperatures.

  `thermal_config` maps each sensor group to (zone list/zone, divisor).
  """
  dat = messaging.new_message('deviceState')
  ds = dat.deviceState
  ds.cpuTempC = [read_tz(zone) / thermal_config.cpu[1] for zone in thermal_config.cpu[0]]
  ds.gpuTempC = [read_tz(zone) / thermal_config.gpu[1] for zone in thermal_config.gpu[0]]
  ds.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
  ds.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
  ds.batteryTempC = read_tz(thermal_config.bat[0]) / thermal_config.bat[1]
  return dat
def setup_eon_fan():
  # One-time EON setup before fan control.
  # NOTE(review): presumably switches the dwc3 USB controller's OTG mode so
  # the external fan controller becomes reachable over i2c -- confirm on hardware.
  os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
def set_eon_fan(val):
  """Write fan speed index `val` (0-3) to the EON fan controller over i2c.

  Caches the last written value in the module global `last_eon_fan_val` so
  the bus is only touched when the value actually changes.
  """
  global last_eon_fan_val

  if last_eon_fan_val is None or last_eon_fan_val != val:
    bus = SMBus(7, force=True)
    try:
      # NOTE(review): device at i2c address 0x3d; speed encoded in bits 3-4 -- confirm.
      i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
      bus.write_i2c_block_data(0x3d, 0, [i])
    except IOError:
      # tusb320
      # Fallback controller at 0x67 when the primary write fails.
      if val == 0:
        bus.write_i2c_block_data(0x67, 0xa, [0])
      else:
        bus.write_i2c_block_data(0x67, 0xa, [0x20])
        bus.write_i2c_block_data(0x67, 0x8, [(val - 1) << 6])
    bus.close()
    last_eon_fan_val = val
# EON fan control tables. Rising temperatures use the high thresholds and
# falling temperatures the low thresholds, giving hysteresis (see handle_fan_eon).
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
# max fan speed only allowed if battery is hot
_BAT_TEMP_THRESHOLD = 45.
def handle_fan_eon(max_cpu_temp, bat_temp, fan_speed, ignition):
  """Hysteresis-based fan control for EON; returns the new raw fan speed.

  Speed only increases when the high thresholds demand it and only decreases
  when the low thresholds allow it; max speed is reserved for a hot battery.
  """
  speed_if_rising = next(s for s, thr in zip(_FAN_SPEEDS, _TEMP_THRS_H) if thr > max_cpu_temp)
  speed_if_falling = next(s for s, thr in zip(_FAN_SPEEDS, _TEMP_THRS_L) if thr > max_cpu_temp)

  if speed_if_rising > fan_speed:
    # high thresholds demand a faster fan
    fan_speed = speed_if_rising
  elif speed_if_falling < fan_speed:
    # low thresholds allow slowing down
    fan_speed = speed_if_falling

  if bat_temp < _BAT_TEMP_THRESHOLD:
    # cap below max unless the battery is hot
    fan_speed = min(fan_speed, _FAN_SPEEDS[-2])

  set_eon_fan(fan_speed // 16384)

  return fan_speed
def handle_fan_uno(max_cpu_temp, bat_temp, fan_speed, ignition):
  """Fan control for uno: linear 0-80% ramp over 40-80C, capped at 30% offroad."""
  desired = int(interp(max_cpu_temp, [40.0, 80.0], [0, 80]))
  return desired if ignition else min(30, desired)
def check_car_battery_voltage(should_start, pandaState, charging_disabled, msg):
  """Toggle EON battery charging to keep charge within the configured band.

  Reads the opkr charging-control params and flips the sysfs charging switch
  when the battery percentage leaves [min, max]. Returns the new
  `charging_disabled` state. `should_start` and `pandaState` are currently
  unused but kept for interface compatibility with the caller.
  """
  # Read all three knobs through one Params handle instead of three instances.
  params = Params()
  battery_charging_control = params.get_bool('OpkrBatteryChargingControl')
  battery_charging_min = int(params.get('OpkrBatteryChargingMin'))
  battery_charging_max = int(params.get('OpkrBatteryChargingMax'))

  # (removed stray `print(pandaState)` debug statement)
  if charging_disabled and msg.deviceState.batteryPercent < battery_charging_min:
    # dropped below the floor: re-enable charging
    charging_disabled = False
    os.system('echo "1" > /sys/class/power_supply/battery/charging_enabled')
  elif not charging_disabled and msg.deviceState.batteryPercent > battery_charging_max:
    # rose above the ceiling: stop charging
    charging_disabled = True
    os.system('echo "0" > /sys/class/power_supply/battery/charging_enabled')
  elif msg.deviceState.batteryCurrent < 0 and msg.deviceState.batteryPercent > battery_charging_max:
    # already disabled but still charging (negative current) above the ceiling
    charging_disabled = True
    os.system('echo "0" > /sys/class/power_supply/battery/charging_enabled')

  if not battery_charging_control:
    # feature off: never report charging as disabled
    charging_disabled = False

  return charging_disabled
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
  """Forward to set_offroad_alert only when (show_alert, extra_text) changed."""
  state = (show_alert, extra_text)
  if prev_offroad_states.get(offroad_alert) == state:
    return
  prev_offroad_states[offroad_alert] = state
  set_offroad_alert(offroad_alert, show_alert, extra_text)
def thermald_thread():
  """Main monitoring loop: publishes 'deviceState' at the pandaState rate.

  Tracks temperatures, network, battery and panda connectivity; computes the
  thermal status and the onroad/offroad startup conditions; and drives the
  fork-specific (opkr) features: auto shutdown, get-off alert sound, battery
  charge control, prebuilt/ssh-key toggles and boot-time hotspot. Never returns.
  """
  pm = messaging.PubMaster(['deviceState'])

  pandaState_timeout = int(1000 * 2.5 * DT_TRML)  # 2.5x the expected pandaState frequency
  pandaState_sock = messaging.sub_sock('pandaState', timeout=pandaState_timeout)
  location_sock = messaging.sub_sock('gpsLocationExternal')
  managerState_sock = messaging.sub_sock('managerState', conflate=True)

  fan_speed = 0
  count = 0

  startup_conditions = {
    "ignition": False,
  }
  startup_conditions_prev = startup_conditions.copy()

  off_ts = None
  started_ts = None
  started_seen = False
  thermal_status = ThermalStatus.green
  usb_power = True
  current_branch = get_git_branch()

  network_type = NetworkType.none
  network_strength = NetworkStrength.unknown

  current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
  cpu_temp_filter = FirstOrderFilter(0., CPU_TEMP_TAU, DT_TRML)
  pandaState_prev = None
  charging_disabled = False
  should_start_prev = False
  handle_fan = None
  is_uno = False
  ui_running_prev = False

  params = Params()
  power_monitor = PowerMonitoring()
  no_panda_cnt = 0

  thermal_config = HARDWARE.get_thermal_config()

  # CPR3 logging
  if EON:
    base_path = "/sys/kernel/debug/cpr3-regulator/"
    cpr_files = [p for p in Path(base_path).glob("**/*") if p.is_file()]
    cpr_data = {}
    for cf in cpr_files:
      with open(cf, "r") as f:
        try:
          cpr_data[str(cf)] = f.read().strip()
        except Exception:
          pass
    cloudlog.event("CPR", data=cpr_data)

  ts_last_ip = 0
  ip_addr = '255.255.255.255'

  # sound trigger
  sound_trigger = 1
  opkrAutoShutdown = 0
  shutdown_trigger = 1
  is_openpilot_view_enabled = 0

  env = dict(os.environ)
  env['LD_LIBRARY_PATH'] = mediaplayer

  getoff_alert = params.get_bool("OpkrEnableGetoffAlert")

  hotspot_on_boot = params.get_bool("OpkrHotspotOnBoot")
  hotspot_run = False

  # Map the OpkrAutoShutdown option index to a delay in seconds.
  if int(params.get('OpkrAutoShutdown')) == 0:
    opkrAutoShutdown = 0
  elif int(params.get('OpkrAutoShutdown')) == 1:
    opkrAutoShutdown = 5
  elif int(params.get('OpkrAutoShutdown')) == 2:
    opkrAutoShutdown = 30
  elif int(params.get('OpkrAutoShutdown')) == 3:
    opkrAutoShutdown = 60
  elif int(params.get('OpkrAutoShutdown')) == 4:
    opkrAutoShutdown = 180
  elif int(params.get('OpkrAutoShutdown')) == 5:
    opkrAutoShutdown = 300
  elif int(params.get('OpkrAutoShutdown')) == 6:
    opkrAutoShutdown = 600
  elif int(params.get('OpkrAutoShutdown')) == 7:
    opkrAutoShutdown = 1800
  elif int(params.get('OpkrAutoShutdown')) == 8:
    opkrAutoShutdown = 3600
  elif int(params.get('OpkrAutoShutdown')) == 9:
    opkrAutoShutdown = 10800
  else:
    opkrAutoShutdown = 18000

  while 1:
    ts = sec_since_boot()
    pandaState = messaging.recv_sock(pandaState_sock, wait=True)
    msg = read_thermal(thermal_config)

    if pandaState is not None:
      usb_power = pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client

      # If we lose connection to the panda, wait 5 seconds before going offroad
      if pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
        no_panda_cnt += 1
        if no_panda_cnt > DISCONNECT_TIMEOUT / DT_TRML:
          if startup_conditions["ignition"]:
            cloudlog.error("Lost panda connection while onroad")
          startup_conditions["ignition"] = False
          shutdown_trigger = 1
      else:
        no_panda_cnt = 0
        startup_conditions["ignition"] = pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan
        # FIX: was `sound_trigger == 1`, a bare comparison with no effect;
        # re-arm the get-off alert sound while the panda is connected.
        sound_trigger = 1

      # Setup fan handler on first connect to panda
      if handle_fan is None and pandaState.pandaState.pandaType != log.PandaState.PandaType.unknown:
        is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno

        if (not EON) or is_uno:
          cloudlog.info("Setting up UNO fan handler")
          handle_fan = handle_fan_uno
        else:
          cloudlog.info("Setting up EON fan handler")
          setup_eon_fan()
          handle_fan = handle_fan_eon

      # Handle disconnect
      if pandaState_prev is not None:
        if pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown and \
           pandaState_prev.pandaState.pandaType != log.PandaState.PandaType.unknown:
          params.panda_disconnect()
      pandaState_prev = pandaState
    elif params.get_bool("IsOpenpilotViewEnabled") and not params.get_bool("IsDriverViewEnabled") and is_openpilot_view_enabled == 0:
      is_openpilot_view_enabled = 1
      startup_conditions["ignition"] = True
    elif not params.get_bool("IsOpenpilotViewEnabled") and not params.get_bool("IsDriverViewEnabled") and is_openpilot_view_enabled == 1:
      shutdown_trigger = 0
      # FIX: was `sound_trigger == 0`, a bare comparison with no effect;
      # suppress the get-off alert when leaving openpilot view.
      sound_trigger = 0
      is_openpilot_view_enabled = 0
      startup_conditions["ignition"] = False

    # get_network_type is an expensive call. update every 10s
    if (count % int(10. / DT_TRML)) == 0:
      try:
        network_type = HARDWARE.get_network_type()
        network_strength = HARDWARE.get_network_strength(network_type)
      except Exception:
        cloudlog.exception("Error getting network status")

    msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
    msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
    msg.deviceState.cpuUsagePercent = int(round(psutil.cpu_percent()))
    msg.deviceState.networkType = network_type
    msg.deviceState.networkStrength = network_strength
    msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
    msg.deviceState.batteryStatus = HARDWARE.get_battery_status()
    msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
    msg.deviceState.batteryVoltage = HARDWARE.get_battery_voltage()
    msg.deviceState.usbOnline = HARDWARE.get_usb_present()

    # Fake battery levels on uno for frame
    if (not EON) or is_uno:
      msg.deviceState.batteryPercent = 100
      msg.deviceState.batteryStatus = "Charging"
      msg.deviceState.batteryTempC = 0

    # update ip every 10 seconds
    ts = sec_since_boot()
    if ts - ts_last_ip >= 10.:
      try:
        result = subprocess.check_output(["ifconfig", "wlan0"], encoding='utf8')  # pylint: disable=unexpected-keyword-arg
        ip_addr = re.findall(r"inet addr:((\d+\.){3}\d+)", result)[0][0]
      except:
        ip_addr = 'N/A'
      ts_last_ip = ts
    msg.deviceState.ipAddr = ip_addr

    current_filter.update(msg.deviceState.batteryCurrent / 1e6)

    # TODO: add car battery voltage check
    max_cpu_temp = cpu_temp_filter.update(max(msg.deviceState.cpuTempC))
    max_comp_temp = max(max_cpu_temp, msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
    bat_temp = msg.deviceState.batteryTempC

    if handle_fan is not None:
      fan_speed = handle_fan(max_cpu_temp, bat_temp, fan_speed, startup_conditions["ignition"])
      msg.deviceState.fanSpeedPercentDesired = fan_speed

    # If device is offroad we want to cool down before going onroad
    # since going onroad increases load and can make temps go over 107
    # We only do this if there is a relay that prevents the car from faulting
    is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
    if max_cpu_temp > 107. or bat_temp >= 63. or (is_offroad_for_5_min and max_cpu_temp > 70.0):
      # onroad not allowed
      thermal_status = ThermalStatus.danger
    elif max_comp_temp > 96.0 or bat_temp > 60.:
      # hysteresis between onroad not allowed and engage not allowed
      thermal_status = clip(thermal_status, ThermalStatus.red, ThermalStatus.danger)
    elif max_cpu_temp > 94.0:
      # hysteresis between engage not allowed and uploader not allowed
      thermal_status = clip(thermal_status, ThermalStatus.yellow, ThermalStatus.red)
    elif max_cpu_temp > 80.0:
      # uploader not allowed
      thermal_status = ThermalStatus.yellow
    elif max_cpu_temp > 75.0:
      # hysteresis between uploader not allowed and all good
      thermal_status = clip(thermal_status, ThermalStatus.green, ThermalStatus.yellow)
    else:
      thermal_status = ThermalStatus.green  # default to good condition

    # **** starting logic ****

    # Check for last update time and display alerts if needed
    now = datetime.datetime.utcnow()

    # show invalid date/time alert
    startup_conditions["time_valid"] = True if ((now.year > 2020) or (now.year == 2020 and now.month >= 10)) else True  # set True for battery less EON otherwise, set False.
    set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))

    # NOTE: the upstream update-prompt / connectivity-needed logic is
    # intentionally disabled in this fork (commented-out block removed).
    startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
    startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version

    panda_signature = params.get("PandaFirmware")
    startup_conditions["fw_version_match"] = (panda_signature is None) or (panda_signature == FW_SIGNATURE)  # don't show alert is no panda is connected (None)
    set_offroad_alert_if_changed("Offroad_PandaFirmwareMismatch", (not startup_conditions["fw_version_match"]))

    # with 2% left, we killall, otherwise the phone will take a long time to boot
    startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
    startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
                                               (current_branch in ['dashcam', 'dashcam-staging'])
    startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
    startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
    # if any CPU gets above 107 or the battery gets above 63, kill all processes
    # controls will warn with CPU above 95 or battery above 60
    startup_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
    set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not startup_conditions["device_temp_good"]))

    # Handle offroad/onroad transition
    should_start = all(startup_conditions.values())
    if should_start:
      if not should_start_prev:
        params.delete("IsOffroad")
        if TICI and DISABLE_LTE_ONROAD:
          os.system("sudo systemctl stop --no-block lte")
      off_ts = None
      if started_ts is None:
        started_ts = sec_since_boot()
        started_seen = True
    else:
      if startup_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
        cloudlog.event("Startup blocked", startup_conditions=startup_conditions)
      if should_start_prev or (count == 0):
        params.put_bool("IsOffroad", True)
        if TICI and DISABLE_LTE_ONROAD:
          os.system("sudo systemctl start --no-block lte")
      started_ts = None
      if off_ts is None:
        off_ts = sec_since_boot()
      # Play the "take your device" sound once, shortly after going offroad.
      if shutdown_trigger == 1 and sound_trigger == 1 and msg.deviceState.batteryStatus == "Discharging" and started_seen and (sec_since_boot() - off_ts) > 1 and getoff_alert:
        subprocess.Popen([mediaplayer + 'mediaplayer', '/data/openpilot/selfdrive/assets/sounds/eondetach.wav'], shell = False, stdin=None, stdout=None, stderr=None, env = env, close_fds=True)
        sound_trigger = 0
      # shutdown if the battery gets lower than 3%, it's discharging, we aren't running for
      # more than a minute but we were running
      if shutdown_trigger == 1 and msg.deviceState.batteryStatus == "Discharging" and \
         started_seen and opkrAutoShutdown and (sec_since_boot() - off_ts) > opkrAutoShutdown and not os.path.isfile(pandaflash_ongoing):
        os.system('LD_LIBRARY_PATH="" svc power shutdown')

    charging_disabled = check_car_battery_voltage(should_start, pandaState, charging_disabled, msg)

    if msg.deviceState.batteryCurrent > 0:
      msg.deviceState.batteryStatus = "Discharging"
    else:
      msg.deviceState.batteryStatus = "Charging"

    msg.deviceState.chargingDisabled = charging_disabled

    # Keep the on-disk 'prebuilt' marker in sync with the PutPrebuiltOn param.
    prebuiltlet = params.get_bool("PutPrebuiltOn")
    if not os.path.isfile(prebuiltfile) and prebuiltlet:
      os.system("cd /data/openpilot; touch prebuilt")
    elif os.path.isfile(prebuiltfile) and not prebuiltlet:
      os.system("cd /data/openpilot; rm -f prebuilt")

    # Swap between the legacy and new SSH keys based on the OpkrSSHLegacy param.
    sshkeylet = params.get_bool("OpkrSSHLegacy")
    if not os.path.isfile(sshkeyfile) and sshkeylet:
      os.system("cp -f /data/openpilot/selfdrive/assets/addon/key/GithubSshKeys_legacy /data/params/d/GithubSshKeys; chmod 600 /data/params/d/GithubSshKeys; touch /data/public_key")
    elif os.path.isfile(sshkeyfile) and not sshkeylet:
      os.system("cp -f /data/openpilot/selfdrive/assets/addon/key/GithubSshKeys_new /data/params/d/GithubSshKeys; chmod 600 /data/params/d/GithubSshKeys; rm -f /data/public_key")

    # opkr hotspot
    if hotspot_on_boot and not hotspot_run and sec_since_boot() > 60:
      os.system("service call wifi 37 i32 0 i32 1 &")
      hotspot_run = True

    # Offroad power monitoring
    power_monitor.calculate(pandaState)
    msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
    msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())

    # If UI has crashed, set the brightness to reasonable non-zero value
    manager_state = messaging.recv_one_or_none(managerState_sock)
    if manager_state is not None:
      ui_running = "ui" in (p.name for p in manager_state.managerState.processes if p.running)
      if ui_running_prev and not ui_running:
        HARDWARE.set_screen_brightness(20)
      ui_running_prev = ui_running

    msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90  # if current is positive, then battery is being discharged
    msg.deviceState.started = started_ts is not None
    msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))

    msg.deviceState.thermalStatus = thermal_status
    pm.send("deviceState", msg)

    if EON and not is_uno:
      set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))

    should_start_prev = should_start
    startup_conditions_prev = startup_conditions.copy()

    # report to server once every 10 minutes
    if (count % int(600. / DT_TRML)) == 0:
      location = messaging.recv_sock(location_sock)
      cloudlog.event("STATUS_PACKET",
                     count=count,
                     pandaState=(strip_deprecated_keys(pandaState.to_dict()) if pandaState else None),
                     location=(strip_deprecated_keys(location.gpsLocationExternal.to_dict()) if location else None),
                     deviceState=strip_deprecated_keys(msg.to_dict()))

    count += 1
def main():
  # Entry point: run the monitoring loop forever.
  thermald_thread()


if __name__ == "__main__":
  main()
| [
""
] | |
0c3d842557c9376a3e85eb48319735d211b4170d | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/Naca/main.py | 5645aca52186859c629e2d833d00d1e431940170 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 432 | py | T = int(input());
# Google Code Jam "Counting Sheep": read one counting base N per test case.
data = [];
for i in range(T) :
    data.append(int(input()));
for i in range(T) :
    if (data[i] == 0) :
        # Counting by 0 never reveals any digits.
        print("Case #" + str(i + 1) + ": INSOMNIA");
    else :
        digits = [];
        sumN = data[i];
        # Accumulate multiples of N, collecting the decimal digits seen,
        # until all ten digits 0-9 have appeared; the answer printed after
        # this loop is the last multiple processed (sumN - data[i]).
        while (len(digits) < 10) :
            tmp = sumN;
            while (tmp > 0) :
                if (tmp % 10 not in digits) :
                    digits.append(tmp % 10);
                tmp //= 10;
            sumN += data[i];
print("Case #" + str(i + 1) + ": " + str(sumN - data[i])); | [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
4a8c1adfb8aa3073a222cc3288e94790b685ca45 | 15dffdb8fa0cf1782cbc8c18cd5d1c7c31aa07a8 | /learners/maml_learner.py | d176869af5d2c89efe41aaff525c0d684d93230e | [] | no_license | jinxu06/binary-pixelcnn | c5becd70ea440727dc6868ea27c236ca07c84215 | d2e0ae24b6da9482a042a6f2ef1acf9aa24e1c92 | refs/heads/master | 2020-03-22T12:53:50.769396 | 2018-07-30T02:00:59 | 2018-07-30T02:00:59 | 140,069,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,217 | py | import sys
import random
import numpy as np
import tensorflow as tf
from learners.learner import Learner
from blocks.optimizers import adam_updates
import matplotlib.pyplot as plt
plt.style.use("ggplot")
from blocks.plots import sort_x
# from blocks.plots import visualize_func
# from data.dataset import Dataset
class MAMLLearner(Learner):
    def __init__(self, session, parallel_models, optimize_op, train_set=None, eval_set=None, variables=None, lr=0.001, device_type='gpu', save_dir="test"):
        """Set up MAML meta-training over several model replicas.

        Sums the per-replica gradients and replaces `optimize_op` with an
        Adam update over the aggregated gradients.

        Args:
            session: tf.Session used for all runs.
            parallel_models: one model replica per meta-batch task; each exposes `.grads`.
            optimize_op: passed to the base Learner but overwritten below.
            train_set / eval_set: task samplers with a `.sample(n)` method.
            variables: list of tf variables the Adam op updates.
            lr: Adam learning rate.
            device_type: 'gpu' or 'cpu'; gradient aggregation is placed on device 0.
            save_dir: directory used by run_eval to restore checkpoints.
        """
        super().__init__(session, parallel_models, optimize_op, train_set, eval_set, variables)
        self.lr = lr
        self.save_dir = save_dir
        # One gradient list per replica, aligned index-by-index with `variables`.
        grads = []
        for i in range(self.nr_model):
            grads.append(self.parallel_models[i].grads)
        with tf.device('/' + device_type + ':0'):
            # Accumulate all replicas' gradients into grads[0], variable by variable.
            for i in range(1, self.nr_model):
                for j in range(len(grads[0])):
                    grads[0][j] += grads[i][j]
        self.aggregated_grads = grads[0]
        self.optimize_op = adam_updates(variables, self.aggregated_grads, lr=self.lr)
    def set_session(self, sess):
        """Attach the tf.Session this learner should use for all graph runs."""
        self.session = sess
    def get_session(self):
        """Return the tf.Session currently attached to this learner."""
        return self.session
def train(self, meta_batch, num_shots, test_shots):
assert meta_batch==self.nr_model, "nr_model != meta_batch"
tasks = self.train_set.sample(meta_batch)
feed_dict = {}
for i, task in enumerate(tasks):
num_shots = np.random.randint(low=5, high=30)
test_shots = np.random.randint(low=5, high=30)
#num_shots, test_shots = 20, 10
test_shots = 50
X_value, y_value = task.sample(num_shots+test_shots)
X_c_value, X_t_value = X_value[:num_shots], X_value[num_shots:]
y_c_value, y_t_value = y_value[:num_shots], y_value[num_shots:]
feed_dict.update({
self.parallel_models[i].X_c: X_c_value,
self.parallel_models[i].y_c: y_c_value,
self.parallel_models[i].X_t: X_value,
self.parallel_models[i].y_t: y_value,
self.parallel_models[i].is_training: True,
})
self.get_session().run(self.optimize_op, feed_dict=feed_dict)
def evaluate(self, eval_samples, num_shots, test_shots):
ls = []
for _ in range(eval_samples):
num_shots = np.random.randint(low=5, high=30)
test_shots = np.random.randint(low=5, high=30)
#num_shots, test_shots = 20, 10
test_shots = 50
X_value, y_value = self.eval_set.sample(1)[0].sample(num_shots+test_shots)
X_c_value, X_t_value = X_value[:num_shots], X_value[num_shots:]
y_c_value, y_t_value = y_value[:num_shots], y_value[num_shots:]
l = [m.compute_loss(self.get_session(), X_c_value, y_c_value, X_value, y_value, is_training=False) for m in self.parallel_models]
ls.append(l)
return np.mean(ls)
    def test(self, num_function, num_shots, test_shots, epoch=1, input_range=(-2., 2.)):
        """Plot qualitative predictions on sampled tasks and save a PDF figure.

        Draws ground truth, the context points, and the first replica's
        predictions after 1 / 5 / 10 inner-loop adaptation steps, one subplot
        per task. Saves to figs/maml-<dataset>-<epoch>.pdf.

        Note: `num_shots` and `input_range` are effectively unused -- the
        context size follows the fixed schedule `c` below and the x range
        comes from `self.eval_set.input_range`.
        """
        fig = plt.figure(figsize=(10,10))
        # a = int(np.sqrt(num_function))
        for i in range(num_function):
            # ax = fig.add_subplot(a,a,i+1)
            ax = fig.add_subplot(4,3,i+1)
            sampler = self.eval_set.sample(1)[0]
            # Context-size schedule, cycled across the panels.
            c = [1, 4, 8, 16, 32, 64]
            num_shots = c[(i%6)]
            X_value, y_value = sampler.sample(num_shots+test_shots)
            X_c_value, X_t_value = X_value[:num_shots], X_value[num_shots:]
            y_c_value, y_t_value = y_value[:num_shots], y_value[num_shots:]
            m = self.parallel_models[0]
            X_gt, y_gt = sampler.get_all_samples()
            ax.plot(*sort_x(X_gt[:,0], y_gt), "-")
            ax.scatter(X_c_value[:,0], y_c_value)
            X_eval = np.linspace(self.eval_set.input_range[0], self.eval_set.input_range[1], num=100)[:,None]
            # step 1
            y_hat = m.predict(self.session, X_c_value, y_c_value, X_eval, step=1)
            ax.plot(X_eval[:,0], y_hat, ":", color='gray', alpha=0.3)
            # step 5
            y_hat = m.predict(self.session, X_c_value, y_c_value, X_eval, step=5)
            ax.plot(X_eval[:,0], y_hat, "--", color='gray', alpha=0.3)
            # step 10
            y_hat = m.predict(self.session, X_c_value, y_c_value, X_eval, step=10)
            ax.plot(X_eval[:,0], y_hat, "-", color='gray', alpha=0.3)
        fig.savefig("figs/maml-{0}-{1}.pdf".format(self.eval_set.dataset_name, epoch))
        plt.close()
def run_eval(self, num_func, num_shots=1, test_shots=50):
    """Restore the saved checkpoint, report the mean test MSE over
    `num_func` sampled tasks, then save one qualitative figure.

    Parameters
    ----------
    num_func : number of tasks to average the MSE over.
    num_shots : context-set size used for the MSE computation.
    test_shots : target-set size used for the MSE computation.
    """
    m = self.parallel_models[0]
    saver = tf.train.Saver(var_list=self.variables)
    ckpt_file = self.save_dir + '/params.ckpt'
    print('restoring parameters from', ckpt_file)
    saver.restore(self.session, ckpt_file)
    evals = []
    for _ in range(num_func):
        sampler = self.eval_set.sample(1)[0]
        X_value, y_value = sampler.sample(num_shots+test_shots)
        X_c_value, X_t_value = X_value[:num_shots], X_value[num_shots:]
        y_c_value, y_t_value = y_value[:num_shots], y_value[num_shots:]
        # Predict the targets after 10 inner-loop adaptation steps.
        y_t_hat = m.predict(self.session, X_c_value, y_c_value, X_t_value, step=10)
        evals.append(np.mean(np.power(y_t_value - y_t_hat, 2)))
    # nanmean: a task can yield NaN loss; renamed from `eval`, which
    # shadowed the Python builtin.
    mean_mse = np.nanmean(evals)
    print(".......... EVAL : num_func {0} num_shots {1} test_shots {2}............".format(num_func, num_shots, test_shots))
    print("\t{0}".format(mean_mse))
    # ---- qualitative figure: 4 rows, one per context size in [5,10,15,20] ----
    fig = plt.figure(figsize=(10,10))
    for i in range(4):
        # ax = fig.add_subplot(a,a,i+1)
        ax = fig.add_subplot(4,1,i+1)
        sampler = self.eval_set.sample(1)[0]
        c = [5, 10, 15, 20]
        num_shots = c[(i%4)]
        test_shots = 0
        X_value, y_value = sampler.sample(num_shots+test_shots)
        X_c_value, X_t_value = X_value[:num_shots], X_value[num_shots:]
        y_c_value, y_t_value = y_value[:num_shots], y_value[num_shots:]
        m = self.parallel_models[0]
        X_gt, y_gt = sampler.get_all_samples()
        ax.plot(*sort_x(X_gt[:,0], y_gt), "-")  # ground-truth curve
        ax.scatter(X_c_value[:,0], y_c_value)   # context points
        X_eval = np.linspace(self.eval_set.input_range[0], self.eval_set.input_range[1], num=100)[:,None]
        # predictions after 1 / 5 / 10 adaptation steps
        y_hat = m.predict(self.session, X_c_value, y_c_value, X_eval, step=1)
        ax.plot(X_eval[:,0], y_hat, ":", color='gray', alpha=0.3)
        y_hat = m.predict(self.session, X_c_value, y_c_value, X_eval, step=5)
        ax.plot(X_eval[:,0], y_hat, "--", color='gray', alpha=0.3)
        y_hat = m.predict(self.session, X_c_value, y_c_value, X_eval, step=10)
        ax.plot(X_eval[:,0], y_hat, "-", color='gray', alpha=0.3)
    fig.savefig("figs/maml-{0}-{1}.pdf".format(self.eval_set.dataset_name, "eval"))
    plt.close()
def run(self, num_epoch, eval_interval, save_interval, eval_samples, meta_batch, num_shots, test_shots, load_params=False):
    """Outer training driver: 1000 meta-updates per epoch with periodic
    evaluation, qualitative-figure dumps and checkpointing.

    Parameters
    ----------
    num_epoch : total number of epochs (1000 meta-batches each).
    eval_interval / save_interval : epoch periods for evaluation and for
        figure + checkpoint saving, respectively.
    load_params : if True, resume from save_dir/params.ckpt first.
    """
    num_figures = 12  # subplot count handed to self.test (its 4x3 grid)
    saver = tf.train.Saver(var_list=self.variables)
    if load_params:
        ckpt_file = self.save_dir + '/params.ckpt'
        print('restoring parameters from', ckpt_file)
        saver.restore(self.session, ckpt_file)
    self.test(num_figures, num_shots, test_shots, epoch=0)  # baseline figure before training
    for epoch in range(1, num_epoch+1):
        self.qclock()  # presumably starts/resets a wall-clock timer -- confirm
        for k in range(1000):
            self.train(meta_batch, num_shots, test_shots)
        train_time = self.qclock()  # elapsed seconds for this epoch
        print("Epoch {0}: {1:0.3f}s ...................".format(epoch, train_time))
        if epoch % eval_interval == 0:
            v = self.evaluate(eval_samples, num_shots, test_shots)
            print(" Eval Loss: ", v)
        if epoch % save_interval == 0:
            print("\tsave figure")
            self.test(num_figures, num_shots, test_shots, epoch=epoch)
            print("\tsave checkpoint")
            saver.save(self.session, self.save_dir + '/params.ckpt')
        sys.stdout.flush()
| [
"aaron.jin.xu@gmail.com"
] | aaron.jin.xu@gmail.com |
018d19b621c159f89c4517aa9df136159ebc55b2 | 997645d6bb9c404f2f195328f29afa0eaa3c55b4 | /profiling/run_profile.py | 9dc53e081ca3be9c68699ab3a3bd28528130a0b7 | [
"MIT"
] | permissive | piccolo-orm/piccolo | e43ea13c05c53ac00d9d20474c53ad2c49a40e80 | 83ea66323ef5a8e4010ea3ee19f34163bc881ace | refs/heads/master | 2023-08-08T19:31:37.783445 | 2023-07-28T06:19:35 | 2023-07-28T06:19:35 | 155,008,334 | 1,139 | 90 | MIT | 2023-09-08T16:54:45 | 2018-10-27T20:53:26 | Python | UTF-8 | Python | false | false | 829 | py | import asyncio
from viztracer import VizTracer
from piccolo.columns.column_types import Varchar
from piccolo.engine.postgres import PostgresEngine
from piccolo.table import Table
DB = PostgresEngine(config={"database": "piccolo_profile"})
class Band(Table, db=DB):
    # Minimal single-column table used to profile a simple select query.
    name = Varchar()
async def setup():
    """Recreate the Band table from scratch and seed it with 1000 rows."""
    await Band.alter().drop_table(if_exists=True)
    await Band.create_table(if_not_exists=True)
    await Band.insert(*[Band(name="test") for _ in range(1000)])
class Trace:
    """Context manager that records a VizTracer profile of the enclosed
    block and saves it to disk on exit."""

    def __enter__(self):
        # log_async=True so coroutine scheduling shows up in the trace.
        self.tracer = VizTracer(log_async=True)
        self.tracer.start()
        # Fixed: __enter__ previously returned None, so `with Trace() as t:`
        # could not access the manager. Existing `with Trace():` callers
        # are unaffected.
        return self

    def __exit__(self, *args):
        self.tracer.stop()
        self.tracer.save()
async def run_queries():
    """Prepare the table, then profile a single Band.select() query."""
    await setup()
    with Trace():
        await Band.select()

if __name__ == "__main__":
    asyncio.run(run_queries())
| [
"noreply@github.com"
] | noreply@github.com |
7f06aa3882b4fc1e0e5f3f8bc66e51bcb16b8038 | 5730e8d500a65992bb21094ffed26e21ccc7c0fd | /augment_dnase_pipeline_outputs/metadata/aggregate_ataqc.py | 2963e434753969d97146b33d89c1eb86a8843a62 | [] | no_license | kundajelab/atlas_resources | 35f1df4c09356d7256a6667700b88020396d5642 | 89bcde11921526b9956be48bf367617db4974d31 | refs/heads/master | 2021-10-25T07:01:30.127405 | 2021-10-25T00:55:25 | 2021-10-25T00:55:25 | 160,546,622 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,942 | py | import argparse
import collections
import json
import pdb
def parse_args():
    """Build the command-line parser for the aggregation script and
    return the parsed arguments."""
    # (flag, add_argument kwargs) pairs, in the order they appear in --help.
    spec = [
        ("--ataqc_files",
         dict(default="/oak/stanford/groups/akundaje/projects/atlas/dnase_processed/aggregate_outputs/qc.json.txt")),
        ("--outf",
         dict(default="atlas.metadata.report.txt")),
        ("--mitra_prefix",
         dict(default="http://mitra.stanford.edu/kundaje/projects/atlas/")),
        ("--prefix_to_drop_for_oak",
         dict(default="/oak/stanford/groups/akundaje/projects/atlas/")),
        ("--hash_to_id",
         dict(default="/oak/stanford/groups/akundaje/projects/atlas/dnase_processed/processed_all.txt")),
        ("--fname_hash_index",
         dict(type=int, default=9)),
    ]
    cli = argparse.ArgumentParser(description="aggregate ataqc metrics for all samples in a single report")
    for flag, options in spec:
        cli.add_argument(flag, **options)
    return cli.parse_args()
def flatten(d, parent_key='', sep='.'):
    """Flatten a nested mapping into a single-level dict whose keys are
    the original key paths joined by `sep` (e.g. {"a": {"b": 1}} ->
    {"a.b": 1}).

    Parameters
    ----------
    d : mapping to flatten (values may themselves be mappings).
    parent_key : key prefix accumulated by the recursion.
    sep : separator inserted between path components.
    """
    # Fixed: `collections.MutableMapping` was removed in Python 3.10;
    # the abstract base classes live in collections.abc.
    from collections.abc import MutableMapping
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def iterate_json(data, val_dict, all_keys, cur_id):
    """Merge the flattened metrics of one qc JSON blob into
    val_dict[cur_id], registering every metric name in all_keys.

    Mutates val_dict and all_keys in place; returns both for convenience.
    """
    bucket = val_dict[cur_id]
    for metric, value in flatten(data).items():
        # set.add is idempotent, so no membership check is needed.
        all_keys.add(metric)
        bucket[metric] = value
    return val_dict, all_keys
def main():
    """Read every per-sample ataqc qc.json, flatten its metrics, and
    write one tab-separated report row per dataset.

    Side effects: prints one "<id> : <report url>" line per input file
    and writes the aggregate table to args.outf.
    """
    args = parse_args()
    # List of per-sample qc.json paths, one per line.
    # Fixed: file handles from open(...).read() were never closed.
    with open(args.ataqc_files, 'r') as f:
        ataqc_files = f.read().strip().split('\n')
    # Map processing-hash -> dataset id (tab-separated, hash first).
    hash_to_id_dict = dict()
    with open(args.hash_to_id, 'r') as f:
        for line in f.read().strip().split('\n'):
            tokens = line.split('\t')
            hash_to_id_dict[tokens[0]] = tokens[1]
    val_dict = dict()
    all_keys = set([])
    for fname in ataqc_files:
        with open(fname, 'r') as cur_f:
            data = json.load(cur_f)
        # Rewrite the oak filesystem path into the public mitra URL of
        # the corresponding html report.
        report_title = fname.replace(args.prefix_to_drop_for_oak, args.mitra_prefix).replace(".json", ".html")
        # The processing hash is a fixed path component of fname.
        cur_hash = fname.split('/')[args.fname_hash_index]
        cur_id = hash_to_id_dict[cur_hash]
        print(cur_id+" : "+report_title)
        val_dict[cur_id] = dict()
        val_dict[cur_id]['path'] = report_title
        all_keys.add('path')
        # Recursively flatten the json file's metrics into val_dict.
        val_dict, all_keys = iterate_json(data, val_dict, all_keys, cur_id)
    # Write the aggregate matrix: one row per dataset, one column per metric.
    all_keys = list(all_keys)
    # Fixed: outf is now a context manager, so it is closed (and flushed)
    # even if a write fails part-way through.
    with open(args.outf, 'w') as outf:
        outf.write('Dataset')
        for key in all_keys:
            outf.write('\t'+key)
        outf.write('\n')
        for dataset in val_dict:
            outf.write(dataset)
            for key in all_keys:
                if key in val_dict[dataset]:
                    outf.write('\t'+str(val_dict[dataset][key]))
                else:
                    outf.write('\tNA')  # metric absent for this dataset
            outf.write('\n')

if __name__ == "__main__":
    main()
| [
"annashcherbina@gmail.com"
] | annashcherbina@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.