| blob_id (stringlengths 40) | directory_id (stringlengths 40) | path (stringlengths 2–616) | content_id (stringlengths 40) | detected_licenses (listlengths 0–69) | license_type (stringclasses 2) | repo_name (stringlengths 5–118) | snapshot_id (stringlengths 40) | revision_id (stringlengths 40) | branch_name (stringlengths 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (stringclasses 23) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 213) | src_encoding (stringclasses 30) | language (stringclasses 1) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (stringclasses 246) | content (stringlengths 2–10.3M) | authors (listlengths 1) | author_id (stringlengths 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 13fc2d742161aea7d1a51f351cac30e21bcd181e | 172eb751b879d36d95b04d81db87a501cd18d8a1 | /ImageNetGroundTruth/utils.py | ade17f70a1d804e602c97056b666102575e5f3e0 | [] | no_license | byh1321/Pruning_Quantization_Estimation | 447bd3d806fe17611d665e56d7796af4e05ee400 | 772969c5a58259e387c88829dd936274199212e8 | refs/heads/master | 2023-05-03T19:25:29.957732 | 2021-06-03T17:53:38 | 2021-06-03T17:53:38 | 289,804,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,298 | py |
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import scipy.misc
from scipy import ndimage
import numpy as np
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
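    # batch_size=1, so each iteration adds one image's per-channel mean/std;
    # dividing by len(dataset) below yields the average over the dataset.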
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 40.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
##########################################################################
# Code below this line was written by YH.Byun
def print_4Dtensor_to_png(tensor, filename):
npimg = np.array(tensor,dtype=float)
npimg = npimg.squeeze(0)
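    # NOTE: scipy.misc.toimage is deprecated and removed in newer SciPy
    # releases; recent code would use PIL.Image.fromarray instead.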
scipy.misc.toimage(npimg).save(filename+".png")
def genblurkernel(sigma):
order = 0
radius = int(4 * float(sigma) + 0.5)
kernel = scipy.ndimage.filters._gaussian_kernel1d(sigma, order, radius)
return kernel
def setMask(net, area, val):
mask = maskGen(net)
for i in range(len(mask)):
num_filter = mask[i].size()[0]
depth = mask[i].size()[1]
if len(mask[i].size()) == 2:
if i == (len(mask)-1):
mask[i][:,0:int(depth*area)] = val
#print(mask[i].size())
#print('0, ',depth*area)
else:
mask[i][0:int(num_filter*area),0:int(depth*area)] = val
#print(mask[i].size())
#print(num_filter*area,',',depth*area)
elif len(mask[i].size()) == 4:
if i == 0:
mask[i][0:int(num_filter*area),:,:,:] = val
#print(mask[i].size())
#print(num_filter*area,',0,0,0')
else:
mask[i][0:int(num_filter*area),0:int(depth*area),:,:] = val
#print(mask[i].size())
#print(num_filter*area,',',depth*area,',0,0')
return mask
def saveInitialParameter(net, initparam):
net_param = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
net_param.append(m.weight.data)
elif isinstance(m, nn.Linear):
net_param.append(m.weight.data)
torch.save(net_param, initparam)
print("saving initial parameters")
def quantize(net, pprec):
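    # Round weights onto a fixed-point grid with step 2**-pprec, then clamp to
    # the representable range [-2, 2 - 2**-pprec].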
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.round(m.weight.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.weight.data = torch.clamp(m.weight.data, -2, 2 - 2**(-pprec))
elif isinstance(m, nn.Linear):
m.weight.data = torch.round(m.weight.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.weight.data = torch.clamp(m.weight.data, -2, 2 - 2**(-pprec))
return net
def printLayers(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
print(m)
elif isinstance(m, nn.Linear):
print(m)
def maskGen(net, isbias=0, isempty = 1):
mask = []
if isempty:
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append(torch.zeros(m.weight.data.size()))
if isbias == 1:
mask.append(torch.zeros(m.bias.data.size()))
#print(torch.zeros(m.weight.data.size()).size())
elif isinstance(m, nn.Linear):
mask.append(torch.zeros(m.weight.data.size()))
if isbias == 1:
mask.append(torch.zeros(m.bias.data.size()))
#print(torch.zeros(m.weight.data.size()).size())
else:
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append(torch.ones(m.weight.data.size()))
if isbias == 1:
mask.append(torch.ones(m.bias.data.size()))
#print(torch.ones(m.weight.data.size()).size())
elif isinstance(m, nn.Linear):
mask.append(torch.ones(m.weight.data.size()))
if isbias == 1:
mask.append(torch.zeros(m.bias.data.size()))
#print(torch.ones(m.weight.data.size()).size())
return mask
def pruneNetwork(net, mask):
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.grad.data = torch.mul(m.weight.grad.data,mask[index].cuda())
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.grad.data = torch.mul(m.weight.grad.data,mask[index].cuda())
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
return net
def paramsGet(net):
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
if index == 0:
params = m.weight.view(-1,)
index += 1
else:
params = torch.cat((params,m.weight.view(-1,)),0)
index += 1
elif isinstance(m, nn.Linear):
params = torch.cat((params,m.weight.view(-1,)),0)
index += 1
return params
def findThreshold(params, pr):
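    # Scan thresholds upward in 1e-4 steps until more than pr percent of the
    # weights fall below the threshold, then return it.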
thres=0
while 1:
tmp = (torch.abs(params.data)<thres).type(torch.FloatTensor)
result = torch.sum(tmp)/params.size()[0]
if (pr/100)<result:
#print("threshold : {}".format(thres))
return thres
else:
thres += 0.0001
#def findThreshold(params, pr):
# params_sorted, indice = torch.sort(params)
# index = int(pr * params.size()[0] / 100)
# print(params_sorted[13228760])
# print(params.size())
# print(index)
# return params_sorted[index].item()
def getPruningMask(net, thres):
index = 0
mask = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append((torch.abs(m.weight.data)>thres).type(torch.FloatTensor))
index += 1
elif isinstance(m, nn.Linear):
mask.append((torch.abs(m.weight.data)>thres).type(torch.FloatTensor))
index += 1
return mask
def netMaskMul(net, mask, isbias=0, isbatch=0):
index = 0
if isbatch:
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
m.bias.data = torch.mul(m.bias.data,mask[index].cuda())
index += 1
else:
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.mul(m.bias.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.mul(m.bias.data,mask[index].cuda())
index += 1
return net
def addNetwork(net, net2, isbias=0):
index = 0
mask = saveNetwork(net2, isbias)
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
return net
def netMaskAdd(net, mask, isbias=0, isbatch=0):
index = 0
if isbatch:
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
else:
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
return net
def saveNetwork(net, isbias=0):
mask = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append(m.weight.data)
if isbias:
mask.append(m.bias.data)
elif isinstance(m, nn.Linear):
mask.append(m.weight.data)
if isbias:
mask.append(m.bias.data)
return mask
def saveBatch(net, isempty=0):
mask = []
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
if isempty:
mask.append(torch.zeros(m.weight.size()))
mask.append(torch.zeros(m.bias.size()))
else:
mask.append(m.weight.data)
mask.append(m.bias.data)
return mask
def printLayerName(net):
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
print(index, " : Conv2d layer, ", m.weight.size())
index += 1
elif isinstance(m, nn.Linear):
print(index, " : FC layer, ", m.weight.size())
index += 1
elif isinstance(m, nn.BatchNorm2d):
print(index, " : BatchNorm2d layer, ", m.weight.size())
index += 1
return net
def freezeNetwork(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
for param in m.parameters():
param.requires_grad = False
elif isinstance(m, nn.Linear):
for param in m.parameters():
param.requires_grad = False
elif isinstance(m, nn.BatchNorm2d):
for param in m.parameters():
param.requires_grad = False
return net
def absorb_bn(module, bn_module):
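    # Fold BatchNorm into the preceding conv: w' = w * gamma / sqrt(var + eps),
    # b' = (b - mean) * gamma / sqrt(var + eps) + beta, then reset the BN layer
    # so it acts as an identity.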
w = module.weight.data
if module.bias is None:
zeros = torch.Tensor(module.out_channels).zero_().type(w.type())
module.bias = nn.Parameter(zeros)
b = module.bias.data
invstd = bn_module.running_var.clone().add_(bn_module.eps).pow_(-0.5)
w.mul_(invstd.view(w.size(0), 1, 1, 1).expand_as(w))
b.add_(-bn_module.running_mean).mul_(invstd)
if bn_module.affine:
w.mul_(bn_module.weight.data.view(w.size(0), 1, 1, 1).expand_as(w))
b.mul_(bn_module.weight.data).add_(bn_module.bias.data)
bn_module.register_buffer('running_mean', torch.zeros(module.out_channels).cuda())
bn_module.register_buffer('running_var', torch.ones(module.out_channels).cuda())
bn_module.register_parameter('weight', None)
bn_module.register_parameter('bias', None)
bn_module.affine = False
def is_bn(m):
return isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d)
def is_absorbing(m):
return isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear)
def search_absorbe_bn(model):
prev = None
for m in model.children():
if is_bn(m) and is_absorbing(prev):
m.absorbed = True
absorb_bn(prev, m)
search_absorbe_bn(m)
prev = m
#swap bias in net with bias in net2
def swapBias(net, net2):
mask_bias = saveBias(net2)
mask_bias_null = saveBias(net2, isempty=1)
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
index += 1
return net
def saveBias(net, isempty=0):
mask = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
if isempty:
mask.append(torch.zeros(m.bias.data.size()))
else:
mask.append(m.bias.data)
elif isinstance(m, nn.Linear):
if isempty:
mask.append(torch.zeros(m.bias.data.size()))
else:
mask.append(m.bias.data)
return mask
def concatMask(mask1, mask2):
    for i in range(len(mask1)):
        mask1[i] = ((mask1[i] + mask2[i]) != 0).type(torch.FloatTensor)
    return mask1
def getExtendedMask(mask):
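    # Collapse each layer's mask into groups of three channel entries,
    # zero-padding when the channel count is not a multiple of 3; a group is
    # marked active (1.0) if any of its three entries is nonzero.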
index = torch.FloatTensor()
for i in range(len(mask)):
if mask[i].dim() == 4:
mask_size = mask[i].size()[0] * mask[i].size()[1] * mask[i].size()[2] * mask[i].size()[3]
if mask[i].size()[2] == 1:
if mask[i].size()[1] % 3 == 1:
index_for_print = torch.zeros(mask[i].size()[0], mask[i].size()[1]+2,1,1)
index_for_print[:,:-2,:,:] = mask[i].data
elif mask[i].size()[1] % 3 == 2:
index_for_print = torch.zeros(mask[i].size()[0], mask[i].size()[1]+1,1,1)
index_for_print[:,:-1,:,:] = mask[i].data
else:
index_for_print = mask[i].data
index_for_print = index_for_print.view(-1,3)
index_for_print = (torch.sum(index_for_print, dim=1) != 0).type(torch.FloatTensor)
index = torch.cat((index, index_for_print),0)
else:
index_for_print = mask[i].data
index_for_print = index_for_print.view(-1,3)
index_for_print = (torch.sum(index_for_print, dim=1) != 0).type(torch.FloatTensor)
index = torch.cat((index, index_for_print),0)
else:
mask_size = mask[i].size()[0] * mask[i].size()[1]
index_for_print = torch.zeros(mask[i].size()[0], mask[i].size()[1] + 1)
index_for_print[:,:-1] = mask[i].data
index_for_print = index_for_print.view(-1,3)
index_for_print = (torch.sum(index_for_print, dim=1) != 0).type(torch.FloatTensor)
index = torch.cat((index, index_for_print),0)
return index
def quantBatch(net, intbit, pprec):
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.running_var.data = torch.round(m.running_var.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.running_var.data = torch.clamp(m.running_var.data, max=1, min=2**(-intbit))
m.weight.data = torch.round(m.weight.data / (2 ** -(15))) * (2 ** -(15))
m.weight.data = torch.clamp(m.weight.data,-(2) ** intbit, 2 ** intbit)
m.bias.data = torch.round(m.bias.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.bias.data = torch.clamp(m.bias.data,-(2) ** intbit, 2 ** intbit)
m.running_mean.data = torch.round(m.running_mean.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.running_mean.data = torch.clamp(m.running_mean.data,-(2) ** intbit, 2 ** intbit)
return net
def swapBiasandBatch(net, net2):
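    # NOTE: unfinished as written -- saveBias() takes no isbatch argument, and
    # mask_weight/mask_weight_null and the mask_running_* lists used in the
    # BatchNorm2d branch below are never defined.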
mask_bias = saveBias(net2, isbatch=1)
mask_bias_null = saveBias(net2, isempty=1, isbatch=1)
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
index += 1
elif isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.mul(m.weight.data,mask_weight_null[index].cuda())
m.weight.data = torch.add(m.weight.data,mask_weight[index].cuda())
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
m.running_mean.data = torch.mul(m.running_mean.data,mask_running_mean_null[index].cuda())
m.running_mean.data = torch.add(m.running_mean.data,mask_running_mean[index].cuda())
m.running_var.data = torch.mul(m.running_var.data,mask_running_var_null[index].cuda())
m.running_var.data = torch.add(m.running_var.data,mask_running_var[index].cuda())
return net
def swapBatch(net, net2):
mask_batch = saveBatch(net2)
mask_batch_null = saveBatch(net2, isempty=1)
index = 0
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.mul(m.weight.data,mask_batch_null[index].cuda())
m.weight.data = torch.add(m.weight.data,mask_batch[index].cuda())
index += 1
m.bias.data = torch.mul(m.bias.data,mask_batch_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_batch[index].cuda())
index += 1
m.running_mean.data = torch.mul(m.running_mean.data,mask_batch_null[index].cuda())
m.running_mean.data = torch.add(m.running_mean.data,mask_batch[index].cuda())
index += 1
m.running_var.data = torch.mul(m.running_var.data,mask_batch_null[index].cuda())
m.running_var.data = torch.add(m.running_var.data,mask_batch[index].cuda())
index += 1
return net
def saveBatch(net, isempty=0):
mask = []
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
if isempty:
mask.append(torch.zeros(m.weight.data.size()))
mask.append(torch.zeros(m.bias.data.size()))
mask.append(torch.zeros(m.running_mean.data.size()))
mask.append(torch.zeros(m.running_var.data.size()))
else:
mask.append(m.weight.data)
mask.append(m.bias.data)
mask.append(m.running_mean.data)
mask.append(m.running_var.data)
return mask
def printFeature(feature, filename):
f = open(filename, 'w')
for i in range(feature.data.size()[1]):
for j in range(feature.data.size()[2]):
for k in range(feature.data.size()[3]):
print(feature.data[0,i,j,k].item(), file=f, end=',')
print('',file=f)
print('',file=f)
f.close()
return
def printconv1_0(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
print(m.weight[0])
try:
print(m.bias[0])
except:
print("There is no bias")
pass
return
def printbatch1(net):
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
print(m.weight)
print(m.bias)
print(m.running_mean)
print(m.running_var)
return
def printlinear1_0(net):
for m in net.modules():
if isinstance(m, nn.Linear):
print(m.weight[0])
try:
print(m.bias[0])
except:
print("There is no bias")
pass
return
def float_to_hex(float_):
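    # Interpret the value as signed fixed point with 7 fractional bits and
    # return the low byte (two's complement) as two hex digits.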
temp = float_ * 2**7 # Scale the number up.
temp = torch.round(temp) # Turn it into an integer.
temp = int(temp)
temp = temp & 0xff
return '{:02x}'.format(temp)
def float_to_hex_16(float_):
temp = float_ * 2**8 # Scale the number up.
temp = torch.round(temp) # Turn it into an integer.
temp = int(temp)
temp = temp & 0xffff
return '{:04x}'.format(temp)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
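        # correct has shape [maxk, batch_size]; summing the first k rows counts
        # samples whose target appears among the top-k predictions.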
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
from math import cos, pi
def adjust_learning_rate(optimizer, epoch, iteration, num_iter, ne, init_lr):
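    # Linear warmup over the first 5 epochs, then half-cosine decay from
    # init_lr toward 0 over the remaining iterations.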
lr = optimizer.param_groups[0]['lr']
warmup_epoch = 5
warmup_iter = warmup_epoch * num_iter
current_iter = iteration + epoch * num_iter
max_iter = ne * num_iter
lr = init_lr * (1 + cos(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))) / 2
if epoch < warmup_epoch:
lr = init_lr * current_iter / warmup_iter
for param_group in optimizer.param_groups:
param_group['lr'] = lr
| ["byh1321@naver.com"] | byh1321@naver.com |
| df82e709433df0b153edd7d9aea14060851ad2cf | c31c8095ce4d4e9686e3e7ad6b004342e49671fa | /forum/classes/archives/CLASS_Lieu.py | c5b8db114583e2f045264fd8b45f2735706e116e | [] | no_license | Lionalisk/arrakambre | 7bcc96dea2ca2a471572bfb1646256f1382ce25b | 2caece9be5eebf21ddfa87a6c821c32b5d5019a2 | refs/heads/master | 2020-12-07T19:31:24.471090 | 2020-01-09T10:14:29 | 2020-01-09T10:14:29 | 232,782,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py |
from django.db import models
from forum.models import Maison
from forum.classes.CLASS_Perso import *
print('BBBB')
class Lieu(models.Model):
nom = models.CharField(max_length=100, unique=True)
description = models.TextField(default='')
image = models.CharField(max_length=40, default = 'lieu_none.jpg')
maison = models.ForeignKey(Maison, verbose_name="Maison", null=True, on_delete=models.SET_NULL, blank=True)
passages = models.ManyToManyField('self', blank=True)
lieu_parent = models.ForeignKey('self', verbose_name="Lieu", null=True, on_delete=models.DO_NOTHING, blank=True)
dissimulation = models.SmallIntegerField(default=0)
defense_garde = models.SmallIntegerField(default=0)
defense_assault = models.SmallIntegerField(default=0)
defense_intrusion = models.SmallIntegerField(default=0)
    perso_autorise = models.ManyToManyField('Perso', blank=True, related_name = 'persos_autorises') # list of the people the master of the place has authorized to enter
secret = models.BooleanField(default=False)
proprietaire = models.ForeignKey('Perso', null=True, on_delete=models.SET_NULL, blank=True, related_name = 'proprietaire')
#action =
def __str__(self):
return self.nom
| ["lionel.varaire@free.fr"] | lionel.varaire@free.fr |
| c04720b7f2c90ddef000767741021aff00156ee6 | f05a08881b606d593bb76fa725d62187fb8e6cc0 | /cache_ensembl/cache_ensembl_version.py | ddb8c6109f3c0db85deb10e5082eaa4b9b65cad7 | [] | no_license | bunbun/cache-ensembl | 6cf109dd0a9f6dad15744d4583ab701f7bda5a35 | 02ce50016321fecb5f9f784c63ce4f8e5066d74b | refs/heads/master | 2021-01-23T13:58:36.493124 | 2011-12-06T21:45:04 | 2011-12-06T21:45:04 | 32,793,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py |
#!/usr/bin/env python
################################################################################
#
# version.py
#
#
# Copyright (c) 11/3/2010 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
__version__ = "1.0"
| ["bunbun68@localhost"] | bunbun68@localhost |
| ecbc2f6361f9a3096a212d2d31bb8b2868fa553e | bc29abf638643339025f2f9eebaec136f45deba6 | /CMDB/Equipment/views.py | a4552ecb9baec83287f52cb574a3b596dfaf0be1 | [] | no_license | enet01/CMDB | 351642106996681064f8b40e3e699664d678d38c | e0cab4c38c28c5d92f5658cfe132167d5b64afdf | refs/heads/master | 2021-05-08T12:04:20.605748 | 2018-02-07T12:24:51 | 2018-02-07T12:24:51 | 119,920,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,741 | py |
#coding:utf-8
import chardet
import paramiko
from django.shortcuts import render
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_protect,csrf_exempt
from models import *
from WhileCMDB.views import getpage
from django.shortcuts import HttpResponseRedirect
def eq_add(request):
pass
def eq_drop(request):
pass
def eq_alter(request):
pass
def eq_list(request):
if request.method == "GET":
requestData = request.GET
page = requestData.get("page")
num = requestData.get("num")
sql = "select * from Equipment_equipment"
if page and num:
result = getpage(sql = sql,page = page,num = num)
elif page :
result = getpage(sql=sql, page=page)
else:
result = {
"page_data": "",
"page_range": ""
}
else:
result = {
"page_data": "",
"page_range": ""
}
return JsonResponse(result)
def eq_list_page(request):
eq_List = Equipment.objects.all()
return render(request,"equipmentList.html",locals())
def eq_connect(request):
"""
connect 方法实现 远程登录
connect 方法实现 脚本上传
connect 方法实现 脚本远程执行
:param request:
:return:
"""
result = {"state":"error","data":""}
if request.method == "POST":
data = request.POST
ip = data.get("ip")
port = data.get("port")
username = data.get("username")
password = data.get("password")
if ip and port and username and password:
equpment = Equipment()
equpment.IP = ip
equpment.user = username
equpment.Password = password
try:
trans = paramiko.Transport(ip,port)
trans.connect(username = username,password = password)
                sftp = paramiko.SFTPClient.from_transport(trans)  # SFTP service for uploading and downloading files
                ssh = paramiko.SSHClient()  # service for executing commands remotely
                ssh._transport = trans
                # create the client directory on the remote host
                stdin,stdout,stderr = ssh.exec_command("mkdir CMDBClient")
                # upload the collection scripts
                sftp.put("sftpDir/getData.py","/root/CMDBClient/getData.py")
                sftp.put("sftpDir/sendData.py", "/root/CMDBClient/sendData.py")
                sftp.put("sftpDir/main.py", "/root/CMDBClient/main.py")
                # run the main script remotely
                stdin, stdout, stderr = ssh.exec_command("python /root/CMDBClient/main.py")
trans.close()
equpment.Statue = "True"
except:
equpment.Statue = "False"
finally:
equpment.save()
else:
pass
else:
pass
return JsonResponse(result)
@csrf_exempt
def eq_save(request):
ip = request.META["REMOTE_ADDR"]
if request.method == "POST":
data = request.POST
hostname = data.get("get_hostname")
system = data.get("get_system")
mac = data.get("get_mac")
equpment = Equipment.objects.get(IP = ip)
equpment.hostname = hostname
equpment.System = system
equpment.Mac = mac
equpment.save()
return JsonResponse({"state":"this only a test"})
terminal_dict = {}
def shell(request):
if request.method == "GET":
id = request.GET["id"]
if id:
equipment = Equipment.objects.get(id = int(id))
ip = equipment.IP
username = equipment.user
password = equipment.Password
if ip and username and password:
try:
result = {"status":"success","ip":ip,}
trans = paramiko.Transport(sock = (ip,22))
trans.connect(
username = username,
password = password
)
ssh = paramiko.SSHClient()
ssh._transport = trans
terminal = ssh.invoke_shell()
terminal.settimeout(2)
terminal.send("\n")
login_data = ""
while True:
try:
recv = terminal.recv(9999)
if recv:
login_data += recv
else:
continue
except:
break
result["data"] = login_data.replace("\r\n","<br>")
terminal_dict[ip] = terminal
response = render(request, "shell.html", locals())
response.set_cookie("ip",ip)
return response
except Exception as e:
print(e)
return HttpResponseRedirect("/eq/")
def command(request):
ip = request.COOKIES.get("ip")
if ip:
if request.method == "GET":
cmd = request.GET.get("command")
if cmd:
terminal = terminal_dict[ip]
terminal.send(cmd+"\n")
login_data = ""
while True:
try:
recv = terminal.recv(9999)
if recv:
line_list = recv.split("\r\n")
result_list= []
for line in line_list:
l = line.replace(u"\u001B","").replace("[01;34m","").replace("[0m","").replace("[01;32m","")
result_list.append(l)
login_data = "<br>".join(result_list)
else:
continue
except:
break
result = {"result":login_data}
return JsonResponse(result)
else:
return HttpResponseRedirect("/eq/")
else:
return HttpResponseRedirect("/eq/")
else:
return HttpResponseRedirect("/eq/")
# import random
# def add_eq(request):
# for i in range(100):
# e = Equipment()
# e.hostname = "localhost_%s"%i
# e.IP = "192.168.1.%s"%(i+2)
# e.System = random.choice(["win7_32","win7_64","centos.6_32","centos.7",])
# e.Statue = random.choice(["True","False"])
# e.Mac = random.choice(["00:0c:29:92:85:4e","00:0c:29:5b:2a:a1"])
# e.user = "root"
# e.Password = "123"
# e.save()
# return JsonResponse({"statue":"ahh"})
# Create your views here.
| ["root@xuegod62.cn"] | root@xuegod62.cn |
| 6900fdaae92eb7e538bb2dc5b81957fb00c5b18e | b7449f1162b5fb8ea371b80ef0d99154fac35620 | /Users/migrations/0001_initial.py | bf5f8dbe0ee72f9f6b0b3fab5414812eb9576641 | [] | no_license | shimaa3434/SafeBook | 93f69e5228adeae33adfb5a21d2c666b47d1a2b6 | 8ede2f9da4f6daf224fe203454525ff3d811ed51 | refs/heads/master | 2022-12-27T02:01:14.987227 | 2020-10-16T18:12:49 | 2020-10-16T18:12:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py |
# Generated by Django 2.2.5 on 2019-10-23 00:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Friend',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('current_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='owner', to=settings.AUTH_USER_MODEL)),
('users', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
),
]
| ["30625967+Shreyansh7499@users.noreply.github.com"] | 30625967+Shreyansh7499@users.noreply.github.com |
| 718c1a3aa265318be8f270943122a2fef285e6e9 | 59d48214613a195573b5a0a1f10b32c889172155 | /alexa/reciPullLambda/ask_sdk_model/canfulfill/can_fulfill_intent_request.py | 61ffc9fb00f47a05ab691639b45bca434c75fe2e | ["MIT"] | permissive | ReciPull/recipull.github.io | 60861ebb7a6d77d39907c6332e346194ce4ad107 | e6b800af02658bb7948297c4ddc1b7af6d978839 | refs/heads/master | 2023-01-08T19:03:11.864298 | 2019-06-13T05:07:39 | 2019-06-13T05:07:39 | 180,684,629 | 1 | 0 | MIT | 2022-12-09T22:33:18 | 2019-04-11T00:33:03 | Python | UTF-8 | Python | false | false | 6,414 | py |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.dialog_state import DialogState
from ask_sdk_model.intent import Intent
class CanFulfillIntentRequest(Request):
"""
An object that represents a request made to skill to query whether the skill can understand and fulfill the intent request with detected slots, before actually asking the skill to take action. Skill should be aware this is not to actually take action, skill should handle this request without causing side-effect, skill should not modify some state outside its scope or has an observable interaction with its calling functions or the outside world besides returning a value, such as playing sound,turning on/off lights, committing a transaction or a charge.
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
:type locale: (optional) str
:param dialog_state:
:type dialog_state: (optional) ask_sdk_model.dialog_state.DialogState
:param intent:
:type intent: (optional) ask_sdk_model.intent.Intent
"""
deserialized_types = {
'object_type': 'str',
'request_id': 'str',
'timestamp': 'datetime',
'locale': 'str',
'dialog_state': 'ask_sdk_model.dialog_state.DialogState',
'intent': 'ask_sdk_model.intent.Intent'
} # type: Dict
attribute_map = {
'object_type': 'type',
'request_id': 'requestId',
'timestamp': 'timestamp',
'locale': 'locale',
'dialog_state': 'dialogState',
'intent': 'intent'
} # type: Dict
def __init__(self, request_id=None, timestamp=None, locale=None, dialog_state=None, intent=None):
# type: (Optional[str], Optional[datetime], Optional[str], Optional[DialogState], Optional[Intent]) -> None
"""An object that represents a request made to skill to query whether the skill can understand and fulfill the intent request with detected slots, before actually asking the skill to take action. Skill should be aware this is not to actually take action, skill should handle this request without causing side-effect, skill should not modify some state outside its scope or has an observable interaction with its calling functions or the outside world besides returning a value, such as playing sound,turning on/off lights, committing a transaction or a charge.
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
:type locale: (optional) str
:param dialog_state:
:type dialog_state: (optional) ask_sdk_model.dialog_state.DialogState
:param intent:
:type intent: (optional) ask_sdk_model.intent.Intent
"""
self.__discriminator_value = "CanFulfillIntentRequest" # type: str
self.object_type = self.__discriminator_value
super(CanFulfillIntentRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp, locale=locale)
self.dialog_state = dialog_state
self.intent = intent
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, CanFulfillIntentRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| ["alexiscole@umail.ucsb.edu"] | alexiscole@umail.ucsb.edu |
| 0179e7a8a04e4b16368086eadecffb7dd7768d15 | d51010a7f51a9cb8bf307f7d6ebed8a9903cd7be | /backend/base/urls/product_urls.py | 6bf8f12f47dc186906b94797e0489eb0facebea2 | [] | no_license | seidiv/ecommerce | d435fed53187316baf944f54632e7579372ea075 | b5c7de1f635ec2f12213dbbe6367f890465f2f7b | refs/heads/master | 2023-07-13T19:30:14.831156 | 2021-08-24T06:25:01 | 2021-08-24T06:25:01 | 392,608,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py |
from django.urls import path
from base.views import product_views as views
urlpatterns = [
path('', views.getProducts, name="products"),
path('create/', views.createProduct, name="product-create"),
path('upload/', views.uploadImage, name="image-upload"),
path('<str:pk>/reviews/', views.createProductReview, name="create-review"),
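    # 'top/' must stay above '<str:pk>/' so the literal "top" is not captured as a product pk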
path('top/', views.getTopProducts, name='top-products'),
path('<str:pk>/', views.getProduct, name="product"),
path('update/<str:pk>/', views.updateProduct, name="product-update"),
path('delete/<str:pk>/', views.deleteProduct, name="product-delete"),
]
| ["sajadeydi8@gmail.com"] | sajadeydi8@gmail.com |
| 855119d2ca75bde3daef04448842f58070657e77 | 63e1c4a67d5317d945b284877b57560ab2ee0a1a | /TextGame_main.py | 2ae69326e45d0de6a04d60087357a93359d63d4a | [] | no_license | BugBiteSquared/pythonTextAdventure | 5cca0c60e47858da1d901ca11fb828bf34869ad3 | 6b07f55b1076946bb4296502b7dcd30d8a5d7e90 | refs/heads/master | 2021-05-03T10:48:26.911437 | 2018-07-31T17:14:44 | 2018-07-31T17:14:44 | 69,376,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,496 | py |
"""This is a work in progress. The game currently doesn't run."""
frontDoorOpen = False
balconyDoorOpen = False
class item(object):
name = None
description = None
class consumable(item):
    pass
class roomState(object):
"""There are 4 room states: kitchen, bedroom, living room, balcony"""
class livingRoomState(roomState):
"""Every room is connected to the living room. None of the other rooms are directly connected to each other."""
def enter(self):
balconyDoorState = 'open' if balconyDoorOpen else 'closed'
frontDoorState = 'open' if frontDoorOpen else 'closed'
print("You're in the living room. There's a couch and a TV. The kitchen area is behind you. The bedroom door is lone gone and it's doorway is clear.")
print("The door to the balcony is" + balconyDoorState "The door to the apartment is" + frontDoorState +".")
def exit(self):
print("")
class inventoryItem(object):
itemReference = None
itemCopyCount = None
def __init__(self):
self.itemCopyCount = 0
class inventory(object):
items = None # items contains a dictionary with string keys and dictionary values
maxNumItems = None
numItemsHeld = None
    def __init__(self):
self.items = {}
class playerInventory(inventory):
def __init__(self):
        super(playerInventory, self).__init__()
self.maxNumItems = 8
self.numItemsHeld = 0
    def insert(self, itemToInsert):
        if self.numItemsHeld < self.maxNumItems:
            if itemToInsert.name not in self.items:
                self.items[itemToInsert.name] = inventoryItem()
                self.items[itemToInsert.name].itemReference = itemToInsert
            self.items[itemToInsert.name].itemCopyCount += 1
            self.numItemsHeld += 1
        else:
            print("Inventory's full, sorry.")
    def remove(self, nameOfItemToDelete):
        if nameOfItemToDelete in self.items:
            if self.items[nameOfItemToDelete].itemCopyCount < 2:
                del self.items[nameOfItemToDelete]
            else:
                self.items[nameOfItemToDelete].itemCopyCount -= 1
            self.numItemsHeld -= 1
        else:
            print("Yeah, you don't have one of those in your inventory.")
    def getItemFromInv(self, nameOfItem):
        return self.items[nameOfItem].itemReference
def checkInventory(self):
print("This is what you have on you:")
for item in self.items:
print("Number of" + item + " : " + item.itemCopyCount)
class gameActor(object):
health = None
#methods: move,minusHealth,plusHealth
class npc(gameActor):
alignment = None #alignment = False -> enemy, True -> friend, None -> neutral
attackDmg = None
class evilRobot(npc):
alignment = False
attackDmg = 1
def __init__(self):
self.health = 10
def minusHealth(self, healthUnits):
self.health -= healthUnits
def plusHealth(self, healthUnits):
self.health += healthUnits
def attack(self, gameActorToAttack):
gameActorToAttack.minusHealth(self.attackDmg)
class player(gameActor):
equippedItemName = None
playerHealth = None
inventory = None
locationState = None
def __init__(self):
self.playerHealth = 100
self.inventory = playerInventory()
self.locationState = bedroomState()
actionVocab = {}  # TODO: vocabulary of recognized actions (still unfinished)
#parse returns a parse tree which the execute function can use
def parse(inputString):
    pass  # TODO: parsing not yet implemented
def getInput():
    lineRead = input('>>').split(" ")
    parsedActions = parse(lineRead)
    execute(parsedActions)  # NOTE: execute() is not defined in this file yet
if __name__ == '__main__':
playerOne = player()
while(True):
getInput()
| ["blackboxhazard@gmail.com"] | blackboxhazard@gmail.com |
| da6fa81c852b746e1fded343f4e04a7e146e335e | 39b8aa964883b2bde4349e0c9c38e3233c310548 | /src/Power of Four.py | 96d2db9a48b59d6376e2dbcb8be1027d9d34085f | [] | no_license | orifake/leetcode-python | 053b82491e0b8d6197dd12d92eec5883211285db | 8e375ebebe0a0285efefc33ed61afb22f41d0c75 | refs/heads/master | 2023-03-09T14:32:17.833456 | 2021-02-26T16:09:31 | 2021-02-26T16:09:31 | 264,466,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py |
import math
class Solution(object):
def isPowerOfFour(self, num):
"""
:type num: int
:rtype: bool
"""
return num > 0 and (num & (num - 1)) == 0 and \
((num & 0b01010101010101010101010101010101) == num)
class Solution2:
def isPowerOfFour(self, num: int) -> bool:
if num <= 0:
return False
return (math.log10(num) / math.log10(4)) % 1 == 0
t = Solution()
print(t.isPowerOfFour(4))
| ["349758699@qq.com"] | 349758699@qq.com |
| 07770f3574d74405c9660790d89873ae61cebd92 | b2e2277208f22fdd1654e7a2a19d49a0bdcb0ef6 | /twitterstream3.py | 0e5a9245d51619a2176e62ef1002a82e392e7b3c | [] | no_license | 13537875570/General-Urban-Evaluation | 504d3fa3c32f69940c454f13ac401be12d3d03ea | 513922d01d5b23ba9244f3704dab5d0793ecf165 | refs/heads/master | 2020-10-02T10:25:24.572538 | 2019-12-13T05:19:05 | 2019-12-13T05:19:05 | 227,756,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py |
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import time
consumerkey='qgEq1bQqaPBtE9MUe9iXjel5J'
consumersecret='gZOzN5oQswfcfqkdTzLd49DgibiCKdVNY2hYuzQakwX4GYCnIR'
accesstoken='2780294182-MvbzCoYYsdiCgr5I2tzT9FSbqObkQhaYfbNlSA9'
accesssecret='kR7TQ3yNkCkArHVwrzxgNUUjGelDejEfJBocMB0gw2ke1'
class listener(StreamListener):
def on_data(self,data):
try:
if 'virginia' in data:
print (data)
saveFile=open('twitDB3.csv','a')
saveFile.write(data)
saveFile.write('\n')
saveFile.close()
return True
        except BaseException as e:
            print('failed ondata,', str(e))
time.sleep(5)
def on_error(self,status):
print (status)
auth=OAuthHandler(consumerkey,consumersecret)
auth.set_access_token(accesstoken,accesssecret)
twitterstream=Stream(auth,listener())
twitterstream.filter(track=["car"])
| ["noreply@github.com"] | 13537875570.noreply@github.com |
| 2da5ce9852293d22aeae8c7605f8082ca24e70ee | 1ba58b17f33122abf4236e9e430a51d375e0eb53 | /km73/Zeleniy_Dmytro/4/task9.py | 8b6465f8fc2d6fdbe15585e505253054fa9dbeed | [] | no_license | igortereshchenko/amis_python | c4f8d86b88ab036d08ff0ce35c9b42ebeabecc42 | c6f0f2a70c82d5f269b3078eb296f82271b5bb10 | refs/heads/master | 2021-10-22T16:21:19.990650 | 2017-11-01T07:26:54 | 2017-11-01T07:26:54 | 104,785,028 | 0 | 139 | null | 2020-04-21T21:27:09 | 2017-09-25T18:11:42 | Python | UTF-8 | Python | false | false | 635 | py |
start_row = int(input("Enter start row: "))
start_column = int(input("Enter start column: "))
finish_row = int(input("Enter finish row: "))
finish_column = int(input("Enter finish column: "))
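# A bishop's diagonals keep either (row - column) or (row + column) constant,
# so the move is legal exactly when one of those values matches.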
if (start_row > 0 and start_row <= 8
and start_column > 0 and start_column <= 8
and finish_row > 0 and finish_row <= 8
and finish_column > 0 and finish_column <= 8):
    if ((start_row - start_column) == (finish_row - finish_column)
or (start_column + start_row) == (finish_column + finish_row)):
answer = "Yes"
else:
answer = "No"
else:
answer = "NOT CORRET DATA!"
print(answer)
| ["dzeleniy9@gmail.com"] | dzeleniy9@gmail.com |
| 3bdcee9dd0423ab3902eff3a04ab25cae6306da5 | e49d91c15a95fb00e3b46f212237045c923a9035 | /nothashtag-figsenti/src/small_features/ngrams_classes.py | 8b4732558c90ca0cc08902ba252e4b8e4056b6e9 | [] | no_license | samtmcg/semevel_t11_2015 | dc1319c20836d4d5d2ec7d2d7260ebdcad04025b | 394e778dcb597c2e01de12e6fd2017416d96a85d | refs/heads/master | 2020-12-31T00:19:22.840011 | 2015-10-22T19:05:19 | 2015-10-22T19:05:19 | 44,766,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,334 | py |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import sys
import argparse
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn import linear_model
from functools import partial
from scipy.sparse import *
import sklearn.metrics as metrics
parser = argparse.ArgumentParser(description="""Creates baseline predictions using a BOW representation and a Classification Model.""")
parser.add_argument('train')
parser.add_argument('test')
parser.add_argument("-t","--type", help="one of the next values: count (default), binary, tfidf", required=False, type=str, default="count")
parser.add_argument("-rsw","--removeStopWords",help="remove stop words: True, False (default)", required=False, type=str, default="False")
parser.add_argument("-rht","--removeHashTags",help="remove Hash Tags: True, False (default)", required=False, type=str, default="False")
parser.add_argument("-ngrams","--useNgrams",help="the order of ngrams: two integers, an upper and a lower bound separated by a space. Default is 1 2 , unigrams and bigrams",required=False,type=str,default="1 2")
parser.add_argument("-badvalues","--WhatBadValues",help="what to do with predicted values that go outside the value range of this experiment. 'cap' (default) which brings the value back to a suitable value in the range by setting it to max or min. 'rescale', which rescales the distribution of answers",required=False,type=str,default=None)
parser.add_argument("-classification","--ClassificationMethod",help="the type of classification method used: DT (default), NB, NBM,KNN",required=False, type=str, default="DT")
parser.add_argument("-testScore","--testScores",help="A boolean value that indicates if testScores are available",required=False,type=str,default="False")
args = parser.parse_args()
# given a list of scores assign them to class lables
def to_class_lables(score_list,number_of_classes):
lables = ['A','B','C','D','E','F','G','H','I','J','K']
matching_scores_to_lables = [[], [], [], [], [], [], [], [], [], [], []]
rounded_score_list = [round(x) for x in score_list]
class_labels=[]
if number_of_classes == 11:
all_possible_scores = range(-5,6)
score_label_dict = dict()
for i in range(number_of_classes):
score_label_dict[all_possible_scores[i]] = lables[i]
for i in range(len(score_list)):
label = score_label_dict[rounded_score_list[i]]
lab_index = lables.index(label)
matching_scores_to_lables[lab_index].append(score_list[i])
class_labels.append(label)
categories = lables
else:
start = 100/float(number_of_classes)
edges = [start*i for i in range(1,number_of_classes+1)]
percentiles = np.percentile(score_list,edges)
categories = lables[:number_of_classes]
print 'PERCENTILES,:::,',percentiles
for i in range(len(score_list)):
score = rounded_score_list[i]
actual_values_score = score_list[i]
for a in range(number_of_classes):
if a == 0:
if score < percentiles[a]:
label = lables[a]
matching_scores_to_lables[a].append(actual_values_score)
#print "score/label: ", str(score) +"/" + str(label)
elif a >0 and a < number_of_classes-1:
b = a-1
if score >= percentiles[b] and score < percentiles[a]:
label=lables[a]
matching_scores_to_lables[a].append(actual_values_score)
#print "score/label: ", str(score) +"/" + str(label)
elif a == number_of_classes-1:
b = a-1
if score>= percentiles[b] and score <= percentiles[a]:
label = lables[a]
matching_scores_to_lables[a].append(actual_values_score)
#print "score/label: ", str(score) +"/" + str(label)
class_labels.append(label)
return class_labels,categories,matching_scores_to_lables
def own_tokenizer(sent):
# input comes in pre-tokenized, and tokens are sepparated by white space
# this is used in the *Vectorizer functions
return sent.split(' ')
MIN = -5
MAX = 5
y_train_scores = []
train_ids = []
y_train = []
train_tweets = []
train_file = args.train
test_file = args.test
vectorizerChoice = args.type
removeStops = args.removeStopWords
removeHashTags = args.removeHashTags
ngram_user_range = (int(args.useNgrams.split(' ')[0]), int(args.useNgrams.split(' ')[1]))
bad_values_choice = args.WhatBadValues
classification_type = args.ClassificationMethod
testScores_available = args.testScores
test_ids = []
test_tweets = []
def replace_user_tags(tweet):
# removes references to other users, but replaces with a special token,
# so does not remove the fact that they do reference others
split_tweet = tweet.split(' ')
nameless_tweet=[]
for w in split_tweet:
if w[0] == '@':
nameless_tweet.append('referenceAnotherUser')
else:
nameless_tweet.append(w)
fixed_tweet = (' ').join(nameless_tweet)
return fixed_tweet
def remove_user_tags(tweet):
# removes references to other users
split_tweet = tweet.split(' ')
nameless_tweet=[]
for w in split_tweet:
if not w[0] == '@':
nameless_tweet.append(w)
fixed_tweet = (' ').join(nameless_tweet)
return fixed_tweet
# open train file and extract ids, scores, and tweets
with open(train_file,'r') as f:
for line in f:
line = line.strip()
id_tag,score,tweet = line.split('\t')
### want to remove references to other twitter users, without removing the fact that they references a user
#tweet = replace_user_tags(tweet)
tweet = remove_user_tags(tweet)
# if Hash Tags are to be removed
if removeHashTags == 'True':
split_tweet = tweet.split(' ')
wl = [w for w in split_tweet if not w[0] =='#']
tweet = (' ').join(wl)
train_ids.append(id_tag)
y_train_scores.append(float(score))
train_tweets.append(tweet)
y_true = []
# open test file and extract ids, scores, and tweets
with open(test_file,'r') as tst:
for line in tst:
line = line.strip()
id_tag,score,tweet = line.split('\t')
#tweet = replace_user_tags(tweet)
tweet = remove_user_tags(tweet)
if removeHashTags == 'True':
split_tweet = tweet.split(' ')
wl = [w for w in split_tweet if not w[0] =='#']
tweet = (' ').join(wl)
test_ids.append(id_tag)
test_tweets.append(tweet)
        if testScores_available == 'True':
            y_true.append(float(score))
#y_true_labels,_,_= to_class_lables(y_true,nclasses)
#y_true_labels = np.array(y_true_labels)
y_true = np.array(y_true)
# different possible BOW representations:
# remove stopwords or not? Just using built in list of english stopwords
if removeStops == 'True':
removeStopwords = 'english'
else:
removeStopwords = None
if vectorizerChoice == 'count':
vect_model = CountVectorizer(tokenizer=own_tokenizer,lowercase=False,binary=False,stop_words=removeStopwords,ngram_range=ngram_user_range)
elif vectorizerChoice == 'binary':
vect_model = CountVectorizer(tokenizer=own_tokenizer,lowercase=False,binary=True,stop_words=removeStopwords,ngram_range=ngram_user_range)
elif vectorizerChoice == 'tfidf':
vect_model = TfidfVectorizer(tokenizer=own_tokenizer,lowercase=False,binary=False,stop_words=removeStopwords,ngram_range=ngram_user_range)
# transform tweets to vector space representation
X_train = vect_model.fit_transform(train_tweets)
X_test = vect_model.transform(test_tweets)
### what classification model has been chosen:
if classification_type.lower() == 'dt':
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
X_train = X_train.todense()
X_test = X_test.todense()
elif classification_type.lower() == 'nb':
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
X_train = X_train.todense()
X_test = X_test.todense()
elif classification_type.lower() == 'nbm':
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
X_train = X_train.todense()
X_test = X_test.todense()
elif classification_type.lower() == 'knn':
    # NearestNeighbors is unsupervised and was never assigned to clf; use the
    # KNN classifier so clf is defined for the fit/predict calls below.
    # TODO: automatically run a way to find the best value of k
    from sklearn.neighbors import KNeighborsClassifier
    clf = KNeighborsClassifier()
def back_to_numbers(class_scores,scores_to_lables_lists,numclass):
from scipy.stats import mode
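    # Map each predicted class label back to a numeric score using summary
    # statistics (mean, mode, median, max, min) of the training scores that
    # were binned into that class.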
back_to_values_mean = np.zeros(len(class_scores))
back_to_values_mode_small = np.zeros(len(class_scores))
back_to_values_mode_larger = np.zeros(len(class_scores))
back_to_values_median = np.zeros(len(class_scores))
back_to_values_max = np.zeros(len(class_scores))
back_to_values_min = np.zeros(len(class_scores))
lables = ['A','B','C','D','E','F','G','H','I','J','K']
numbers_lables_dict = dict()
for j in range(0,11):
numbers_lables_dict[lables[j]] = j
for i in range(len(class_scores)):
cs = class_scores[i]
bin = numbers_lables_dict[cs]
back_to_values_mean[i] = np.array(scores_to_lables_lists[bin]).mean()
back_to_values_mode_small[i] = mode(scores_to_lables_lists[bin])[0][0]
back_to_values_mode_larger[i] = mode(scores_to_lables_lists[bin])[1][0]
back_to_values_median[i] = np.median(scores_to_lables_lists[bin])
back_to_values_max[i] = np.array(scores_to_lables_lists[bin]).max()
back_to_values_min[i] = np.array(scores_to_lables_lists[bin]).min()
return [back_to_values_mean,back_to_values_mode_small,back_to_values_mode_larger,back_to_values_median,back_to_values_max,back_to_values_min ]
# loop through all possible number of classes upto 11
for i in range(2,12):
nclasses = i
y_train,categories,current_scores_to_lables = to_class_lables(y_train_scores,nclasses)
y_train = np.array(y_train)
clf.fit(X_train, y_train)
predicted_scores = clf.predict(X_test)
    if testScores_available == 'True':
        systypes = ['mean','mode_smaller','mode_larger','median','max','min']
        systemScores = back_to_numbers(predicted_scores,current_scores_to_lables,nclasses)
        for j in range(len(systemScores)):
            sysvalues = systemScores[j]
            ss = systypes[j]
            prediction_cosine = metrics.pairwise.cosine_similarity(y_true,sysvalues)[0][0]
            mse = metrics.mean_squared_error(y_true,sysvalues)
            print '%0.3f , %s , %0.8f,%0.4f' % (nclasses,ss,prediction_cosine,mse)
"""from sklearn import metrics
f1score = metrics.f1_score(y_true_labels, predicted_scores)
#print("f1-score: %0.3f" % f1score)
accuracy = metrics.accuracy_score(y_true_labels, predicted_scores)
#print("Accuracy: %0.3f" % accuracy)
print "%0.3f \t %0.3f\n" % (accuracy,f1score)
print("classification report:")
print(metrics.classification_report(y_true_labels, predicted_scores,target_names=categories))
print("confusion matrix:")
print(metrics.confusion_matrix(y_true_labels, predicted_scores,labels=categories))"""
"""
if not (len(test_ids) == len(predicted_scores)) and (len(test_ids)==len(test_tweets)):
print "ERROR:: lost data in test\n"
print 'Number of test_ids: \t', len(test_ids)
print 'Number of predicted_scores: \t', len(predicted_scores)
print 'Number of test tweets: \t', len(test_tweets)
else:
for i in range(len(test_ids)):
print test_ids[i]+'\t'+str(predicted_scores[i])+'\t'+test_tweets[i]
#print 'weights: '
#features = pd.Series(regr.coef_, index=vect_model.get_feature_names())
#importance_order = features.abs().order(ascending=False).index
#for i in range(300):
#s = features[importance_order].index[i] + ' ' + str(features[importance_order].ix[i]) + '\n'
#sys.stdout.write(s.encode('utf-8'))
"""
| ["sarah.alice.mcgillion@gmail.com"] | sarah.alice.mcgillion@gmail.com |
| fca24cecd75975e7ff41a3ea139467d5f9774921 | 7177a8a9eb2030fa871f19f36144b7d055d5c5b3 | /main.py | e19f7da7cfa27a35074ea8d14b9b789db4f37925 | [] | no_license | Ziyu98/YOLOv3 | 5efb2bc809917041093cf61bfb7d52acbacb9fd7 | 4dfe50cf4a83bf0dde83ec3de8f6995461d6ce12 | refs/heads/master | 2021-01-06T19:26:56.256263 | 2020-02-18T20:32:17 | 2020-02-18T20:32:17 | 241,459,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,454 | py |
from __future__ import division, print_function
import tensorflow as tf
import numpy as np
import argparse
import cv2
import time
import shapely.geometry as sg
import shapely.ops as so
import math
import os
from utils.misc_utils import parse_anchors, read_class_names
from utils.nms_utils import gpu_nms
from utils.plot_utils import get_color_table, plot_one_box
from utils.data_aug import letterbox_resize
from shapely.geometry import Polygon
from model import yolov3
parser = argparse.ArgumentParser(description="YOLO-V3 video test procedure.")
parser.add_argument("input_video", type=str,
help="The path of the input video.")
parser.add_argument("--anchor_path", type=str, default="./data/yolo_anchors.txt",
help="The path of the anchor txt file.")
parser.add_argument("--new_size", nargs='*', type=int, default=[416, 416],
help="Resize the input image with `new_size`, size format: [width, height]")
parser.add_argument("--letterbox_resize", type=lambda x: (str(x).lower() == 'true'), default=True,
help="Whether to use the letterbox resize.")
parser.add_argument("--class_name_path", type=str, default="./data/coco.names",
help="The path of the class names.")
parser.add_argument("--restore_path", type=str, default="./data/darknet_weights/yolov3.ckpt",
help="The path of the weights to restore.")
parser.add_argument("--save_video", type=lambda x: (str(x).lower() == 'true'), default=False,
help="Whether to save the video detection results.")
args = parser.parse_args()
args.anchors = parse_anchors(args.anchor_path)
args.classes = read_class_names(args.class_name_path)
args.num_class = len(args.classes)
color_table = get_color_table(args.num_class)
vid = cv2.VideoCapture(args.input_video)
video_frame_cnt = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))   # was vid.get(7)
video_width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))       # was vid.get(3)
video_height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))     # was vid.get(4)
video_fps = int(vid.get(cv2.CAP_PROP_FPS))                 # was vid.get(5)
if args.save_video:
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
videoWriter = cv2.VideoWriter('video_result.mp4', fourcc, video_fps, (video_width, video_height))
#if os.path.exists("percentage.txt"):
# os.remove("percentage.txt")
#if os.path.exists("info_black_width_100_v1.txt"):
# os.remove("info_black_width_100_v1.txt")
with tf.Session() as sess:
input_data = tf.placeholder(tf.float32, [1, args.new_size[1], args.new_size[0], 3], name='input_data')
yolo_model = yolov3(args.num_class, args.anchors)
with tf.variable_scope('yolov3'):
l1, l3, l5, l7, l9, l11, f_m_1, f_m_2, f_m_3 = yolo_model.forward(input_data, False)
pred_feature_maps = f_m_1, f_m_2, f_m_3
pred_boxes, pred_confs, pred_probs = yolo_model.predict(pred_feature_maps)
pred_scores = pred_confs * pred_probs
boxes, scores, labels = gpu_nms(pred_boxes, pred_scores, args.num_class, max_boxes=200, score_thresh=0.3, nms_thresh=0.45)
saver = tf.train.Saver()
saver.restore(sess, args.restore_path)
#fileper=open("percentage.txt","a")
info_new=open("verify_file.txt","a")
for i in range(video_frame_cnt):
ret, img_ori = vid.read()
height_ori, width_ori = img_ori.shape[:2]
size=height_ori*width_ori
if args.letterbox_resize:
img, resize_ratio, dw, dh = letterbox_resize(img_ori, args.new_size[0], args.new_size[1])
else:
height_ori, width_ori = img_ori.shape[:2]
img = cv2.resize(img_ori, tuple(args.new_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.asarray(img, np.float32)
img = img[np.newaxis, :] / 255.
start_time = time.time()
filen1=open('res_n1/n1_{}.txt'.format(i+1),'a')
filen3=open('res_n3/n3_{}.txt'.format(i+1),'a')
filen5=open('res_n5/n5_{}.txt'.format(i+1),'a')
filer1=open('res_r1/r1_{}.txt'.format(i+1),'a')
filer2=open('res_r2/r2_{}.txt'.format(i+1),'a')
filer3=open('res_r3/r3_{}.txt'.format(i+1),'a')
filef1=open('res_f1/f1_{}.txt'.format(i+1),'a')
filef2=open('res_f2/f2_{}.txt'.format(i+1),'a')
filef3=open('res_f3/f3_{}.txt'.format(i+1),'a')
print("********",i,"-th frame")
n1, n3, n5, r1, r2, r3, f1, f2, f3 = sess.run([l1, l3, l5, l7, l9, l11, f_m_1, f_m_2, f_m_3],feed_dict={input_data: img})
f_total = f1, f2, f3
# Dump each intermediate tensor slice-by-slice into its text file. The header
# and slice markers get their own lines ('\n' added) so numpy's loadtxt can
# skip them as '#' comments.
dump_pairs = [(filen1, n1), (filen3, n3), (filen5, n5),
              (filer1, r1), (filer2, r2), (filer3, r3),
              (filef1, f1), (filef2, f2), (filef3, f3)]
for out_file, tensor in dump_pairs:
    data = tensor[0]
    out_file.write('# Array shape: {0}\n'.format(data.shape))
    for data_slice in data:
        np.savetxt(out_file, data_slice, fmt='%.3f')
        out_file.write('# New slice\n')
filen1.close()
filen3.close()
filen5.close()
filer1.close()
filer2.close()
filer3.close()
filef1.close()
filef2.close()
filef3.close()
boxes_, scores_, labels_ = sess.run([boxes, scores, labels], feed_dict={input_data: img})
#boxes_, scores_, labels_ = [], [] ,[] #sess.run([boxes, scores, labels], feed_dict={input_data: img})
end_time = time.time()
# rescale the coordinates to the original image
if args.letterbox_resize:
boxes_[:, [0, 2]] = (boxes_[:, [0, 2]] - dw) / resize_ratio
boxes_[:, [1, 3]] = (boxes_[:, [1, 3]] - dh) / resize_ratio
else:
boxes_[:, [0, 2]] *= (width_ori/float(args.new_size[0]))
boxes_[:, [1, 3]] *= (height_ori/float(args.new_size[1]))
boxes_[boxes_< 0] = 0
count=i+1
#get information on boxes
res=np.arange(len(labels_)*7).reshape(len(labels_), 7)
res=res.astype(np.float32)
res[:,0]=np.around(np.ones(len(labels_))*count,decimals=0)
res[:,1]=np.around(labels_,decimals=0)
res[:,2]=np.around(scores_,decimals=3)
res[:,3:7]=np.around(boxes_,decimals=3)
#print(res)
np.savetxt(info_new,res,fmt='%.3f')
#height_ori, width_ori = img_ori.shape[:2]
#print("Loop Time:", (end_time_loop - start_time_loop) * 1000)
#print("scores:")
#print(scores_)
"""print(r1)"""
"""for i in range(len(boxes_)):
x0, y0, x1, y1 = boxes_[i]
plot_one_box(img_ori, [x0, y0, x1, y1], label=args.classes[labels_[i]] + ', {:.2f}%'.format(scores_[i] * 100), color=color_table[labels_[i]])
cv2.putText(img_ori, '{:.2f}ms'.format((end_time - start_time) * 1000), (40, 40), 0,
fontScale=1, color=(0, 255, 0), thickness=2)
cv2.imshow('image', img_ori)"""
if args.save_video:
videoWriter.write(img_ori)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#fileper.close()
info_new.close()
vid.release()
if args.save_video:
videoWriter.release()
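# Illustrative sketch (assumption: helper not in the original repo): read one
# of the dumped feature-map files back. np.loadtxt skips the '#' header and
# slice-marker lines, so the slices concatenate along the first axis and can
# be reshaped using the shape recorded in the '# Array shape' header.
def load_dumped_tensor(path, shape):
    flat = np.loadtxt(path)   # '#' lines are treated as comments
    return flat.reshape(shape)
# usage (hypothetical shape, taken from the file's own header line):
# t = load_dumped_tensor('res_n1/n1_1.txt', (208, 208, 32))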
# ==== MarcPartensky/Python-Games :: /Agario/Window.py ====
import pygame
from pygame.locals import *
class Window:
made=0
def __init__(self,game=None,size=None,font="monospace",set=True):
Window.made+=1
self.number=Window.made
self.title=game.name
self.font=font
self.open=True
pygame.init()
self.setSize(size)
self.font = pygame.font.SysFont(self.font, 65)
self.screen=pygame.display.set_mode(self.size)
pygame.display.set_caption(self.title)
def setSize(self, size=None):
    if size is None:
        info = pygame.display.Info()
        # integer division: pygame window sizes must be ints
        self.size = (info.current_w // 2, info.current_h // 2)
    else:
        self.size = size
def pop_up(self,message):
pass
def scale(self,picture,size):
return pygame.transform.scale(picture,size)
def check(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.open=False
def select(self):
while self.open:
self.check()
for event in pygame.event.get():
if event.type == MOUSEBUTTONDOWN and event.button == 1:
return (event.pos[0],event.pos[1])
def point(self):
for event in pygame.event.get():
return (event.pos[0],event.pos[1])
def flip(self):
pygame.display.flip()
def drawBackground(self, background):
    if type(background) is tuple:
        # a plain RGB tuple: fill the whole screen with that color
        self.screen.fill(background)
    else:
        # otherwise assume a pygame surface and draw it at the origin
        self.screen.blit(background, (0, 0))
def drawPicture(self,picture,position):
self.screen.blit(picture, position)
def display(self, page):
    pass
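# Illustrative sketch (assumption: demo code not in the original file): any
# object with a .name attribute is enough to open a window.
if __name__ == "__main__":
    class _StubGame:
        name = "Window demo"
    w = Window(game=_StubGame())
    while w.open:
        w.check()                       # handles the QUIT event
        w.drawBackground((30, 30, 30))  # fill with a dark gray
        w.flip()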
# ==== lgkiemde/Maverick-Food-Service :: /crm/admin.py ====
from django.contrib import admin
from .models import Customer, Service, Product
class CustomerList(admin.ModelAdmin):
list_display = ('cust_name', 'organization', 'phone_number')
list_filter = ('cust_name', 'organization')
search_fields = ('cust_name',)
ordering = ['cust_name']
class ServiceList(admin.ModelAdmin):
list_display = ( 'cust_name', 'service_category', 'setup_time')
list_filter = ( 'cust_name', 'setup_time')
search_fields = ('cust_name', )
ordering = ['cust_name']
class ProductList(admin.ModelAdmin):
list_display = ( 'cust_name', 'product', 'pickup_time')
list_filter = ( 'cust_name', 'pickup_time')
search_fields = ('cust_name', )
ordering = ['cust_name']
admin.site.register(Customer, CustomerList)
admin.site.register(Service, ServiceList)
admin.site.register(Product, ProductList)
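# Illustrative sketch (assumption: an equivalent alternative, not in the
# original file): the same registration can be written with Django's
# @admin.register decorator instead of admin.site.register.
#
# from django.contrib import admin
# from .models import Customer
#
# @admin.register(Customer)
# class CustomerAdmin(admin.ModelAdmin):
#     list_display = ('cust_name', 'organization', 'phone_number')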
# ==== liufei0820/anheng :: /example_test/example_data_split.py ====
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/9/12 3:02 下午
# @Author : Alioth
# @File : example_data_split.py
# @Email : thxthx1999@gmail.com
# @Software: PyCharm
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import requests
if __name__ == '__main__':
path_project = os.path.abspath('..')
path_callbackurl = os.path.join(path_project, 'data', 'callbackurl' + '.json')
print(path_callbackurl)
with open(path_callbackurl, "r") as f:
json_callbackurl = json.load(f)
callBackUrl = json_callbackurl['callBackUrl']
print(callBackUrl)
path_data = os.path.join(path_project, 'data', 'loan_data' + '.csv')
data_set = pd.read_csv(path_data, header=0, index_col=0)
# Initiate a list for categoricals
categ_list = ['purpose']
# create new df with dummy variables
data_set = pd.get_dummies(data_set, columns=categ_list, drop_first=True)
# # print(data_set)
percent = 0.8
random = 1234
test_data = data_set.sample(frac=(1 - percent), replace=False, random_state=random, axis=0)
train_data = data_set[~data_set.index.isin(test_data.index)]
print(test_data.head())
path_train = os.path.join(path_project, 'data', 'train_data' + '.csv')
path_test = os.path.join(path_project, 'data', 'test_data' + '.csv')
train_data.to_csv(path_train)
test_data.to_csv(path_test)
dict_path = {
"path_train": path_train,
"path_test": path_test
}
r = requests.post(callBackUrl, json=dict_path) # does json.dumps(your_json) automatically
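# Illustrative sketch (assumption: an extra sanity check, not in the original
# script): the two halves of the split should be disjoint and roughly 80/20.
assert len(set(test_data.index) & set(train_data.index)) == 0
print('train fraction: %.3f' % (len(train_data) / float(len(data_set))))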
# ==== bluedisk/django-korean-fields :: /example/example/views.py ====
# -*- coding: utf-8 -*-
from django.forms import forms, CharField
from django.http import HttpResponse
from django.shortcuts import render
from korean.fields import JuminFormField
class TestForm(forms.Form):
jumin1 = JuminFormField()
jumin2 = JuminFormField()
def demo(request):
    if request.method == 'POST':
        form = TestForm(request.POST)
        if form.is_valid():
            # the form defines 'jumin1' and 'jumin2'; there is no 'jumin' key
            return HttpResponse('success : ' + form.cleaned_data['jumin1'])
    else:
        form = TestForm(initial={'jumin1': '010203-4567890'})
    return render(request, 'demo.html', {'form': form})
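# Illustrative sketch (assumption: wiring not shown in this file): the demo
# view would be hooked up in a urls.py roughly like this.
#
# from django.urls import path
# from example import views
#
# urlpatterns = [
#     path('demo/', views.demo, name='demo'),
# ]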
# ==== hpfn/wttd-2017-exerc :: /modulo_2/Codewars/dev-junior/find_even_array.py ====
# coding=utf-8
def find_even_index(arr):
tam_arr = len(arr)
for x in range(tam_arr):
if sum(arr[:x]) == sum(arr[x+1:]):
return x
return -1
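# Illustrative sketch (assumption: an alternative, not part of the original
# kata solution): the same check in O(n) using a running left-hand sum.
def find_even_index_linear(arr):
    total = sum(arr)
    left = 0
    for x, value in enumerate(arr):
        # the right-hand sum is everything except the left part and arr[x]
        if left == total - left - value:
            return x
        left += value
    return -1

assert find_even_index_linear([1, 2, 3, 4, 3, 2, 1]) == 3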
# ==== WHjiangxiaolin/Python :: /*backup/backup.py ====
# - Back up /tmp/demo/security into /tmp/demo/backup
# - Both full and incremental backups must be supported
# - Run a full backup on Mondays
# - Run an incremental backup on all other days
# Analysis:
# - A full backup archives the directory and computes the md5 of every file
# - An incremental backup computes each file's md5, compares it with the previous
#   day's value, and backs up the files that changed; files newly added to the
#   directory are backed up as well
# - The backup file name should show which directory was backed up, whether the
#   backup is full or incremental, and on which day it was made
import tarfile
from time import strftime
import os
import hashlib
import pickle
def check_md5(fname):  # compute a file's md5 digest; used by the functions below
    m = hashlib.md5()
    with open(fname, 'rb') as fobj:
        while True:
            data = fobj.read(4096)
            if not data:
                break
            m.update(data)
    return m.hexdigest()
def full_backup(src, dst, md5file):  # full backup
    # Build the full-backup file name; os.path.basename(src) gives the last path component
    fname = '%s_full_%s.tar.gz' % (os.path.basename(src), strftime('%Y%m%d'))
    fname = os.path.join(dst, fname)  # absolute path of the backup archive
    tar = tarfile.open(fname, 'w:gz')  # create the compressed archive
    tar.add(src)
    tar.close()
    # compute each file's md5
    md5dict = {}
    for path, folders, files in os.walk(src):
        # os.walk yields (path, folders, files) tuples: the directory path, its
        # subdirectories and its files; join path and file into an absolute path
        for file in files:
            key = os.path.join(path, file)
            md5dict[key] = check_md5(key)  # md5 digest keyed by file name
    # persist the md5 dict to a file
    with open(md5file, 'wb') as fobj:
        pickle.dump(md5dict, fobj)
def incr_backup(src, dst, md5file):  # incremental backup
    # build the incremental-backup file name
    fname = '%s_incr_%s.tar.gz' % (os.path.basename(src), strftime('%Y%m%d'))
    fname = os.path.join(dst, fname)  # absolute path of the backup archive
    # load the previous day's md5 values
    with open(md5file, 'rb') as fobj:
        old_md5 = pickle.load(fobj)
    # compute the current files' md5 values
    md5dict = {}
    for path, folders, files in os.walk(src):
        for file in files:
            key = os.path.join(path, file)
            md5dict[key] = check_md5(key)  # md5 digest keyed by file name
    # find changed and newly added files and archive them
    tar = tarfile.open(fname, 'w:gz')
    for key in md5dict:
        # .get returns None for unknown keys, i.e. the file did not exist before
        if old_md5.get(key) != md5dict[key]:
            tar.add(key)
    tar.close()
    # write the current md5 dict back for the next comparison
    with open(md5file, 'wb') as fobj:
        pickle.dump(md5dict, fobj)
if __name__ == '__main__':
    src = '/tmp/demo/security'
    dst = '/tmp/demo/backup'
    md5file = '/tmp/demo/backup/md5.data'
    if strftime('%a') == 'Mon':  # %a gives the abbreviated weekday name
        full_backup(src, dst, md5file)
    else:
        incr_backup(src, dst, md5file)
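# Illustrative sketch (assumption: quick self-check, not in the original
# script); the expected digest is the well-known md5 of b'hello'.
# import tempfile
# with tempfile.NamedTemporaryFile(delete=False) as tmp:
#     tmp.write(b'hello')
# print(check_md5(tmp.name))  # 5d41402abc4b2a76b9719d911017c592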
# ==== jb26444/lazynet :: /Python/SCRIPTS/Python/UPDATA/host_seedfile.py ====
#
#
# Add the host addresses you want to log into
#
#network_devices = ['x.x.x.1', 'x.x.x.2', 'x.x.x.3', 'x.x.x.4']
#
#network_devices = ['10.205.205.1', '10.205.205.2', '10.205.205.3', '10.205.205.4', '10.205.205.5', '10.205.205.6','10.205.205.7','10.205.205.8', '10.205.205.9', '10.205.205.10']
network_devices = ['10.205.7.10', '10.205.7.11']
# ==== aniagut/ASD-2020 :: /7a.Stos i kolejka/czynawiasowaniepoprawne.py ====
# if an opening bracket comes in, push it onto the stack
# if a closing one comes in, pop from the stack and check that the pair matches
class Stack:
def __init__(self):
self.s=[]
self.top=-1
self.size=0
def push(self,x):
self.top+=1
self.size+=1
if self.top==len(self.s):
self.s.append(x)
else:
self.s[self.top]=x
def pop(self):
self.size-=1
res=self.s[self.top]
self.top-=1
return res
def is_empty(self):
return self.size==0
def funkcja(nawiasy):
s=Stack()
n=len(nawiasy)
for i in range(n):
if nawiasy[i]=="(" or nawiasy[i]=="[":
s.push(nawiasy[i])
else:
if s.is_empty(): return False
res=s.pop()
if nawiasy[i]==")":
if res!="(":
return False
elif nawiasy[i]=="]":
if res!="[":
return False
if not s.is_empty(): return False
return True
nawiasy="((([][])))"
print(funkcja(nawiasy))
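# Illustrative sketch (assumption: extra test cases, not in the original file):
assert funkcja("()[]") is True
assert funkcja("([)]") is False   # interleaved brackets do not match
assert funkcja("(") is False      # unmatched opener left on the stack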
# ==== ilmfan/MohirdevAlgoritm :: /C++ dasturlash asoslari/42. Tanlangan masalalar yechimi/f6.py ====
"""
author: Shodmonov Zafar
date and time: 09:00 14.08.2021
information about the algorithm:
InPut: n
OutPut: prime numbers up to n
"""
def prime_numbers(n):
output_list = [2]
for num in range(3, n+1, 2):
divided_into = []
does_not_divide = []
for i in range(2, num):
if num % i == 0:
divided_into.append(1)
else:
does_not_divide.append(1)
if len(does_not_divide) == num - 2:
output_list.append(num)
return output_list
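# Illustrative sketch (assumption: a faster alternative, not the original
# author's code): trial division with an early exit and a square-root bound
# (math.isqrt needs Python 3.8+).
import math

def prime_numbers_fast(n):
    primes = []
    for num in range(2, n + 1):
        is_prime = True
        for i in range(2, math.isqrt(num) + 1):
            if num % i == 0:   # found a divisor: not prime
                is_prime = False
                break
        if is_prime:
            primes.append(num)
    return primes

assert prime_numbers_fast(20) == [2, 3, 5, 7, 11, 13, 17, 19]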
# ==== mkmagic/BCI_API :: /Server/structure/ShellInterface.py ====
"""
The Shell Interface module uses Python's argparse module to create shell-like programs.
To create a shell-like program, copy the template Interface class provided in this file
and follow the instructions marked by # comments.
Author: Hayun, Yoav
E-mail: yoavhayun@gmail.com
"""
from __future__ import print_function, with_statement
from abc import ABCMeta, abstractmethod
import os, sys
import xml.etree.ElementTree as ET
import argparse
import platform, traceback , shlex
from datetime import datetime
import threading, traceback
from collections import OrderedDict  # needed by XMLParser for grouped values
import importlib
import time
import codecs
from .Completer import Completer
try:
import builtins
__builtin__ = builtins
except:
import __builtin__
# try:
# try:
# import readline
# except:
# import pyreadline as readline
# except:
# try:
# from pip import main as pipmain
# except:
# from pip._internal import main as pipmain
# error = pipmain(['install', "pyreadline"])
# if not error:
# try:
# import readline
# except:
# import pyreadline as readline
# else:
# sys.exit(error)
class ShellCompleter(Completer):
def __init__(self, controllerInterface):
self.controller = controllerInterface
super(ShellCompleter, self).__init__()
def delegateControl(self, subparsers, id, interface):
self.enterId = id
parser = subparsers.add_parser(id, add_help=False)
parser.set_defaults(delegate=interface)
interface.completer.head.id = id
interface.completer.head.keywords = [id]
self.cur.addCommand(interface.completer.head)
return self
from structure.colors import colors
from structure.keys import Keys
class ShellInterface():
__metaclass__ = ABCMeta
LAST_EXECUTER_NAME = ["Shell Interface"]
END_CMDS = set(["q", "quit", "exit"])
READ_CMDS = set([".read", ".r"])
FILE_COMMENT = '#'
FILE_COLORS = {code + '#': color for code, color in [i for i in vars(colors.fg).items() if not i[0].startswith('_')]}
@abstractmethod
def buildParser(self):
"""
Builds the Interface's argument parser
"""
pass
def preprocessArguments(self):
"""
Preprocesses the arguments that were passed to the Interface
@Return whether or not the preprocessing was successful
"""
return True
def manageUnparsed(self, unparsed):
"""
Handles the arguments that couldn't be parsed by the Interface's arguments parser
@unparsed List of unparsed arguments
@Return whether or not the parsing was successful
"""
return len(unparsed) == 0
def __init__(self, name, version=None, description=None, logFile="ShellInterface.log", xmlConfiguration=None):
"""
Interface Constructor
@name The name of the interface
@version The current version of the interface
@description A description of the interface
@logFile The default log file of the interface
@xmlConfiguration A path to an XML configuration file, content saved in self.CONF
"""
self.parent = []
self.FLAGS = argparse.Namespace()
self.input = ''
self.XMLParser = ShellInterface.XMLParser("value", "name", lambda v: ShellInterface.XMLParser.extractionCast(v, "type"))
self.CONF = self.loadXmlConfiguration(xmlConfiguration, section=name) if xmlConfiguration else argparse.Namespace()
self.isFile = False
self.success = True
self.logLocks = {}
self.logFile = logFile
self.initLog()
self.keys = None
self.name = name if name else os.path.basename(__file__).split(".")[0]
self.version = version
self.description = "{}{}{}".format(self.name,
' v' + self.version if self.version else '',
': ' + description if description else '')
self.parser = argparse.ArgumentParser(description=self.description, add_help=False, formatter_class=argparse.RawTextHelpFormatter)
self.parser.add_argument("-h", "--help", action='store_true')
self.completer = ShellCompleter(self)
self.buildParser()
with self.completer.branch_out(".read"):
self.completer.branch_out("path", type=self.completer.BranchType.PATH)
self.completer.branch_out("--help")
with self.completer.branch_out(self.FILE_COMMENT, complete=False):
self.completer.branch_out("Line to print" , [])
for colorCode in self.FILE_COLORS:
with self.completer.branch_out(self.FILE_COMMENT + colorCode, hidden=True):
self.completer.branch_out("Line to print", [])
@abstractmethod
def execute(self):
"""
The main method of the Interface.
It's called whenever a shell command is entered or Interface.run() is called with argv.
@Return whether or not the execution was successful
"""
return True
def _close(self):
"""
This method is called whenever the interface closes
"""
self.close()
@abstractmethod
def close(self):
"""
This method is called whenever the interface closes
"""
pass
def initLog(self, logFile=None):
"""
Create an empty a log file.
If the file exists, this will overwrite it.
@logFile If given, will init the given log file and not the default
"""
logFile = logFile if logFile is not None else self.logFile
if logFile is not None:
if(os.path.isfile(logFile)):
os.remove(logFile)
open(logFile, "a").close()
def deleteLog(self, logFile=None):
"""
Deletes a logFile from the disk
@logFile If given, will delete the given log file and not the default
"""
logFile = logFile if logFile is not None else self.logFile
if os.path.isfile(logFile):
    os.remove(logFile)
def showLog(self, logFiles=[], logLevel=0, lineNumber=0, online=False, inputHandler=None):
"""
Displays a log file on the screen
@logFiles List of files, If given, will show the given files instead of the default log file
@logLevel Show all log prints with (log level <= given log level)
@lineNumber Display log from a given line number instead of the beginning
@online Whether or not the keep displaying the log as it updates from an external source
until a KeyboardInterrupt event
@inputHandler a handler function to handle incoming input
@Return the last printed line number
"""
logFiles = logFiles if len(logFiles) > 0 else [self.logFile]
printers = {}  # created before the try so the finally block can always iterate it
try:
if inputHandler is not None:
prompt = self.getPrompt()
if len(logFiles) == 1:
prompt = colors.bold
prompt += os.path.split(logFiles[0])[-1].split('.')[0]
prompt += "> " + colors.reset
inputHandler = ShellInterface.InputHandler(prompt, inputHandler, self.keys)
for logFile in logFiles:
if online:
printers[logFile] = ShellInterface.LogPrinter(logFile, lineNumber)
printers[logFile].start(logLevel)
else:
with open(logFile, 'r') as log:
[log.readline() for i in range(lineNumber)]  # skip lines shown already
ShellInterface.LogPrinter.printLog(log, logLevel)
while(True):
if inputHandler is not None and not inputHandler.isWorking:
break
time.sleep(0)
except KeyboardInterrupt:
pass
finally:
if inputHandler is not None:
inputHandler.stop()
for printer in printers:
printers[printer].stop()
@staticmethod
def tryExecution(task, maxTries, expecting=Exception):
tries = 0
while(tries < maxTries):
try:
task()
return True
except expecting:
tries += 1
return False
@staticmethod
def _logMsgTask(logFile, descriptor, message):
with open(logFile, 'a') as log:
log.write("{} {}\n".format(descriptor, message))
def log(self, message, logFile=None, logLevel=0, error=False, id=None, timestamp=None, maxTries=1):
"""
This method prints a message to the log file
@message The message to log
@logFile If given, will print to the given file instead of the default log file
@logLevel The minimal logLevel needed to display this message
@error Whether or not this message is an error message
@id An id of what produced this message
@timestamp Whether or not to include a timestamp in the log print
"""
logFile = logFile if logFile is not None else self.logFile
if logFile is not None:
if logFile not in self.logLocks:
self.logLocks[logFile] = threading.Lock()
message = "{}".format(message) if error else message
descriptor = "{}::".format(logLevel)
descriptor = "{}[{}]".format(descriptor, timestamp) if timestamp is not None else descriptor
descriptor = "{}[{}]".format(descriptor, id) if id is not None else descriptor
descriptor = "{} ERROR: ".format(descriptor) if error else descriptor
with self.logLocks[logFile]:
logTask = lambda : ShellInterface._logMsgTask(logFile, descriptor, message)
if not ShellInterface.tryExecution(logTask, maxTries, PermissionError):
self.log("Unable to log message in '{}': {}".format(logFile, message.strip()), error=True)
def __str__(self):
"""
@Return a description of the interface
"""
return self.description
def readCommands(self, file):
"""
Executes argument lines from a file
@file Path to file containing argument lines to be executed by the interface
@Return whether or not the execution was successful
"""
try:
if os.path.isfile(file):
lines = []
with open(file, mode='r') as f:
lines = f.readlines()
self.isFile = True
self.__shell(inputLines=lines)
self.isFile = False
else:
ShellInterface.printError("'{}' is not a file".format(file))
except:
ShellInterface.printError("Could not read file '{}'".format(file))
self.isFile = False
return False
return self.success
def __createFlags(self):
"""
Creates self.FLAGS for the Interface
@Return whether or not the creation of flags was successful
"""
self.__unparsed = []
try:
mem = {}
if hasattr(self.FLAGS, "MEMORY"):
for arg in self.FLAGS.MEMORY:
if hasattr(self.FLAGS, arg):
mem[arg] = getattr(self.FLAGS, arg)
self.FLAGS, self.__unparsed = self.parser.parse_known_args(args=self.input, namespace=self.FLAGS)
for arg in self.FLAGS.MEMORY:
if not arg in mem:
mem[arg] = self.FLAGS.MEMORY[arg]
if arg in mem:
if not hasattr(self.FLAGS, arg) or getattr(self.FLAGS, arg) is None:
setattr(self.FLAGS, arg, mem[arg])
except SystemExit:
if int(str(sys.exc_info()[1])) != 0:
self.success = False
return False
return True
def __processArgs(self):
if not self.manageUnparsed(self.__unparsed):
ShellInterface.printError("The arguments {} are unknown".format(self.__unparsed))
if self.isFile:
self.success = False
return False
if not self.preprocessArguments():
ShellInterface.printError("Failed in preprocessing of '{}'.".format(self.inputLine.strip()))
if self.isFile:
self.success = False
return False
return True
def __resetFlags(self):
"""
Resets self.FLAGS of the Interface
"""
for arg in self.FLAGS.__dict__:
if arg == 'MEMORY':
continue
if hasattr(self.FLAGS, 'MEMORY') and arg not in self.FLAGS.MEMORY:
setattr(self.FLAGS, arg, None)
def runLine(self, line):
"""
Parse and execute a single argument line
@line argument line to parse and execute
@Return whether or not the execution was successful
"""
ShellInterface.LAST_EXECUTER_NAME.append(self.name)
isLastLine = False
self.__resetFlags()
self.inputLine = line
self.input = shlex.split(line, posix=(platform.system()!='Windows'))
if self.inputLine.startswith(self.FILE_COMMENT):
toPrint = self.inputLine[1:].strip()
availableColors = [k for k in vars(colors.fg).items() if not k[0].startswith('_')]
for code in self.FILE_COLORS:
if toPrint.lower().startswith(code):
toPrint = self.FILE_COLORS[code] + toPrint[len(code):].strip() + colors.reset
break
print(toPrint)
elif len(self.input) > 0:
if self.input[0] in ShellInterface.END_CMDS and not self.isFile:
isLastLine = True
elif self.input[0] in ShellInterface.READ_CMDS:
expArgs = 2
if len(self.input) < expArgs:
ShellInterface.printError("Read command accepts a path as an argument.")
else:
self.readCommands(' '.join(self.input[1:]))
else:
if self.__createFlags():
if hasattr(self.FLAGS, "delegate") and self.FLAGS.delegate:
hasKeys = self.keys is not None
if hasKeys: self.keys.close()
self.callOtherInterface(self.FLAGS.delegate ,self.input[1:])
#if hasKeys: self.keys = Keys(self.name, intro=self.getUsage())
elif self.FLAGS.help:
self.parser.print_help()
else:
if self.__processArgs():
self.success = self.execute()
return isLastLine
def getUsage(self):
usage = ''
usage += colors.fg.yellow + '\n'
usage += self.description + '\n'
usage += colors.reset
usage += "\tTo exit, enter one of the following {}\n".format([cmd for cmd in ShellInterface.END_CMDS])
usage += "\tto read commands from a file, enter one of the following {}\n".format([cmd for cmd in ShellInterface.READ_CMDS])
usage += colors.bold + '\n'
usage += "\tTip: At any time, add '-h' flag to the command for help.\n"
usage += colors.reset
return usage
def printUsage(self):
"""
Prints the welcome usage information of the interface
"""
print(self.getUsage())
def setMarkerView(self):
sys.stdout.write("\033[2A")
sys.stdout.flush()
def unsetMarkerView(self):
sys.stdout.write("\033[2B")
sys.stdout.flush()
def getPrompt(self, parent=[]):
shellPromptMsg = "{}> ".format('\\'.join(parent + [self.name]))
return colors.bold + shellPromptMsg + colors.reset
def __shell(self, inputLines=None):
"""
Runs the Interface as a shell program
@parent the name of the parent Interface
@inputLines a pre set list of input lines
@Return whether or not the last input line was successful
"""
if not self.isFile:
self.keys = Keys(self.name, intro=self.getUsage())
self.printUsage()
try:
shellPromptMsg = self.getPrompt(self.parent)
while inputLines is None or len(inputLines) > 0:
if inputLines is None:
print()
try:
inputLine = inputLines.pop(0) if inputLines else self.keys.readInput(shellPromptMsg, self.completer)
except EOFError:
break
try:
lastLine = self.runLine(inputLine)
if lastLine:
break
if not self.success:
if self.isFile:
ShellInterface.printError("Command Failed, Aborting execution from file")
break
else:
ShellInterface.printError("Command Failed")
self.success = True
except SystemExit:
if int(str(sys.exc_info()[1])) != 0:
raise
except:
traceback.print_exc()
sys.exit(1)
finally:
if not self.isFile:
self.keys.close()
return self.success
def loadXmlConfiguration(self, xml, section=None):
"""
Loads an XML configuration file into the interface.
@xml A path to an XML file
@section Specify to load a specific section in the XML only
@Return an argparse Namespace containing the values extracted from XML
XML Structure:
section : Groups arguments together
name - name of the section
[Content] - 'import', 'value' and 'group' elements
import : Includes another section in the current section
section - section name to import
[Content] - None
value : Holds a value for the interface to use
name - Access name for the value
type - A casting method to apply on the given string value
[Content] - The value to store
group : groups several values together
name - Access name for the group
[Content] - 'value' elements
XML Example:
<root>
<section name="A">
<group name="A_Group1">
<value name="Arg1">value for A.A_Group1.Arg1</value>
<value name="Arg2">value for A.A_Group1.Arg2</value>
</group>
</section>
<section name="B">
<import section="A"/> <!--Access 'B.A.A_Group1.Arg1' and 'B.A.A_Group1.Arg2'-->
<value name="Arg1">value for B.Arg1</value>
</section>
</root>
"""
return self.XMLParser.loadXml(xml, section)
def run(self, argv=None, parent=[]):
"""
Runs the Interface
@argv include argv list to be executed by the given Interface
omit argv list to pass control to the given Interface
# First arg is expected to be the call command
@parent the name of the parent Interface
@Return whether or not the parsing was successful
"""
try:
self.parent = parent
if argv and len(argv) > 1:
self.runLine(' '.join(argv))
return self.success
else:
retValue = self.__shell()
self._close()
return retValue
except SystemExit:
self._close()
if int(str(sys.exc_info()[1])) != 0:
raise
def callOtherInterface(self, other, argv=None):
"""
Calls another Interface
@other An Interface instance
@argv argv list as expected by the Interface's run method
@Return whether or not the call returned success
"""
return other.run(argv, self.parent + [self.name])
@staticmethod
def printError(error):
"""
Prints an error
@argv error error message
"""
executer = ShellInterface.LAST_EXECUTER_NAME.pop() if len(ShellInterface.LAST_EXECUTER_NAME) > 0 else "Shell Interface"
print(colors.fg.lightred + "\n[{}] Error: {}".format(executer, error) + colors.reset)
class LogPrinter:
def __init__(self, log, lineNumber):
self.log = log
self.lineNumber = lineNumber
def start(self, logLevel=0):
self.isWorking = True
self.worker = threading.Thread(target=self.run, args=[logLevel])
self.worker.start()
def stop(self):
self.isWorking = False
self.worker.join()
def run(self, logLevel):
with open(self.log, 'r') as log:
[log.readline() for i in range(self.lineNumber)]
while(self.isWorking):
ShellInterface.LogPrinter.printLog(log, logLevel=logLevel)
@staticmethod
def printLog(logFile, logLevel=0):
content = logFile.readline()
if content:
content = content.split("::")
if len(content) == 2:
level, content = content[0], content[1]
if logLevel >= int(level):
print(content, end='')
class InputHandler:
def __init__(self, prompt, handlerFunction, keys):
self.prompt = prompt
self.handlerFunction = handlerFunction
self.keys = keys
self.isWorking = True
self.worker = threading.Thread(target=self.run, args=[])
self.worker.start()
def stop(self):
self.isWorking = False
self.worker.join()
def run(self):
print()
while(self.isWorking):
inputline = self.keys.readInput(self.prompt, hideInputLine=True)
if inputline.strip() in ShellInterface.END_CMDS:
self.isWorking = False
break
self.handlerFunction(inputline)
class XMLParser():
XML = argparse.Namespace(
section = argparse.Namespace(tag="section", id="name"),
include = argparse.Namespace(tag="import", id="section"),
group = argparse.Namespace(tag="group", id="name")
)
def __init__(self, valueTitle, valueId, valueExtractMethod=None):
if valueExtractMethod is None:
valueExtractMethod = lambda value: value.text
self.value = argparse.Namespace(title=valueTitle,
id=valueId,
extractMethod=valueExtractMethod)
@staticmethod
def castValue(value, castDescription):
module = __builtin__
if '.' in castDescription:
modulePath = '.'.join(castDescription.split('.')[0:-1])
try:
module = importlib.import_module(modulePath)
except:
modulePath = modulePath.split('.')
for i in range(0, len(modulePath)):
module = getattr(module, modulePath[i])
method = castDescription.split('.')[-1]
return getattr(module, method)(value)
@staticmethod
def extractionCast(valueElement, castId):
"""
Casts a value in a given XML element to its specified type
@valueElement XML element that has a text value and a 'type' attribute
@Return the casting of the text value to its specified type
"""
if castId in valueElement.attrib:
return ShellInterface.XMLParser.castValue(valueElement.text, valueElement.attrib[castId])
return valueElement.text
def _appendNamespace(self, namespace, id, value):
namespace._ORDER.append(id)
setattr(namespace, id, value)
return namespace
def _createNamespaceFromXmlRoot(self, xml, root, history):
"""
Creates a new namespace containing values specified under a given XML root element
@xml A path to an XML file
@root The XML element containing values to parse out
@history Holds already visited sections
@Return an argparse Namespace containing the values extracted from XML
"""
namespace = argparse.Namespace(_ORDER=[])
for section in root.findall(self.XML.include.tag):
id = section.attrib[self.XML.include.id]
namespace = self._appendNamespace(namespace, id, self._loadXml(xml, id, history))
for value in root.findall(self.value.title):
id = value.attrib[self.value.id]
namespace = self._appendNamespace(namespace, id, self.value.extractMethod(value))
for group in root.findall(self.XML.group.tag):
groupId = group.attrib[self.XML.group.id]
namespace = self._appendNamespace(namespace, groupId, OrderedDict())
for value in group.findall(self.value.title):
groupValues = getattr(namespace, groupId)
groupValues[value.attrib[self.value.id]] = self.value.extractMethod(value)
return namespace
def _loadXml(self, xml, section=None, history=[]):
"""
Loads an XML configuration file into the interface.
@xml A path to an XML file
@section Specify to load a specific section in the XML only
@history Holds already visited sections
@Return an argparse Namespace containing the values extracted from XML
"""
tree = ET.parse(xml)
root = tree.getroot()
if section:
if section not in history:
history.append(section)
for sec in root.findall(self.XML.section.tag):
if sec.attrib[self.XML.section.id].upper() == section.upper():
return self._createNamespaceFromXmlRoot(xml, sec, history[:])
else:
print("ERROR: Found a circular import in XML file: '{}'".format(xml))
return None
else:
return self._createNamespaceFromXmlRoot(xml, root, history)
# The requested section does not exist
return argparse.Namespace()
def loadXml(self, xml, section):
"""
Loads an XML file as an argparse.Namespace
@xml A path to an XML file
@section Specify to load a specific section in the XML only
@Return an argparse Namespace containing the values extracted from XML
XML Structure:
section : Groups arguments together
name - name of the section
[Content] - 'import', 'value' and 'group' elements
import : Includes another section in the current section
section - section name to import
[Content] - None
value : Holds a value for the interface to use
name - Access name for the value
type - A casting method to apply on the given string value
[Content] - The value to store
group : groups several values together
name - Access name for the group
[Content] - 'value' elements
XML Example:
<root>
<section name="A">
<group name="A_Group1">
<value name="Arg1">value for A.A_Group1.Arg1</value>
<value name="Arg2">value for A.A_Group1.Arg2</value>
</group>
</section>
<section name="B">
<import section="A"/> <!--Access 'B.A.A_Group1.Arg1' and 'B.A.A_Group1.Arg2'-->
<value name="Arg1">value for B.Arg1</value>
</section>
</root>
"""
return self._loadXml(xml, section, history=[])
"""
Interface Template Class
"""
###############################################################################
### Copy the entire code found below to start a new Shell Interface program ###
###############################################################################
import os, sys
from structure.ShellInterface import ShellInterface
class Interface(ShellInterface):
NAME = os.path.basename(__file__).split(".")[0] # Default is current file's name
VERSION = "1.0.0.0"
DESCRIPTION = 'A template Interface class' # Interface Short Description
def buildParser(self):
"""
Builds the Interface's argument parser
"""
# Add the arguments to self.parser (argparse.ArgumentParser type)
# use to keep values of arguments saved between commands at runtime.
self.parser.set_defaults(MEMORY={}) # dict: {[argument dest name] : [default value]}.
def __init__(self):
"""
Interface Constructor
"""
super(Interface, self).__init__(self.NAME, self.VERSION, description=self.DESCRIPTION)
def preprocessArguments(self):
"""
Preprocesses the arguments that were passed to the Interface
@Return whether or not the preprocessing was successful
"""
# Preprocess received arguments, stored in self.FLAGS (argparse namespace)
return super(Interface, self).preprocessArguments() # Return preprocessing result (bool)
def manageUnparsed(self, unparsed):
"""
Handles the arguments that couldn't be parsed by the Interface's arguments parser
@unparsed list of unparsed arguments
@Return whether or not the parsing was successful
"""
# Handle unparsed arguments (str list)
return super(Interface, self).manageUnparsed(unparsed) # Return parsing result (bool)
# Main Method
def execute(self):
"""
The main method of the Interface.
It's called whenever a shell command is entered or Interface.run() is called with argv.
@Return whether or not the execution was successful
"""
# Use self.FLAGS to access the parsed arguments (argparse namespace)
# Use self.input to access the given arguments (str list)
return True # Return execution result (bool)
def close(self):
"""
This method is called whenever the interface closes
@Return whether or not the execution was successful
"""
if __name__ == "__main__":
Interface().run(sys.argv)
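# Illustrative sketch (assumption: not part of the original module): loading
# the XML example from the loadXml docstring with XMLParser. The file name
# "config.xml" is hypothetical.
#
# parser = ShellInterface.XMLParser(
#     "value", "name",
#     lambda v: ShellInterface.XMLParser.extractionCast(v, "type"))
# conf = parser.loadXml("config.xml", "B")
# print(conf.Arg1)                # "value for B.Arg1"
# print(conf.A.A_Group1['Arg1'])  # imported from section A; groups are dicts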
# ==== CapOM/ChromiumGStreamerBackend :: /components/metrics/DEPS ====
# This component is shared with the Chrome OS build, so it's important to limit
# dependencies to a minimal set.
include_rules = [
"-components",
"+components/compression",
"+components/metrics",
"+components/variations",
"-net",
]
# ==== kiscsonti/DPwithTorches :: /FullCopy/src/utils.py ====
import os
import json
import string
import wikiwords
import unicodedata
import numpy as np
from collections import Counter
from nltk.corpus import stopwords
words = frozenset(stopwords.words('english'))
punc = frozenset(string.punctuation)
def is_stopword(w):
return w.lower() in words
def is_punc(c):
return c in punc
baseline = wikiwords.freq('the')
def get_idf(w):
return np.log(baseline / (wikiwords.freq(w.lower()) + 1e-10))
def load_data(path):
from doc import Example
data = []
for line in open(path, 'r', encoding='utf-8'):
if path.find('race') < 0 or np.random.random() < 0.6:
data.append(Example(json.loads(line)))
print('Load %d examples from %s...' % (len(data), path))
return data
class Dictionary(object):
NULL = '<NULL>'
UNK = '<UNK>'
START = 2
@staticmethod
def normalize(token):
return unicodedata.normalize('NFD', token)
def __init__(self):
self.tok2ind = {self.NULL: 0, self.UNK: 1}
self.ind2tok = {0: self.NULL, 1: self.UNK}
def __len__(self):
return len(self.tok2ind)
def __iter__(self):
return iter(self.tok2ind)
def __contains__(self, key):
if type(key) == int:
return key in self.ind2tok
elif type(key) == str:
return self.normalize(key) in self.tok2ind
def __getitem__(self, key):
if type(key) == int:
return self.ind2tok.get(key, self.UNK)
if type(key) == str:
return self.tok2ind.get(self.normalize(key),
self.tok2ind.get(self.UNK))
def __setitem__(self, key, item):
if type(key) == int and type(item) == str:
self.ind2tok[key] = item
elif type(key) == str and type(item) == int:
self.tok2ind[key] = item
else:
raise RuntimeError('Invalid (key, item) types.')
def add(self, token):
token = self.normalize(token)
if token not in self.tok2ind:
index = len(self.tok2ind)
self.tok2ind[token] = index
self.ind2tok[index] = token
def tokens(self):
"""Get dictionary tokens.
Return all the words indexed by this dictionary, except for special
tokens.
"""
tokens = [k for k in self.tok2ind.keys()
if k not in {'<NULL>', '<UNK>'}]
return tokens
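# Illustrative sketch (assumption: not part of the original module): basic
# Dictionary round-tripping between tokens and indices.
_demo = Dictionary()
_demo.add('hello')
assert _demo['hello'] == 2      # indices 0 and 1 are reserved for <NULL>/<UNK>
assert _demo[2] == 'hello'
assert _demo['missing'] == 1    # unknown tokens map to the <UNK> index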
vocab, pos_vocab, ner_vocab, rel_vocab, char_vocab = Dictionary(), Dictionary(), Dictionary(), Dictionary(), Dictionary()
def gen_race_vocab(data):
race_vocab = Dictionary()
build_vocab()
cnt = Counter()
for ex in data:
cnt += Counter(ex.passage.split())
cnt += Counter(ex.question.split())
cnt += Counter(ex.choice.split())
for key, val in cnt.most_common(30000):
if key not in vocab:
race_vocab.add(key)
print('Vocabulary size: %d' % len(race_vocab))
writer = open('./data/race_vocab', 'w', encoding='utf-8')
writer.write('\n'.join(race_vocab.tokens()))
writer.close()
def build_vocab(data=None):
global vocab, pos_vocab, ner_vocab, rel_vocab, char_vocab
# build word vocabulary
if os.path.exists('./data/vocab'):
print('Load vocabulary from ./data/vocab...')
for w in open('./data/vocab', encoding='utf-8'):
vocab.add(w.strip())
print('Vocabulary size: %d' % len(vocab))
else:
cnt = Counter()
for ex in data:
cnt += Counter(ex.passage.split())
cnt += Counter(ex.question.split())
cnt += Counter(ex.choice.split())
for key, val in cnt.most_common():
vocab.add(key)
print('Vocabulary size: %d' % len(vocab))
writer = open('./data/vocab', 'w', encoding='utf-8')
writer.write('\n'.join(vocab.tokens()))
writer.close()
# build part-of-speech vocabulary
if os.path.exists('./data/pos_vocab'):
print('Load pos vocabulary from ./data/pos_vocab...')
for w in open('./data/pos_vocab', encoding='utf-8'):
pos_vocab.add(w.strip())
print('POS vocabulary size: %d' % len(pos_vocab))
else:
cnt = Counter()
for ex in data:
cnt += Counter(ex.d_pos)
cnt += Counter(ex.q_pos)
for key, val in cnt.most_common():
if key: pos_vocab.add(key)
print('POS vocabulary size: %d' % len(pos_vocab))
writer = open('./data/pos_vocab', 'w', encoding='utf-8')
writer.write('\n'.join(pos_vocab.tokens()))
writer.close()
# build named entity vocabulary
if os.path.exists('./data/ner_vocab'):
print('Load ner vocabulary from ./data/ner_vocab...')
for w in open('./data/ner_vocab', encoding='utf-8'):
ner_vocab.add(w.strip())
print('NER vocabulary size: %d' % len(ner_vocab))
else:
cnt = Counter()
for ex in data:
cnt += Counter(ex.d_ner)
for key, val in cnt.most_common():
if key: ner_vocab.add(key)
print('NER vocabulary size: %d' % len(ner_vocab))
writer = open('./data/ner_vocab', 'w', encoding='utf-8')
writer.write('\n'.join(ner_vocab.tokens()))
writer.close()
# Load conceptnet relation vocabulary
assert os.path.exists('./data/rel_vocab')
print('Load relation vocabulary from ./data/rel_vocab...')
for w in open('./data/rel_vocab', encoding='utf-8'):
rel_vocab.add(w.strip())
print('Rel vocabulary size: %d' % len(rel_vocab))
if os.path.exists('./data/char_vocab.txt'):
print('Load character vocabulary from ./data/char_vocab.txt...')
with open("./data/char_vocab.txt", "r") as f:
for line in f.readlines():
char_vocab.add(line[:1])
print('Character vocabulary size: %d' % len(char_vocab))
else:
print("There is no character vocab file dudi, do something about it")
def gen_submission(data, prediction):
assert len(data) == len(prediction)
writer = open('out-%d.txt' % np.random.randint(10**18), 'w', encoding='utf-8')
for p, ex in zip(prediction, data):
p_id, q_id, c_id = ex.id.split('_')[-3:]
writer.write('%s,%s,%s,%f\n' % (p_id, q_id, c_id, p))
writer.close()
def gen_debug_file(data, prediction):
writer = open('./data/output.log', 'w', encoding='utf-8')
cur_pred, cur_choices = [], []
for i, ex in enumerate(data):
if i + 1 == len(data):
cur_pred.append(prediction[i])
cur_choices.append(ex.choice)
if (i > 0 and ex.id[:-1] != data[i - 1].id[:-1]) or (i + 1 == len(data)):
writer.write('Passage: %s\n' % data[i - 1].passage)
writer.write('Question: %s\n' % data[i - 1].question)
for idx, choice in enumerate(cur_choices):
writer.write('%s %f\n' % (choice, cur_pred[idx]))
writer.write('\n')
cur_pred, cur_choices = [], []
cur_pred.append(prediction[i])
cur_choices.append(ex.choice)
writer.close()
def gen_final_submission(data):
import glob
proba_list = []
for f in glob.glob('./out-*.txt'):
print('Process %s...' % f)
lines = open(f, 'r', encoding='utf-8').readlines()
lines = map(lambda s: s.strip(), lines)
lines = list(filter(lambda s: len(s) > 0, lines))
assert len(lines) == len(data)
proba_list.append(lines)
avg_proba, p_q_id = [], []
for i in range(len(data)):
cur_avg_p = np.average([float(p[i].split(',')[-1]) for p in proba_list])
cur_p_q_id = ','.join(data[i].id.split('_')[-3:-1])
if i == 0 or cur_p_q_id != p_q_id[-1]:
avg_proba.append([cur_avg_p])
p_q_id.append(cur_p_q_id)
else:
avg_proba[-1].append(cur_avg_p)
gen_debug_file(data, [p for sublist in avg_proba for p in sublist])
writer = open('answer.txt', 'w', encoding='utf-8')
assert len(avg_proba) == len(p_q_id)
cnt = 0
for probas, cur_p_q_id in zip(avg_proba, p_q_id):
cnt += 1
assert len(probas) > 1
pred_ans = np.argmax(probas)
writer.write('%s,%d' % (cur_p_q_id, pred_ans))
if cnt < len(p_q_id):
writer.write('\n')
writer.close()
os.system('zip final_output.zip answer.txt')
print('Please submit final_output.zip to codalab.')
def eval_based_on_outputs(path):
dev_data = load_data('../data/dev-data-processed.json')
label = [int(ex.label) for ex in dev_data]
gold, cur_gold = [], []
for i, ex in enumerate(dev_data):
if i + 1 == len(dev_data):
cur_gold.append(label[i])
if (i > 0 and ex.id[:-1] != dev_data[i - 1].id[:-1]) or (i + 1 == len(dev_data)):
gy = np.argmax(cur_gold)
gold.append(gy)
cur_gold = []
cur_gold.append(label[i])
prediction = [s.strip() for s in open(path, 'r', encoding='utf-8').readlines() if len(s.strip()) > 0]
prediction = [int(s.split(',')[-1]) for s in prediction]
assert len(prediction) == len(gold)
acc = sum([int(p == g) for p, g in zip(prediction, gold)]) / len(gold)
print('Accuracy on dev_data: %f' % acc)
def text_to_char_index(text):
indexed = []
for char in text:
indexed.append(char_vocab[char])
return indexed
def text_to_grams(text, length=5):
    partials = []
    if len(text) < length:
        partials.append(text)
    else:
        # the bound must reach len(text) inclusive, or the final window is lost
        for i in range(length, len(text) + 1):
            partials.append(text[i - length:i])
    return partials
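# Illustrative sketch (assumption: not in the original file): with the
# inclusive bound above, the last window is no longer dropped.
assert text_to_grams('abcdef', length=5) == ['abcde', 'bcdef']
assert text_to_grams('abc', length=5) == ['abc']   # shorter than one window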
if __name__ == '__main__':
# build_vocab()
trial_data = load_data('./data/trial-data-processed.json')
train_data = load_data('./data/train-data-processed.json')
dev_data = load_data('./data/dev-data-processed.json')
test_data = load_data('./data/test-data-processed.json')
build_vocab(trial_data + train_data + dev_data + test_data)
# ==== ninestep/mysite :: /blog/admin.py ====
from django.contrib import admin
from . import models
from markdownx.admin import MarkdownxModelAdmin
# Register your models here.
admin.site.register(models.articles,MarkdownxModelAdmin)
admin.site.register(models.comments)
admin.site.register(models.system)
# ==== quatsch/JUL1EN094-xbmc-addons :: /plugin.video.D17Replay/default.py ====
# -*- coding: utf-8 -*-
# xbmc modules
import xbmc
import xbmcplugin
import xbmcgui
import xbmcaddon
# os and lib modules
import os
import sys
import urllib
import urllib2
import re
# print_exc
from traceback import print_exc
# parseDOM
import CommonFunctions
common = CommonFunctions
common.plugin = "plugin.video.D17Replay"
__addonID__ = "plugin.video.D17Replay"
__author__ = "JUL1EN094"
__date__ = "01-02-2013"
__version__ = "1.0.6"
__credits__ = "Merci aux auteurs des autres addons replay du dépôt Passion-XBMC pour leur inspiration"
__addon__ = xbmcaddon.Addon( __addonID__ )
__settings__ = __addon__
__language__ = __addon__.getLocalizedString
__addonDir__ = __settings__.getAddonInfo( "path" )
# Global Variable
ROOTDIR = __settings__.getAddonInfo('path')
BASE_RESOURCE_PATH = os.path.join( ROOTDIR, "resources" )
MEDIA_PATH = os.path.join( BASE_RESOURCE_PATH, "media" )
ADDON_DATA = xbmc.translatePath( "special://profile/addon_data/%s/" % __addonID__ )
CACHEDIR = os.path.join( ADDON_DATA, "cache")
THUMB_CACHE_PATH = os.path.join( xbmc.translatePath( "special://profile/" ), "Thumbnails", "Video" )
WEBROOT = "http://www.d17.tv"
CANAL_VIDEOINFO_URL = "http://service.canal-plus.com/video/rest/getVideosLiees/"
FANART_PATH = os.path.join( ROOTDIR, "fanart.jpg" )
# List of directories to check at startup
dirCheckList = (CACHEDIR,)
class D17Replay:
"""
main plugin class
"""
debug_mode = False # Debug mode
def __init__( self, *args, **kwargs ):
print "==============================="
print " D17 Replay - Version: %s"%__version__
print "==============================="
print
self.set_debug_mode()
if self.debug_mode:
print "Python version:"
print sys.version_info
print "ROOTDIR: %s"%ROOTDIR
print "ADDON_DATA: %s"%ADDON_DATA
print "CACHEDIR: %s"%CACHEDIR
params = self.get_params()
url = None
name = None
mode = None
iconimage = None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
try:
iconimage=urllib.unquote_plus(params["iconimage"])
except:
pass
if self.debug_mode:
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
print "Iconimage: "+str(iconimage)
# Check if directories in user data exist
for i in range(len(dirCheckList)):
self.checkfolder(dirCheckList[i])
if mode==None or url==None or len(url)<1:
if self.debug_mode:
print "GET_CATEGORIES("+WEBROOT+")"
self.GET_CATEGORIES(WEBROOT)
self.clean_thumbnail(str(url))
xbmcplugin.setPluginCategory(handle=int(sys.argv[1]),category=__language__(30000))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==1:
if self.debug_mode:
print "GET_EMISSIONS_DIR : "+url
self.GET_EMISSIONS_DIR(url)
self.clean_thumbnail(str(url))
xbmcplugin.setPluginCategory(handle=int(sys.argv[1]),category=__language__(30000))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==2:
if self.debug_mode:
print "GET_EPISODES("+url+")"
self.GET_EPISODES(url,name)
self.clean_thumbnail(str(url))
xbmcplugin.setPluginCategory(handle=int(sys.argv[1]),category=__language__(30000))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==3:
if self.debug_mode:
print "PLAY_VIDEO"
print "vid :"+str(url)
video_url = self.GET_VIDEO_CANAL(str(url),'d17/')
item = xbmcgui.ListItem(path=video_url)
xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=item)
def GET_CATEGORIES(self,url):
soup = self.get_soup(url)
html = soup.decode("iso-8859-1")
main_menu_s = common.parseDOM(html,"ul",attrs={"class":"main-menu"})
if main_menu_s :
main_menu = main_menu_s[0]
li_s = common.parseDOM(main_menu,"li")
for li in li_s :
links = re.findall(u"""<a href="(.*)">(.*)</a>""",li)
if links:
for anchor in links :
if self.debug_mode:
print "categorie : "+anchor[1].encode("utf-8")
self.addDir(anchor[1].encode("utf-8"),WEBROOT+(anchor[0].encode("utf-8")),1,"")
    def GET_EMISSIONS_DIR(self,url,iconimage=''): # Whew, what a headache!!
soup = self.get_soup(url)
html = soup.decode("iso-8859-1")
main_s = common.parseDOM(html,"div",attrs={"id":"main"})
if main_s :
main = main_s[0]
block_videos_s = common.parseDOM (main,"div",attrs={"class":"block-videos"})
for block in block_videos_s :
bvh_titles_s = common.parseDOM(block,"h3",attrs={"class":"bvh-title"})
for bvh in bvh_titles_s :
self.addDir(bvh.encode("utf-8"),url,2,"")
def GET_EPISODES(self,url,name):
xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
soup = self.get_soup(url)
html = soup.decode("iso-8859-1")
main_s = common.parseDOM(html,"div",attrs={"id":"main"})
if main_s :
main = main_s[0]
block_videos_s = common.parseDOM (main,"div",attrs={"class":"block-videos"})
for block in block_videos_s :
bvh_titles_s = common.parseDOM(block,"h3",attrs={"class":"bvh-title"})
for bvh in bvh_titles_s :
if bvh.encode("utf-8")==name :
Mylist = common.parseDOM(block,"ul",attrs={"class":"bv-list MYlist"})[0]
li_s = common.parseDOM(Mylist,"li")
for li in li_s :
episode_vid = common.parseDOM(li,"a",ret="href")[0]
episode_vid = str(re.findall("""\?vid=(.*)""",episode_vid)[0])
episode_name = common.parseDOM(li,"h4")[0].encode("utf-8")
episode_image = common.parseDOM(li,"img",ret="src")[0].encode("utf-8")
self.addLink(episode_name,episode_vid,3,episode_image)
def GET_VIDEO_CANAL(self,vid,canal):
soup = self.get_soup(CANAL_VIDEOINFO_URL+canal+vid)
xml = soup.decode("utf-8")
video_s = common.parseDOM(xml,"VIDEO")
for video in video_s :
id = common.parseDOM(video,'ID') [0]
if str(id) == str(vid) :
video_url = common.parseDOM(video,"HD")[0]
return video_url
def set_debug_mode(self):
debug =__settings__.getSetting('debug')
if debug == 'true':
self.debug_mode = True
else:
self.debug_mode = False
print "D17 Replay: debug Mode:"
print self.debug_mode
def addLink(self,name,url,mode,iconimage,info={},fanart=FANART_PATH):
u =sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)
ok =True
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('IsPlayable', 'true')
liz.setProperty( "Fanart_Image", fanart)
ok =xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz)
return ok
def addDir(self,name,url,mode,iconimage,info={},fanart=FANART_PATH):
if info == {} :
info = {"Title":name}
u =sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)
ok =True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels=info )
liz.setProperty( "Fanart_Image", fanart)
ok =xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def get_params(self):
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
def get_soup(self,url):
req = urllib2.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 5.1; rv:15.0) Gecko/20100101 Firefox/15.0.1')
req.add_header('Referer',url)
soup = urllib2.urlopen(req).read()
if (self.debug_mode):
print str(soup)
return soup
def checkfolder(self,folder):
try:
if not os.path.exists(folder):
print "checkfolder Impossible to find the directory - trying to create the directory: "+folder
os.makedirs(folder)
except Exception, e:
print "Exception while creating folder "+folder
print str(e)
def clean_thumbnail(self,video_url):
try:
filename = xbmc.getCacheThumbName(video_url)
filepath = xbmc.translatePath(os.path.join(THUMB_CACHE_PATH,filename[0],filename))
if os.path.isfile(filepath):
os.remove(filepath)
if self.debug_mode:
print "Deleted %s thumb matching to %s video"%(filepath,video_url)
elif self.debug_mode:
print "No thumb found %s thumb matching to %s video"%(filepath,video_url)
return True
except:
print "Error: clean_thumbnail()"
print_exc()
return False
#######################################################################################################################
# BEGIN !
#######################################################################################################################
if ( __name__ == "__main__" ):
try:
D17Replay()
except:
print_exc()
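# Illustrative note (an assumption about how XBMC invokes this script, not taken
# from the file itself): the plugin URL carries a query string such as
#   ?url=<quoted url>&mode=1&name=<quoted name>&iconimage=<quoted icon>
# which arrives in sys.argv[2]; get_params() above splits it on '&' and '='
# into the {key: value} dict that __init__ unpacks into url/name/mode/iconimage.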
|
[
"jujul1en094@gmail.com"
] |
jujul1en094@gmail.com
|
7fb4ea8ca62ee742cb03add25202bb3018bba0d6
|
8562adfbeb7cf901aeeaf004dc1e53c286a24d48
|
/beg86.py
|
ba49d5c28d0528682212047ecc0dd3986de5a4fc
|
[] |
no_license
|
sarureddi/isogram
|
1d4f8a7566a1df0f4a7b42502be60a1fafaabc10
|
3aca7e1172977cd116c0902761d70ded84402310
|
refs/heads/master
| 2020-06-03T11:03:43.392152
| 2019-06-12T09:54:11
| 2019-06-12T09:54:11
| 191,544,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
si1=str(input())
l=len(si1)
s=set(si1)
if(l==len(s)):
print("Yes")
else:
print("No")
|
[
"noreply@github.com"
] |
sarureddi.noreply@github.com
|
466e2b548dafa31a902439b94559d4cce8d115ec
|
051a5b30752d60b2f40c28c8440c1d59ff8d6f53
|
/lab2/01_linear_regression.py
|
a78eff81a9fb58946fd4e6547b48db366f720184
|
[] |
no_license
|
yungbyun/Study_Tensorflow
|
e20c0de76e820898600c28fec2da3a88502f8403
|
8e2bcd191fd670068aaabe9845146df90da88182
|
refs/heads/master
| 2021-01-17T08:44:13.813361
| 2017-03-18T06:37:23
| 2017-03-18T06:37:23
| 83,952,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
from __future__ import print_function
import tensorflow as tf
import matplotlib.pyplot as plot
x_data = [1, 2, 3]
y_data = [1, 2, 3]
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
hypothesis = W * x_data + b
cost = tf.reduce_mean(tf.square(hypothesis - y_data))
a = tf.Variable(0.1)
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
costs = []
weights = []
bs = []
for step in range(2001):
sess.run(train)
if step % 40 == 0:
val_c = sess.run(cost)
val_w = sess.run(W)
val_b = sess.run(b)
print(step, val_c, val_w, val_b)
costs.append(val_c)
weights.append(val_w)
bs.append(val_b)
print("Learning finished!")
plot.plot(costs, 'o-')
plot.xlabel('Step')
plot.ylabel('Error')
plot.show()
plot.plot(weights, 'o-')
plot.xlabel('Step')
plot.ylabel('Weight')
plot.show()
plot.plot(bs, 'o-')
plot.xlabel('Step')
plot.ylabel('Bias')
plot.show()
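# Hedged cross-check (an added sketch, not part of the original lab): for this
# toy data the least-squares line can be computed in closed form with numpy,
# so W should converge towards 1.0 and b towards 0.0 during training.
import numpy as np
A = np.vstack([x_data, np.ones(len(x_data))]).T
w_closed, b_closed = np.linalg.lstsq(A, np.array(y_data, dtype=float), rcond=None)[0]
print('closed-form W, b:', w_closed, b_closed)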
|
[
"byclink@gmail.com"
] |
byclink@gmail.com
|
ee88edd0ac690cc450f39f6384e744c016c895de
|
92ca965a167316bb531671d8e28c58bc1decb7e8
|
/rbac/middlewares/rbac.py
|
bd4b4ab5038583dbb78a7d0266946e3dafcbafa7
|
[] |
no_license
|
yaozhengjie/crm-1
|
b879a095af54d720ae6ab4b73efa7758b6760093
|
89d72631b6065cfb390a0d4fa0331c5da01a080e
|
refs/heads/master
| 2020-04-08T16:25:31.169742
| 2018-11-28T11:16:34
| 2018-11-28T11:16:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from django.utils.deprecation import MiddlewareMixin
from django.shortcuts import HttpResponse
import re
from django.conf import settings
class RbacMiddleware(MiddlewareMixin):
    '''
    1. Get the URL of the current request
    2. Get the URL permission list stored in the current user's session
    3. Match the request against that permission information
    '''
def process_request(self, request):
        '''
        Executed as soon as the user's request comes in
        :param request:
        :return:
        '''
        # Get the URL of the current request
current_url = request.path_info
        # URLs on the whitelist can always be accessed
for valid in settings.VALID_URL_LIST:
if re.match(valid, current_url):
return None
# print(current_url)
        # Fetch the URL permission list stored in the current user's session
permission_list = request.session.get(settings.PERMISSION_SESSION_KEY)
# print('permission', permission_list)
        # If the session holds no permission info for this user, return an error
        if not permission_list:
            return HttpResponse('No user info found, please log in')
flag = False
        # Loop over the session URLs: if one matches the requested URL, allow access; otherwise return an error
for url in permission_list:
reg = '^%s$' % url
if re.match(reg, current_url):
flag = True
break
if not flag:
            return HttpResponse('Access denied')
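# Hedged configuration sketch (the setting names match those referenced above;
# the concrete values are assumptions, not taken from this project):
#
#     VALID_URL_LIST = [r'^/login/$', r'^/admin/.*']    # regex whitelist
#     PERMISSION_SESSION_KEY = 'permission_url_list'    # session key filled at login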
|
[
"41354304+yjiu1990@users.noreply.github.com"
] |
41354304+yjiu1990@users.noreply.github.com
|
191fb84e33cb5a9226de5e021a42e6bc6fb12eb0
|
48700c7222de631fc2ea4463abbb1b0b9aaebeec
|
/nodered-api-client-basic/get-json.py
|
b213550ee405012eb1ab0e95e123e110b401ad71
|
[] |
no_license
|
kopikaki/python_examples
|
cf4daf6d1ccac53e5910872b93f994e0c99c3de4
|
3395e9fe176014d404aaa9797f75e5c445805e55
|
refs/heads/master
| 2021-01-05T05:36:06.682296
| 2020-03-28T04:16:35
| 2020-03-28T04:16:35
| 240,898,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
import json
import requests
# Substitute your HTTPS end point here
nodeUrl = 'https://nr02.d1.zetez.com/node'
apiUrl = nodeUrl + '/data'
resp = requests.get(
apiUrl
)
if resp.status_code != 200:
# This means something went wrong.
    print('HTTP Error: ' + str(resp.status_code))
else:
respJson = resp.json()
print('HTTP Response: '+json.dumps(respJson))
|
[
"jeffqu08@gmail.com"
] |
jeffqu08@gmail.com
|
bdef4180111df6d6c82feab386dc5b173629453f
|
40ac650d3eeec0e4951dcc21d9da1f09a11de9ff
|
/test_leetcode05.py
|
0d2e19f28eaefa8e65f730685d048adc6ea12beb
|
[] |
no_license
|
liuyufei-pia/BR
|
499a65ecd398cd259f5cb17d405d0b17c89a94e4
|
1861716f5dfca78a3c69ba56a827e225a4d9b800
|
refs/heads/master
| 2020-07-23T07:27:18.668754
| 2019-11-27T09:06:03
| 2019-11-27T09:06:03
| 207,485,295
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
def longestPalindrome(s: str) -> str:
    # Manacher's algorithm
    # First join the characters with '#' so odd- and even-length palindromes take a uniform shape
    # Then apply a KMP-like idea to speed up the center expansion
if len(s) == 0:
return ""
s_new = '#' + '#'.join(s) + '#'
print(s_new)
    # Rightmost boundary expanded so far
mx = 0
    # Center point corresponding to that boundary
mid = 0
l = len(s_new)
    # Expansion-radius array; an initial value of 1 or 0 both work, it only sets the starting radius
p = [1] * l
for i in range(l):
if i < mx:
            # Here we can reuse values computed earlier,
            # but must not go beyond the right boundary already reached:
            # the mirror of i is 2*mid - i, and
            # by the definition of mx the radius can be at most mx - i
p[i] = min(mx - i, p[2 * mid - i])
        # The optimization above did the time saving; what follows is the ordinary expansion
while (i - p[i] >= 0 and i + p[i] < l and s_new[i - p[i]] == s_new[i + p[i]]):
p[i] += 1
        # Update mx and mid
if i + p[i] > mx:
mx = i + p[i]
mid = i
maxr = max(p)
ans = p.index(maxr)
    # The loop overshoots by one, so the actual radius is one less
maxr -= 1
return s_new[ans - maxr:ans + maxr + 1].replace('#', "")
if __name__ == '__main__':
s = 'abcba'
print(longestPalindrome(s))
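# Hedged cross-check (added sketch): a brute-force O(n^2) scan should agree
# with the Manacher result on small inputs.
def brute_longest_palindrome(s):
    best = ''
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i:j + 1]
            # Keep the longest substring that reads the same both ways
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best
print(brute_longest_palindrome('abcba'))  # expected: 'abcba', matching above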
|
[
"q.we85273@163.com"
] |
q.we85273@163.com
|
2023b3467ceed0986d27fb4c617037077678dc8d
|
7d58be2bbd4fed35a604b3732eecd1013e255bb8
|
/modules/exit.py
|
2289d2e1231c089ac8eeae575e642a45e7e8261d
|
[] |
no_license
|
Tianchai/to-do-list
|
b02c645020a65a10e0b5d3716dd0fca32f8f6177
|
3485d1d7ce79226e78d78fc40f80b285db281640
|
refs/heads/master
| 2021-07-24T15:10:35.454499
| 2018-07-25T18:15:05
| 2018-07-25T18:15:05
| 142,311,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
from pyfiglet import Figlet
import sys
def exit(redis, style):
exit_msg = Figlet(font='slant')
print(exit_msg.renderText('Good Bye . . .'))
sys.exit()
|
[
"tianchai.riengviwat@gmail.com"
] |
tianchai.riengviwat@gmail.com
|
ef5014df5a01fb40ab483a448b2b532e3c791cd5
|
ca680d06086cef25a28bf7e5e7678b179bf99497
|
/14032020 1day 1commit.py
|
ed1a37ded5db15802c85235fb3da1fda7631d8d9
|
[] |
no_license
|
kierenmihaly/worldwebproject
|
660f09471c44c8db59bb49b16d41180026633df7
|
34578ffbac29a115bb731065c469f930831d28bd
|
refs/heads/master
| 2020-09-29T23:17:23.300891
| 2020-08-31T01:46:51
| 2020-08-31T01:46:51
| 227,146,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
#14032020
# Part 3 conditionals - Python if/else
SCISSOR = '가위'
ROCK = '바위'
PAPER = '보'
WIN = "win"
DRAW = 'draw'
LOSE = 'lose...'
mine = '가위'
yours = '바위'
if mine == yours:
result = DRAW
# a style that uses lots of short if and else blocks ))
else:
    if mine == SCISSOR: # what I played is scissors
if yours == ROCK:
result = LOSE
        else: # otherwise I won
result = WIN
    else: # case where it was not scissors
if mine == ROCK:
if yours == PAPER:
result = LOSE
else:
result = WIN
else:
if mine == PAPER:
if yours == SCISSOR:
result = LOSE
else:
result = WIN
else:
print('weird')
print(result)
#elif
# in Python, an else followed by a nested if can be merged into a single block
#else and elif
# code that runs when the if condition does not hold
# else always runs whenever the condition fails
# elif lets you test another condition when the previous one fails (see the sketch below)
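# A minimal sketch (added illustration, not part of the original exercise) of the
# same game flattened with elif, as the notes above describe:
def judge(mine, yours):
    if mine == yours:
        return DRAW
    elif (mine, yours) in ((SCISSOR, PAPER), (ROCK, SCISSOR), (PAPER, ROCK)):
        return WIN
    else:
        return LOSE
print(judge(mine, yours))  # '가위' vs '바위' -> lose...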
|
[
"noreply@github.com"
] |
kierenmihaly.noreply@github.com
|
4868059941e7bf0fd7ddb81c0359474f6b1d0a89
|
4e522c82894fafbbd87997f39eff0e63b63df14c
|
/request.py
|
c2e4470b58b1a7f03f2ee0ebbb6e20513bc7bba2
|
[] |
no_license
|
tejas198606/wine-new
|
9a6be88190ce752394e970287682b0e83d15ccd7
|
aa79ed820ac59cc3fd374322a2d076a25b11cbd3
|
refs/heads/master
| 2022-12-04T10:21:44.235146
| 2020-08-30T06:39:49
| 2020-08-30T06:39:49
| 291,415,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
import requests
url = 'http://localhost:5000/predict_api'
r = requests.post(url,json={'fixedacidity':2.0000,'volatileacidity':6.0000,'citricacid':2.00000,'residualsugar':9.00000,'chlorides':6.00000,
'freesulfurdioxide':9.00000,'totalsulfurdioxide':6.00000,'density':20000,'pH':900000,
'sulphates':60000,'alcohol':60000})
print(r.json())
|
[
"noreply@github.com"
] |
tejas198606.noreply@github.com
|
d3b6e9f0e660a6ab3559ab5e2029a46b8e10bf27
|
255efb54075eb8cc2412bf1d5c936a97a003337e
|
/xt/environment/__init__.py
|
69338935f833cbdd1def7455667f8075e68b8eed
|
[
"MIT"
] |
permissive
|
jinqiuzhao/xingtian
|
914a4d48c62fd8b3d4ddd0479e9bab54bbe5cba7
|
95953dc6109c96e68dcdeb9755b3679ff51742d4
|
refs/heads/master
| 2023-06-06T06:20:28.815549
| 2021-07-02T10:00:42
| 2021-07-02T10:00:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Build environment module.
Do encapsulation for different simulations.
Unify the single and multi-agents.
"""
from __future__ import division, print_function
from xt.framework import Registers
def env_builder(env_name, env_info, **kwargs):
"""
Build the interface func for creating environment.
:param env_name:the name of environment
:param env_info: the config info of environment
:return:environment instance
"""
return Registers.env[env_name](env_info, **kwargs)
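# Hedged usage sketch (the environment name and config below are hypothetical,
# not taken from this repo):
#
#     env = env_builder('cartpole', {'name': 'CartPole-v0'})
#
# looks up the class registered under 'cartpole' in Registers.env and
# instantiates it with the given config dict plus any extra kwargs.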
|
[
"hustqj@126.com"
] |
hustqj@126.com
|
46e48392571cf7b50609181560a7a5b5cfd54d72
|
1d665f40197ba89f756e862c0e62a889c42cddfb
|
/commission/migrations/0007_auto_20150407_2034.py
|
2b1be1c3a9965aa2314ab05057b9179433f0c7eb
|
[
"MIT"
] |
permissive
|
Ourinternet/website
|
8d9f9ddfe7d17fb0bb11b978cf3a7cd34af456ed
|
648203c0d0620da2d11b3b0e398ee218b5bef5df
|
refs/heads/master
| 2021-01-21T21:49:06.834576
| 2016-03-16T20:43:58
| 2016-03-16T20:43:58
| 15,683,988
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('commission', '0006_auto_20150407_1825'),
]
operations = [
migrations.AlterField(
model_name='feature',
name='link',
field=models.CharField(max_length=1024, null=True, blank=True),
),
]
|
[
"csimpson@cigionline.org"
] |
csimpson@cigionline.org
|
3b2e792a01d05f90502f8647222c52e55e4095ee
|
ffc5257d66a581ed18d3ed024e263c2430f27cf3
|
/noi/noi/settings.py
|
0e261bf7f8a22230dfc1cd1d843e349b23424edd
|
[] |
no_license
|
ShadowLore/wow
|
e7456ff4702d94e522ff435c5893a4fa7b299e9a
|
d3e1a3d52d4ef2ae492910c2313e54fbfc37e54f
|
refs/heads/master
| 2023-08-20T02:56:14.059858
| 2021-10-22T13:44:57
| 2021-10-22T13:44:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,242
|
py
|
"""
Django settings for noi project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-m_$wkt$)!=2ism%()r62@r_&*4+4c@v_moyw5kz2yce&(ab_(w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'main',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'noi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'noi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"67012072+ShadowLore@users.noreply.github.com"
] |
67012072+ShadowLore@users.noreply.github.com
|
a62cffaf25c5e7ee992b973d0e3635e1296188ff
|
fbcb3c05e34e21573fc926282c9dbae1c0a36021
|
/Level 1/prison-labor-dodgers/solution.py
|
174f16bfac9f70e585ff1b24281b40dba58458ac
|
[] |
no_license
|
mattany/google-foobar
|
deb806f27505a98fed52c3eddf228dfa282ec0fa
|
33549bb6041fefcd0556de8583c5a7fca7d7508b
|
refs/heads/master
| 2023-01-03T19:57:46.159094
| 2020-11-01T00:03:22
| 2020-11-01T00:03:22
| 305,119,929
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
def solution(x, y):
sorted_x = sorted(x)
sorted_y = sorted(y)
for i in range(min(len(x), len(y))):
if sorted_x[i] != sorted_y[i]:
return min(sorted_x[i], sorted_y[i])
    # All shared IDs matched, so the extra element is the last one of the
    # longer *sorted* list (the raw lists may not end with it)
    if len(x) > len(y):
        return sorted_x[-1]
    else:
        return sorted_y[-1]
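# Hedged example (inputs invented for illustration): sorting both lists lines
# the shared IDs up pairwise, so the first mismatch (or the tail of the longer
# list) is the extra ID.
print(solution([13, 5, 6, 2, 5], [5, 2, 5, 13]))  # -> 6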
|
[
"mattany@gmali.com"
] |
mattany@gmali.com
|
bdc5fa0179d1b979bd63b233f5b2dcf76cf0b4a1
|
4676aae1f14170150782455b8c664a9fb462ba87
|
/lawbot/teledomain/util.py
|
3f5ba3e5d66c3fa8d541bb54717d1c8c7bd1c126
|
[] |
no_license
|
alages97/contract_translation
|
488fdae9bc237a205f7840229943c6bd08c622de
|
adcf2bf91667a9c77912b7695f986731f1b95957
|
refs/heads/master
| 2021-01-16T17:32:49.834527
| 2020-03-12T14:18:37
| 2020-03-12T14:18:37
| 243,198,277
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,204
|
py
|
import os
import sys
import time
import logging
#import win32api
import subprocess
import shutil
import glob
from pathlib import Path
import getpass
PATH_DIR = os.path.dirname(os.path.realpath(__file__))
PATH_DIR = r"%s" % PATH_DIR
OUTPUT_DIR = os.path.join(PATH_DIR, "./toTransfer/")
LOG_DIR = os.path.join(PATH_DIR, "./teleLogs/")
MOVE_DIR = os.path.join(PATH_DIR,"./testMoveDir/")
# Generate directories if not found
if not os.path.exists(MOVE_DIR):
os.mkdir(MOVE_DIR)
print("Made DIR %s" % MOVE_DIR)
logging.info('util: Made DIR %s' % MOVE_DIR)
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
print("Made DIR %s" % OUTPUT_DIR)
logging.info('util: Made DIR %s' % OUTPUT_DIR)
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
print("Made DIR %s" % LOG_DIR)
logging.info('util: Made DIR %s' % LOG_DIR)
def replaceMultiple(mainString, toBeReplaced, newString):
for elem in toBeReplaced:
if elem in mainString:
if elem in "<>-:":
newString =""
mainString = mainString.replace(elem,newString)
return mainString
def moveFolder(source,destination):
listsource = os.listdir(source)
print("Moving files: " + str(listsource))
for name in listsource:
if name == "System Volume Information":
continue
else :
logging.info('util: Moving file: %s' % name + ' to '+ destination)
#Use commandshell for windows, and moveFiles for linux
#CommandShell(OUTPUT_DIR + name,destination)
print(OUTPUT_DIR+name)
moveFiles(OUTPUT_DIR+name,destination+"/"+name)
def numOfDir(source):
d = os.listdir(source)
return len(d)
def removeFilesFromFolder():
folder = OUTPUT_DIR
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
logging.info('util: Removing file: %s' % file_path)
#elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
def removeFiles():
    files = glob.glob(os.path.join(OUTPUT_DIR, '*'))  # match the files inside the output dir, not the dir itself
for f in files:
logging.info('util: Removing file: %s' % f)
os.remove(f)
def CommandShell(folder,destination):
folder = '"'+folder+'"'
destination = '"'+destination+'"'
subprocess.Popen(
[
r"C:\WINDOWS\system32\WindowsPowerShell\v1.0\powershell.exe",
"-ExecutionPolicy",
"Unrestricted",
("Move-Item -Path %s -Destination %s"% (folder,destination)),
]
)
def moveFiles(folder,destination):
#os.rename(folder,destination)
shutil.move(folder,destination)
#os.replace(folder,destination)
def SearchMasterDrive():
#following code for windows, comment out the below LINUX code when using windows
#WINDOWS
# drives = win32api.GetLogicalDriveStrings()
# drives = drives.split('\000')[:-1]
# for drive in drives:
# driveDetails = win32api.GetVolumeInformation(drive)
# driveName = driveDetails[0]
# if "MASTER" not in driveName:
# MOVE_DIR = os.path.join(PATH_DIR,"./testMoveDir/")
# if not os.path.exists(MOVE_DIR):
# os.makedirs(MOVE_DIR)
# logging.info('main: Could not find Master drive, moving files here instead: ' + MOVE_DIR)
# continue
# else:
# MOVE_DIR = drive
# print("Master drive found at %s " % (drive))
# break
# return MOVE_DIR
#LINUX
username = getpass.getuser()
masterPath = '/media/'+username+'/MASTER'
if not os.path.exists(masterPath):
MOVE_DIR = os.path.join(PATH_DIR,"./testMoveDir/")
if not os.path.exists(MOVE_DIR):
os.makedirs(MOVE_DIR)
logging.info('main: Could not find Master drive, moving files here instead: ' + MOVE_DIR)
else :
print("Master drive found at %s " % (masterPath))
MOVE_DIR = masterPath
return MOVE_DIR
|
[
"noreply@github.com"
] |
alages97.noreply@github.com
|
5913c16ac7eff4c10d1d7a3590760b8884e2bfc5
|
f857a029ca13d7bcfa957b75c9d73a39ef10703f
|
/Python Brasil/Estrutura sequencial/2.py
|
c10690064e6703b84eda9058318fc9cddd9c486a
|
[] |
no_license
|
Matheus-Morais/Atividades_treino
|
c011989de9cb1dd74bfae873f191e6af546a740f
|
6fceb1c39a23f992e0845e65e8a76eb53b6ff30d
|
refs/heads/master
| 2023-02-24T00:09:58.064600
| 2021-01-27T14:13:05
| 2021-01-27T14:13:05
| 333,433,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
numero = int(input('Digite um numero:'))
print(numero)
|
[
"matheus2992morais@gmail.com"
] |
matheus2992morais@gmail.com
|
447949c77b5e8715fdf2eafed6ecb92897e81cab
|
f75c0721ab885cec9d269bba798803197cc78787
|
/age_scraper.py
|
f6be723b7c0d9633c5a33100c38a1db7b697ddd3
|
[] |
no_license
|
shravan-shandilya/game-of-death
|
b635a51f327e5bb45d183262bb315eb61aa12418
|
59d45e053031ab9023d7da3d1538212aaace64df
|
refs/heads/master
| 2022-02-11T17:18:14.074438
| 2016-06-22T13:51:41
| 2016-06-22T13:51:41
| 53,967,559
| 1
| 0
| null | 2022-01-13T00:48:38
| 2016-03-15T18:09:21
|
CSS
|
UTF-8
|
Python
| false
| false
| 542
|
py
|
#!/usr/bin/python
from bs4 import BeautifulSoup
import requests
base_url = "http://gameofthrones.wikia.com/wiki/"
char_file = open("char_data.txt","r")
for char in char_file:
char = char.split(",")[0].replace(" ","_")
soup = BeautifulSoup(requests.get(base_url+char).content,"html.parser")
results = soup.find_all("div",{"class":"pi-item pi-data pi-item-spacing pi-border-color"})
for res in results:
try:
if res.h3.contents[0] == "Age":
print char,":",res.div.contents[0],"\n"
except AttributeError:
print char," missing"
|
[
"s.shravan95@gmail.com"
] |
s.shravan95@gmail.com
|
c36195265104ac0d70f7475b9cbc3d7d62808045
|
8ed85fda69449832e6edc1ed44694eda8d953e98
|
/ml/GestureRecognizer.py
|
d977e678e5da6740d1f21955df1f58ccdee4c26a
|
[] |
no_license
|
rajeevku02/exp
|
4bad7bb69c3c8a45a11a5136a55d0895349d2d23
|
518e8ddea9a0e0eed37065ce8d4304bd83ca282c
|
refs/heads/main
| 2023-09-04T16:56:02.083630
| 2021-11-24T09:20:47
| 2021-11-24T09:20:47
| 410,766,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,324
|
py
|
import numpy as np
from tensorflow import keras
from Gestures import *
from geometry import dist
from Util import log, pt
from drag_2_gesture import check_drag_2, deactivate_drag2
from drag_1_gesture import check_drag_1, deactivate_drag1
gestures_names = {
0: 'drag1',
1: 'drag2',
2: 'thumb',
3: 'pinch',
4: 'thumb_index',
5: 'open',
6: 'other'
}
class GestureRecognizer:
def __init__(self):
self.model = keras.models.load_model('models/trained_model')
self.drag1_gesture = Drag1Gesture()
self.drag2_gesture = Drag2Gesture()
self.thumb_gesture = ThumGesture()
self.pinch_gesture = PinchGesture()
self.noop_gesture = Gesture('noop')
def predict(self, landmarks):
arr = []
for item in landmarks:
arr.append(item.x)
arr.append(item.y)
arr.append(item.z)
out = self.model.predict(np.array(arr).reshape([1, -1]))
mx = np.argmax(out, axis=-1)
idx = int(mx[0])
#print(gestures_names[idx])
return idx
def get(self, landmarks):
idx = self.predict(landmarks)
pts = [pt(p) for p in landmarks]
ges = self.check_drag2(idx, pts)
if ges is not None:
deactivate_drag1()
return ges
ges = self.check_drag1(idx, pts)
if ges is not None:
return ges
ges = self.check_thumb(idx, pts)
if ges is not None:
return ges
ges = self.check_pinch(idx, pts)
if ges is not None:
return ges
return self.noop_gesture
def check_pinch(self, idx, pts):
if idx == 3:
return self.pinch_gesture
return None
def check_thumb(self, idx, pts):
if idx == 2:
return self.thumb_gesture
return None
def check_drag1(self, idx, pts):
if not (idx == 0 or idx == 4 or idx == 5):
deactivate_drag1()
return None
if check_drag_1(pts):
return self.drag1_gesture
return None
def check_drag2(self, idx, pts):
if not (idx == 1 or idx == 2 or idx == 4):
deactivate_drag2()
return None
if check_drag_2(pts):
return self.drag2_gesture
return None
|
[
"rajeevku02@gmail.com"
] |
rajeevku02@gmail.com
|
92378b9d2b6ae21a09ab5425517a89f70af2e4f6
|
e8503af6e8c8b7c10b93a76dcf0cbb141074361e
|
/pswa_django/pswa_django/urls.py
|
2bcd250b9cffc4ca636ab62a350aadf613f498e5
|
[] |
no_license
|
jjbyrne1/Project-Scheduler-Web-App
|
ea5e15ebe6627c1f619b6182bddd359362d7f67f
|
ef15fbb5853bda83dd2d11efeb6ae8625f5ba103
|
refs/heads/main
| 2023-04-21T02:36:16.726708
| 2021-05-13T18:09:25
| 2021-05-13T18:09:25
| 340,113,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
"""pswa_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("mainForm.urls")),
]
# Credit to https://stackoverflow.com/questions/5871730/how-to-upload-a-file-in-django
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
[
"jjbyrne@ksu.edu"
] |
jjbyrne@ksu.edu
|
044985b9b265586f2b071cc1296c5845a039b17d
|
56b7e5ed6941fc4b83148e00bd51421dc3ac993a
|
/Indeed/Expire Map.py
|
2b1778212c66da456e0bb6bd3e0defd2bbc1db77
|
[] |
no_license
|
samir-0711/Leetcode-Python
|
f960e15015a3f2fd88f723d7f9237945a7133553
|
d75876ae96bcd85c67bbfbf91bbc0f0bc773e97c
|
refs/heads/master
| 2022-12-18T05:27:48.224001
| 2020-09-30T21:03:42
| 2020-09-30T21:03:42
| 300,061,318
| 0
| 0
| null | 2020-09-30T20:59:42
| 2020-09-30T20:59:42
| null |
UTF-8
|
Python
| false
| false
| 722
|
py
|
import time
class Data:
def __init__(self, value, duration):
self.value = value
self.duration = duration
self.startTime = int(round(time.time()))
class ExpireMap:
def __init__(self):
self.map = {}
    def get(self, key):
        data = self.map.get(key)
        if data is None:
            return None
        currTime = int(round(time.time()))
        if currTime - data.startTime <= data.duration:
            return data.value
        else:
            # Expired: evict the entry from the map and report a miss
            del self.map[key]
            return None
def set(self, key, value, duration):
data = Data(value, duration)
self.map[key] = data
test1 = ExpireMap()
test1.set(1, 5, 3)
time.sleep(2)
print test1.get(1)
time.sleep(2)
print test1.get(1)
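# Expected output of the two calls above: the first get, 2 seconds into a
# 3-second TTL, prints 5; the second, 4 seconds in, prints None because the
# entry has expired and been evicted.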
|
[
"weng8916@gmail.com"
] |
weng8916@gmail.com
|
ab88b8234f344ef4469f84313c26e2edc8cec90b
|
d56a3ebea066bdd10e8f554be13be7260118ddad
|
/Server Code/server.py
|
d7e4a81da83d92f9398b9e34de9e6672797d1183
|
[
"MIT"
] |
permissive
|
Shanjiith-Pranov/AOGS-Code
|
20ce7d003f80521ff0d98c8c43a873539075a3c9
|
ed4c1b15a16fdb336da42eb838f83aaa16151b0d
|
refs/heads/main
| 2023-06-01T21:36:04.786653
| 2021-06-19T05:42:37
| 2021-06-19T05:42:37
| 378,325,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,497
|
py
|
import unittest
from datetime import datetime
from math import log, sin, cos, atan2, asin, degrees, radians, sqrt
import numpy
earth_radius = 6371 # kilometers
def haversine(point1, point2):
"""
Calculates the distance between two points on the earth.
haversine((52.2296756, 21.0122287), (52.406374, 16.9251681))
278.4581750754194
"""
lat1, lat2 = radians(point1[0]), radians(point2[0])
lon1, lon2 = radians(point1[1]), radians(point2[1])
delta_lat = lat2 - lat1
delta_lon = lon2 - lon1
    a = (sin(delta_lat/2)**2) + (cos(lat1)*cos(lat2)*(sin(delta_lon/2)**2))
c = 2*atan2(sqrt(a), sqrt(1-a))
distance = earth_radius * c
return distance
class SeismicStation:
"""
Class that creates the objects for a seismic station with a 'name', and
a set of gps coordinates, lat and lon (degrees)
"""
    def __init__(self, name, coords: tuple):
self.name = name
self.coords = coords
self.latitude = coords[0]
self.longitude = coords[1]
self.events = list()
def add_event(self, event):
"""
Adds a single event to the events list.
"""
self.events.append(event)
return None
    def __str__(self):
result = '{0.name} at {0.coords}'.format(self)
return result
    def __repr__(self):
result = '{0.name}'.format(self)
return result
class StationEvent:
"""
An object pertaining to a single seismic event at a single seismic recording
station.
"""
    def __init__(self, p_arrival_time, s_arrival_time, max_amplitude):
p_time, s_time = self.parse_station_time(p_arrival_time, s_arrival_time)
self.delta = s_time - p_time
self.delta_sec = self.delta.seconds
self.p_arrival_time = p_time
self.s_arrival_time = s_time
self.max_amplitude = max_amplitude
self.Vsp = self.wave_velocity()
self.dist_to_eq = self.calc_distance()
self.magnitude = self.calc_magnitude()
self.seismic_moment = self.calc_seismic_moment()
self.energy = self.calc_seismic_energy()
    def __str__(self):
message = "{} | Tsp(s): {}, Amp(mm): {}"
return message.format(self.p_arrival_time, self.delta_sec, self.max_amplitude)
    def __repr__(self):
message = "{} | Tsp(s): {}, Amp(mm): {}"
return message.format(self.p_arrival_time, self.delta_sec, self.max_amplitude)
def wave_velocity(self, VS=3.67, VP=6.34):
"""
Calculates the wave velocity based upon assumptions VS and VP.
VS = avg velocity of s-waves in CA crustal rocks (km/sec)
VP = avg velocity of p-waves in CA crustal rocks (km/sec)
"""
Vsp = (VS*VP) / (VP-VS)
return Vsp
def parse_station_time(self, p_time, s_time):
"""
parse_station_time("08:00:00", "08:00:49")
"""
p_time = datetime.strptime(p_time, "%H:%M:%S")
s_time = datetime.strptime(s_time, "%H:%M:%S")
return p_time, s_time
def calc_distance(self):
"""
Calculates the distance from the epicenter of the earthquake from
one seismic station. Using assumption of average velocity in California
crustal rocks for Vsp. (adaptable for location of stations or earthquake)
"""
self.dist_to_eq = float(self.delta_sec * self.Vsp)
return self.dist_to_eq
def calc_magnitude(self):
"""
Calculates the magnitude of the Earthquake on the Richter Scale.
source: http://crack.seismo.unr.edu/ftp/pub/louie/class/100/magnitude.html
"""
result = log(self.max_amplitude) + (3*log(8*self.delta_sec)-2.92)
self.magnitude = result
return self.magnitude
def calc_seismic_moment(self):
"""
Calculates the seismic moment (dyne-cm) of the earthquake based upon relationship
with Magnitude. source: https://goo.gl/lLpS9x
"""
        result = 10 ** ((3/2) * (self.magnitude + 16))
self.seismic_moment = result
return self.seismic_moment
def calc_seismic_energy(self, method='moment'):
"""
Calculates the amount of Energy (ergs) released by the earthquake, based on
either the magnitude or the seismic moment.
"""
if method == 'magnitude':
"""
E = 10 ^ (11.8 + (1.5 * Magnitude))
"""
result = 10 ** (11.8+(1.5*self.magnitude))
elif method == 'moment':
"""
E = Moment / 20,000
"""
result = self.seismic_moment / 20000
else:
print("Error, available methods are 'moment' or 'magnitude'.")
result = None
self.energy = result
return self.energy
def print_report(self):
"""
Prints out the results. :)
"""
message = 'The difference between p- and s-wave arrival times was: {} seconds.\
\nThe distance to the earthquake is {} kilometers.'
print(message.format(self.delta_sec, self.dist_to_eq))
class Earthquake:
"""
Compiles data from at least three seismic station events to determine
the epicenter of the earthquake.
"""
    def __init__(self, *args):
self.station1 = args[0]
self.station2 = args[1]
self.station3 = args[2]
self.epicenter = Earthquake.calc_epicenter(self)
def calc_epicenter(self):
'''
Calculates the epicenter of the Earthquake with the following steps:
1. Gets the latitude (radians), longitude (radians), and radius (km) of each of the 3 seismic station events given
2. Converts the geodetic latitude and longitude to ECEF xyz coordinates.
3. Apply each X, Y, Z set of coordinates for each of the 3 points to it's own numpy array.
4. Individually calculate the X, Y, and Z coordinates of the epicenter.
5. Convert the ECEF xyz coordinates of the epicenter back to Geodetic Latitude and Longitude.
returns the location of the epicenter as a tuple (latitude, longitude)
'''
lat1 = radians(self.station1.coords[0])
lon1 = radians(self.station1.coords[1])
r1 = self.station1.events[0].dist_to_eq
lat2 = radians(self.station2.coords[0])
lon2 = radians(self.station2.coords[1])
r2 = self.station2.events[0].dist_to_eq
lat3 = radians(self.station3.coords[0])
lon3 = radians(self.station3.coords[1])
r3 = self.station3.events[0].dist_to_eq
x1 = earth_radius * (cos(lat1) * cos(lon1))
y1 = earth_radius * (cos(lat1) * sin(lon1))
z1 = earth_radius * (sin(lat1))
x2 = earth_radius * (cos(lat2) * cos(lon2))
y2 = earth_radius * (cos(lat2) * sin(lon2))
z2 = earth_radius * (sin(lat2))
x3 = earth_radius * (cos(lat3) * cos(lon3))
y3 = earth_radius * (cos(lat3) * sin(lon3))
z3 = earth_radius * (sin(lat3))
P1 = numpy.array([x1, y1, z1])
P2 = numpy.array([x2, y2, z2])
P3 = numpy.array([x3, y3, z3])
ex = (P2 - P1)/(numpy.linalg.norm(P2 - P1))
i = numpy.dot(ex, P3 - P1)
ey = (P3 - P1 - i*ex)/(numpy.linalg.norm(P3 - P1 - i*ex))
ez = numpy.cross(ex, ey)
d = float(numpy.linalg.norm(P2 - P1))
j = numpy.dot(ey, P3 - P1)
        x = ((r1**2) - (r2**2) + (d**2)) / (2*d)
        y = (((r1**2) - (r3**2) + (i**2) + (j**2)) / (2*j)) - ((i/j)*x)
        z = sqrt(abs((r1**2) - (x**2) - (y**2)))
tri_point = P1 + (x*ex) + (y*ey) + (z*ez)
lat = degrees(asin(tri_point[2] / earth_radius))
lon = degrees(atan2(tri_point[1], tri_point[0]))
epicenter = (lat, lon)
self.epicenter = epicenter
return self.epicenter
sensor1 = SeismicStation('sensor1', (40.8021, -124.1637))
sensor2 = SeismicStation('sensor2', (40.8324, -115.7631))
sensor3 = SeismicStation('sensor3', (36.1699, -115.1398))
event1 = StationEvent("00:00:00", "00:01:08", 250)
event2 = StationEvent("00:00:00", "00:01:14", 50)
event3 = StationEvent("00:00:00", "00:01:04", 100)
sensor1.add_event(event1)
sensor2.add_event(event2)
sensor3.add_event(event3)
eq=Earthquake(sensor1, sensor2, sensor3)
print("The epicenter of the earthquake is: " + str(eq.calc_epicenter()))
print("The magnitude of the eathquake is: " + str(eq.calc_magnitude()))
|
[
"62892238+Shanjiith-Pranov@users.noreply.github.com"
] |
62892238+Shanjiith-Pranov@users.noreply.github.com
|
ab9de07f610e712458e834dd574d3d92370c62d3
|
70b176a173825ba46a3688bb1f7a98046093f201
|
/SongGrapher.py
|
4261c18adf32c72e831e4c889b4fb8d22cbac5f7
|
[] |
no_license
|
ebovio/MusicMiner
|
b767871db4de47ff9e6411deecac1a5707ba68f5
|
8be7ceb9a31e24344b39b3c86ab03a84a4a9060d
|
refs/heads/master
| 2020-05-18T21:57:33.534620
| 2019-08-14T03:54:16
| 2019-08-14T03:54:16
| 184,677,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
import pymongo
import numpy as np
import matplotlib.pyplot as plt
dbPath = 'mongodb://localhost:27017/' # Connection address
dbName = 'canciones' # Database name
colName = 'lista_canciones' # Collection name
myclient = pymongo.MongoClient(dbPath)
mydb = myclient[dbName]
mycol = mydb[colName]
year = 1957
year_list = np.array([])
average_valence = np.array([])
valenceStats = {
}
while(year<=2018):
for x in mycol.find( {'year': str(year)} ):
if year not in valenceStats:
valenceStats[year] = np.array([])
valenceStats[year] = np.append(valenceStats[year], x['valence'])
else:
valenceStats[year] = np.append(valenceStats[year], x['valence'])
year_list = np.append(year_list,year)
year +=1
for i in year_list:
average_valence = np.append(average_valence, np.average(valenceStats[i]))
print(average_valence)
plt.plot(year_list,average_valence,'ro')
plt.xlabel('Year')
plt.ylabel('Average valence')
plt.show()
|
[
"e.bovio08@gmail.com"
] |
e.bovio08@gmail.com
|
4986a3100e08387b0cd05dec0ec98e374ed7f5c9
|
b3585d5d5379540a06b146c40dd50b424cc2aa6b
|
/leetCode/isSameTree.py
|
c899e335ee0dbf03fbfa752d2ad4d3ef741d4e58
|
[] |
no_license
|
shivamkaushik12007/practice
|
402e839512099a42bd94426a863f71f3e8d4156c
|
6689bc725d3bc58741b9bcb48cada4c276c4853f
|
refs/heads/master
| 2021-07-10T07:25:42.966500
| 2020-09-26T18:00:23
| 2020-09-26T18:00:23
| 200,655,807
| 1
| 2
| null | 2019-08-05T13:25:04
| 2019-08-05T12:58:24
| null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
        if p == None and q == None:
            return True
        if p == None or q == None or p.val != q.val:
            return False
return self.isSameTree(p.left,q.left) and self.isSameTree(p.right,q.right)
|
[
"noreply@github.com"
] |
shivamkaushik12007.noreply@github.com
|
bee96e8e20e7141cc0a9bfd1c6a79a254632d4a3
|
92a1114f93ec0140fd9c60e93ecb39748dc5ac54
|
/Sample Preparation/gray2rgb.py
|
885e42beab92bb852002f4fefbb9de3ab70636e3
|
[
"Apache-2.0"
] |
permissive
|
A-new/ResNet-Packed-Malware-Detection
|
527f92341591421e4cc103ac5157d80f00885b0e
|
b7f9760285246c2ba4e3e6ce8a3b39c3ffbda52f
|
refs/heads/main
| 2023-06-29T17:18:16.753863
| 2021-07-30T07:06:35
| 2021-07-30T07:06:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
import numpy as np
import cv2
src = cv2.imread("目标路径与文件名", 0)
src_RGB = cv2.cvtColor(src, cv2.COLOR_GRAY2RGB)
cv2.imshow("2rgb", src_RGB)
cv2.imwrite("写入的路径与文件名", src_RGB)
cv2.waitKey(0)
|
[
"noreply@github.com"
] |
A-new.noreply@github.com
|
f8f461746d356c6b4f3a0fdabc67b71a89a74e00
|
1fcd563548cc6c54f40a9a7a8b5629db480aef7d
|
/database_handler.py
|
9ae295caff7f1c18e87e783de23e9ab5ff4b7b4f
|
[] |
no_license
|
benno0810/finance_data_scrapy
|
b350954f6e38033eb3a1be7b2114818ac0fdcca8
|
767d6231a382f5df241eaf58a1e51e7c1b696f82
|
refs/heads/main
| 2023-02-09T04:06:11.599816
| 2020-12-29T05:34:36
| 2020-12-29T05:34:36
| 319,832,871
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,446
|
py
|
import pymongo
import time
class DB():
def __init__(self,db_type='MongoDB',db_address='mongodb://localhost:27017/',db_name='db_test',table_name='col_test'):
self.db_address=db_address
self.db_type=db_type
self.db_name=db_name
self.table_name=table_name
def connect(self):
pass
def insert_one(self):
pass
def delete_one(self):
pass
def test_connection(self):
pass
class ProxyPool_DB(DB):
def __init__(self,db_type='MongoDB',db_address='mongodb://localhost:27017/',db_name='proxy_pool',table_name='proxy_col'):
super().__init__(db_type,db_address,db_name,table_name)
self.client = pymongo.MongoClient(self.db_address)
self.db=self.client[self.db_name]
self.col=self.db[self.table_name]
self.collist=self.db.list_collection_names()
if self.table_name in self.collist:
            print('Collection already exists: {}'.format(self.table_name))
else:
line={
'ip_address':'127.0.0.1:30300',
'expires_time': time.time()
}
x=self.col.insert_one(line)
print(x)
def test_connection(self):
return True
def insert_one(self,line:dict):
super().insert_one()
if self.test_connection() and line.get('ip_address'):
if not line.get('expires_time'):
            # If no expiry timestamp was given, default to 180 seconds from now
line['expires_time']=time.time()+180
x=self.col.insert_one(line)
print(x)
def delete_many(self,myquery:dict):
x = self.col.delete_many(myquery)
        print(x.deleted_count, "documents deleted")
def delete_one(self,myquery:dict):
super().delete_one()
def find_many(self,myquery:dict):
x=self.col.find(myquery)
return x
def aggregate(self,myquery:list):
x=self.col.aggregate(myquery)
return x
if __name__=='__main__':
db_test = ProxyPool_DB()
line_test={
'ip_address':'127.0.0.1:30031',
'expires_time':time.time()-100
}
#db_test.insert_one(line_test)
myquery={
'ip_address':'127.0.0.1:30031'
}
myquery2={}
#=list(db_test.find_many(myquery2))
x=db_test.col.estimated_document_count()
print(x)
|
[
"benno0810@gmail.com"
] |
benno0810@gmail.com
|
d98e426c5ffa96200e49a63c91cbb1ac43216323
|
220e3fe31f00df908dc8d00c507400425f924cc3
|
/examples/multi_system/act6/unload.py
|
bf0fcc574b45c2f7fcf2d21c030c21e4aa89ff1f
|
[
"MIT"
] |
permissive
|
danielmitterdorfer/Thespian
|
3ed700d9fc6da35becfe801d3ab3bb68c86bddbc
|
f59439df8a6147b90ec31b44924d6a1b620f09d9
|
refs/heads/master
| 2021-01-19T05:06:33.005708
| 2017-07-31T04:44:03
| 2017-07-31T04:44:03
| 65,544,862
| 0
| 0
| null | 2016-08-12T10:22:29
| 2016-08-12T10:22:29
| null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from thespian.actors import ActorSystem, Actor, ValidateSource, ValidatedSource
import sys
portnum = int(sys.argv[1])
srchash = sys.argv[2]
asys = ActorSystem('multiprocTCPBase', {'Admin Port': portnum})
asys.unloadActorSource(srchash)
|
[
"kquick@godaddy.com"
] |
kquick@godaddy.com
|
98da7301ee8877e6ff6c1b20ba1b0043c82e30e9
|
a2db2ed8f6e982b4d2d1a743e824964ffa386148
|
/accounts/migrations/0022_auto_20171029_1555.py
|
0956576e79cb70e5a12af42bc44a580d25b2ef54
|
[] |
no_license
|
defydef/forum_board
|
ffae964dc9c877963dc1984a29fff15a9f424e53
|
41a46cb58fdc1757ed9329782aefa105849e9c32
|
refs/heads/master
| 2022-12-24T08:27:23.707497
| 2017-11-19T06:48:45
| 2017-11-19T06:48:45
| 111,266,814
| 0
| 0
| null | 2022-12-08T00:34:52
| 2017-11-19T05:20:05
|
Python
|
UTF-8
|
Python
| false
| false
| 760
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-29 04:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0021_auto_20171028_2111'),
]
operations = [
migrations.RemoveField(
model_name='newskill',
name='category',
),
migrations.RemoveField(
model_name='profile',
name='skillcategory',
),
migrations.AddField(
model_name='profile',
name='skill',
field=models.ManyToManyField(to='accounts.NewSkill'),
),
migrations.DeleteModel(
name='SkillCategory',
),
]
|
[
"devy.f.sihaloho@gmail.com"
] |
devy.f.sihaloho@gmail.com
|
6d9a899cc5415e40329693b80d3cc1bbf9759db2
|
a257bf65a2a1ba2c6841dd25c89d98c5672e4e57
|
/BackEnd/Semana22/DjangoRestFramework/DjangoRestFramework/wsgi.py
|
424593130b609b9f268eda5e5d98d2c974645dad
|
[] |
no_license
|
jorgegarba/CodiGo9
|
190cb67e3c7f9cbad271baf62657bda7ca03ec42
|
3b85c36a3ed8d2d5ee1d0fb6e8ca18599621fe47
|
refs/heads/master
| 2023-01-22T22:31:00.244982
| 2020-03-31T17:59:37
| 2020-03-31T17:59:37
| 211,982,487
| 6
| 5
| null | 2023-01-05T05:23:27
| 2019-10-01T00:21:25
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 415
|
py
|
"""
WSGI config for DjangoRestFramework project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoRestFramework.settings')
application = get_wsgi_application()
|
[
"ederiveroman@gmail.com"
] |
ederiveroman@gmail.com
|
6f9219124cdf28edd912b1cbde65e7ea17aece30
|
7b315bbe8c85ce05e6c51112e985ae1b392d83f5
|
/desafio_calcipv4/__init__.py
|
52688559769bae42e626eb8e42a779ae27f16e24
|
[] |
no_license
|
Cica013/aprendendoPython
|
e9f993b1b144e294a338a53f2bc36673d3cd00a6
|
9c964f2322e3d52b39a811aceec64b169bab4e10
|
refs/heads/main
| 2023-08-10T20:12:47.640239
| 2021-10-06T21:01:19
| 2021-10-06T21:01:19
| 385,755,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 104
|
py
|
from classes.calcipv4 import CalcIpv4
calc_ipv4 = CalcIpv4(ip='192.168.0.1', mascara='255.255.255.0')
|
[
"61808853+Cica013@users.noreply.github.com"
] |
61808853+Cica013@users.noreply.github.com
|
05b6a07425082a6af963320ba3ad7ce4ae2bf435
|
c885e60f9a86dc636b43bfd28e86162bd6d68806
|
/Students/xml与json数据之间的转换.py
|
f040c1ae36c13cd0a5c7c87322ba7c132fb33c45
|
[] |
no_license
|
zhaopengtian/requesttest
|
4eaa235293447cac39964ab383e77436cd70f81c
|
df3ca7a4ad4bd8b5cf67efbc9aff4ee7ad8242f8
|
refs/heads/master
| 2023-09-05T12:30:39.077101
| 2021-11-08T10:27:55
| 2021-11-08T10:27:55
| 424,950,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
# First install xmltodict: python3 -m pip install xmltodict
import xmltodict
import json
# Define the XML-to-JSON conversion function
def xmltojson(xmlstr):
    xmlparse = xmltodict.parse(xmlstr) # parse is the XML parser
jsonstr = json.dumps(xmlparse,indent=2,sort_keys=True)
return jsonstr
# Define the JSON-to-XML conversion function
def jsontoxml(jsonstr):
xmlstr = xmltodict.unparse(jsonstr)
return xmlstr
if __name__ == '__main__':
xmlinfo = """
<student>
<bokeid>fighter006</bokeid>
<bokeinfo>
<cnbologsname>laolu</cnbologsname>
<page>120</page>
</bokeinfo>
<data>
<address>http://www.baidu.com</address>
<title>python+dt+requests</title>
</data>
</student>
"""
aa = {
"student": {
"bokeid": "fighter006",
"bokeinfo": {
"cnbologsname": "laolu",
"page": "120"
},
"data": {
"address": "http://www.baidu.com",
"title": "python+dt+requests"
}
}
}
xtoj = xmltojson(xmlinfo)
    print('XML to JSON:', xtoj)
jtox = jsontoxml(aa)
    print('JSON to XML:', jtox)
|
[
"chinaume@163.com"
] |
chinaume@163.com
|
a4df80ef0342700b0d72315bfaa9dafc12385988
|
87666a8920b134f2cd0815c9c127c4fa92e98b1b
|
/rover_project/test/test_reader_read_rover_starting_position.py
|
86d1b9dcfed5a8c3d37cd34b1327ffa5e5edf81b
|
[] |
no_license
|
gacrta/backend-rover-challenge
|
7f6d617eaa3528f3151af8ffdfedb33eb71162d7
|
1d3c690c908e485faeffde912aa73227b0490da4
|
refs/heads/master
| 2020-04-21T05:30:06.572240
| 2019-02-11T09:15:19
| 2019-02-11T09:15:19
| 169,342,584
| 0
| 0
| null | 2019-02-06T01:54:44
| 2019-02-06T01:54:44
| null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
from rover_project import reader
import unittest
class TestReaderReadRoverStartingPosition(unittest.TestCase):
""" Test Class for Reader.read_rover_starting_position
method at reader module.
"""
file_path = 'rover_project/tests/'
def test_reader_read_x_coord(self):
"""
Test if reader gets correct x_coord from
a file that contains 3 1 N information.
"""
filename = TestReaderReadRoverStartingPosition.file_path + "test_reader_rover_pos.txt"
with reader.Reader(filename) as r:
x_coord, y_coord, direction = r.read_rover_starting_position()
self.assertEqual(x_coord, 3)
def test_reader_read_y_coord(self):
"""
Test if reader gets correct y_coord from
a file that contains 3 1 N information.
"""
filename = TestReaderReadRoverStartingPosition.file_path + "test_reader_rover_pos.txt"
with reader.Reader(filename) as r:
x_coord, y_coord, direction = r.read_rover_starting_position()
self.assertEqual(y_coord, 1)
def test_reader_read_direction(self):
"""
Test if reader gets correct direction from
a file that contains 3 1 N information.
"""
filename = TestReaderReadRoverStartingPosition.file_path + "test_reader_rover_pos.txt"
with reader.Reader(filename) as r:
x_coord, y_coord, direction = r.read_rover_starting_position()
self.assertEqual(direction, 'N')
def test_reader_wrong_input(self):
"""
        Test that the reader rejects wrong input and doesn't
        crash.
"""
filename = TestReaderReadRoverStartingPosition.file_path + "test_reader_wrong_input.txt"
with reader.Reader(filename) as r:
self.assertRaises(ValueError, r.read_upper_right_coordinates)
if __name__ == "__main__":
unittest.main(exit=False)
|
[
"gabrielcrta@gmail.com"
] |
gabrielcrta@gmail.com
|
00bde05cade22c41d0a35433fd6cb5452820be66
|
c8cf17465cfaf9858fe79de7d56841564226b67b
|
/Block.py
|
e4178e03c1e4acc98c64e32caf7b2e4c82c9f5db
|
[
"MIT"
] |
permissive
|
bullgom/Network-Project
|
29ebeb6699486ecb3528a05f91592b947c3488bd
|
a5a0ffad006c67c9ddbb769bb1d3c557c15d344d
|
refs/heads/master
| 2020-03-07T21:30:40.895642
| 2018-07-08T09:15:05
| 2018-07-08T09:15:05
| 127,728,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
import pygame as pyg
from GUI.base import BaseWidget
from Locals import *
class Block(BaseWidget):
def __init__(self, pos, size, name, id, imageDirectory, level, anchor=CENTER):
super().__init__(pos,size,anchor=anchor)
self.name = name
self.id = id
self.image = pyg.image.load(imageDirectory).convert_alpha()
self.image = pyg.transform.scale(self.image,size)
self.level = level #If level < 0 then lower than character
def render(self, surface):
surface.blit(self.image, self.as_rect())
|
[
"noreply@github.com"
] |
bullgom.noreply@github.com
|
aa196ae79a573731a31b45d1c19f8134b2e2a7bc
|
effa594367e5760dd2800a0a9707c2f26c3d4bd4
|
/connection.py
|
b767752708bdf4a89b2cd7e67f3026ae08556210
|
[
"MIT"
] |
permissive
|
diogocanut/blockchain-sniffer
|
5e2f5595e7e2f5e283c44c2cbcf4049996d5049d
|
8d14844ee2a508e1d5e931c515a27171832ae5cc
|
refs/heads/master
| 2023-03-28T17:10:53.027427
| 2021-04-03T18:11:02
| 2021-04-03T18:11:02
| 147,882,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,694
|
py
|
# Bitcoin P2P network transactions analyser
#
# This file is based on https://github.com/sebicas/bitcoin-sniffer by @sebicas
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import asyncore
import socket
import struct
import time
from StringIO import StringIO
from serializers import *
from event import Event
class Connection(asyncore.dispatcher):
messagemap = {
"version": msg_version,
"verack": msg_verack,
"addr": msg_addr,
"alert": msg_alert,
"inv": msg_inv,
"getdata": msg_getdata,
"getblocks": msg_getblocks,
"tx": msg_tx,
"block": msg_block,
"getaddr": msg_getaddr,
"ping": msg_ping
}
def __init__(self, host, database):
asyncore.dispatcher.__init__(self)
self.dstaddr = host[0]
self.dstport = host[1]
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = ""
self.recvbuf = ""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.event = Event(database)
vt = msg_version()
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print("\n Blockchain transactions analyzer")
print("Connection to peer: ", self.dstaddr)
try:
self.connect((self.dstaddr, self.dstport))
except:
self.handle_close()
def handle_connect(self):
print("Connection realized\n")
self.state = "connected"
def handle_close(self):
print("Ending connection")
self.state = "closed"
self.recvbuf = ""
self.sendbuf = ""
try:
self.close()
except:
pass
        self.__init__  # note: a bare attribute access is a no-op; a real reset would need an explicit call with fresh arguments
def handle_read(self):
try:
t = self.recv(8192)
except:
self.handle_close()
return
if len(t) == 0:
self.handle_close()
return
self.recvbuf += t
self.got_data()
def readable(self):
return True
def writable(self):
return (len(self.sendbuf) > 0)
def handle_write(self):
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != "\xf9\xbe\xb4\xd9":
raise ValueError("Got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 20:
return
command = self.recvbuf[4:16].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[16:20])[0]
checksum = None
if len(self.recvbuf) < 20 + msglen:
return
msg = self.recvbuf[20:20 + msglen]
self.recvbuf = self.recvbuf[20 + msglen:]
else:
if len(self.recvbuf) < 24:
return
command = self.recvbuf[4:16].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[16:20])[0]
checksum = self.recvbuf[20:24]
if len(self.recvbuf) < 24 + msglen:
return
msg = self.recvbuf[24:24 + msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("Bad checksum {}".format(repr(self.recvbuf)))
self.recvbuf = self.recvbuf[24 + msglen:]
if command in self.messagemap:
f = StringIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
print("Unknown command {}".format(command))
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
command = message.command
data = message.serialize()
tmsg = "\xf9\xbe\xb4\xd9"
tmsg += command
tmsg += "\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if self.last_sent + 30 * 60 < time.time():
self.send_message(msg_ping())
if message.command == "version":
if message.nVersion >= 209:
self.send_message(msg_verack())
self.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
self.ver_recv = self.ver_send
elif message.command == "verack":
self.ver_recv = self.ver_send
elif message.command == "inv":
want = msg_getdata()
for i in message.inv:
if i.type == 1:
want.inv.append(i)
elif i.type == 2:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
elif message.command == "tx":
self.event.new_transaction(message.tx)
elif message.command == "block":
self.event.new_block(message.block)
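# Hedged usage sketch (not part of the original file): host/port are
# illustrative, and `database` stands for whatever the accompanying Event
# class expects -- its interface is not shown here.
# if __name__ == '__main__':
#     conn = Connection(('127.0.0.1', 8333), database)
#     asyncore.loop()  # pumps handle_read/handle_write until the socket closes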
|
[
"diogocanut@hotmail.com"
] |
diogocanut@hotmail.com
|
21aaffec3ed8892eaf0a660128ffde4513149715
|
5ae5026dcbaddf976fa925338fb07d498bcc7f11
|
/ncvoter/wsgi.py
|
f2d906e89e65828a66c8c05f414c5340e5a1110e
|
[
"MIT"
] |
permissive
|
calebsmith/voters-ingestor
|
de947aa0aa6218b077f71fed8acfa2ccc68590ea
|
175a1195ec5b7402bf952ed28cff110e504982bb
|
refs/heads/master
| 2021-07-23T08:14:05.005640
| 2017-11-02T23:40:54
| 2017-11-02T23:40:54
| 109,330,315
| 0
| 0
| null | 2017-11-02T23:42:16
| 2017-11-02T23:42:16
| null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for ncvoter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ncvoter.prod_settings")
application = get_wsgi_application()
|
[
"caleb.smithnc@gmail.com"
] |
caleb.smithnc@gmail.com
|
b891a21e50fd7f9a52706f2b802ad343cca4ea72
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/compute_management_client_enums.py
|
94796a92c7936618c37a51b7bf0ec2a9b37639ee
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707
| 2018-03-29T17:16:15
| 2018-03-29T17:16:15
| 21,287,134
| 1
| 3
|
MIT
| 2019-10-25T15:56:00
| 2014-06-27T19:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class StorageAccountTypes(str, Enum):
standard_lrs = "Standard_LRS"
premium_lrs = "Premium_LRS"
class OperatingSystemTypes(str, Enum):
windows = "Windows"
linux = "Linux"
class DiskCreateOption(str, Enum):
empty = "Empty"
attach = "Attach"
from_image = "FromImage"
import_enum = "Import"
copy = "Copy"
restore = "Restore"
class SnapshotStorageAccountTypes(str, Enum):
standard_lrs = "Standard_LRS"
premium_lrs = "Premium_LRS"
standard_zrs = "Standard_ZRS"
class AccessLevel(str, Enum):
none = "None"
read = "Read"
|
[
"noreply@github.com"
] |
lmazuel.noreply@github.com
|
8707c6732a08d5007fe5f72f81bc5b3ae3317802
|
2a27d1c04b86fc32afea72cb4df12848f4a39078
|
/VNOI/pnumber.py
|
f8ec7a2caefa926ccafeffb8080595d84b7963b4
|
[] |
no_license
|
ngctnnnn/Competitive-Programming
|
9b68d3d30bdb8c0b258708b0c70005a037f2d01a
|
461d715720d4cdf88c0c79011c2aa873fb9e189c
|
refs/heads/main
| 2023-07-02T09:07:26.138500
| 2021-08-03T12:50:37
| 2021-08-03T12:50:37
| 321,263,350
| 4
| 0
| null | 2021-02-22T14:53:03
| 2020-12-14T07:11:07
|
C++
|
UTF-8
|
Python
| false
| false
| 394
|
py
|
def prime(x):
    """Trial division primality test (odd divisors up to sqrt(x))."""
    if x == 2 or x == 3:
        return True
    elif x % 2 == 0 or x < 2:
        return False
    else:
        for i in range(3, x, 2):
            if i * i > x:  # no divisor found below sqrt(x): x is prime
                break
            if x % i == 0:
                return False
        return True
x, y = map(int, input().split())
for i in range(x, y + 1):
    if prime(i):
print(i)
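# A hedged alternative sketch (not part of the original submission): the
# same primality test written with math.isqrt, assuming Python 3.8+.
import math
def prime_v2(x):
    if x < 2:
        return False
    if x % 2 == 0:
        return x == 2  # 2 is the only even prime
    for i in range(3, math.isqrt(x) + 1, 2):
        if x % i == 0:
            return False
    return True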
|
[
"noreply@github.com"
] |
ngctnnnn.noreply@github.com
|
0bebf2b16ff727c6ad6f1d7aca0f42970ec1dc48
|
bed559d18b0a9604e6d18879e1f3837d228d1440
|
/rx/backpressure/pausable.py
|
631ce64e952fd6f555f3e9866c6f605c96299a8e
|
[
"Apache-2.0"
] |
permissive
|
jesonjn/RxPY
|
a80b7a8f0a3a8a6ddcb7f3ed678d2f8411cad84e
|
9dfb62979f2c54b93bbb8c0ee5fa18cfae4d73d0
|
refs/heads/master
| 2020-12-29T00:25:17.866220
| 2014-11-15T10:24:05
| 2014-11-15T10:24:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,932
|
py
|
from six import add_metaclass
from rx import Observable
from rx.internal import ExtensionMethod
from rx.disposables import CompositeDisposable, Disposable
from rx.subjects import Subject
class PausableObservable(Observable):
def __init__(self, source, subject=None):
self.source = source
self.subject = subject or Subject()
self.is_paused = True
super(PausableObservable, self).__init__(self.subscribe)
def subscribe(self, observer):
conn = self.source.publish()
subscription = conn.subscribe(observer)
connection = [Disposable.empty()]
def on_next(b):
if b:
connection[0] = conn.connect()
else:
connection[0].dispose()
connection[0] = Disposable.empty()
pausable = self.subject.distinct_until_changed().subscribe(on_next)
return CompositeDisposable(subscription, connection[0], pausable)
def pause(self):
if self.is_paused:
return
self.is_paused = True
self.subject.on_next(False)
def resume(self):
if not self.is_paused:
return
self.is_paused = False
self.subject.on_next(True)
@add_metaclass(ExtensionMethod)
class ObservablePausable(Observable):
"""Uses a meta class to extend Observable with the methods in this class"""
def pausable(self, pauser):
"""Pauses the underlying observable sequence based upon the observable
sequence which yields True/False.
Example:
pauser = rx.Subject()
source = rx.Observable.interval(100).pausable(pauser)
Keyword parameters:
pauser -- {Observable} The observable sequence used to pause the
underlying sequence.
Returns the observable {Observable} sequence which is paused based upon
the pauser."""
return PausableObservable(self, pauser)
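# Hedged usage sketch mirroring the docstring example above (scheduler and
# timing details omitted; `Subject` is already imported in this module):
# pauser = Subject()
# source = Observable.interval(100).pausable(pauser)
# subscription = source.subscribe(lambda x: print(x))
# pauser.on_next(True)   # resume: connects the published source
# pauser.on_next(False)  # pause: disposes the current connection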
|
[
"dag@brattli.net"
] |
dag@brattli.net
|
eaff76abf8820739330402fe77b0aff5538045b0
|
cde14b5c9ed4fec317abfee4611af59b4967dbef
|
/team-separation/src/rooms.py
|
218974a62f6c80ca12b291093abf41c2617ff79d
|
[] |
no_license
|
hackohio/judging
|
de949e582b500b0fb0c9989ad85cad4c80645a3a
|
2adba3e2c11daa356ba0f2b3caa73e06860950ea
|
refs/heads/master
| 2020-09-01T08:47:14.179356
| 2019-11-03T12:21:45
| 2019-11-03T12:21:45
| 218,923,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
BALLROOM = 'Grand Ballroom (2nd floor)'
SENATE = 'Senate Chamber (2nd floor)'
TRADITIONS = 'Traditions Room(2nd Floor)'
CARTOON = 'Cartoon Room(3rd Floor)'
|
[
"kelly.wu.98@gmail.com"
] |
kelly.wu.98@gmail.com
|
5a00b73020577be86d6b7c9f68827501ec2be3eb
|
fafb5b817011892be9a824a4693bae58cebd5f04
|
/app/routes/auth/__init__.py
|
e3db8201ab12cfce7b750fd3cbb96e4e2952274a
|
[] |
no_license
|
vincentscode/Data-Studio
|
4c3f94a9d8bdf10bf08136637cb6c8ba162eeb0a
|
65d6b665a7b6ceef2ef388c96f6b6f6661fee2ce
|
refs/heads/master
| 2020-07-28T09:43:49.805922
| 2019-12-01T15:06:16
| 2019-12-01T15:06:16
| 209,384,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25
|
py
|
from . import GoogleAuth
|
[
"vincentscode@gmail.com"
] |
vincentscode@gmail.com
|
d7147c0137ee9e0ad4bd9852404b8af452a36406
|
191e0df0aa9f2bb7e5a9da214e2ca73fd9f277e9
|
/src/apps/myapp/views.py
|
45899d4020acf1ee1d7e7f9ee2029db2d08c96db
|
[
"MIT"
] |
permissive
|
NewReStarter/Django_Form
|
720b76fd8bffacbf46ba96d31e0ea5f862658a7c
|
3a9c8b536f5750ed9490533cee64ca358020a265
|
refs/heads/master
| 2020-03-13T04:23:12.460130
| 2018-05-08T15:52:35
| 2018-05-08T15:52:35
| 130,961,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
from datetime import datetime
from django.shortcuts import render
from django.views.generic.base import View
from .models import *
import json
class FormView(View):
def get(self, request):
categories = Category.objects.filter(status=1)
return render(request, "index.html", {'categories': categories})
def post(self, request):
        data = []
        check_list = {}  # maps category id -> its index in `data` plus a question index map
for k, v in request.POST.items():
category = Category.objects.get(id=k.split('_')[0])
question = Question.objects.get(id=k.split('_')[1])
            if category.id in check_list:
if len(k.split('_')) == 3:
c_index = check_list[category.id]['count']
q_index = check_list[category.id]['question'][question.id]
data[c_index]['questions'][q_index]['answer'].append(v)
else:
data[check_list[category.id]['count']]['questions'].append({
'answer': [v],
'id': question.id,
'text': question.title,
'addtion_info': question.describe,
})
check_list[category.id]['question'][question.id] = len(check_list[category.id]['question'])
else:
data.append({
'id': category.id,
'text': category.text,
'questions': [{
'answer': [v],
'id': question.id,
'text': question.title,
'addtion_info': question.describe,
}],
})
check_list[category.id] = {
'count': len(data) - 1,
'question': {
question.id: 0
}
}
form_data = Form_data()
form_data.data = json.dumps(data)
form_data.create_time = datetime.now()
form_data.modify_time = datetime.now()
form_data.save()
categories = Category.objects.filter(status=1)
return render(request, "index.html", {'categories': categories})
|
[
"ziliugao@gmail.com"
] |
ziliugao@gmail.com
|
5db76e654df91d80b843b73dd410c5b47ee56eeb
|
57b6db85bd35ffa9c5ab8f38cf5bca5821b42d73
|
/reader_file_csv.py
|
72ee5a3064914def4e49534021923ec1bcff2107
|
[] |
no_license
|
Suryana009/PythonTutorial
|
2adb880f20dbfed64e9f8e7b2f9aa18f18bac1ad
|
858a5fe146cf9c6b82d89c236ba6c4524f1782fd
|
refs/heads/master
| 2020-03-09T22:06:21.236164
| 2018-04-23T06:17:57
| 2018-04-23T06:17:57
| 129,026,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
import csv
# Read and print every row of the CSV; the `with` block closes the file.
with open('karyawan.csv', 'r') as f:
    reader = csv.reader(f)
    for row in reader:
        print(row)
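# Hedged follow-up sketch: the same file via csv.DictReader, assuming the
# first row of karyawan.csv is a header row (an assumption, not verified).
with open('karyawan.csv', 'r') as f:
    for record in csv.DictReader(f):
        print(record)  # each row as a dict keyed by the header fields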
|
[
"suryana.ryan009@gmail.com"
] |
suryana.ryan009@gmail.com
|
e2053a52894b2dba4c8f3b3e5598d763d3246c1e
|
809b59be1161be7cf19a483ff1154fe2c8eda794
|
/loss_from_log.py
|
687abcf780272687c083b35408390555e7da9ff6
|
[] |
no_license
|
qzhao/train-CRF-RNN
|
d444bdc434424c20e98a68ca408a935cdb1b575c
|
9af2ce367e34f9d3d12df55701ad14757b908d58
|
refs/heads/master
| 2020-12-29T00:55:42.557635
| 2016-03-10T09:21:49
| 2016-03-10T09:21:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,642
|
py
|
#!/usr/bin/env python
# Martin Kersner, 2016/01/13
from __future__ import print_function
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
from utils import strstr
def main():
log_files = process_arguments(sys.argv)
train_iteration = []
train_loss = []
test_iteration = []
test_loss = []
test_accuracy = []
pixel_accuracy = []
mean_accuracy = []
mean_IU = []
frequency_weighted_IU = []
base_test_iter = 0
base_train_iter = 0
for log_file in log_files:
with open(log_file, 'rb') as f:
if len(train_iteration) != 0:
base_train_iter = train_iteration[-1]
base_test_iter = test_iteration[-1]
for line in f:
# TRAIN NET
if strstr(line, 'Iteration') and strstr(line, 'lr'):
matched = match_iteration(line)
train_iteration.append(int(matched.group(1))+base_train_iter)
elif strstr(line, 'Train net output'):
matched = match_loss(line)
train_loss.append(float(matched.group(1)))
elif strstr(line, 'pixel_accuracy'):
matched = re.search(r'pixel_accuracy: (.*)', line)
pixel_accuracy.append(float(matched.group(1)))
elif strstr(line, 'mean_accuracy'):
matched = re.search(r'mean_accuracy: (.*)', line)
mean_accuracy.append(float(matched.group(1)))
elif strstr(line, 'mean_IU'):
matched = re.search(r'mean_IU: (.*)', line)
mean_IU.append(float(matched.group(1)))
elif strstr(line, 'frequency_weighted'):
matched = re.search(r'frequency_weighted: (.*)', line)
frequency_weighted_IU.append(float(matched.group(1)))
# TEST NET
elif strstr(line, 'Testing net'):
matched = match_iteration(line)
test_iteration.append(int(matched.group(1))+base_test_iter)
elif strstr(line, 'Test net output'):
matched = match_loss(line)
if matched:
test_loss.append(float(matched.group(1)))
else:
matched = match_accuracy(line)
test_accuracy.append(float(matched.group(1)))
print("TRAIN", train_iteration, train_loss)
print("TEST", test_iteration, test_loss)
print("ACCURACY", test_iteration, test_accuracy)
# loss
plt.plot(train_iteration, train_loss, 'k', label='Train loss')
plt.plot(test_iteration, test_loss, 'r', label='Test loss')
plt.legend()
plt.ylabel('Loss')
plt.xlabel('Number of iterations')
plt.savefig('loss.png')
# evaluation
plt.clf()
plt.plot(range(len(pixel_accuracy)), pixel_accuracy, 'k', label='pixel accuracy')
plt.plot(range(len(mean_accuracy)), mean_accuracy, 'r', label='mean accuracy')
plt.plot(range(len(mean_IU)), mean_IU, 'g', label='mean IU')
plt.plot(range(len(frequency_weighted_IU)), frequency_weighted_IU, 'b', label='frequency weighted IU')
plt.legend(loc=0)
plt.savefig('evaluation.png')
def match_iteration(line):
return re.search(r'Iteration (.*),', line)
def match_loss(line):
return re.search(r'loss-ft = (.*) \(', line)
def match_accuracy(line):
return re.search(r'seg-accuracy = (.*)', line)
def process_arguments(argv):
print(argv)
if len(argv) < 2:
help()
log_files = argv[1:]
return log_files
def help():
    print('Usage: python loss_from_log.py [LOG_FILE]+\n'
          'LOG_FILE is a text file containing a log produced by caffe.\n'
          'At least one LOG_FILE has to be specified.\n'
          'Files have to be given in the correct order (the oldest logs first).',
          file=sys.stderr)
    exit()
if __name__ == '__main__':
main()
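# Illustrative log lines (hedged; shaped to satisfy the regexes above, not
# verbatim caffe output):
#   "Iteration 1200, lr = 0.001"                             -> match_iteration
#   "Train net output #0: loss-ft = 0.42 (* 1 = 0.42 loss)"  -> match_loss
#   "Test net output #1: seg-accuracy = 0.87"                -> match_accuracy
#   "mean_IU: 0.6531"                                        -> mean_IU branch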
|
[
"m.kersner@gmail.com"
] |
m.kersner@gmail.com
|
cec8243c693159b82311f03b0f97f689b0252e68
|
8ed2700f29e669a05e324c23fc3cced361c25dd1
|
/cli/ceph/osd/crush.py
|
e2876d2eb9566dbd481491c580912e453ca9c57d
|
[
"MIT"
] |
permissive
|
red-hat-storage/cephci
|
179cdc8cc01f20bb80cb171800f04123ae8d6651
|
0691fbaf8fca2a9cd051c5049c83758c65301654
|
refs/heads/master
| 2023-08-31T15:19:00.375389
| 2023-08-31T14:43:30
| 2023-08-31T14:43:30
| 171,728,354
| 28
| 87
|
MIT
| 2023-09-14T18:59:33
| 2019-02-20T18:36:22
|
Python
|
UTF-8
|
Python
| false
| false
| 4,811
|
py
|
from cli import Cli
class Crush(Cli):
"""This module provides CLI interface to manage the Crush service."""
def __init__(self, nodes, base_cmd):
super(Crush, self).__init__(nodes)
self.base_cmd = f"{base_cmd} crush"
    def rule(self, *args):
"""
To create rules
        Args:
Supported args
rule_type (str): create-simple | create-replicated |create-erasure
rule_name (str): name of the rule
root (str): root of the CRUSH hierarchy
failure_domain_type (str): failure domain (host/rack)
                device_class (str): storage device class (hdd/ssd)
replicated (bool): if the rule is replicated or not
"""
cmd = f"{self.base_cmd} rule"
        for arg in args:
cmd += f" {arg}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def set_device_class(self, device_class, osd_id):
"""
        To set a device class on OSDs
Args:
device_class (str): device class (hdd/ssd)
            osd_id (list): list of OSD ids
"""
cmd = f"{self.base_cmd} set-device-class {device_class}"
for _osd in osd_id:
cmd += f" {_osd}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def rm_device_class(self, device_class, osd_id):
"""
        To remove a device class from OSDs
Args:
device_class (str): device class (hdd/ssd)
            osd_id (list): list of OSD ids
"""
cmd = f"{self.base_cmd} rm-device-class {device_class}"
for _osd in osd_id:
cmd += f" {_osd}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def rename_device_class(self, old_name, new_name):
"""
To rename device class
Args:
old_name (str): old class name
new_name (str): new class name
"""
cmd = f"{self.base_cmd} class rename {old_name} {new_name}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def ls_osd(self, device_class):
"""
To list all OSDs that belong to a particular class
Args:
device_class (str): device class (hdd/ssd)
"""
cmd = f"{self.base_cmd} class ls-osd {device_class}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def add_bucket(self, name, type):
"""
To add a bucket instance to CRUSH hierarchy
Args:
name (str): bucket name
type (str): type of bucket
"""
cmd = f"{self.base_cmd} add-bucket {name} {type}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def move(self, name, type):
"""
To move a bucket instance to a particular location in CRUSH hierarchy
Args:
name (str): bucket name
type (str): type of bucket
"""
cmd = f"{self.base_cmd} move {name} {type}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def add(self, osd, weight, bucket_details):
"""
To add an OSD to a CRUSH hierarchy
Args:
osd (str): osd id or name
weight (str): weight to be assigned
bucket_details (list): details of format {bucket-type}={bucket-name}
"""
cmd = f"{self.base_cmd} add {osd} {weight} "
cmd += " ".join(bucket_details)
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def remove(self, item):
"""
To remove an OSD from the CRUSH map of a running cluster
Args:
item (str): osd id or bucket name to be removed
"""
cmd = f"{self.base_cmd} remove {item}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def set(self, key, value):
"""
        Set a value for the given key
Args:
key (str): Key to be updated
value (str): Value to be set to the key
"""
cmd = f"{self.base_cmd} set {key} {value}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
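def _example_usage(nodes):
    """Hedged sketch (not part of cephci): wire up Crush and set a class.
    Assumes `nodes` is whatever cephci's Cli wrapper expects; "ceph osd" is
    the usual base command for the crush subcommands wrapped above.
    """
    crush = Crush(nodes, base_cmd="ceph osd")
    crush.add_bucket("rack1", "rack")
    crush.move("rack1", "root=default")
    return crush.set_device_class("ssd", ["osd.0", "osd.1"])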
|
[
"pranavprakash20@gmail.com"
] |
pranavprakash20@gmail.com
|
79a7f455388690fa7a0287ab242b104b0be5b488
|
6233dfe18e53b55aef0c5eef9d6b59730f96dccb
|
/adminNotification/views.py
|
4583d1d1789130bfe01feecf28960ae530ccfaf0
|
[] |
no_license
|
omar74/HISIK_API2-master
|
6fe4f8380717f0a767409c62c1ffcfd060fddd4d
|
5d891bc9c7a31de8cdb0591a77d5fb1e0f759984
|
refs/heads/master
| 2020-12-15T15:38:33.103082
| 2020-01-20T18:48:18
| 2020-01-20T18:48:18
| 235,158,685
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
from django.shortcuts import render
from .models import NotificationAdmin
from .serializer import NotificationAdminSerializer
from rest_framework import generics
class AdminNotificationListView(generics.ListCreateAPIView):
permission_classes = []
authentication_classes = []
queryset = NotificationAdmin.objects.all()
serializer_class = NotificationAdminSerializer
def create(self, request, *args, **kwargs):
        """Hook for doing extra work with serializer.data before delegating to the default create."""
return super(AdminNotificationListView, self).create(request, *args, **kwargs)
class AdminNotficationDetailedView(generics.RetrieveUpdateDestroyAPIView):
permission_classes = []
authentication_classes = []
queryset = NotificationAdmin.objects.all()
serializer_class = NotificationAdminSerializer
lookup_field = 'type'
|
[
"omar.ashraf0597@gmail.com"
] |
omar.ashraf0597@gmail.com
|
3c9a07fa27647dc38716eb782a3a4838a70b2d17
|
1e182038f280285fa6a833b5aaf49591c707ad53
|
/ycombinator/encodings.py
|
deb67e51fe3528a3081ec9107a5ac4be87b9b944
|
[] |
no_license
|
turing-complet/samples
|
87e13e75ea1cb52503d0937cc32d02ad380909b9
|
87e1042cdf2d427def822a56a9701817b2f3fae8
|
refs/heads/master
| 2021-12-15T08:46:48.493475
| 2021-12-13T02:25:18
| 2021-12-13T02:25:18
| 235,974,764
| 0
| 0
| null | 2021-12-13T02:36:02
| 2020-01-24T09:36:55
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 109
|
py
|
class Numeral:
def __init__(self, n):
pass
class Bool:
def __init__(self, b):
pass
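# Hedged sketch (the classes above are empty stubs): given the module name
# and the surrounding ycombinator project, Church encodings are a plausible
# intent; everything below is an illustration, not the author's code.
def church_numeral(n):
    """Church numeral for n: a function f -> (x -> f applied n times to x)."""
    def numeral(f):
        def apply_n(x):
            for _ in range(n):
                x = f(x)
            return x
        return apply_n
    return numeral
def church_bool(b):
    """Church boolean: true selects the first argument, false the second."""
    return (lambda t: lambda f: t) if b else (lambda t: lambda f: f)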
|
[
"jhagg314@gmail.com"
] |
jhagg314@gmail.com
|
992d0d89d6bb0e793cbf80caa45bb759cd343dba
|
58674e0ea4f0faa70892db30627fda006c3dc478
|
/Beginner/1060_positive_numbers.py
|
8e603faa7151ff993363d4fcdc22e248597d36bf
|
[] |
no_license
|
ShekhRaselMasrurAhmmadNissan/URI-Online-Judge
|
fe0f176987f63dc342d741de34c52b10edb3f6f6
|
1554d12a0338850ba1f07f401633390815e505b2
|
refs/heads/main
| 2023-02-23T10:36:42.935212
| 2021-01-25T14:21:26
| 2021-01-25T14:21:26
| 325,745,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
# Reading the Data...
numbers = list()
for serial_number in range(0, 6):
numbers.append(float(input()))
# Checking the conditions...
positive_number_count = 0
for number in numbers:
    if number > 0:  # zero is not positive, so it should not be counted
positive_number_count += 1
print(f'{positive_number_count} valores positivos')  # judge-required output: "<n> positive values"
|
[
"shekhraselmasrurahmmadnissan@gmail.com"
] |
shekhraselmasrurahmmadnissan@gmail.com
|
79b21ab5c4ba6fadd6e18c4bc14248a109112bf2
|
e008b7ec16cbcffb5368bb1685d44b75c4019a44
|
/DeepVO/deepvo_net.py
|
bfb623602caa9d49fad86c837efe32080b09047b
|
[] |
no_license
|
akhanian/VisualOdometry
|
df02d03c031901f3b746e8e77a574a0f319f9acd
|
03bd99fa2312687cd67b159a20afa72ae15ba4c4
|
refs/heads/master
| 2023-01-06T08:41:24.802547
| 2020-11-11T05:10:29
| 2020-11-11T05:10:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
# -*- coding: utf-8 -*-
"""
Created by etayupanta at 6/30/2020 - 21:10
__author__ = 'Eduardo Tayupanta'
__email__ = 'eduardotayupanta@outlook.com'
"""
# Import Libraries:
from tensorflow import keras
from tensorflow.keras import layers
class DeepVONet(keras.Model):
def __init__(self):
super(DeepVONet, self).__init__()
        self.reshape = layers.Reshape((-1, 10 * 3 * 1024))  # flatten each time step into 10*3*1024 features
self.lstm1 = layers.LSTM(1000, dropout=0.5, return_sequences=True)
self.lstm2 = layers.LSTM(1000, dropout=0.5)
self.dropout = layers.Dropout(0.5)
self.out = layers.Dense(6)
    def call(self, inputs, training=False):
        x = self.reshape(inputs)
        x = self.lstm1(x)
        x = self.lstm2(x)
        x = self.dropout(x, training=training)
x = self.out(x)
return x
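# Hedged shape-check sketch: the (batch, time, 10, 3, 1024) input layout is
# assumed from the Reshape target above, not taken from the repo's docs.
if __name__ == '__main__':
    import numpy as np
    model = DeepVONet()
    dummy = np.zeros((1, 5, 10, 3, 1024), dtype='float32')
    print(model(dummy).shape)  # expected: (1, 6) -> a 6-DoF pose estimate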
|
[
"wetayupanta@gmail.com"
] |
wetayupanta@gmail.com
|
1cf78c07b6bdc205a1bca76933f89a3c6e6c2fd3
|
83e472f89c48a2793fa244f573a032bae80ba6bb
|
/Dragomir Robert-Simion/camera_app/blueprints/main/routes.py
|
965401d01a0abb990fe0e376f4b7a57201dcecfd
|
[] |
no_license
|
rodicadp/mobile-2020
|
992293d516a47cfe78a13b63fff7b1e9a4b475bd
|
9c3ceb82c1b4ec5b1e75af2a884c611990164e74
|
refs/heads/master
| 2020-12-27T19:47:27.515452
| 2020-02-03T19:13:21
| 2020-02-03T19:13:21
| 238,030,034
| 0
| 0
| null | 2020-02-03T18:12:56
| 2020-02-03T18:12:55
| null |
UTF-8
|
Python
| false
| false
| 2,700
|
py
|
import os
from flask_login import login_required
from flask import render_template, redirect, url_for, request, flash, Blueprint, session
from sqlalchemy import exc
from camera_app import db
from camera_app.blueprints.main.forms import Form_Photo
from camera_app.blueprints.main.models import Photo
main = Blueprint('main', __name__, template_folder='templates', static_folder='static', static_url_path='/static')
def iterate_pages(table):
return table.iter_pages(left_edge=2, right_edge=2, left_current=2, right_current=2)
def upload_photo(form_file):
    # form.photo.data is a FileStorage for a fresh upload, or the stored
    # filename (str) when editing without re-uploading; only save real files.
    if isinstance(form_file, str):
        return form_file
    name = form_file.filename
    file_path = os.path.join(main.root_path, 'static', name)
    form_file.save(file_path)
    return name
@main.route("/edit_photo/<int:id>", methods=['GET', 'POST'])
@main.route('/add_photo', methods=['GET', 'POST'])
@login_required
def add_photo(id=None):
form = Form_Photo()
    if id is not None:
        photo = Photo.query.get_or_404(id)
if request.method == 'GET':
form.process(request.args)
if id is not None:
form.description.data = photo.description
form.photo.data = photo.photo
if form.validate_on_submit():
try:
if id is not None:
photo.description = form.description.data
photo.photo = upload_photo(form.photo.data)
db.session.commit()
flash('Success!', 'success')
return redirect(url_for('main.photo', id=photo.id))
else:
                row = Photo(description=form.description.data, photo=upload_photo(form.photo.data))
                db.session.add(row)
                db.session.commit()
                flash('Success!', 'success')
                return redirect(url_for('main.photos'))
except exc.IntegrityError as e:
flash(f'Error: {e}', 'danger')
return render_template('add_photo.html', title='Add a photo', form=form)
@main.route("/photo/<int:id>")
@login_required
def photo(id):
session['photo'] = id
return render_template('photo.html', photo=Photo.query.get_or_404(id))
@main.route("/delete_photo/<int:id>")
@login_required
def delete_photo(id):
db.session.delete(Photo.query.get_or_404(id))
db.session.commit()
return redirect(url_for('main.photos'))
@main.route('/', methods=['GET', 'POST'])
@main.route("/photos")
@login_required
def photos():
page = request.args.get('page', 1, type=int)
return render_template('photos.html', title='Photos', photos=Photo.query.paginate(per_page=5, page=page))
|
[
"noreply@github.com"
] |
rodicadp.noreply@github.com
|
1b1488f2e5ebd9c410f0123465c4c7e05c7126e8
|
c097eb64ab0305fb653bba74c616161a64b42a29
|
/carspy/convol_fcn.py
|
884fb80ada4dd43c536d8a786c54b3e392fe101e
|
[
"BSD-3-Clause"
] |
permissive
|
zhangxin19981016/carspy
|
f832a7a58dc1683506eefb6c4341c09cb5b95300
|
4c91138018b288635e1e608e7f8b0edd8950085b
|
refs/heads/main
| 2023-04-12T05:44:38.123167
| 2021-05-09T19:12:31
| 2021-05-09T19:12:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,439
|
py
|
"""Functions used in the convolution of CARS spectrum.
- Laser lineshape
- Impulse spectral response function (ISRF) for the spectrometer slit
"""
import numpy as np
def gaussian_line(w, w0, sigma):
"""Generate a normalized Gaussian lineshape (integral equals to 1).
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
Center of the Gaussian lineshape in wavenumber cm^(-1).
sigma : float
        FWHM of the Gaussian lineshape in wavenumber cm^(-1).
Returns
-------
1d array of floats
Intensities of the normalized Gaussian lineshape over w.
"""
_lineshape = 2/sigma*(np.log(2)/np.pi)**0.5*np.exp(
-4*np.log(2)*((w-w0)/sigma)**2)
return _lineshape
def lorentz_line(w, w0, sigma):
"""Generate a normalized Lorentzian lineshape (integral equals to 1).
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
Center of the Lorentzian lineshape in wavenumber cm^(-1).
sigma : float
        FWHM of the Lorentzian lineshape in wavenumber cm^(-1).
Returns
-------
1d array of floats
Intensities of the normalized Lorentzian lineshape over w.
"""
_lineshape = 1/np.pi*(sigma/2)/((w-w0)**2+sigma**2/4)
return _lineshape
def voigt_line(w, w0, sigma_V, sigma_L):
"""Generate an approximated Voigt lineshape following :cite:`Whiting:68`.
Parameters
----------
w : 1d array of floats
Spectral positions in wavenumber cm^(-1).
    w0 : float
        Center of the Voigt lineshape in wavenumber cm^(-1).
    sigma_V : float
        FWHM of the Voigt lineshape in wavenumber cm^(-1).
    sigma_L : float
        FWHM of the Lorentzian component in wavenumber cm^(-1).
Returns
-------
1d array
Intensities of the Voigt lineshape over w.
"""
# Preparations
_ratio = sigma_L/sigma_V
I_g = 1/(sigma_V*(1.065 + 0.447*_ratio + 0.058*_ratio**2))
_w = abs(w-w0)/sigma_V
# Building up the function
_term_1 = I_g*(1-_ratio)*np.exp(-2.772*_w**2) + _ratio/(1 + 4*_w**2)
_term_2 = 0.016*(1-_ratio)*_ratio*(np.exp(-0.4*_w**2.25)
- 10/(10 + _w**2.25))
return _term_1 + _term_2
def asym_Gaussian(w, w0, sigma, k, a_sigma, a_k, offset):
"""Asymmetric super-Gaussian following :cite:`Beirle:17`.
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
Center of the asymmetric Gaussian function in wavenumber cm^(-1).
sigma : float
FWHM of the Gaussian function in wavenumber cm^(-1).
k : float
Controls the skewing of the asymmetry.
a_sigma, a_k : float
Tuning factors for sigma and k.
offset : float
Background offset (from experimental spectrum).
Returns
-------
1d array of floats
Intensities of the peak-normalized asymmetric super-Gaussian over w.
"""
response_low = np.exp(-abs((w[w <= w0]-w0)/(sigma-a_sigma))**(k-a_k))
response_high = np.exp(-abs((w[w > w0]-w0)/(sigma+a_sigma))**(k+a_k))
response = np.append(response_low, response_high) + offset
return response/response.max()
def asym_Voigt(w, w0, sigma, k, a_sigma, a_k, sigma_L_l, sigma_L_h, offset):
"""Asymmetric super-Voigt.
.. note::
This is based on the super-Gaussian from :cite:`Beirle:17`, with
additional convolution with two Lorentzian profiles to better capture
slow-decaying wings in some experimental slit function
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
        Center of the asymmetric Voigt function in wavenumber cm^(-1).
sigma : float
FWHM of the Gaussian function in wavenumber cm^(-1).
k : float
Controls the skewing of the asymmetry.
a_sigma, a_k : float
Tuning factors for sigma and k.
sigma_L_l : float
FWHM of the Lorentzian function in wavenumber cm^(-1) for the
lower half.
sigma_L_h : float
FWHM of the Lorentzian function in wavenumber cm^(-1) for the
higher half.
offset : float
Background offset.
Returns
-------
1d array of floats
        Intensities of the peak-normalized asymmetric super-Voigt over w.
"""
response_low = np.exp(-abs((w-w0)/(sigma-a_sigma))**(k-a_k))
response_high = np.exp(-abs((w-w0)/(sigma+a_sigma))**(k+a_k))
response_low = np.convolve(response_low, lorentz_line(w, w0, sigma_L_l),
'same')
response_high = np.convolve(response_high, lorentz_line(w, w0, sigma_L_h),
'same')
response = np.append(response_low[np.where(w <= w0)],
response_high[np.where(w > w0)]) + offset
return response/response.max()
def asym_Voigt_deprecated(w, w0, sigma_V_l, sigma_V_h, sigma_L_l, sigma_L_h,
offset):
"""Asymmetric Voigt profile following NRC.
.. admonition:: Deprecated
:class: attention
This profile cannot capture certain slit functions with broadened
Gaussian profile.
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
        Center of the asymmetric Voigt function in wavenumber cm^(-1).
sigma_V_l : float
FWHM of the Voigt function in wavenumber cm^(-1) for the lower half.
sigma_V_h : float
FWHM of the Voigt function in wavenumber cm^(-1) for the higher half.
sigma_L_l : float
FWHM of the Lorentzian function in wavenumber cm^(-1) for the
lower half.
sigma_L_h : float
FWHM of the Lorentzian function in wavenumber cm^(-1) for the
higher half.
offset : float
Background offset.
Returns
-------
1d array of floats
        Intensities of the peak-normalized asymmetric Voigt profile over w.
"""
response_low = voigt_line(w[w <= w0], w0, sigma_V_l, sigma_L_l)
response_high = voigt_line(w[w > w0], w0, sigma_V_h, sigma_L_h)
response = (np.append(response_low/response_low.max(),
response_high/response_high.max()) +
offset)
return response/response.max()
def slit_ISRF(w, w0, param_1, param_2, param_3, param_4, param_5, param_6,
offset, mode='sGaussian'):
"""Impulse spectral response function (ISRF) as the slit function.
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
        Center of the asymmetric ISRF in wavenumber cm^(-1).
    param_1, param_2, param_3, param_4, param_5, param_6 : float
        Parameters needed for the asymmetric ISRF depending on the mode
        (param_5 and param_6 are only used in the 'Voigt' mode).
- 'sGaussian':
sigma : float
FWHM of the Gaussian function in wavenumber cm^(-1).
k : float
Controls the skewing of the asymmetry.
a_sigma, a_k : float
Tuning factors for sigma and k.
        - 'Voigt':
            sigma : float
                FWHM of the Gaussian function in wavenumber cm^(-1).
            k : float
                Controls the skewing of the asymmetry.
            a_sigma, a_k : float
                Tuning factors for sigma and k.
            sigma_L_l, sigma_L_h : float
                FWHMs of the Lorentzian functions in wavenumber cm^(-1) for
                the lower and higher halves.
offset : float
Background offset.
mode : 'sGaussian', str, optional
Two options for the ISRF:
- Asymmetric super Gaussian: 'sGaussian'.
- Asymmetric Voigt: 'Voigt'.
Returns
-------
1d array of floats
Intensities of the peak-normalized asymmetric ISRF.
"""
slit_fc = []
if mode == 'sGaussian':
slit_fc = asym_Gaussian(w, w0, param_1, param_2, param_3,
param_4, offset)
elif mode == 'Voigt':
slit_fc = asym_Voigt(w, w0, param_1, param_2, param_3, param_4,
param_5, param_6, offset)
return slit_fc
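# Hedged sanity check (grid bounds and FWHM are illustrative): a normalized
# lineshape should integrate to ~1 over a sufficiently wide grid.
if __name__ == '__main__':
    w = np.linspace(2200.0, 2400.0, 8001)       # wavenumber grid [cm^-1]
    g = gaussian_line(w, w0=2300.0, sigma=2.0)  # FWHM of 2 cm^-1
    print(np.trapz(g, w))                       # ~1.0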
|
[
"43315257+chuckedfromspace@users.noreply.github.com"
] |
43315257+chuckedfromspace@users.noreply.github.com
|
f075ed8bbec5b8e9a2aa280e5a35872d3244c077
|
f170a491f323a63665ccf39291ae2ad3fe8d626b
|
/privat_bank_test/wsgi.py
|
a1f6538b7d18f522fa3ec9e8b05ae0ac9fb13d3c
|
[] |
no_license
|
DmitryFleur/PrivatBankTest
|
098f1829a3c031f619ae82b8e498b827640dde5b
|
0ac2d605966735575b3fe498b92d20c352fdf458
|
refs/heads/master
| 2020-03-27T00:49:30.499822
| 2018-08-22T05:33:43
| 2018-08-22T05:33:43
| 145,660,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
WSGI config for privat_bank_test project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'privat_bank_test.settings')
application = get_wsgi_application()
|
[
"bDEFpm74!!"
] |
bDEFpm74!!
|
747a7cb3b08db83515a6456c6b9e5af1e2d0e703
|
c818b186c3e76f6d3c5edb8e2e30a04c2a1b99a9
|
/early-bird-impl/early_bird/wc_gc.py
|
48f2ab50c70b1d0126a8320643a5e33f7ebb09b6
|
[] |
no_license
|
imperialguy/algc
|
3048e31e16e19ea195797d4935111e8238244455
|
3d71210e6fd0e33249bfa461473da2fa79fff433
|
refs/heads/master
| 2021-03-24T09:32:59.492479
| 2017-05-05T21:52:25
| 2017-05-05T21:52:25
| 82,343,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,901
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 17 14:12:31 2016
@author: mcontrac
"""
import ConfigParser
import math
import numpy
import pandas
from helpers import setup_logging
from helpers import get_discount_amount
from helpers import get_dnb_scores
from helpers import get_sic_major_group
from helpers import round_down
from model_builder import GLMModel
# User inputs
user_duns_number = 608201141
user_division = 21
user_is_uslh = False
user_sic_code = '0111'
user_effective_date = pandas.datetime(2016, 9, 1)
user_total_projected_payroll = 10000000
user_estimated_clerical_payroll_ratio = 0.3
user_estimated_clerical_payroll = user_estimated_clerical_payroll_ratio * user_total_projected_payroll
user_estimated_non_clerical_payroll = user_total_projected_payroll - user_estimated_clerical_payroll
user_experience_mod = 0.97
input_data = pandas.DataFrame({'state': ['AK', 'CT', 'MD', 'KY', 'CA', 'CA', 'DE', 'AK'],
'class_code': ['6504', '4720', '2039', '6504', '8810', '6504', '0953', '9139'],
'payroll': [4000000, 500000, 1000000, 100000, 1000000, 200000, 200000, 0]})
input_history = pandas.DataFrame({'years_before': [1, 2, 3], 'ind_claim_count': [2, 2, 2], 'med_claim_count': [26, 19, 14]})
def read_rate_lookup(filename, is_uslh):
"""Reads the data from the rate_lookup.csv file into a pandas DataFrame
The rate_lookup.csv file should contain the columns called ``state``,
``class_code``, ``final_rate``, ``final_rate_uslh`` and ``clerical_ind``.
If the input division is 58-USLH, the ``final_rate`` column is dropped and
the ``final_rate_uslh`` column is renamed to final_rate.
Otherwise the ``final_rate_uslh`` column is dropped.
Args:
**is_uslh**: Boolean indicator whether the division is 58-USLH or not
Return:
A pandas DataFrame object with the state, class_code and final_rate
columns
"""
rate_lookup = pandas.read_csv(filename, index_col='lookup_key')
if is_uslh:
rate_lookup.drop('final_rate', axis=1, inplace=True)
rate_lookup.rename(columns={'final_rate_uslh': 'final_rate'}, inplace=True)
else:
rate_lookup.drop('final_rate_uslh', axis=1, inplace=True)
return rate_lookup
def read_discount_lookup(filename):
"""Reads the discount lookup data for the specifiec NCCI table number
Args:
**filename**: csv file from which to read the NCCI data
Return:
A pandas DataFrame containing the bucket as the index and the discount
rates for each bucket
"""
return pandas.read_csv(filename)
def read_state_rate_need_lookup(filename, division, effective_date, is_uslh):
"""Reads the fixed and variable rate need data for the input division and
effective date
The is_uslh indicator is only applicable to division 58. For all other
divisions, the indicator is assumed to be False regardless of input.
Args:
**filename**: csv file containing the state rate need data\n
**division**: The user input division\n
**effective_date**: The user input effective date\n
**is_uslh**: Boolean indicator for which division 58 rates to lookup
Return:
A pandas DataFrame with columns state, variable_rate_need,
fix_rate_need and indicated_loss_ratio
"""
state_rate_need = pandas.read_csv(filename, parse_dates=['effective_date', 'expiration_date'], infer_datetime_format=True)
def keep_row(index):
return (state_rate_need['division'][index] == division
and state_rate_need['effective_date'][index] <= effective_date <= state_rate_need['expiration_date'][index]
and state_rate_need['uslh_ind'][index] == is_uslh)
return state_rate_need.select(keep_row).drop(['division', 'uslh_ind', 'effective_date', 'expiration_date'], axis=1)
def read_wcng_loss_ratio_lookup(filename, division, is_uslh):
"""Reads the WCNG average loss ratio for the division by state
The is_uslh indicator is only applicable to division 58. For all other
divisions, the indicator is assumed to be False regardless of input.
Args:
**filename**: csv file containing the WCNG loss ratio data\n
**division**: The user input division\n
**is_uslh**: Boolean indicator for which division 58 rates to lookup
Return:
A pandas DataFrame with columns state and avg_wcng_loss_ratio
"""
wcng_loss_ratio = pandas.read_csv(filename)
def keep_row(index):
return (wcng_loss_ratio['division'][index] == division) and (wcng_loss_ratio['uslh_ind'][index] == is_uslh)
return wcng_loss_ratio.select(keep_row).drop(['division', 'uslh_ind'], axis=1)
def read_cdf(filename, state):
"""Reads the CDFs for prior three years
Args:
**filename**: csv file containing the CDF data\n
**state**: The state for which CDFs are to be read
Return:
A pandas DataFrame with columns ``prior_year`` and ``cdf``. Prior
year refers to number of years prior to current year.
"""
cdf_data = pandas.read_csv(filename)
cdf_data['inverse_cdf'] = 1 / cdf_data['cdf']
if state in cdf_data['state'].unique():
return cdf_data[cdf_data['state'] == state].drop('state', axis=1)
else:
return cdf_data[cdf_data['state'].isnull()].drop('state', axis=1)
def get_monopolistic_states():
"""Returns a list of state codes that are monopolistic states"""
return ['ND', 'OH', 'WA', 'WY']
def get_t9_states():
"""Returns a list of state codes that require T9 discount rates"""
return ['AZ', 'FL', 'IA', 'ID', 'MA', 'NJ']
def merge_rate_lookup(input_data, rate_lookup_table):
"""Merges the ``clerical_ind`` and ``class_rate`` from the rate lookup to
the input
The function also calculates the class premium ,non-clerical and clerical
payrolls for each input entry and also calculates the overall average
clerical and non-clerical rates for the input provided. The function also
adds the columns ``class_rate``, ``clerical_ind``, ``payroll_non_clerical``
and ``payroll_clerical`` columns to the input data.
Args:
**input_data**: The state, class code and payroll data input by the
user as a DataFrame\n
**rate_lookup_table**: The rates for calculating the class premium
percents from payroll
Return:
A dictionary containing the average clerical rate (``avg_clerical_rate``) and
the average non-clerical rate (``avg_non_clerical_rate``)
"""
input_data['class_rate'] = input_data.apply(lambda row: rate_lookup_table['final_rate'][row['lookup_key']], axis=1)
input_data['clerical_ind'] = input_data.apply(lambda row: rate_lookup_table['clerical_ind'][row['lookup_key']], axis=1)
input_data['class_premium'] = input_data['payroll'] * input_data['class_rate']
input_data['payroll_non_clerical'] = input_data['payroll'] * (1 - input_data['clerical_ind'])
input_data['payroll_clerical'] = input_data['payroll'] * input_data['clerical_ind']
avg_clerical_rate = sum(input_data['payroll_clerical'] * input_data['class_rate']) / input_data['payroll_clerical'].sum()
avg_non_clerical_rate = sum(input_data['payroll_non_clerical'] * input_data['class_rate']) / input_data['payroll_non_clerical'].sum()
return {'avg_clerical_rate': avg_clerical_rate, 'avg_non_clerical_rate': avg_non_clerical_rate}
def merge_wcng_lr_rate_need(payrolls, division, effective_date, is_uslh,
rate_need_file, wcng_lr_file):
"""Merges the payrolls data to the WCNG loss ratio and rate need data
Note that this function returns a separate DataFrame object instead of
merging inplace
Args:
**payrolls**: DataFrame containing the allocation ratio of each state\n
**division**: The user input division\n
**effective_date**: The user input effective date\n
**is_uslh**: Boolean indicator for which division 58 rates to lookup\n
**rate_need_file**: csv file containing the state rate need data\n
**wcng_lr_file**: csv file containing the WCNG loss ratio data
Return:
A pandas DataFrame with all columns from ``payrolls`` along with
``avg_wcng_loss_ratio``, ``variable_rate_need``, ``fix_rate_need`` and
``indicated_loss_ratio`` columns
"""
wcng_lr_data = read_wcng_loss_ratio_lookup(wcng_lr_file, division, is_uslh)
rate_need_data = read_state_rate_need_lookup(rate_need_file, division, effective_date, is_uslh)
return payrolls.merge(wcng_lr_data, how='left', on='state').merge(rate_need_data, how='left', on='state')
def calc_payroll_ratio(input_data):
"""Calculates the non-clerical and clerical payrolls for each state
The function modifies the input dataframe and calculates the non-clerical
payroll and clerical payroll columns for each row. It then calculates the
total non-clerical and clerical payroll for each state and returns that as
a DataFrame.
Args:
**input_data**: DataFrame containing the class premium, net, clerical
and non-clerical payrolls for each state and class code
Return:
A pandas DataFrame with total class premium, net, non-clerical and
clerical payrolls by state, and the ratio of non-clerical payroll for
each state where the clerical payroll is missing
"""
payrolls = input_data.groupby(by='state', as_index=False, sort=False).agg({'class_premium': 'sum',
'payroll': 'sum',
'payroll_non_clerical': 'sum',
'payroll_clerical': 'sum'})
payrolls['payroll_non_clerical_only'] = payrolls.apply(lambda row: row['payroll_non_clerical'] if row['payroll_clerical'] == 0 else 0,
axis=1)
total_non_clerical = payrolls['payroll_non_clerical_only'].sum()
payrolls['state_non_clerical_ratio'] = payrolls['payroll_non_clerical_only'] / total_non_clerical
payrolls.drop('payroll_non_clerical_only', axis=1, inplace=True)
return payrolls
def calc_allocate_clerical_payroll(payrolls, user_estimated_clerical_payroll):
"""Allocates the unentered clerical payroll to states based on non-clerical
payroll ratio
Uses the calculated non-clerical payroll ratio to allocate clerical payroll
that was not entered by the user based on the user entered total estimated
clerical payroll. The method modifies the payrolls DataFrame in place by
adding the ``allocated_clerical_payroll`` column
Args:
**payrolls**: DataFrame containing the allocation ratio of each state\n
**user_estimated_clerical_payroll**: User input total estimated
clerical payroll
"""
entered_clerical_payroll = payrolls['payroll_clerical'].sum()
clerical_payroll_to_be_allocated = max(0, user_estimated_clerical_payroll - entered_clerical_payroll)
payrolls['allocated_clerical_payroll'] = payrolls['state_non_clerical_ratio'] * clerical_payroll_to_be_allocated
def calc_clerical_class_premium(payrolls, rate_lookup_table):
"""Calculates the clerical class premium based on the allocated clerical
payroll
Determines the clerical rate to use from the rate table and calculates the
class premium for clerical payroll based on the allocated clerical payroll.
Modifies the payrolls DataFrame in place by adding the ``clerical_rate``
and ``allocated_clerical_class_premium`` columns
Args:
**payrolls**: DataFrame containing the allocated clerical payroll for
each state\n
**rate_lookup_table**: Table containing the rate for each state and
        class code, with a boolean indicator for clerical vs non-clerical rates
"""
clerical_rates = rate_lookup_table.loc[rate_lookup_table['clerical_ind'] == 1].set_index('state')
payrolls['clerical_rate'] = payrolls['state'].map(clerical_rates['final_rate'])
payrolls['allocated_clerical_class_premium'] = payrolls['clerical_rate'] * payrolls['allocated_clerical_payroll']
def calc_standard_premium(payrolls, user_experience_mod):
"""Calculates the standard premium for each state
If a state is monopolistic, the experience mod is 1 else it is the user
input experience mod. Monopolistic states are determined by the
``get_monopolistic_states()`` function. Modifies the payrolls DataFrame
in place by adding the ``experience_mod``, ``standard_premium`` and
``standard_premium_ratio`` columns
Args:
**payrolls**: DataFrame containing the class premium by each state\n
**user_experience_mod**: User input experience mod factor
"""
monopolistic_states = get_monopolistic_states()
payrolls['experience_mod'] = payrolls.apply(lambda row: user_experience_mod if row['state'] not in monopolistic_states else 1, axis=1)
payrolls['standard_premium'] = payrolls['experience_mod'] * payrolls['class_premium']
total_standard_premium = payrolls['standard_premium'].sum()
payrolls['standard_premium_ratio'] = payrolls['standard_premium'] / total_standard_premium
def calc_missing_standard_premium(payrolls, avg_rates, user_experience_mod):
"""Returns the missing standard premiums to be allocated across the states
Args:
**payrolls**: DataFrame containing the clerical and non-clerical
payroll by state\n
**avg_rates**: Dictionary containing the average clerical and
non-clerical rates for input\n
**user_experience_mod**: User input experience mod factor
Return:
The total standard premium that is missing based on the inputs
"""
missing_clerical_payroll = max(0, user_estimated_clerical_payroll - payrolls['payroll_clerical'].sum())
missing_non_clerical_payroll = max(0, user_estimated_non_clerical_payroll - payrolls['payroll_non_clerical'].sum())
allocated_clerical_class_premium = payrolls['allocated_clerical_class_premium'].sum()
unknown_clerical_class_premium = (allocated_clerical_class_premium
if allocated_clerical_class_premium > 0
else avg_rates['avg_clerical_rate'] * missing_clerical_payroll)
unknown_non_clerical_class_premium = missing_non_clerical_payroll * avg_rates['avg_non_clerical_rate']
missing_clerical_standard_premium = unknown_clerical_class_premium * user_experience_mod
missing_non_clerical_standard_premium = unknown_non_clerical_class_premium * user_experience_mod
return missing_clerical_standard_premium + missing_non_clerical_standard_premium
def calc_allocated_standard_premium(payrolls, standard_premium_to_allocate):
"""Calcualtes the allocated the standard premiums for each state
Distributes the missing standard premium to each state based on the
standard premium ratio, and adds the calculated standard premium for the
state to get the final allocated standard premium for the state. The
function modifies the payrolls DataFrame in place by adding a
``allocated_standard_premium`` column
Args:
**payrolls**: DataFrame containing the standard premium value and ratio
for each state\n
**standard_premium_to_allocate**: The missing standard premium that
needs to be distributed among the states
"""
payrolls['allocated_standard_premium'] = (payrolls['standard_premium']
+ (payrolls['standard_premium_ratio'] * standard_premium_to_allocate))
def calc_premium_discount(payrolls, other_loadings, ncci_tier_files):
"""Calculates the premium discount to be applied to each state
Reads the discount tables for NCCI state groups (currently only 7 and 9)
and calculates the discount for each bucket within that group, totals it
and puts it as ``premium_discount`` column in the ``payrolls`` DataFrame.
The function also calculates the manual rate for each state as
``manual_rate`` column in the payrolls DataFrame.
Args:
**payrolls**: DataFrame containing the allocated standard premium for
each state\n
**other_loadings**: Other loadings factor for the rate calculations\n
**ncci_tier_files**: A dict containing the NCCI tier number as key, and
the filename as the value
"""
ncci_table7 = read_discount_lookup(ncci_tier_files[7])
ncci_table9 = read_discount_lookup(ncci_tier_files[9])
t9_states = get_t9_states()
def __discount_amount_helper(row):
if row['state'] in t9_states:
table = ncci_table9
else:
table = ncci_table7
return get_discount_amount(row['allocated_standard_premium'], table)
payrolls['premium_discount'] = payrolls.apply(__discount_amount_helper, axis=1)
payrolls['manual_rate_pre_model'] = (1 + other_loadings) * (payrolls['allocated_standard_premium'] - payrolls['premium_discount'])
payrolls['manual_rate'] = (1 + other_loadings) * (payrolls['standard_premium'] - payrolls['premium_discount'])
def calc_normalized_claim_counts(input_history, predom_state, aqi_data,
total_class_premium, cdf_file):
"""Calculates the normalized indemnity and medical claim counts and ratio
Uses the user input claim count history and the reference CDFs
to calculate the normalized claim counts for the last 3 years,
and calculates the indemnity to medical claim count ratio using
    the credibility and global average from AQI profitability studies.
Claim counts are calculated as 2 * claim count in prior year + claim counts
in two years before that. CDF adjusted premium is also calculated similarly.
Normalized claim counts are calculated by dividing the claim counts by the
CDF adjusted premium in millions. The indemnity to medical claim ratio is
calculated by adding the average respective claim frequency times the
credibility (as obtained from AQI profitability study) to the claim counts,
and then taking the ratio.
Args:
**input_history**: User input claim count history DataFrame\n
**predom_state**: State whose CDFs are used\n
**aqi_data**: A dictionary containing the keys ``credibility``,
        ``avg_indemnity_frequency_3yrs`` and ``avg_medical_frequency_3yrs``\n
**total_class_premium**: Class premium value to use to calculate
CDF adjusted premium\n
**cdf_file**: csv file containing the CDF data
Return:
A pandas DataFrame containing the ``indemnity_claim_count``,
``medical_claim_count``,``cdf_adjusted_premium``,
``norm_indemnity_claim_count``, ``norm_medical_claim_count``
and ``indemnity_medical_ratio`` as keys, with their corresponding values
"""
    # Weight the most recent year twice: sum of all three years plus the prior year again.
    __calc_claim_count = lambda column: input_history[column].sum() + input_history[input_history['years_before'] == 1][column]
    # Normalize to claim counts per $1M of CDF-adjusted premium.
    __norm_claim_count = lambda value, premium: value / (premium / 1000000)
credibility = aqi_data['credibility']
avg_indemnity_frequency_3yrs = aqi_data['avg_indemnity_frequency_3yrs']
avg_medical_frequency_3yrs = aqi_data['avg_medical_frequency_3yrs']
cdfs = read_cdf(cdf_file, predom_state)
cdfs['cdf_premium'] = cdfs['inverse_cdf'] * total_class_premium
cdf_premium_3yrs = cdfs['cdf_premium'].sum() + cdfs.loc[cdfs['prior_year'] == 1]['cdf_premium'].sum()
indemnity_claim_count = __calc_claim_count('ind_claim_count')
medical_claim_count = __calc_claim_count('med_claim_count')
norm_indemnity_claim_count = __norm_claim_count(indemnity_claim_count, cdf_premium_3yrs)
norm_medical_claim_count = __norm_claim_count(medical_claim_count, cdf_premium_3yrs)
indemnity_medical_ratio = ((indemnity_claim_count + (credibility * avg_indemnity_frequency_3yrs)) /
(medical_claim_count + (credibility * avg_medical_frequency_3yrs)))
return pandas.DataFrame.from_dict(data={'indemnity_claim_count': indemnity_claim_count,
'medical_claim_count': medical_claim_count,
'cdf_adjusted_premium': cdf_premium_3yrs,
'norm_indemnity_claim_count': norm_indemnity_claim_count,
'norm_medical_claim_count': norm_medical_claim_count,
'indemnity_medical_ratio': indemnity_medical_ratio
}, orient='columns')
def calc_entered_payroll_ratios(input_data):
"""Calculates the entered clerical and non-clerical payroll ratios
Entered clerical payroll ratio is defined as the clerical payroll entered
divided by the total projected payroll. Max is 1.
Entered non-clerical payroll ratio is defined as the non-clerical payroll
entered divided the non-clerical payroll estimated. The estimated non-clerical
payroll ratio is
``1 - max(entered_clerical_payroll_ratio, user_estimated_clerical_payroll_ratio)``
If this is 0, the entered non-clerical payroll ratio is 0. Otherwise, max is
1.
Args:
**input_data**: User input state, class code and payroll data after
clerical and non-clerical payrolls have been calculated
Return:
A dictionary containing the entered ratios with keys as ``clerical`` and
``non_clerical``
"""
entered_clerical_payroll_ratio = min(1, input_data['payroll_clerical'].sum() / user_total_projected_payroll)
estimated_non_clerical_payroll_ratio = 1 - max(entered_clerical_payroll_ratio, user_estimated_clerical_payroll_ratio)
if estimated_non_clerical_payroll_ratio > 0:
estimated_total_non_clerical_payroll = estimated_non_clerical_payroll_ratio * user_total_projected_payroll
entered_non_clerical_payroll_ratio = min(1, input_data['payroll_non_clerical'].sum() / estimated_total_non_clerical_payroll)
else:
entered_non_clerical_payroll_ratio = 0
return {'clerical': entered_clerical_payroll_ratio,
'non_clerical': entered_non_clerical_payroll_ratio}
def calc_diamond_bound_ratios(entered_clerical_payroll_ratio, entered_non_clerical_payroll_ratio,
bound_ratios_filename):
"""Calculates the upper and lower bound ratios for the diamond
Args:
**entered_clerical_payroll_ratio**: The ratio of clerical payroll to the
total payroll entered\n
**entered_non_clerical_payroll**: The ratio of non clerical payroll
entered to the non clerical payroll estimated\n
**bound_ratios_filename**: csv file containing the bound ratios for each
division
Return:
A tuple whose 0th element is the lower bound ratio, and 1st element
is the upper bound ratio. If ratios cannot be calculated, both are
``numpy.NaN``
"""
if 0.5 < entered_non_clerical_payroll_ratio < 1:
base_ratio = entered_non_clerical_payroll_ratio
elif 0.5 < entered_clerical_payroll_ratio < 1 and user_estimated_clerical_payroll_ratio == 1:
base_ratio = entered_clerical_payroll_ratio
else:
return (numpy.NaN, numpy.NaN)
bounds_base = (base_ratio - round_down(base_ratio, 1)) * 10
bound_ratios = pandas.read_csv(bound_ratios_filename)
bounds = bound_ratios.select(lambda ix: bound_ratios['ratio_lower_cap'][ix] < base_ratio <= bound_ratios['ratio_upper_cap'][ix]
).to_dict('records')[0]
return ((bounds_base * bounds['lower_bound_delta']) + bounds['lower_bound_ratio'],
(bounds_base * bounds['upper_bound_delta']) + bounds['upper_bound_ratio'])
def check_inputs(input_data, entered_ratios):
"""Checks whether inputs can be used by model for scoring
Args:
**input_data**: User input state, class code and payroll data after
clerical and non-clerical payrolls have been calculated\n
**entered_ratios**: The entered ratios dictionary returned by
``calc_entered_payroll_ratios(input_data)``
Return:
A tuple whose 0th element indicates whether inputs are usable or not,
and if not, the 1st element provides the reason
"""
if input_data['payroll'].sum() > (user_total_projected_payroll + 100):
return (False, 'Input payroll exceeds total projected payroll')
if input_data['payroll_clerical'].sum() > (user_total_projected_payroll * (user_estimated_clerical_payroll_ratio + 0.01)):
return (False, 'Clerical payroll entry exceeds total clerical payroll estimate')
estimated_non_clerical_payroll_ratio = 1 - max(entered_ratios['clerical'], user_estimated_clerical_payroll_ratio)
if input_data['payroll_non_clerical'].sum() > (user_total_projected_payroll * (estimated_non_clerical_payroll_ratio + 0.01)):
return (False, 'Non-clerical payroll entry exceeds total non-clerical payroll estimate')
if ((user_estimated_clerical_payroll_ratio == 1 and entered_ratios['clerical'] > 0.6) or
(user_estimated_clerical_payroll_ratio < 1 and entered_ratios['non_clerical'] > 0.6)):
return (True, '')
return (False, 'Not enough payroll data entered')
def run_model(model_inputs, model_coefficients_filename, rules_dict):
"""Runs the model based on the provided inputs
    Builds a GLMModel object from the external coefficients, loads the rules
    used to derive the model features from the input variables, and then runs
    the model on the inputs provided.
Args:
**model_inputs**: A dictionary or DataFrame containing the variables
required by the model as keys\n
**model_coefficients_filename**: Path to file containing the model
coefficients for the Worker's Comp GC model\n
**rules_dict**: Dictionary with lambda functions to derive the features
used by the model from the input variables
Return:
The predicted loss ratio for the account
"""
wc_gc_model = GLMModel(pandas.read_csv(model_coefficients_filename))
wc_gc_model.load_rules(rules_dict)
return math.exp(wc_gc_model.prep_data_and_score(model_inputs.iloc[0])[0])
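# Illustrative only -- a hypothetical rules_dict entry (the real rules are
# eval'd from the model config file): each key names a model feature, each
# value derives it from the raw inputs, e.g.
#   {'log_payroll': lambda row: math.log(row['payroll'] + 1)}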
def main_wc_gc_model():
config = ConfigParser.ConfigParser()
config.read('config/model_config.config')
app_log = setup_logging('wc_gc_logger', config.get('logger', 'log_file_name'))
app_log.info('Scoring DUNS number: %d' % user_duns_number)
rate_lookup_table = read_rate_lookup(config.get('data_files', 'rate_lookup'), user_is_uslh)
input_data['lookup_key'] = input_data['state'] + input_data['class_code']
avg_rates = merge_rate_lookup(input_data, rate_lookup_table)
entered_ratios = calc_entered_payroll_ratios(input_data)
inputs_valid, reason = check_inputs(input_data, entered_ratios)
if not inputs_valid:
return (numpy.NaN, numpy.NaN, numpy.NaN, reason)
payrolls = calc_payroll_ratio(input_data)
calc_allocate_clerical_payroll(payrolls, user_estimated_clerical_payroll)
calc_clerical_class_premium(payrolls, rate_lookup_table)
calc_standard_premium(payrolls, user_experience_mod)
standard_premium_to_allocate = calc_missing_standard_premium(payrolls, avg_rates, user_experience_mod)
calc_allocated_standard_premium(payrolls, standard_premium_to_allocate)
calc_premium_discount(payrolls, config.getfloat('constants', 'other_loadings'),
eval(config.get('data_files', 'ncci_tier_files')))
state_rate_data = merge_wcng_lr_rate_need(payrolls, user_division, user_effective_date, user_is_uslh,
config.get('data_files', 'state_rate_need_lookup'),
config.get('data_files', 'wcng_lr'))
credit_scores = get_dnb_scores(user_duns_number,
default_credit_score_pct=config.get('constants', 'default_duns_cs_pct'),
default_financial_score_pct=config.get('constants', 'default_duns_fs_pct'))
total_class_premium = input_data['class_premium'].sum()
predom_state = input_data.groupby(by='state')['class_premium'].sum().idxmax()
model_inputs = calc_normalized_claim_counts(input_history, predom_state, eval(config.get('aqi', 'aqi_data')),
total_class_premium, config.get('data_files', 'cdf_file'))
model_inputs['credit_score_pct'] = credit_scores['credit_score_pct']
model_inputs['financial_score_pct'] = credit_scores['financial_score_pct']
model_inputs['payroll'] = user_total_projected_payroll
model_inputs['major_group'] = get_sic_major_group(user_sic_code)
predicted_lr = run_model(model_inputs, config.get('data_files', 'model_coefficients_file'),
eval(config.get('model_rules', 'rules')))
state_rate_data['target_pricing_deviation_factor'] = (((predicted_lr / state_rate_data['avg_wcng_loss_ratio'])
* state_rate_data['variable_rate_need'])
+ state_rate_data['fix_rate_need'])
state_rate_data['estimated_premium'] = state_rate_data['target_pricing_deviation_factor'] * state_rate_data['manual_rate_pre_model']
output_midpoint = state_rate_data['estimated_premium'].sum()
lower_ratio, upper_ratio = calc_diamond_bound_ratios(entered_ratios['clerical'], entered_ratios['non_clerical'],
config.get('data_files', 'bound_ratios'))
return (output_midpoint * lower_ratio, output_midpoint, output_midpoint * upper_ratio, '')
|
[
"ven.karri@aig.com"
] |
ven.karri@aig.com
|
cddab9580d9af9da3a18d635c9717ed2acc1f201
|
4bc2d855558ccb962991f997e9779919031687dd
|
/capstone/causalmodel/migrations/0001_initial.py
|
d9fe267a7a9b8e4c5697913127b312847c7b2554
|
[] |
no_license
|
jmblontoc/Likha-Capstone
|
80081e44b7ad6457eb776432e623c6db8b7a17e2
|
e1c32911b58cd1419c8e1a554ac32210456d201d
|
refs/heads/master
| 2022-12-10T03:26:32.946638
| 2018-12-09T04:33:10
| 2018-12-09T04:33:10
| 134,726,142
| 0
| 1
| null | 2022-11-25T23:52:42
| 2018-05-24T14:21:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
# Generated by Django 2.0.5 on 2018-06-27 15:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DataMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('metric', models.CharField(max_length=255)),
('value', models.DecimalField(decimal_places=2, max_digits=10)),
('threshold', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='RootCause',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
),
migrations.AddField(
model_name='datamap',
name='root_cause',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='causalmodel.RootCause'),
),
]
|
[
"37819032+jmblontoc@users.noreply.github.com"
] |
37819032+jmblontoc@users.noreply.github.com
|
3ad629c37259ce486878f28cf6844c6bc01b524f
|
bdb781b295f2c4fe570ff2db39b9bfe38cab6476
|
/example/auth0login/urls.py
|
68805a4c05ba7da12313edc66b0c5a93f436d96a
|
[] |
no_license
|
jangita/learn-django-auth0
|
c8386dc138e9706c9507c5472402b60cb119bc17
|
3cdf25a066409dd7acecf0308ed901fbc136fddb
|
refs/heads/master
| 2023-01-02T01:34:53.665904
| 2020-10-28T03:17:53
| 2020-10-28T03:17:53
| 308,088,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
from django.urls import path, include
# Assumption: views is this app's own views module.
from . import views
urlpatterns = [
path('', views.index),
path('dashboard', views.dashboard),
path('logout', views.logout),
path('', include('django.contrib.auth.urls')),
path('', include('social_django.urls')),
]
|
[
"jangita.nyagudi@gmail.com"
] |
jangita.nyagudi@gmail.com
|
7557f544a64fd0f4ff99c1cbdade203205fdfb81
|
279967844e5b35f5d926f75f34d2a3e926819a52
|
/covid-19-timelapse/dashapps/term_frequency/utils.py
|
9e1c38043f6edbf626ced82cf315979851293bb5
|
[
"Apache-2.0"
] |
permissive
|
thehonduranjazzman/developer-platform
|
e22e62c27714e531fb87c77087aafb286221a797
|
ba3d8be69c78dc3ec189d0e1df045f5e7272341c
|
refs/heads/master
| 2022-05-23T18:27:27.935734
| 2020-04-22T08:54:13
| 2020-04-22T08:54:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,727
|
py
|
import collections
import json
import random
import re
from datetime import datetime
import fastavro
import nltk
import pandas as pd
import requests
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.util import ngrams
from .config import TERMS_TO_REMOVE
# nltk.download('stopwords')
# nltk.download('punkt')
def ngram_frequencies(n, articles, verbose=True, start_date=None, end_date=None):
"""
Generate NGram frequencies from an article dataframe
Args:
n (int): The size of the ngram
articles (pandas.DataFrame): Articles to process
verbose (bool): Whether or not to print some useful information while the process is running.
start_date (optional): If given, keep only articles published on or after this date.
end_date (optional): If given, keep only articles published before this date.
Returns:
frequencies (dict): Dict containing ngram counts by day.
"""
if start_date:
articles = articles[articles['publication_datetime'] >= start_date]
if end_date:
articles = articles[articles['publication_datetime'] < end_date]
articles['publication_datetime'] = articles['publication_datetime'].dt.floor(
'D')
grouped_by_pub_date = articles.sort_values(
by='publication_datetime').groupby(['publication_datetime'])
if verbose:
print('Number of groups (days): {}'.format(
len(grouped_by_pub_date.groups)))
sw = set(stopwords.words('english'))
frequencies = {}
for i, group in enumerate(grouped_by_pub_date.groups):
articles = grouped_by_pub_date.get_group(group)
article_tokens = [word.lower() for text in articles['full_articles']
for word in word_tokenize(text)
if (word not in sw) and word.isalnum()]
ngrams_ = ngrams(article_tokens, n)
counted = collections.Counter(ngrams_)
most_common = {' '.join(list(k)): v for (
k, v) in counted.most_common(100)}
pub_date_str = datetime.strftime(group, '%Y-%m-%d')
#pub_date_str = datetime.strftime(group, '%#m/%d/%Y')
if pub_date_str in frequencies:  # keys are date strings, not group timestamps
frequencies[pub_date_str].update(most_common)
else:
frequencies[pub_date_str] = {}
frequencies[pub_date_str].update(most_common)
if verbose:
if i > 0 and i % 5 == 0:
print('Processed {} groups.'.format(i))
return frequencies
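# Illustrative usage (assumes `articles` has 'publication_datetime' and
# 'full_articles' columns; the counts shown are hypothetical):
#   bigram_counts = ngram_frequencies(2, articles, start_date='2020-03-06')
#   # -> {'2020-03-06': {'covid 19': 412, ...}, ...}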
def strip_split(value):
return value.strip(',').split(',')
def strip_commas(value):
return value.strip(',')
def clean_up_text(string):
if string:
return re.sub(r'[^A-Za-z0-9!?.,:;\' ]', ' ', string)
return ''
def process_datetimes(value):
return datetime.utcfromtimestamp(value / 1000)
def snapshot_files_to_dataframe(user_key, snapshot_id):
'''
Retrieve the files from a completed extraction
Args:
user_key: Snapshots API user key.
snapshot_id: The id of the completed snapshot job whose files are retrieved.
'''
headers = {
'content-type': 'application/json',
'user-key': user_key
}
article_dataframes = []
job_url = 'https://api.dowjones.com/alpha/extractions/documents/{}'.format(
snapshot_id)
files = requests.get(job_url, headers=headers).json()[
'data']['attributes']['files']
for f in files:
uri = f['uri']
file_name = uri.split('/')[-1]
if len(file_name) > 0:
file_response = requests.get(
uri, headers=headers, allow_redirects=True, stream=True)
file_response.raw.decode_content = True
records = fastavro.reader(file_response.raw)
records_df = pd.DataFrame(records)
article_dataframes.append(records_df)
data = pd.concat(article_dataframes, ignore_index=True)
return data
def reformat_dataframe(source_df):
"""
Reformat dataframe to use in the graph.
Args:
source_df: DataFrame to reformat
Returns:
New dataframe: reformatted dataframe
"""
# Collect rows in a list and build the frame once (avoids the quadratic,
# long-deprecated DataFrame.append inside the loop).
rows = []
for i in range(len(source_df)):
for j in source_df.iloc[i].index:
rows.append({
'day': source_df.iloc[i].name,
'term': str(j),
'count': source_df.iloc[i][j]
})
new_df = pd.DataFrame(rows, columns=['day', 'term', 'count'])
return new_df
def generate_figure(source_df):
"""
Generate figure with a slider
Args:
source_df: Dataframe with data to use for the figure
Returns:
Figure dict containing necessary parameters to pass to go.Figure()
"""
# Define the figure
fig_dict = {
'data': [],
'layout': {},
'frames': []
}
days = []
for day in source_df['day']:
if day not in days:
days.append(day)
terms = []
for term in source_df['term']:
if term not in terms:
terms.append(term)
fig_dict['layout']['xaxis'] = {
'range': [source_df['day'].min(), source_df['day'].max()],
'title': 'Publication Date'
}
fig_dict['layout']['yaxis'] = {
'range': [0, 4000],
'title': 'Term Frequency'
}
fig_dict['layout']['title'] = 'COVID-19 - Term Evolution'
fig_dict['layout']['hovermode'] = 'x'
fig_dict['layout']['sliders'] = {
'args': [
'transition', {
'duration': 0,
'easing': 'linear'
}
],
'initialValue': days[0],
'plotlycommand': 'animate',
'values': days,
'visible': True
}
sliders_dict = {
'active': 0,
'yanchor': 'top',
'xanchor': 'left',
'currentvalue': {
'font': {
'size': 12
},
'visible': True,
'xanchor': 'right'
},
'transition': {
'duration': 0,
'easing': 'linear'
},
'pad': {
'b': 10,
't': 50
},
'len': 1.0,
'steps': []
}
# Generate the first point in the display
day_1 = days[0]
for term in terms:
dataset_by_day = source_df[source_df['day'] == day_1]
dataset_by_day_and_term = dataset_by_day[dataset_by_day['term'] == term]
data_dict = {
'x': list(dataset_by_day_and_term['day']),
'y': list(dataset_by_day_and_term['count']),
'mode': 'lines',
'text': list(dataset_by_day_and_term['term']),
'name': term,
'line': {
'width': 3
},
'showlegend': True
}
fig_dict['data'].append(data_dict)
all_x = []
# Create frames
for i, day in enumerate(days):
all_x.append(day)
frame = {'data': [], 'name': str(day)}
for term in terms:
dataset_by_day = source_df[source_df['day'] == day]
dataset_by_day_and_term = dataset_by_day[dataset_by_day['term'] == term]
all_counts = list(source_df[source_df['term'] == term]['count'])
if i == 0:
all_y = [all_counts[i]]
else:
all_y = all_counts[:i+1]
data_dict = {
'x': all_x,
'y': all_y,
'mode': 'lines',
'text': list(dataset_by_day_and_term['term']),
'name': term,
'line': {
# 'color': term_color_dict[term]
'width': 3
},
'showlegend': True
}
frame['data'].append(data_dict)
fig_dict['frames'].append(frame)
slider_step = {
'args': [
[day],
{
'frame': {
'duration': 0,
'redraw': False
},
'mode': 'immediate',
'transition': {
'duration': 0
}
}
],
'label': day,
'method': 'animate'
}
sliders_dict['steps'].append(slider_step)
fig_dict['layout']['sliders'] = [sliders_dict]
return fig_dict
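# Illustrative usage of the returned dict:
#   import plotly.graph_objects as go
#   fig = go.Figure(generate_figure(terms_df))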
def update_terms_figure(date, terms_df):
"""
Generate a figure frame using the date.
Args:
date: The date until to generate the frame.
terms_df: Dataframe to use.
"""
filtered_df = terms_df[terms_df['day'] <= date]
days = [day for day in filtered_df['day'].unique()]
terms = [term for term in filtered_df['term'].unique()]
traces = []
for term in terms:
counts = list(filtered_df[filtered_df['term'] == term]['count'])
data_dict = {
'x': days,
'y': counts,
'mode': 'lines',
'text': [term],
'name': term,
'line': {
'width': 3
}
}
traces.append(data_dict)
return {
'data': traces,
'layout': dict(
xaxis = {
'range': [terms_df['day'].min(), terms_df['day'].max()],
'title': 'Publication Date',
'showgrid': False
},
yaxis = {
'range': [0, 3500],
'title': 'Term Frequency',
'showgrid': False
},
hovermode = 'x',
title = 'Bi-grams in the news',
paper_bgcolor = '#39485A',
plot_bgcolor = '#39485A',
font = dict(color = 'white', family='SimplonRegular')
)
}
def ngram_dataframe_from_file(bigrams_or_path, read_from_file=False, start_date=None):
"""
Generate the ngram dataframe to use in charts from a file.
Args:
bigrams_or_path (str): Either the bigrams to use for dataframe, or file path to read bigrams from.
read_from_file (bool): Whether or not to read bigrams from file.
Returns:
Dataframe containing dates, bigrams, counts to use in the charts.
"""
if read_from_file:
bigrams = json.load(open(bigrams_or_path, 'rt', encoding='utf-8'))
else:
bigrams = bigrams_or_path
bigram_df = pd.DataFrame.from_dict(bigrams).fillna(0)
date_ind = bigram_df.swapaxes('index', 'columns', copy=True)
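# NOTE: the chart window below is hardcoded; the start_date parameter above is
# currently unused.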
date_ind = date_ind[date_ind.index >= '2020-03-06']
date_ind = date_ind[date_ind.index <= '2020-04-01']
to_remove = TERMS_TO_REMOVE
top_ngrams = date_ind.sum().sort_values(ascending=False).head(100)
top_ngrams = top_ngrams.keys().tolist()
relevant_terms = set(top_ngrams) - set(to_remove)
df_for_chart = date_ind[relevant_terms]
return reformat_dataframe(df_for_chart)
|
[
"miballegh@outlook.com"
] |
miballegh@outlook.com
|
bee21a100ddcbd04daa619398ab9c09790be2d86
|
106536a7448d4414fac079cb657044f1dc92a588
|
/framework/machine.py
|
6cb012ab17185fe4a33168086a06f249a3002025
|
[] |
no_license
|
ChrisQiqiang/drlScheduler
|
0b9a10c8de4883cea2ada7565cdfb65185608dc4
|
2cd8b984bfed16687a7852baccb79742d1a35773
|
refs/heads/main
| 2023-08-03T17:55:17.654560
| 2021-09-14T15:17:56
| 2021-09-14T15:17:56
| 405,674,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,161
|
py
|
from framework.instance import Instance
class MachineConfig(object):
# def __init__(self, machine_id, cpu_capacity, memory_capacity, disk_capacity):#, cpu=None, memory=None, disk=None):
# self.id = machine_id
# self.cpu_capacity = cpu_capacity
# self.memory_capacity = memory_capacity
# self.disk_capacity = disk_capacity
# """self.cpu = cpu_capacity if cpu is None else cpu
# self.memory = memory_capacity if memory is None else memory
# self.disk = disk_capacity if disk is None else disk"""
# self.to_schedule = False
def __init__(self, machine_id, cpu_capacity):
self.id = machine_id
self.cpu_capacity = cpu_capacity
self.to_schedule = False
class Machine(object):
# def __init__(self, machine_config):
# self.id = machine_config.id
# self.cpu_capacity = machine_config.cpu_capacity
# self.memory_capacity = machine_config.memory_capacity
# self.disk_capacity = machine_config.disk_capacity
# """self.cpu = machine_config.cpu
# self.memory = machine_config.memory
# self.disk = machine_config.disk"""
# self.cluster = None
# self.instances = {}
def __init__(self, machine_config):
self.id = machine_config.id
self.cpu_capacity = machine_config.cpu_capacity
self.cluster = None
self.instances = {}
def attach(self, cluster):
self.cluster = cluster
def add_instance(self, instance_config):
# assert instance_config.cpu <= self.cpu and instance_config.memory <= self.memory and instance_config.disk <= self.disk
# print('instance_config.cpu = ', instance_config.cpu, ', self.cpu = ', self.cpu)
# assert instance_config.cpu <= self.cpu
instance = Instance(instance_config)
self.instances[instance.id] = instance
"""self.cpu -= instance.cpu
self.memory -= instance.memory
self.disk -= instance.disk"""
instance.attach(self)
# def accommodate_w(self, instance, cpu_threshold=0.75, memory_threshold=0.75, disk_threshold=0.75):
# return self.cpu - instance.cpu >= self.cpu_capacity * (1 - cpu_threshold) \
# and self.memory - instance.memory >= self.memory_capacity * (1 - memory_threshold) \
# and self.disk - instance.disk >= self.disk_capacity * (1 - disk_threshold)
def accommodate_w(self, instance, cpu_threshold=0.75):
return self.cpu - instance.cpu >= self.cpu_capacity * (1 - cpu_threshold)
# def accommodate_wo(self, instance, cpu_threshold=0.75, memory_threshold=0.75, disk_threshold=0.75):
# return self.cpu + instance.cpu >= self.cpu_capacity * (1 - cpu_threshold) \
# and self.memory + instance.memory >= self.memory_capacity * (1 - memory_threshold) \
# and self.disk + instance.disk >= self.disk_capacity * (1 - disk_threshold)
def accommodate_wo(self, instance, cpu_threshold=0.75):
return self.cpu + instance.cpu >= self.cpu_capacity * (1 - cpu_threshold)
def pop(self, instance_id):
instance = self.instances.pop(instance_id)
"""self.cpu += instance.cpu
self.memory += instance.memory
self.disk += instance.disk"""
instance.machine = None
return instance
def push(self, instance):
self.instances[instance.id] = instance
"""self.cpu -= instance.cpu
self.memory -= instance.memory
self.disk -= instance.disk"""
instance.attach(self)
@property
def cpu(self):
occupied = 0
for instance in self.instances.values():
occupied += instance.cpu
return self.cpu_capacity - occupied
# @property
# def memory(self):
# occupied = 0
# for instance in self.instances.values():
# occupied += instance.memory
# return self.memory_capacity - occupied
# @property
# def disk(self):
# occupied = 0
# for instance in self.instances.values():
# occupied += instance.disk
# return self.disk_capacity - occupied
|
[
"2290142073@qq.com"
] |
2290142073@qq.com
|
770781cf8434a6484eb3418aafba1bd504f0315d
|
1a819b4d69a7c455199b638b1609d3284ecbf255
|
/alttprbot_srl/racebot.py
|
c760ffc28d30de0301fd73fb1bf3fb04a1d6a28b
|
[] |
no_license
|
Maxor14/sahasrahbot
|
5167355a23a4e9d91171b583fe8065acd0ab99a6
|
9183933869f87743d94867cf52c463179d0b687a
|
refs/heads/master
| 2021-05-22T21:30:54.015013
| 2020-04-01T01:01:47
| 2020-04-01T01:01:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,951
|
py
|
import asyncio
import math
import re
import ircmessage
from alttprbot.database import spoiler_races, srl_races
from alttprbot.tournament import league
from alttprbot.util.srl import srl_race_id
from alttprbot_srl import alt_hunter, discord_integration
from config import Config as c
# Raw strings keep the regex escapes explicit (\x03 and \x02 are IRC
# color/bold control codes).
starting = re.compile(
r"\x034\x02The race will begin in 10 seconds!\x03\x02")
go = re.compile(r"\x034\x02GO!\x03\x02")
newroom = re.compile(
r"Race initiated for (.*)\. Join\x034 (#srl-[a-z0-9]{5}) \x03to participate\.")
runnerdone = re.compile(
r"(.*) (has forfeited from the race\.|has finished in .* place with a time of [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.)")
racedone = re.compile(
r"^Status: Complete \| Game: .*$"
)
srl_game_whitelist = [
'The Legend of Zelda: A Link to the Past Hacks',
'A Link to the Past & Super Metroid Combo Randomizer'
]
async def topic_change_handler(target, source, message, client):
if not (source == 'RaceBot' or source == 'synack'):
return
if target.startswith('#srl-') and racedone.search(message):
await asyncio.sleep(5)
await league.process_league_race_finish(target, client)
async def handler(target, source, message, client):
if not (source == 'RaceBot' or source == 'synack'):
return
srl_id = srl_race_id(target)
if target == '#speedrunslive':
result = newroom.search(message)
if result and result.group(1) in srl_game_whitelist:
if not c.DEBUG:
await asyncio.sleep(1)
await client.join(result.group(2))
await asyncio.sleep(60)
await client.message(result.group(2), "Hi! I'm SahasrahBot, your friendly robotic elder and ALTTPR/SMZ3 seed roller. To see what I can do, visit https://sahasrahbot.synack.live")
else:
print(f'would have joined {result.group(2)}')
if target.startswith('#srl-'):
if starting.match(message) or message == 'test starting':
race = await srl_races.get_srl_race_by_id(srl_id)
if race:
if not client.in_channel(target):
await client.join(target)
await client.message(target, f".setgoal {race['goal']}")
if race['message'] is not None:
await asyncio.sleep(15)
await client.message(target, race['message'])
await srl_races.delete_srl_race(srl_id)
if go.match(message) or message == 'test go':
# spoilers
race = await spoiler_races.get_spoiler_race_by_id(srl_id)
if race:
await client.message(target, 'Sending spoiler log...')
await client.message(target, '---------------')
await client.message(target, f"This race\'s spoiler log: {race['spoiler_url']}")
await client.message(target, '---------------')
await client.message(target, 'GLHF! :mudora:')
await countdown_timer(
ircbot=client,
duration_in_seconds=race['studytime'],
srl_channel=target,
beginmessage=True,
)
await spoiler_races.delete_spoiler_race(srl_id)
await discord_integration.discord_race_start(srl_id)
await alt_hunter.check_race(srl_id)
if message == 'test complete':
await topic_change_handler(target, source, message, client)
result = runnerdone.search(message)
if result:
await discord_integration.discord_race_finish(result.group(1), srl_id)
async def countdown_timer(ircbot, duration_in_seconds, srl_channel, beginmessage=False):
loop = asyncio.get_running_loop()
reminders = [1800, 1500, 1200, 900, 600, 300,
120, 60, 30, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
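# Seconds-remaining marks at which to announce; each mark is removed once it
# fires so every reminder is sent exactly once.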
start_time = loop.time()
end_time = loop.time() + duration_in_seconds
while True:
# print(datetime.datetime.now())
timeleft = math.ceil(start_time - loop.time() + duration_in_seconds)
# print(timeleft)
if timeleft in reminders:
minutes = math.floor(timeleft/60)
seconds = math.ceil(timeleft % 60)
if minutes == 0 and seconds > 10:
msg = f'{seconds} second(s) remain!'
elif minutes == 0 and seconds <= 10:
msg = ircmessage.style(
f"{seconds} second(s) remain!", fg='green', bold=True)
else:
msg = f'{minutes} minute(s), {seconds} seconds remain!'
await ircbot.message(srl_channel, msg)
reminders.remove(timeleft)
if loop.time() >= end_time:
if beginmessage:
await ircbot.message(srl_channel, ircmessage.style('Log study has finished. Begin racing!', fg='red', bold=True))
break
await asyncio.sleep(.5)
|
[
"tcprescott@gmail.com"
] |
tcprescott@gmail.com
|
49af44e9d1dc28c1ec60101728e6a68fa331e058
|
9788bf7929da8a87d7dfab8b633601122df88bf2
|
/accounts/urls.py
|
920c688f52fbd6db80c3959580af4dc27ff733f8
|
[] |
no_license
|
praneshsaminathan/dukaan
|
d0eab83d28625857a84c6f6ab1f44619326985b3
|
f4986966892fb7b3cede083b142bccf35174e068
|
refs/heads/main
| 2023-03-02T02:38:15.003309
| 2021-02-10T17:20:43
| 2021-02-10T17:20:43
| 337,749,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from accounts.views import LoginAPIView, GenerateOTPAPIView, StoreViewSet
from dukaan.utils.apps import get_api_url
router = DefaultRouter(trailing_slash=True)
router.register(r'stores', StoreViewSet, 'api-stores')
urlpatterns = [
path(get_api_url(), include(router.urls)),
path(get_api_url(url_name='generate-otp'), GenerateOTPAPIView.as_view(), name='api-generate-otp'),
path(get_api_url(url_name='login'), LoginAPIView.as_view(), name='api-login')
]
|
[
"pranesh"
] |
pranesh
|
4ddc52309634f93275931f026fe9acd394cf88e0
|
04d1c898b4fdd1b55785c48260f0b7efcd8d0060
|
/int.py
|
76537a32fd9ae97927370dbb376a91ce8b0d25a7
|
[] |
no_license
|
protosscom/python-ch2.2
|
27799f8971839456333aa61ba249c2c67b04efa9
|
61e70008f4261068bb7c570b2f9eaa6a6940f87b
|
refs/heads/master
| 2020-04-10T16:03:52.606662
| 2018-12-10T07:04:40
| 2018-12-10T07:04:40
| 161,131,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
# Binary, octal, decimal, and hexadecimal literals
a = 23
print(type(a))
b = 0b1101
o = 0o23
h = 0x23
print(b, o, h)
# In Python 3.x, int and long were merged; the representable range is unbounded.
e = 2**1024
print(type(e))
print(e)
print(e.bit_length())
# Conversion functions
print(oct(38))
print(hex(38))
print(bin(38))
|
[
"protosscom@gmail.com"
] |
protosscom@gmail.com
|
0a034e44b177bb293899d150df0f040bea24495c
|
8e35bffd191e2eec8b50370828ca954b5e249ae8
|
/flaskps/resources/api/ciclos_lectivos.py
|
ab6b587733294ca3a1e1d6c424845cb928fd9b7a
|
[] |
no_license
|
jmsolar/proySoft2019
|
6a0e42af239f13f3a7e314f5cf740c2a6b6d7a51
|
bc607c3e0c9830d5a0b48d88e299df46b5b20c6f
|
refs/heads/master
| 2023-05-30T02:44:02.410680
| 2020-01-21T17:23:06
| 2020-01-21T17:23:06
| 235,398,209
| 0
| 0
| null | 2023-05-22T22:38:36
| 2020-01-21T17:16:12
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
from flask_restful import Resource
from flask_restful import request
from flaskps.models.ciclo_lectivo import CicloLectivoModel
from flaskps.models.configuracion import Configuracion
class CicloLectivo(Resource):
def get(self):
datatables = False
page = None
if len(request.args) == 0:
ciclos = CicloLectivoModel.all()
else:
datatables = True
start = int(request.args['start'])
page = 1
if start != 0:
page = start // Configuracion.get_config().registros_por_pagina + 1  # integer division: page index, not a float
order = {'column': request.args['columns[' + request.args['order[0][column]'] + '][data]'],
'dir': request.args['order[0][dir]']}
page = CicloLectivoModel.all_by_page(page, order)
ciclos = page.items
ciclos_lectivos = []
for ciclo in ciclos:
semestre = "Primero" if (ciclo.semestre == 0) else "Segundo"
c = {
"id": ciclo.id,
"fecha_ini": ciclo.fecha_ini.strftime("%d/%m/%Y"),
"fecha_fin": ciclo.fecha_fin.strftime("%d/%m/%Y"),
"semestre": semestre
}
ciclos_lectivos.append(c)
if datatables:
return {
"draw": request.args['draw'],
"recordsTotal": page.total,
"recordsFiltered": page.total,
"data": ciclos_lectivos
}
else:
return ciclos_lectivos
class CicloLectivoTalleres(Resource):
def get(self, id):
ciclo = CicloLectivoModel.find_by_id(id)
talleres = []
for taller in ciclo.talleres:
t = {
"id": taller.id,
"nombre": taller.nombre
}
talleres.append(t)
return {
"talleres": talleres
}
|
[
"matias.solar@outlook.com"
] |
matias.solar@outlook.com
|
0dc52145873acef997045ced74eebb0ce1aa6d7f
|
19b0fd18df23da2999d298ee9aa426451b4e5c12
|
/src/sonic_ax_impl/mibs/vendor/__init__.py
|
5514a7346795691dbb1528f20f694081290f58e4
|
[
"Apache-2.0"
] |
permissive
|
qiluo-msft/sonic-snmpagent
|
ced0e2fd053bbed60ee5f22c1794040105ab5a4f
|
a5b2983be06fa51a711cded92cbc4f089a147233
|
refs/heads/master
| 2023-02-19T15:17:49.463707
| 2022-03-28T18:15:00
| 2022-03-28T18:15:00
| 79,850,509
| 0
| 0
|
NOASSERTION
| 2023-02-14T21:49:13
| 2017-01-23T21:33:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
import collections
import time
import psutil
from ax_interface import MIBUpdater
from sonic_ax_impl import logger
class SystemUtilizationHandler(MIBUpdater):
def __init__(self):
super().__init__()
# From the psutil documentation https://pythonhosted.org/psutil/#psutil.cpu_percent:
#
# Warning the first time this function is called
# with interval = 0.0 or None it will return a
# meaningless 0.0 value which you are supposed
# to ignore.
psutil.cpu_percent()
# '...is recommended for accuracy that this function be called with at least 0.1 seconds between calls.'
time.sleep(0.1)
# a sliding window of 60 contiguous 5 sec utilization (up to five minutes)
self.cpuutils = collections.deque([psutil.cpu_percent()], maxlen=60)
self.system_virtual_memory = psutil.virtual_memory()
logger.debug('System Utilization handler initialized.')
def get_cpuutil_5sec(self):
"""
:return: Last polled CPU utilization.
"""
return int(self.cpuutils[-1])
def get_cpuutil_1min(self):
"""
:return: Up to one minute's worth of average CPU utilization.
"""
past_utilization = list(self.cpuutils)[-12:]
return int(sum(past_utilization) / len(past_utilization))
def get_cpuutil_5min(self):
"""
:return: Up to five minute's worth of average CPU utilization.
"""
return int(sum(self.cpuutils) / len(self.cpuutils))
def get_memutil(self):
"""
:return: The current memory utilization (as a percent integer)
"""
return int(self.system_virtual_memory.percent)
def update_data(self):
"""
Background task to add CPU Utilization sample / refresh memory utilization.
"""
cpu_util = psutil.cpu_percent()
self.cpuutils.append(cpu_util)
self.system_virtual_memory = psutil.virtual_memory()
logger.debug('Updating CPU/Mem Utilization with: {}% / {}%'.format(cpu_util, self.get_memutil()))
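# Module-level singleton; update_data() is presumably driven on a schedule by
# the MIBUpdater machinery so MIB reads always see fresh samples.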
sys_util_h = SystemUtilizationHandler()
|
[
"noreply@github.com"
] |
qiluo-msft.noreply@github.com
|
178a175dbfafdd590e2ff2248e27c5ae44eedd7d
|
1a6b18b8009f64006771b6da742742db45cedfe0
|
/Experiment 3/hyperparams.py
|
b800fe076219572bd4af833256e17f3c0ad8fcfe
|
[] |
no_license
|
HibaShah/Chinese-English-Translation-Machine-Based-on-sequence-to-sequence-network-speech-synthesis-
|
a2776987b1d20f08c965f7b6f781fae5f66ab056
|
ce370129676052e1159c6e42e8ff6cb9be79a044
|
refs/heads/main
| 2023-08-17T16:24:46.735428
| 2021-09-29T07:44:55
| 2021-09-29T07:44:55
| 411,400,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
class Hyperparams:
'''Hyper parameters'''
# pipeline
prepro = False # if True, run `python prepro.py` first before running `python train.py`.
vocab = "PE abcdefghijklmnopqrstuvwxyz'.?" # P: Padding E: End of Sentence
# data
data = "/data/private/voice/LJSpeech-1.0"
# data = "/data/private/voice/nick"
test_data = 'harvard_sentences.txt'
max_duration = 10.0
# signal processing
sr = 22050 # Sample rate.
n_fft = 2048 # fft points (samples)
frame_shift = 0.0125 # seconds
frame_length = 0.05 # seconds
hop_length = int(sr*frame_shift) # samples.
win_length = int(sr*frame_length) # samples.
n_mels = 80 # Number of Mel banks to generate
power = 1.2 # Exponent for amplifying the predicted magnitude
n_iter = 50 # Number of inversion iterations
preemphasis = .97 # or None
max_db = 100
ref_db = 20
# model
embed_size = 256 # alias = E
encoder_num_banks = 16
decoder_num_banks = 8
num_highwaynet_blocks = 4
r = 5 # Reduction factor. Paper => 2, 3, 5
dropout_rate = .5
# training scheme
lr = 0.001 # Initial learning rate.
logdir = "logdir/01"
sampledir = 'samples'
batch_size = 32
|
[
"noreply@github.com"
] |
HibaShah.noreply@github.com
|
3eef37096a1b8dfa04f2d43d8a80e433d5771e3c
|
0d4966bb125abc0def9a48309e8353b05c242c4c
|
/Test1/diseases/migrations/0001_initial.py
|
5d68d0d3942a4853924bb3d73981ea9df6115ece
|
[] |
no_license
|
ChanBong/Viral-De-cease
|
8b7b30c698883f45f26d2f9f2be7ab787399a484
|
b44c95cdbf87af76039ae32bbe3ac4502fe9045e
|
refs/heads/master
| 2023-02-12T20:49:11.879306
| 2021-01-11T16:37:20
| 2021-01-11T16:37:20
| 327,962,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
# Generated by Django 3.1.4 on 2021-01-09 09:05
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Diseas',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('about_s', models.TextField()),
('site', models.URLField()),
('symptoms', models.TextField()),
('about_l', models.TextField()),
],
),
]
|
[
"harsh_k@ch.iitr.ac.in"
] |
harsh_k@ch.iitr.ac.in
|
07c821b253d8b2176af47cd42bb65e0f706db38a
|
3109e3a7f2f2dccc5a806695f0adbe0fed879112
|
/ecommerce/Loma/migrations/0022_auto_20190204_1200.py
|
4724c3c1c3f80c03fa75c1a13fc32a1f6bb13401
|
[] |
no_license
|
Maheshwari2604/ecommercee
|
9ebbf18b4fbf933a0d9641009f7f17ce836de587
|
4411e7e10eccda907711200d2c0d873db3d7f803
|
refs/heads/master
| 2020-04-20T18:03:49.575124
| 2019-02-12T16:02:05
| 2019-02-12T16:02:05
| 169,007,411
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-04 06:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Loma', '0021_auto_20190203_1829'),
]
operations = [
migrations.AlterField(
model_name='promocode_model',
name='promocode_name',
field=models.CharField(max_length=11),
),
]
|
[
"maheshwarishivam2604@gmail.com"
] |
maheshwarishivam2604@gmail.com
|
404cc31ac2c1214b9ebd5d4d1ef590b04436a905
|
0e130ed05664c02888ed2f7305ddacc34192519f
|
/changecsv.py
|
590aea1330fe38d20bbd249578a1c18e515dd5a0
|
[] |
no_license
|
victormm88/Click_Through_Rate_Prediction
|
86acd70784fc11d56bb113a9738ce1b549b6abd1
|
cc2df8724dc95776f3ec6974f13e61a34408ba8c
|
refs/heads/master
| 2021-01-21T07:38:57.697293
| 2015-02-13T05:16:16
| 2015-02-13T05:16:16
| 30,741,451
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' '''
__author__ = 'Wang Junq'
import csv
# Overall click-through prior: with 33,563,901 presumed non-clicks and
# 6,865,066 clicks, pre = 1 / (neg/pos + 1) ~= 0.1698 -- the same constant the
# commented-out rules below use.
pre = 33563901. / 6865066
pre = 1 - pre / (pre + 1)
f_init = open('l1005.csv', 'rb')
f_result = open('l1005-change.csv', 'wb')
csv_init = csv.reader(f_init)
csv_result = csv.writer(f_result)
title = next(csv_init)
csv_result.writerow(title)
for row in csv_init:
# pre = float(row[1])
# if pre < 0.25 and pre > 0.11:
#     pre = 0.1698
# elif pre > 0.6:
#     pre = 0.99
# elif pre > 0.4:
#     pre = 0.6
# elif pre > 0.35:
#     pre = 0.5
temp_list = [row[0], pre]
csv_result.writerow(temp_list)
f_init.close()
f_result.close()
|
[
"351868656@qq.com"
] |
351868656@qq.com
|
74a40fde608fbfe9129efe89a8eff85127fc7b21
|
2d8898337f9b16a084bec9c447af9a59d4a8c69c
|
/server_less/fargate/container-app/main.py
|
0b4ea91b8e7c73b62847457e7511aa1accc70e6b
|
[] |
no_license
|
hayaosato/advent-calendar-2019
|
de22e780ea2a5131da5da5943b93a354dd2e21e9
|
eba09cf3abfbde2e05f7b0e9eb5ca47fab54cdc1
|
refs/heads/master
| 2022-12-28T23:04:32.755496
| 2019-12-20T03:20:18
| 2019-12-20T03:20:18
| 225,546,645
| 0
| 1
| null | 2022-12-08T04:03:50
| 2019-12-03T06:24:04
|
HCL
|
UTF-8
|
Python
| false
| false
| 132
|
py
|
"""
hoge
"""
import sys
def main(arg):
"""
hoge
"""
print(arg)
if __name__ == "__main__":
main(sys.argv[1])
|
[
"jake.bibikyary.880@gmail.com"
] |
jake.bibikyary.880@gmail.com
|
1c07148d7ab0dac268d97289f85bcfd5323f3892
|
4c7ccea26d2a6f7197fcdd7b8413652cea199485
|
/IPython/SdA/StackeddAutoencoder.py
|
88ef597fa78dc3337214ffa36a0bb97d7a894564
|
[] |
no_license
|
cgallego/Section3
|
77fc1c8e5f6dfa273775f165cfb54f28c05e0f52
|
1745cb018811541b6ece603f2762ef05cc263b3b
|
refs/heads/master
| 2021-01-19T06:41:31.153702
| 2016-08-08T16:45:43
| 2016-08-08T16:45:43
| 60,637,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,074
|
py
|
"""
Stacked denoising auto-encoders (SdA) using Theano.
Denoising autoencoders are the building blocks for SdA.
They are based on auto-encoders as the ones used in Bengio et al. 2007.
An autoencoder takes an input x and first maps it to a hidden representation
y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting
latent representation y is then mapped back to a "reconstructed" vector
z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight
matrix W' can optionally be constrained such that W' = W^T, in which case
the autoencoder is said to have tied weights. The network is trained such
that to minimize the reconstruction error (the error between x and z).
For the denosing autoencoder, during training, first x is corrupted into
\tilde{x}, where \tilde{x} is a partially destroyed version of x by means
of a stochastic mapping. Afterwards y is computed as before (using
\tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction
error is now measured between z and the uncorrupted input x, which is
computed as the cross-entropy :
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,
2008
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
import os
import sys
import timeit
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from MultilayerPerceptron import HiddenLayer
from dAutoencoder import dA
from LogisticRegression import LogisticRegression
# start-snippet-1
class SdA(object):
"""Stacked denoising auto-encoder class (SdA)
A stacked denoising autoencoder model is obtained by stacking several
dAs. The hidden layer of the dA at layer `i` becomes the input of
the dA at layer `i+1`. The first layer dA gets as input the input of
the SdA, and the hidden layer of the last dA represents the output.
Note that after pretraining, the SdA is dealt with as a normal MLP,
the dAs are only used to initialize the weights.
"""
def __init__(
self,
numpy_rng,
theano_rng=None,
n_ins=None,
hidden_layers_sizes=None,
corruption_levels=None,
n_outs=None
):
""" This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the sdA
:type hidden_layers_sizes: list of ints
:param hidden_layers_sizes: intermediate layers size, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
:type corruption_levels: list of float
:param corruption_levels: amount of corruption to use for each
layer
"""
self.sigmoid_layers = []
self.dA_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
# The SdA is an MLP, for which all weights of intermediate layers
# are shared with a different denoising autoencoders
# We will first construct the SdA as a deep multilayer perceptron,
# and when constructing each sigmoidal layer we also construct a
# denoising autoencoder that shares weights with that layer
# During pretraining we will train these autoencoders (which will
# lead to changing the weights of the MLP as well)
# During fine-tuning we will finish training the SdA by doing
# stochastic gradient descent on the MLP
for i in range(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden units of
# the layer below or the input size if we are on the first layer
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# the input to this layer is either the activation of the hidden
# layer below or the input of the SdA if you are on the first
# layer
if i == 0:
layer_input = self.x
else:
layer_input = self.sigmoid_layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
# it's arguably a philosophical question...
# but we are going to only declare that the parameters of the
# sigmoid_layers are parameters of the StackedDAA
# the visible biases in the dA are parameters of those
# dA, but not the SdA
self.params.extend(sigmoid_layer.params)
# Construct a denoising autoencoder that shared weights with this
# layer
dA_layer = dA(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
bhid=sigmoid_layer.b)
self.dA_layers.append(dA_layer)
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs
)
self.params.extend(self.logLayer.params)
# construct a function that implements one step of finetunining
# compute the cost for second phase of training,
# defined as the negative log likelihood
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
def pretraining_functions(self, train_set_x, np_train_y, batch_size):
''' Generates a list of functions, each of them implementing one
step in training the dA corresponding to the layer with the same index.
The function will require as input the minibatch index, and to train
a dA you just need to iterate, calling the corresponding function on
all minibatch indexes.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared variable that contains all datapoints used
for training the dA
:type batch_size: int
:param batch_size: size of a [mini]batch
:type learning_rate: float
:param learning_rate: learning rate used during training for any of
the dA layers
'''
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
corruption_level = T.scalar('corruption') # % of corruption to use
learning_rate = T.scalar('lr') # learning rate to use
# begining of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for kdA, dAuto in enumerate(self.dA_layers):
print(kdA, dAuto)
# get the cost and the updates list
cost, updates = dAuto.get_cost_updates(corruption_level,
learning_rate)
# compile the theano function
fn = theano.function(
inputs=[
index,
theano.In(corruption_level, value=0.2),
theano.In(learning_rate, value=0.1)
],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[batch_begin: batch_end]
}
)
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
def build_finetune_functions(self, datasets, batch_size, learning_rate):
'''Generates a function `train` that implements one step of
finetuning, a function `validate` that computes the error on
a batch from the validation set, and a function `test` that
computes the error on a batch from the testing set
:type datasets: list of pairs of theano.tensor.TensorType
:param datasets: It is a list that contains all the datasets;
it has to contain three pairs, `train`,
`valid`, `test` in this order, where each pair
is formed of two Theano variables, one for the
datapoints, the other for the labels
:type batch_size: int
:param batch_size: size of a minibatch
:type learning_rate: float
:param learning_rate: learning rate used during finetune stage
'''
(train_set_x, train_set_y) = datasets[0]
(valid_set_x, valid_set_y) = datasets[1]
(test_set_x, test_set_y) = datasets[2]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_valid_batches //= batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_test_batches //= batch_size
# compute number of minibatches for training, validation and testing
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# compute list of fine-tuning updates
updates = [
(param, param - gparam * learning_rate)
for param, gparam in zip(self.params, gparams)
]
train_fn = theano.function(
inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: train_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='train'
)
test_score_i = theano.function(
[index],
self.errors,
givens={
self.x: test_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: test_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='test'
)
valid_score_i = theano.function(
[index],
self.errors,
givens={
self.x: valid_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: valid_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='valid'
)
# Create a function that scans the entire validation set
def valid_score():
return [valid_score_i(i) for i in range(n_valid_batches)]
# Create a function that scans the entire test set
def test_score():
return [test_score_i(i) for i in range(n_test_batches)]
return train_fn, valid_score, test_score
def sigmoid_activate(self, Xtest, W, b):
# NumPy forward pass for one hidden layer; must use the same activation
# the layers were built with (T.nnet.sigmoid above), i.e. the logistic
# function.
sigmoid_input = Xtest
z = np.dot(sigmoid_input, W.get_value(borrow=True)) + b.get_value(borrow=True)
sigmoid_output = 1. / (1. + np.exp(-z))
return sigmoid_output
def softmax_activate(self, Xtest, logLayer):
# code and compute
softmax_input = Xtest
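# Note: v / np.sum(v) below assumes a single 1-D example; a batch would need
# per-row normalization (np.sum(v, axis=1, keepdims=True)).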
v = np.exp( np.dot(softmax_input, logLayer.W.get_value(borrow=True)) + logLayer.b.get_value(borrow=True))
softmax_output = v/np.sum(v)
return softmax_output
def predict_functions(self, Xtest):
''' Given a set of examples, produce the vector y' of SdA predictions.
'''
tmp = Xtest
for L in self.sigmoid_layers:
tmp = self.sigmoid_activate( tmp, L.W, L.b )
# finalize with log layer
tmp = self.softmax_activate( tmp, self.logLayer )
return tmp
|
[
"admin@webdsdesign.com"
] |
admin@webdsdesign.com
|
1e92f6030603376b040c99b7ed7806971393cfca
|
a500d0a13e025a7e25376592188663f26c13385e
|
/lpthw/ex24.py
|
088c897cf7916cf73da44aba4071ebebfe6f2a79
|
[] |
no_license
|
sraywall/GitTutorial
|
c6096cfa9dc5c89ebaedee10ee93fed69118f296
|
cd0de5db58e42fb4a5094504147ba804b0424247
|
refs/heads/master
| 2021-04-27T20:36:30.290444
| 2020-05-07T19:27:06
| 2020-05-07T19:27:06
| 122,381,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
print("Let's practice everything.")
print('You\'d need to know \'bout escapes with \\ that do:')
print('\n newlines and \t tabs.')
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print("--------------")
print(poem)
print("--------------")
five = 10 - 2 + 3 - 6
print(f"This should be five: {five}")
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
#remember that this is another way to format a string
print("With a starting point of: {}".format(start_point))
# it's just like with an f"" string
print(f"We'd have {beans} beans, {jars} jars, and {crates} crates.")
start_point = start_point / 10
print("We can also do that this way:")
formula = secret_formula(start_point)
# this is an easy way to apply a list to a format string
print("We'd have {} beans, {} jars, and {} crates.".format(*formula))
|
[
"sraywall@gmail.com"
] |
sraywall@gmail.com
|
9c68ae44c857794289d718b86b9cf28781944546
|
d49f38323dc30a3cb4a581b451f7db7eec220324
|
/app.py
|
c50f59488d7cad0a63272dce103f97c62cf594dd
|
[] |
no_license
|
bbiyongel/NaverAPI-telegram
|
0e67259ed2faa86860014f0a5ff1ee0528175b67
|
bfcffdb03c6c2cb2387aee461490c520542227bf
|
refs/heads/master
| 2022-01-15T19:50:28.409431
| 2019-07-12T09:00:15
| 2019-07-12T09:00:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,740
|
py
|
from pprint import pprint
from flask import Flask, request
import requests
from decouple import config
import random
app = Flask(__name__)
token = config('TELEGRAM_TOKEN')
base_url = f"https://api.telegram.org/bot{token}"
naver_client_id = config('NAVER_CLIENT_ID')
naver_client_secret = config('NAVER_CLIENT_SECRET')
@app.route(f'/{token}', methods=['POST']) #
def telegram():
response = request.get_json()
chat_id = response.get('message').get('chat').get('id')
# If a photo file comes in,
if response.get('message').get('photo'):
# Get the photo file's id
file_id = response.get('message').get('photo')[-1].get('file_id')
# Ask the Telegram server for the file's path.
file_response = requests.get(
f'{base_url}/getFile?file_id={file_id}').json()
# Build a URL from the file path.
file_path = file_response.get('result').get('file_path')
file_url = f'https://api.telegram.org/file/bot{token}/{file_path}'
# print(file_url)
response = requests.get(file_url, stream=True)
image = response.raw.read()
# 2. Set the request URL
naver_url = 'https://openapi.naver.com/v1/vision/celebrity'
# 3. Send the request! (POST)
headers = {'X-Naver-Client-Id': naver_client_id,
'X-Naver-Client-Secret': naver_client_secret
}
response = requests.post(naver_url, headers=headers, files={'image': image}).json()
if response.get('faces'):
best = response.get('faces')[0].get('celebrity')
if best.get('confidence') > 0.2:
text = f"{best.get('confidence')*100}%만큼 {best.get('value')}를 닮으셨네요"
else:
text = "연예인을 닮지 않음..."
else:
text = "사람 아닌듯"
# print(text)
api_url = f'{base_url}/sendMessage?chat_id={chat_id}&text={text}'
requests.get(api_url)
# If a text message comes in
elif response.get('message').get('text'):
# Store the user's message in text and the chat info in chat_id
text = response.get('message').get('text')
chat_id = response.get('message').get('chat').get('id')
if '/번역 ' == text[0:4]:  # "/번역" = "/translate" command
# Bug fix: naver_url from the photo branch is not in scope here; the
# Papago translation endpoint is assumed (the parsing below matches its
# message.result.translatedText response format).
naver_url = 'https://openapi.naver.com/v1/papago/n2mt'
headers = {'X-Naver-Client-Id': naver_client_id,
'X-Naver-Client-Secret': naver_client_secret
}
data = {
'source': 'ko',
'target': 'en',
'text': text[4:]
}
# data = {
# 'source': 'en',
# 'target': 'ko',
# 'text': 'War never again! Never again war!'
# }
response = requests.post(naver_url, headers=headers, data=data).json()
text = response.get('message').get('result').get('translatedText')
# If a greeting comes in, reply with my own greeting
elif '안녕' in text or 'hi' in text:
text = '간디'
elif '로또' in text:
text = sorted(random.sample(range(1,46), 6))
# Build the final url and send the message
if text=='호우':
text = '장마임'
if text=='패드립':
text = '패드립 머신 가동'
api_url = f'{base_url}/sendMessage?chat_id={chat_id}&text={text}'
requests.get(api_url)
return 'OK', 200 # 200: response status code
if __name__ == '__main__':
import os
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
|
[
"jjgk91@naver.com"
] |
jjgk91@naver.com
|
e6a2a28a5d17ffa3424d45048710a8687df2c863
|
9256eeff108787245a1d9a8e27f80c04377ba10f
|
/src/datasets/mnist.py
|
49071693a70659a10514560cc67cff58309b79cf
|
[
"MIT"
] |
permissive
|
martinhavlicek/meta-inference-public
|
99a22daef937921deb9f677f68aa1c954e456e55
|
3cad0b84acd407f3d790f3d75d3045f62bdbf250
|
refs/heads/master
| 2022-04-12T14:15:42.514426
| 2020-03-31T21:39:50
| 2020-03-31T21:39:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,748
|
py
|
import math
import numpy as np
from PIL import Image
from torchvision import datasets
from torchvision import transforms
# ----- ROTATED MNIST -----
ROTATIONS = np.arange(-180, 180, 20)
DEFAULT_ROTATIONS = ROTATIONS[0::2]
UNSEEN_ROTATIONS = ROTATIONS[1::2]
DEFAULT_ROTATIONS_SPARSE = np.array([-160, -80, 0, 80, 160])
UNSEEN_ROTATIONS_SPARSE = np.array([-180, -140, -120, -100, -60, -40, -20, 20, 40, 60, 100, 120, 140])
DEFAULT_ROTATIONS_DISJOINT = ROTATIONS[:len(ROTATIONS) // 2 + 1]
UNSEEN_ROTATIONS_DISJOINT = ROTATIONS[len(ROTATIONS) // 2 + 1:]
ALL_ROTATIONS = ROTATIONS
DEFAULT_ROTATIONS_DICT = {
'standard': DEFAULT_ROTATIONS,
'sparse': DEFAULT_ROTATIONS_SPARSE,
'disjoint': DEFAULT_ROTATIONS_DISJOINT
}
UNSEEN_ROTATIONS_DICT = {
'standard': UNSEEN_ROTATIONS,
'sparse': UNSEEN_ROTATIONS_SPARSE,
'disjoint': UNSEEN_ROTATIONS_DISJOINT
}
def load_many_rotated_mnist(data_dir, image_size=32, train=True,
rotations=DEFAULT_ROTATIONS):
"""
Load one MNIST dataset per rotation, where every image in a dataset
shares that particular rotation.
"""
return [
load_rotated_mnist( data_dir, image_size=image_size,
train=train, rotation=rotation)
for rotation in rotations
]
def load_rotated_mnist(data_dir, image_size=32, train=True, rotation=0):
"""
Load a MNIST dataset where each image has a rotation.
"""
rotate_image = rotate_transform(rotation)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
rotate_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def rotate_transform(angle):
def f(img):
return transforms.functional.rotate(img, angle)
return f
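# Illustrative usage (assumes a local ./data directory):
#   dsets = load_many_rotated_mnist('./data', train=True,
#                                   rotations=DEFAULT_ROTATIONS_DICT['standard'])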
# ----- SCALED MNIST -----
SCALES = np.arange(0.5, 2.0, 0.1)
DEFAULT_SCALES = SCALES[0::2]
UNSEEN_SCALES = SCALES[1::2]
DEFAULT_SCALES_SPARSE = np.array([0.6, 1.0 ,1.4, 1.8])
UNSEEN_SCALES_SPARSE = np.array([0.5, 0.7, 0.8, 0.9, 1.1, 1.2, 1.3, 1.5, 1.6, 1.7, 1.9])
DEFAULT_SCALES_DISJOINT = SCALES[:len(SCALES) // 2 + 1]
UNSEEN_SCALES_DISJOINT = SCALES[len(SCALES) // 2 + 1:]
ALL_SCALES = SCALES
DEFAULT_SCALES_DICT = {
'standard': DEFAULT_SCALES,
'sparse': DEFAULT_SCALES_SPARSE,
'disjoint': DEFAULT_SCALES_DISJOINT
}
UNSEEN_SCALES_DICT = {
'standard': UNSEEN_SCALES,
'sparse': UNSEEN_SCALES_SPARSE,
'disjoint': UNSEEN_SCALES_DISJOINT
}
def load_many_scaled_mnist( data_dir, image_size=32, train=True,
scales=DEFAULT_SCALES):
"""
Load one MNIST dataset per scale, where every image in a dataset
shares that particular scale.
"""
return [
load_scaled_mnist( data_dir, image_size=image_size,
train=train, scale=scale)
for scale in scales
]
def load_scaled_mnist(data_dir, image_size=32, train=True, scale=1):
"""
Load a MNIST dataset where each image is scaled by the given factor.
"""
scale_image = scale_transform(scale)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
scale_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def scale_transform(scale):
def f(img):
size = img.size
i, j, h, w = get_crop_params(img, scale, ratio=1)
return transforms.functional.resized_crop(
img, i, j, h, w, size, Image.BILINEAR)
return f
def get_crop_params(img, scale, ratio=1):
w = img.size[0] * scale
h = img.size[1] * scale
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
# ----- SHEARED MNIST -----
SHEARS = np.arange(-180, 180, 20)
DEFAULT_SHEARS = SHEARS[0::2]
UNSEEN_SHEARS = SHEARS[1::2]
DEFAULT_SHEARS_SPARSE = np.array([-160, -80, 0, 80, 160])
UNSEEN_SHEARS_SPARSE = np.array([-180, -140, -120, -100, -60, -40, -20, 20, 40, 60, 100, 120, 140])
DEFAULT_SHEARS_DISJOINT = SHEARS[:len(SHEARS) // 2 + 1]
UNSEEN_SHEARS_DISJOINT = SHEARS[len(SHEARS) // 2 + 1:]
ALL_SHEARS = SHEARS
DEFAULT_SHEARS_DICT = {
'standard': DEFAULT_SHEARS,
'sparse': DEFAULT_SHEARS_SPARSE,
'disjoint': DEFAULT_SHEARS_DISJOINT
}
UNSEEN_SHEARS_DICT = {
'standard': UNSEEN_SHEARS,
'sparse': UNSEEN_SHEARS_SPARSE,
'disjoint': UNSEEN_SHEARS_DISJOINT
}
def load_many_sheared_mnist(data_dir, image_size=32, train=True,
shears=DEFAULT_SHEARS):
"""
Load one MNIST dataset per shear, where every image in a dataset
shares that particular shear.
"""
return [
load_sheared_mnist( data_dir, image_size=image_size,
train=train, shear=shear)
for shear in shears
]
def load_sheared_mnist(data_dir, image_size=32, train=True, shear=0):
"""
Load a MNIST dataset where each image has the given shear.
"""
shear_image = shear_transform(shear)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
shear_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def shear_transform(shear):
def f(img):
return transforms.functional.affine(img, 0, (0, 0), 1, shear)
return f
|
[
"me@mikewuis.me"
] |
me@mikewuis.me
|
8c1b2c443b10f64ad81dbb48b78341c22ec527dc
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/discount_info_v3.py
|
3eeec1c5d49a77c443407f9193187e6c6e93816a
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,663
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DiscountInfoV3:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'discount_id': 'str',
'discount_value': 'str',
'discount_type': 'int',
'orders': 'list[OrderV3]'
}
attribute_map = {
'discount_id': 'discount_id',
'discount_value': 'discount_value',
'discount_type': 'discount_type',
'orders': 'orders'
}
def __init__(self, discount_id=None, discount_value=None, discount_type=None, orders=None):
"""DiscountInfoV3 - a model defined in huaweicloud sdk"""
self._discount_id = None
self._discount_value = None
self._discount_type = None
self._orders = None
self.discriminator = None
self.discount_id = discount_id
self.discount_value = discount_value
self.discount_type = discount_type
self.orders = orders
@property
def discount_id(self):
"""Gets the discount_id of this DiscountInfoV3.
        Available discount ID for the order. Pass this value when paying the order to apply the discount.
:return: The discount_id of this DiscountInfoV3.
:rtype: str
"""
return self._discount_id
@discount_id.setter
def discount_id(self, discount_id):
"""Sets the discount_id of this DiscountInfoV3.
        Available discount ID for the order. Pass this value when paying the order to apply the discount.
:param discount_id: The discount_id of this DiscountInfoV3.
:type: str
"""
self._discount_id = discount_id
@property
def discount_value(self):
"""Gets the discount_value of this DiscountInfoV3.
        Discount rate or fixed reduction amount; empty when the discount mode is a flat one-off price.
:return: The discount_value of this DiscountInfoV3.
:rtype: str
"""
return self._discount_value
@discount_value.setter
def discount_value(self, discount_value):
"""Sets the discount_value of this DiscountInfoV3.
        Discount rate or fixed reduction amount; empty when the discount mode is a flat one-off price.
:param discount_value: The discount_value of this DiscountInfoV3.
:type: str
"""
self._discount_value = discount_value
@property
def discount_type(self):
"""Gets the discount_type of this DiscountInfoV3.
        Discount type. Values: 0: promotional discount; 1: contract discount; 2: commercial discount; 3: partner-granted discount; 609: order price-adjustment discount.
:return: The discount_type of this DiscountInfoV3.
:rtype: int
"""
return self._discount_type
@discount_type.setter
def discount_type(self, discount_type):
"""Sets the discount_type of this DiscountInfoV3.
        Discount type. Values: 0: promotional discount; 1: contract discount; 2: commercial discount; 3: partner-granted discount; 609: order price-adjustment discount.
:param discount_type: The discount_type of this DiscountInfoV3.
:type: int
"""
self._discount_type = discount_type
@property
def orders(self):
"""Gets the orders of this DiscountInfoV3.
        List of orders the discount applies to. See Table 3 for details.
:return: The orders of this DiscountInfoV3.
:rtype: list[OrderV3]
"""
return self._orders
@orders.setter
def orders(self, orders):
"""Sets the orders of this DiscountInfoV3.
        List of orders the discount applies to. See Table 3 for details.
:param orders: The orders of this DiscountInfoV3.
:type: list[OrderV3]
"""
self._orders = orders
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DiscountInfoV3):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
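# A minimal usage sketch (all field values are illustrative only):
#
#     info = DiscountInfoV3(discount_id="DISC-001", discount_value="0.9",
#                           discount_type=0, orders=[])
#     info.to_dict()   # {'discount_id': 'DISC-001', 'discount_value': '0.9', ...}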
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
7f19a3de1a2177407921827f9a30e9f957520c64
|
ace2dc6096eb0b7a540f28e57df8459adafad6ed
|
/Algorithmic Toolbox/week3_greedy_algorithms/MaxValueofLoot.py
|
d0556e46829682f74ac9c48922ded067c88a5f6e
|
[] |
no_license
|
tdslivensky/AlgorithmsAndDataStructures
|
6ad2c28204600b1f8f72228c13d29d2c3c9437c9
|
e8b1011ab5210bc52854f911e2a7e41a83b36740
|
refs/heads/master
| 2023-01-11T16:32:49.399654
| 2020-11-13T13:49:18
| 2020-11-13T13:49:18
| 289,050,279
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
def get_optimal_value(capacity, weights, values):
    TotalWeight = capacity
    value = 0
    weightValueIndex = 0
    arr = [0] * len(weights)
    # Rank items by value per unit weight (fractional knapsack greedy)
    for i in range(len(weights)):
        valuePerWeight = float(values[i]) / weights[i]
        arr[i] = [weights[i], values[i], valuePerWeight]
    a = sorted(arr, key=lambda x: float(x[2]), reverse=True)
    # Guard the index so the loop stops once every item has been considered
    while TotalWeight != 0 and weightValueIndex < len(a):
        if len(weights) == 1:
            if TotalWeight > a[weightValueIndex][0]:
                # Capacity exceeds the single item's weight: take all of it
                value = a[weightValueIndex][1]
                return value
            else:
                # Take the fraction of the item that fits
                value += (TotalWeight * a[weightValueIndex][2])
                return value
        elif TotalWeight > a[weightValueIndex][0]:
            # The whole item fits: take it and move to the next best one
            TotalWeight -= a[weightValueIndex][0]
            value += a[weightValueIndex][1]
            weightValueIndex += 1
        else:
            # Only part of the item fits: take the fitting fraction
            value += (TotalWeight * a[weightValueIndex][2])
            TotalWeight = 0
    return value
if __name__ == "__main__":
capacity = 10
values = [500]
weights = [30]
opt_value = get_optimal_value(capacity, weights, values)
print("{:.10f}".format(opt_value))
|
[
"tslivensky@emailatg.com"
] |
tslivensky@emailatg.com
|
4e8fb660e0be3d0885aa9b36d0333165ee44736b
|
a33ee2ee3d67526fa353060b7efe48398d38e8db
|
/demovibes/webview/views.py
|
cc242832d23da2a871237f1c2cc6ad7ce9bc131c
|
[] |
no_license
|
rj76/demovibes-cvgm
|
5666164f57a5458872f6add1eb18620aa0fd5072
|
8c0f5e011baec3c9b732165c9c74dd07c87c290f
|
refs/heads/master
| 2023-06-02T11:41:16.093070
| 2021-06-19T02:28:38
| 2021-06-19T02:28:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86,960
|
py
|
from webview import models as m
from webview import forms as f
from webview import common
from webview.decorators import atomic, cached_method
from openid_provider.models import TrustedRoot
from mybaseview import MyBaseView
from tagging.models import TaggedItem
import tagging.utils
from forum import models as fm
from django.template import Context, loader
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect, HttpResponseNotFound, HttpResponse
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth import logout
from django.shortcuts import get_object_or_404, redirect
from django.template import TemplateDoesNotExist
from django.conf import settings
from django.views.generic.simple import direct_to_template
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.core.cache import cache
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import authenticate, login
from django.db.models import Count, Sum, Avg, Max
from django.db.models import Q as DQ
import logging
import datetime
import j2shim
import hashlib
import re
import random
L = logging.getLogger('webview.views')
class WebView(MyBaseView):
basetemplate = "webview/"
class SongView(WebView):
def initialize(self):
songid = self.kwargs['song_id']
self.context['song'] = self.song = get_object_or_404(m.Song, id=songid)
class SongAddScreenshot(SongView):
def GET(self):
return create_screenshot(self.request, self.song)
class CompilationView(WebView):
def initialize(self):
compid = self.kwargs['compilation_id']
self.context['compilation'] = self.compilation = get_object_or_404(m.Compilation, id=compid)
class CompilationAddScreenshot(CompilationView):
def GET(self):
return create_screenshot(self.request, self.compilation)
class ProfileView(WebView):
def initialize(self):
username = self.kwargs['user']
self.user = get_object_or_404(m.User, username = username)
self.profile = common.get_profile(self.user)
def check_permissions(self):
return self.profile.viewable_by(self.request.user)
class ListByLetter(WebView):
"""
List a model by letter, if given.
    The model needs a "startswith" field, and the letter kwarg must be named "letter".
"""
model = None
alphalist_cache_prefix = "ListByLetter-alphalist-"
desc_function = None
# Support for that should be included in the template
list_title = "List"
letter_url_name = ""
all_url_name = ""
def initialize (self):
query_hexdigest = hashlib.md5 (str(self.get_objects ().query)).hexdigest()
self.__alphalist_cache_key = self.alphalist_cache_prefix + query_hexdigest
alphalist = self.get_alphalist ()
letter = self.kwargs.get ("letter", False)
        if (letter and letter not in alphalist) or letter == '-':
letter = '#'
self.letter = letter
self.context ['letter'] = letter
self.context ['al'] = alphalist
def get_list_title (self):
return self.list_title
def get_objects (self):
return self.model.objects.all()
def get_alphalist (self):
@cached_method (key = self.__alphalist_cache_key, timeout = 3)
def get ():
return map (lambda x: x['startswith'] == '#' and '-' or x['startswith'],
self.get_objects ().distinct().values ('startswith').order_by('startswith'))
return get ()
def set_context(self):
if self.model:
if self.letter:
results = self.get_objects().filter (startswith = self.letter)
else:
results = self.get_objects()
return {'object_list' : results,
'list_title' : self.get_list_title (),
'letter_url_name' : self.letter_url_name,
'all_url_name' : self.all_url_name,
'desc_function' : self.desc_function}
return {}
class AjaxifyView(WebView):
redirect_to = "dv-root"
def GET(self):
if not self.request.is_ajax():
self.redirect(self.redirect_to)
return HttpResponse("")
def make_ajax_return(self):
return HttpResponse("You forgot to define 'make_ajax_return', mate!")
def POST(self):
if not self.request.user.is_authenticated():
if self.request.is_ajax():
return HttpResponse("")
return self.redirect("/account/signin/")
songid = self.request.POST.get("songid")
if songid:
self.song = m.Song.objects.get(id = songid)
self.handle_form(self.request.POST)
if self.request.is_ajax():
return self.make_ajax_return()
self.redirect(self.request.META.get('HTTP_REFERER') or self.redirect_to)
def check_muted(request):
profile = request.user.get_profile()
muted = profile.is_muted()
if muted:
return j2shim.r2r('webview/muted.html', {'muted' : muted}, request)
#-------------------------------------------------------
class ListSmileys(WebView):
template = "smileys.html"
def set_context(self):
return {'smileys': settings.SMILEYS}
class PlaySong(SongView):
template="playsong.html"
def check_permissions(self):
return self.song.downloadable_by(self.request.user)
def set_context(self):
limit = None
if m.CHEROKEE_SECRET:
key = "urlgenlimit_%s" % self.request.user.id
number = m.get_cherokee_limit(self.request.user).get("number",0)
limit = number - cache.get(key, 0)
self.song.log(self.request.user, "Song preview / download")
return {'song': self.song, 'limit': limit}
class AddCompilation(WebView):
template = "add_compilation.html"
login_required = True
forms = [
(f.CreateCompilationForm, "compform"),
]
action = "created"
def pre_view(self):
self.context['songsinput']=""
def save_compilation(self, compdata, songs):
newcf = compdata.save(commit=False)
if not newcf.id:
newcf.created_by = self.request.user
newcf.status = "U"
newcf.last_updated = datetime.datetime.now() # Fixes bug of new compilations not appearing in Recent Updates
newcf.save()
compdata.save_m2m()
artists = []
playtime = 0
newcf.reset_songs()
for index, S in enumerate(songs):
newcf.add_song(S, index)
playtime = playtime + S.get_songlength()
for a in S.get_metadata().artists.all():
if a not in artists:
artists.append(a)
newcf.running_time = playtime
newcf.prod_artists.clear()
for a in artists:
newcf.prod_artists.add(a)
newcf.save()
newcf.log(self.request.user, "Compilation %s" % self.action)
return newcf
def POST(self):
songstr = self.request.POST.get("songsinput", "").split(",")
self.context['songsinput'] = self.request.POST.get("songsinput", "")
songs = []
if songstr:
for S in songstr:
                # Splitting an empty songsinput still yields one entry (u''),
                # so S can be empty here; skip those values.
if S:
songs.append(m.Song.objects.get(id=S))
if self.forms_valid and songs:
newcf = self.save_compilation(self.context["compform"], songs)
self.redirect(newcf)
class EditCompilation(AddCompilation):
staff_required = True
action = "edited"
def form_compform_init(self):
ci = self.kwargs.get("comp_id")
self.c = m.Compilation.objects.get(id=ci)
return {'instance': self.c}
def post_view(self):
if not self.context['songsinput']:
songs = self.c.get_songs()
self.context['songsinput'] = ','.join([ str(s.id) for s in songs ])
def about_pages(request, page):
try:
return direct_to_template(request, template="about/%s.html" % page)
except TemplateDoesNotExist:
return HttpResponseNotFound()
@login_required
def inbox(request):
pms = request.GET.get('type','')
delete = request.GET.get('delete','')
if delete:
try:
delpm = int(delete)
pm = m.PrivateMessage.objects.get(pk = delpm, to = request.user)
except:
return HttpResponseNotFound()
pm.visible = False
pm.save()
if pms == "sent":
mails = m.PrivateMessage.objects.filter(sender = request.user, visible = True)
else:
pms = "received" #to remove injects
mails = m.PrivateMessage.objects.filter(to = request.user, visible = True)
return j2shim.r2r('webview/inbox.html', {'mails' : mails, 'pms': pms}, request=request)
@login_required
def read_pm(request, pm_id):
pm = get_object_or_404(m.PrivateMessage, id = pm_id)
if pm.to == request.user:
pm.unread = False
pm.save()
return j2shim.r2r('webview/view_pm.html', {'pm' : pm}, request=request)
if pm.sender == request.user:
return j2shim.r2r('webview/view_pm.html', {'pm' : pm}, request=request)
return HttpResponseRedirect(reverse('dv-inbox'))
@login_required
def send_pm(request):
r = check_muted(request)
if r:
return r
if request.method == 'POST':
form = f.PmForm(request.POST)
if form.is_valid():
F = form.save(commit=False)
F.sender=request.user
F.save()
m.send_notification("%s sent you a <a href='%s'>message</a> with title '%s'" % (escape(F.sender.username), F.get_absolute_url(), escape(F.subject)), F.to)
return HttpResponseRedirect(reverse('dv-inbox'))
else:
title = request.GET.get('title', "")
to = request.GET.get('to', "")
try:
U = m.User.objects.get(username=to)
except:
U = None
form = f.PmForm(initial= {'to': U, 'subject' : title})
return j2shim.r2r('webview/pm_send.html', {'form' : form}, request)
class addComment(SongView):
"""
Add a comment to a song.
"""
login_required = True
def pre_view(self):
self.redirect(self.song)
def POST(self):
r = check_muted(self.request)
if r: return r
comment = self.request.POST.get("Comment", "").strip()
if comment:
m.SongComment.objects.create(comment = comment, song = self.song, user = self.request.user)
if getattr(settings, "NOTIFY_NEW_SONG_COMMENT", False):
m.send_notification("%s commented on the song <a href='%s'>%s</a>" % (escape(self.request.user.username), self.song.get_absolute_url(), escape(self.song.title)), None, 2)
def site_about(request):
"""
Support for a generic 'About' function
"""
return j2shim.r2r('webview/site-about.html', { }, request)
def chat(request):
"""
Support for a generic 'chat' page
"""
return j2shim.r2r('webview/chat.html', { }, request)
class ListQueue(WebView):
"""
Display the current song, the next songs in queue, and the latest 20 songs in history.
Also provides a way to view DJRandom mood.
"""
template = "queue_list.html"
def set_context(self):
# DJRandom status - - - - - - - --
djrandom_options = m.DJRandomOptions.snapshot ()
mood = djrandom_options.mood
avoid_explicit = djrandom_options.avoid_explicit
mood_form = f.DJRandomMoodForm (initial = {'mood' : mood})
mood_html = mood_form.get_mood_html (set_by = mood.comment)
ae_form = f.DJRandomAvoidExplicitForm (initial = {'avoid_explicit' : avoid_explicit})
ae_html = ae_form.get_avoid_explicit_html (set_by = avoid_explicit.comment)
return {'djrandom_mood_html' : mood_html,
'djrandom_mood_field_html' : mood_form.get_mood_field_html (),
'djrandom_avoid_explicit_html' : ae_html,
'djrandom_avoid_explicit_field_html' : ae_form.get_avoid_explicit_field_html (),
'now_playing' : "",
'history' : common.get_history(),
'queue' : common.get_queue(),
}
# Slightly modified version of the list_song template, to list songs by year
def list_year(request, year_id):
songs = m.Song.active.filter (songmetadata__active = True, songmetadata__release_year = year_id).order_by('title')
params = {
'object_list' : songs,
'year' : year_id,
'letter_url_name' : "dv-year"
}
return j2shim.r2r ('webview/year_list.html', params, request)
def list_song(request, song_id):
song = get_object_or_404 (m.Song, id = song_id)
# Simple queries, it is expected that they are evaluated from inside the template only
# .. otherwise cache is quite useless. Just try to keep it simple here
comps = m.Compilation.objects.filter (songs__id = song.id)
remixes = m.Song.active.filter (songmetadata__active = True, songmetadata__remix_of_id = song.id)
def calc_tag_cloud ():
tags = m.Tag.objects.filter (id__in = song.tags).annotate (count = Count ("items"))
return tagging.utils.calculate_cloud (tags)
params = {
'object' : song,
'vote_range': [1, 2, 3, 4, 5],
'comps' : comps,
'remixes' : remixes,
'related_f': (lambda: m.Song.tagged.related_to (song, num = 5)),
'tags_f': calc_tag_cloud
}
return j2shim.r2r ('webview/song_detail.html', params, request)
# This can probably be made a generic object
def list_screenshot(request, screenshot_id):
screenshot = get_object_or_404(m.Screenshot, id=screenshot_id)
return j2shim.r2r('webview/screenshot_detail.html', { 'object' : screenshot }, request)
class ViewUserFavs(ProfileView):
"""
List the favorites of a user
"""
template = "user_favorites.html"
def set_context(self):
favorites = m.Favorite.objects.filter(user = self.user)
return {'favorites':favorites, 'favuser': self.user}
class MyProfile(WebView):
template = "my_profile.html"
login_required = True
forms = [(f.ProfileForm, "form")]
def initialize(self):
self.profile = common.get_profile(self.request.user)
if self.profile.have_artist():
self.context['lic'] = f.LicenseForm()
self.links = LinkCheck("U", object = self.profile)
def pre_view(self):
rootid = self.request.REQUEST.get("killroot", False)
if rootid and rootid.isdigit():
root = TrustedRoot.objects.get(id=rootid)
if root.openid.user == self.request.user:
root.delete()
return self.redirect("dv-my_profile")
def handle_artistedit(self):
L = f.LicenseForm(self.request.POST)
if L.is_valid():
artist = self.request.user.artist
lic = L.cleaned_data['license']
for song in artist.get_songs():
song.log(self.request.user, "License Mass Change to %s" % lic)
song.license = lic
song.save()
self.redirect("dv-my_profile")
def POST(self):
if self.profile.have_artist() and self.request.POST.get("artistdata"):
self.handle_artistedit()
elif self.forms_valid and self.links.is_valid(self.request.POST):
self.context['form'].save()
self.links.save(self.profile)
self.redirect("dv-my_profile")
def form_form_init(self):
return {'instance': self.profile}
def set_context(self):
return {'profile': self.profile, 'links': self.links}
class ViewProfile(ProfileView):
"""
View a user's profile
"""
template = "view_profile.html"
def set_context(self):
return {'profile': self.profile}
def search(request):
"""
Return the first 40 matches of songs, artists and groups.
"""
if request.method == 'POST' and "Search" in request.POST:
searchterm = request.POST['Search']
result_limit = getattr(settings, 'SEARCH_LIMIT', 40)
if settings.USE_FULLTEXT_SEARCH == True:
users = m.User.objects.filter(username__search = searchterm)[:result_limit]
songs = m.Song.objects.select_related(depth=1).filter(title__search = searchterm)[:result_limit]
            artists = (m.Artist.objects.filter(handle__search = searchterm) | m.Artist.objects.filter(name__search = searchterm))[:result_limit]
groups = m.Group.objects.filter(name__search = searchterm)[:result_limit]
compilations = m.Compilation.objects.filter(name__search = searchterm)[:result_limit]
labels = m.Label.objects.filter(name__search = searchterm)[:result_limit]
else:
users = m.User.objects.filter(username__icontains = searchterm)[:result_limit]
songs = m.Song.objects.select_related(depth=1).filter(title__icontains = searchterm)[:result_limit]
            artists = (m.Artist.objects.filter(handle__icontains = searchterm) | m.Artist.objects.filter(name__icontains = searchterm))[:result_limit]
groups = m.Group.objects.filter(name__icontains = searchterm)[:result_limit]
compilations = m.Compilation.objects.filter(name__icontains = searchterm)[:result_limit]
labels = m.Label.objects.filter(name__icontains = searchterm)[:result_limit]
return j2shim.r2r('webview/search.html', \
{ 'songs' : songs, 'artists' : artists, 'groups' : groups, 'users' : users, 'compilations' : compilations, 'labels' : labels }, \
request=request)
return j2shim.r2r('webview/search.html', {}, request=request)
def show_approvals(request):
"""
    Shows the most recently approved songs in its own window
"""
result_limit = getattr(settings, 'UPLOADED_SONG_COUNT', 150)
songs = m.SongApprovals.objects.order_by('-id')[:result_limit]
return j2shim.r2r('webview/recent_approvals.html', { 'songs': songs , 'settings' : settings }, request=request)
class ListArtists(ListByLetter):
template = "artist_list.html"
model = m.Artist
list_title = "Complete Artist List"
letter_url_name = "dv-artists_letter"
all_url_name = "dv-artists"
class ListGroups(ListByLetter):
template = "group_list.html"
model = m.Group
class ListLabels(ListByLetter):
template = "label_list.html"
model = m.Label
class ListComilations(ListByLetter):
template = "compilation_list.html"
model = m.Compilation
list_title = "Complete Compilation / Album / Production List"
letter_url_name = "dv-compilations_letter"
all_url_name = "dv-compilations"
class ListSongs(ListByLetter):
template = "song_list.html"
model = m.Song
list_title = "List Of Songs"
letter_url_name = "dv-songs_letter"
all_url_name = "dv-songs"
class ListScreenshots(ListByLetter):
template = "screenshot_list.html"
model = m.Screenshot
list_title = "Gallery Of Images"
letter_url_name = "dv-screenshots_letter"
all_url_name = "dv-screenshots"
def get_objects(self):
return self.model.objects.filter(status="A")
class ThemeClass(WebView):
def initialize(self):
themeid = self.kwargs['theme_id']
self.context['theme'] = self.theme = get_object_or_404(m.Theme, id=themeid)
class ThemeInfo(ThemeClass):
template = "theme_details.html"
class ThemeEdit(ThemeClass):
template = "theme_edit.html"
forms = [(f.ThemeForm, "form")]
login_required = True
def form_form_init(self):
return {'instance': self.theme}
def POST(self):
if self.forms_valid and self.request.user == self.theme.creator:
self.context['form'].save()
self.redirect(self.context['theme'])
class ThemeAddImage(ThemeClass):
def GET(self):
if self.request.user == self.theme.creator:
return create_screenshot(self.request, self.theme)
self.redirect("/")
class ThemeList(WebView):
template = "themes_list.html"
def get_objects(self):
q = m.Theme.objects.filter (active=True)
q = q.annotate (user_count = Count("userprofile"))
        # Count users who never selected a theme toward the default theme
themeless = m.Userprofile.objects.filter (theme = None).count ()
if themeless:
default_theme = m.Theme.objects.all().order_by("-default")
if default_theme:
default_theme = default_theme[0]
for t in q:
if t.id == default_theme.id:
t.user_count += themeless
return q
def POST(self):
id = int(self.request.POST.get("theme_id"))
theme = m.Theme.objects.get(id=id)
if self.request.user.is_authenticated():
p = self.request.user.get_profile()
p.theme = theme
p.save()
self.redirect("dv-themelist")
def set_context(self):
return {"themes": self.get_objects() }
@login_required
def log_out(request):
"""
    Show the user a form, and log the user out when the form is POSTed to this address.
"""
if request.method == 'POST':
logout(request)
return HttpResponseRedirect("/")
return j2shim.r2r('webview/logout.html', {}, request=request)
class songHistory(SongView):
"""
List queue history of song
"""
template = "song_history.html"
def set_context(self):
return {'requests': self.song.queue_set.all()}
class songVotes(SongView):
"""
List vote history of song
"""
template = "song_votes.html"
def set_context(self):
return {'votelist': self.song.songvote_set.all()}
class songComments(SongView):
"""
List the comments belonging to a song
"""
template = "song_comments.html"
def set_context(self):
return {'commentlist': self.song.songcomment_set.all()}
def view_compilation(request, comp_id):
"""
Try to view a compilation entry.
"""
permission = request.user.has_perm("webview.make_session")
comp = get_object_or_404(m.Compilation, id=comp_id) # Find it, or return a 404 error
if permission:
sessionform = f.CreateSessionForm()
else:
sessionform = False
if request.method == "POST" and permission:
sessionform = f.CreateSessionForm(request.POST)
if sessionform.is_valid():
desc = sessionform.cleaned_data['description']
playtime = sessionform.cleaned_data['time']
for song in comp.get_songs():
m.Queue.objects.create(song=song, played=False, playtime=playtime, requested_by = request.user, description = desc)
common.get_queue(True)
return redirect("dv-queue")
return j2shim.r2r('webview/compilation.html',
{ 'comp' : comp, 'user' : request.user , 'sessionform': sessionform},
request=request)
class OnelinerHistorySearch(WebView):
template = "oneliner_search.html"
forms = [(f.OnelinerHistory, "form")]
results = []
staff_required = True
def POST(self):
if self.forms_valid:
r = m.Oneliner.objects.all()
data = self.context["form"].cleaned_data
user = data["username"]
if user:
user = m.User.objects.get(username=user)
r = r.filter(user=user)
start = data["start"]
num = data["results"]
self.results = r[start:num+start]
def set_context(self):
return {"results": self.results}
def oneliner(request):
oneliner = m.Oneliner.objects.select_related(depth=1).order_by('-id')[:20]
return j2shim.r2r('webview/oneliner.html', {'oneliner' : oneliner}, \
request=request)
@login_required
def oneliner_submit(request):
"""
Add a text line to the oneliner.
Returns user to referrer position, or to /
"""
    message = request.POST.get('Line', '').strip()
common.add_oneliner(request.user, message)
try:
refer = request.META['HTTP_REFERER']
return HttpResponseRedirect(refer)
except:
return HttpResponseRedirect("/")
@login_required
def list_favorites(request):
"""
Display a user's favorites.
"""
user = request.user
songs = m.Favorite.objects.filter(user=user)
try:
user_profile = m.Userprofile.objects.get(user = user)
use_pages = user_profile.paginate_favorites
except:
# In the event it bails, revert to pages hehe
use_pages = True
if(use_pages):
paginator = Paginator(songs, settings.PAGINATE)
page = int(request.GET.get('page', '1'))
try:
songlist = paginator.page(page)
except (EmptyPage, InvalidPage):
songlist = paginator.page(paginator.num_pages)
return j2shim.r2r('webview/favorites.html', \
{'songs': songlist.object_list, 'page' : page, 'page_range' : paginator.page_range}, \
request=request)
return j2shim.r2r('webview/favorites.html', { 'songs': songs }, request=request)
class QueueSong(AjaxifyView):
redirect_to = "dv-queue"
def handle_form(self, form):
self.r = common.queue_song(self.song, self.request.user)
def make_ajax_return(self):
if self.r:
return HttpResponse("""<span style="display:none">l</span>
<img class="song_tail" src="%slock.png" title="Locked" alt="Locked"/>""" %
settings.MEDIA_URL)
return HttpResponse("")
class ChangeFavorite(AjaxifyView):
redirect_to = "dv-favorites"
def handle_form(self, form):
P = form.get
if P("change") == "remove":
Q = m.Favorite.objects.filter(user = self.request.user, song = self.song)
for x in Q:
x.delete() # For running Favorite.delete() logic
m.send_notification("Song removed from your favorites", self.request.user)
if P("change") == "add":
try:
m.Favorite.objects.create(user = self.request.user, song = self.song)
m.send_notification("Song added to your favorites", self.request.user)
except:
pass
def make_ajax_return(self):
s = "{{ display.favorite(song, user) }}"
c = {'song': self.song, 'user': self.request.user}
return HttpResponse(j2shim.render_string(s, c))
class VoteSong(AjaxifyView):
redirect_to = "dv-root"
@atomic("vote")
def handle_form(self, form):
        self.int_vote = int(form.get("vote") or form.get("ajaxvote") or 0)
if self.int_vote <= 5 and self.int_vote > 0:
self.song.set_vote(self.int_vote, self.request.user)
def make_ajax_return(self):
s = "{{ display.song_vote(song, value) }}"
c = {'song': self.song, 'value': self.int_vote}
return HttpResponse(j2shim.render_string(s, c))
class LinkCheck(object):
def __init__(self, linktype, object = None, status = 0, user = None, add=False):
self.type = linktype
self.add = add
self.verified = []
self.user = user
self.status = status
self.object = object
self.valid = False
self.get_list()
self.title = "External Resources"
def get_link_for(self, o, generic):
if not o or not generic:
return None
bla = ContentType.objects.get_for_model(o)
r = m.GenericLink.objects.filter(content_type__pk=bla.id, object_id=o.id, link=generic)
return r and r[0] or None
def get_list(self):
self.linklist = m.GenericBaseLink.objects.filter(linktype = self.type)
r = []
for x in self.linklist:
val = self.get_link_for(self.object, x)
value=val and val.value or ""
r.append({'link': x, 'value': value, "error": "", "comment": ""})
self.links = r
return self.linklist
def __unicode__(self):
return self.as_table()
def as_table(self):
"""
Print links form as table
"""
return j2shim.r2s('webview/t/linksform.html', \
{'links': self.links, 'title': self.title })
def is_valid(self, postdict):
"""
Check if given links are valid according to given regex
"""
self.valid = True
for entry in self.links:
l = entry['link'] # GenericBaseLink object
key = "LL_%s" % l.id
if postdict.has_key(key):
val = postdict[key].strip()
if val:
ckey = key+"_comment"
comment = postdict.has_key(ckey) and postdict[ckey].strip() or ""
#Fill out dict in case it needs to be returned to user
entry['value'] = val
entry['comment'] = comment
if re.match(l.regex + "$", val):
self.verified.append((l, val, comment)) #Add to approved list
else:
self.valid = False
entry['error'] = "The input did not match expected value"
else:
self.verified.append((l, "", "")) #No value for this link
return self.valid
def save(self, obj):
"""
Save links to database
"""
if self.verified and self.valid:
for l, val, comment in self.verified:
r = self.get_link_for(obj, l)
if val:
if r and not self.add:
r.value = val
r.save()
else:
m.GenericLink.objects.create(
content_object=obj,
value=val,
link=l,
status = self.status,
comment = comment,
user = self.user
)
else:
if r and not self.add:
r.delete()
obj.save() # For caching
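# A minimal usage sketch of LinkCheck inside a view (names are illustrative;
# the real call sites below, e.g. add_songlinks, follow the same pattern):
#
#     links = LinkCheck("S", status=1, user=request.user, add=True)
#     if request.method == "POST" and links.is_valid(request.POST):
#         links.save(song)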
@permission_required('webview.change_songmetadata')
def new_songinfo_list(request):
alink = request.GET.get("alink", False)
status = request.GET.get("status", False)
if alink and status.isdigit():
link = get_object_or_404(m.GenericLink, id=alink)
link.status = int(status)
link.content_object.save()
link.save()
nusonginfo = m.SongMetaData.objects.filter(checked=False).order_by('added') # Oldest info events will be shown first
nulinkinfo = m.GenericLink.objects.filter(status=1)
c = {'metainfo': nusonginfo, 'linkinfo': nulinkinfo}
return j2shim.r2r("webview/list_newsonginfo.html", c, request)
@permission_required('webview.change_songmetadata')
def list_songinfo_for_song(request, song_id):
song = get_object_or_404(m.Song, id=song_id)
metalist = m.SongMetaData.objects.filter(song=song)
c = {'metalist':metalist, 'song': song}
return j2shim.r2r("webview/list_songinfo.html", c, request)
@login_required
def add_songlinks(request, song_id):
song = get_object_or_404(m.Song, id=song_id)
links = LinkCheck("S", status=1, user = request.user, add = True)
if request.method == "POST":
if links.is_valid(request.POST):
links.save(song)
return redirect(song)
c = {'song': song, 'links': links}
return j2shim.r2r("webview/add_songlinks.html", c, request)
@permission_required('webview.change_songmetadata')
def view_songinfo(request, songinfo_id):
meta = get_object_or_404(m.SongMetaData, id=songinfo_id)
post_ok = getattr(settings, 'ADMIN_EMAIL_ON_INFO_APPROVE', False) # Do we send an email on info approve?
if request.method == "POST":
if request.POST.has_key("activate") and request.POST["activate"]:
if post_ok :
if not meta.checked and meta.user:
meta.user.get_profile().send_message(
subject="Song info approved",
message="Your metadata for song [song]%s[/song] is now active :)" % meta.song.id,
sender = request.user
)
meta.song.log(request.user, "Approved song metadata")
meta.set_active()
if request.POST.has_key("deactivate") and request.POST["deactivate"]:
if not meta.checked and meta.user:
meta.user.get_profile().send_message(
subject="Song info not approved",
message="Your metadata for song [song]%s[/song] was not approved :(" % meta.song.id,
sender = request.user
)
meta.checked = True
meta.song.log(request.user, "Rejected metadata %s" % meta.id)
meta.save()
c = {'meta': meta }
return j2shim.r2r("webview/view_songinfo.html", c, request)
#Not done
class editSonginfo(SongView):
template = "edit_songinfo.html"
forms = [f.EditSongMetadataForm, "form"]
login_required = True
def form_form_init(self):
if self.method == "POST":
meta = m.SongMetaData(song=self.song, user=self.request.user)
else:
meta = self.song.get_metadata()
meta.comment = ""
return {'instance': meta}
def POST(self):
if self.forms_valid:
self.context['form'].save()
self.redirect(self.context['song'])
@login_required
def edit_songinfo(request, song_id):
song = get_object_or_404(m.Song, id=song_id)
meta = song.get_metadata()
meta.comment = ""
form2 = False
if (request.user.get_profile().have_artist() and request.user.artist in meta.artists.all()) or (request.user.is_staff):
form2 = f.SongLicenseForm(instance=song)
if request.method == "POST":
meta = m.SongMetaData(song=song, user=request.user)
if form2 and request.POST.get("special") == "licchange":
form2 = f.SongLicenseForm(request.POST, instance=song)
if form2.is_valid():
s = form2.save()
song.log(request.user, "Changed song license to %s" % s.license)
return redirect(song)
else:
form = f.EditSongMetadataForm(request.POST, instance=meta)
if form.is_valid():
form.save()
return redirect(song)
else:
form = f.EditSongMetadataForm(instance=meta)
c = {'form': form, 'song': song, 'form2': form2}
return j2shim.r2r("webview/edit_songinfo.html", c, request)
@login_required
def upload_song(request, artist_id):
# Check to see if Uploading is currently disabled
DisableUploads = getattr(settings, 'DISABLE_UPLOADS', False)
if DisableUploads:
# Uploads are currently disabled in the system
return HttpResponseRedirect(reverse('dv-queue'))
artist = get_object_or_404(m.Artist, id=artist_id)
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_UPLOADS', 0)
artist_auto_approve = getattr(settings, 'ARTIST_AUTO_APPROVE_UPLOADS', 1)
links = LinkCheck("S", user = request.user)
# Quick test to see if the artist is currently active. If not, bounce
# To the current queue!
if artist.status != 'A':
return HttpResponseRedirect(reverse('dv-queue'))
if request.method == 'POST':
if artist_auto_approve and artist.link_to_user == request.user:
# Auto Approved Song. Set Active, Add to Recent Uploads list
status = 'A'
else:
status = 'U'
# Check to see if moderation settings allow for the check
if request.user.is_staff and auto_approve == 1:
# Automatically approved due to Moderator status
status = 'A'
a = m.Song(uploader = request.user, status = status)
form = f.UploadForm(request.POST, request.FILES, instance = a)
infoform = f.SongMetadataForm(request.POST)
if links.is_valid(request.POST) and form.is_valid() and infoform.is_valid():
new_song = form.save(commit=False)
new_song.save()
songinfo = infoform.save(commit=False)
songinfo.user = request.user
songinfo.song = new_song
songinfo.checked = True
songinfo.save()
infoform.save_m2m()
form.save_m2m()
songinfo.artists.add(artist)
songinfo.set_active()
links.save(new_song)
if(new_song.status == 'A'):
# Auto Approved!
try:
# If the song entry exists, we shouldn't care
exist = m.SongApprovals.objects.get(song = new_song)
except:
# Should throw when the song isn't found in the DB
Q = m.SongApprovals(song = new_song, approved_by=request.user, uploaded_by=request.user)
Q.save()
return HttpResponseRedirect(new_song.get_absolute_url())
else:
form = f.UploadForm()
infoform = f.SongMetadataForm()
return j2shim.r2r('webview/upload.html', \
{'form' : form, 'infoform': infoform, 'artist' : artist, 'links': links }, \
request=request)
@permission_required('webview.change_song')
def activate_upload(request):
if "song" in request.GET and "status" in request.GET:
songid = int(request.GET['song'])
status = request.GET['status']
song = m.Song.objects.get(id=songid)
url = m.Site.objects.get_current()
if status == 'A':
stat = "Accepted"
song.status = "A"
song.log(request.user, "Approved song")
if status == 'R':
stat = "Rejected"
song.status = 'R'
song.log(request.user, "Rejected song")
        # This used to be proprietary; it is now a template. AAK
mail_tpl = loader.get_template('webview/email/song_approval.txt')
c = Context({
'songid' : songid,
'song' : song,
'site' : m.Site.objects.get_current(),
'stat' : stat,
'url' : url,
})
song.save()
# Only add if song is approved! Modified to check to see if song exists first!
        # There is probably a better way of doing this crude check! AAK
if(status == 'A'):
try:
# If the song entry exists, we shouldn't care
exist = m.SongApprovals.objects.get(song = song)
except:
# Should throw when the song isn't found in the DB
Q = m.SongApprovals(song=song, approved_by=request.user, uploaded_by=song.uploader)
Q.save()
if getattr(settings, "NOTIFY_NEW_SONG_APPROVED", False):
m.send_notification("Song <a href='%s'>%s</a> was accepted and is now avaliable for queuing!" % (
song.get_absolute_url(),
escape(song.title),
), None, 2)
        if (song.uploader.get_profile().pm_accepted_upload and status == 'A') or status == 'R':
song.uploader.get_profile().send_message(
sender = request.user,
message = mail_tpl.render(c),
subject = "Song Upload Status Changed To: %s" % stat
)
songs = m.Song.objects.filter(status = "U").order_by('added')
return j2shim.r2r('webview/uploaded_songs.html', {'songs' : songs}, request=request)
def showRecentChanges(request):
# Get some default stat values
artist_limit = getattr(settings, 'RECENT_ARTIST_VIEW_LIMIT', 20)
song_limit = getattr(settings, 'RECENT_SONG_VIEW_LIMIT', 20)
label_limit = getattr(settings, 'RECENT_LABEL_VIEW_LIMIT', 20)
group_limit = getattr(settings, 'RECENT_GROUP_VIEW_LIMIT', 20)
comp_limit = getattr(settings, 'RECENT_COMP_VIEW_LIMIT', 20)
# Make a list of stuff needed for the stats page
songlist = m.Song.objects.order_by('-songmetadata__added')[:song_limit]
artistlist = m.Artist.objects.order_by('-last_updated')[:artist_limit]
labellist = m.Label.objects.order_by('-last_updated')[:label_limit]
grouplist = m.Group.objects.order_by('-last_updated')[:group_limit]
complist = m.Compilation.objects.order_by('-last_updated')[:comp_limit]
# And now return this as a template. default page cache is 5 minutes, which is ample enough
# To show real changes, without stressing out the SQL loads
return j2shim.r2r('webview/recent_changes.html', {'songs' : songlist, 'artists' : artistlist, 'groups' : grouplist,
'labels' : labellist, 'compilations' : complist}, request=request)
class UsersOverview (WebView):
template = "users_overview.html"
def set_context (self):
limit = 50
country_stats_q = m.User.objects.values ("userprofile__country")
country_stats_q = country_stats_q.annotate (count = Count("pk"))
country_stats_q = country_stats_q.order_by ('-count', "userprofile__country")
by_votes_q = m.User.objects.values ("username", 'userprofile__country')
by_votes_q = by_votes_q.annotate (count = Count("songvote"), avg = Avg('songvote__vote'))
by_votes_q = by_votes_q.order_by ('-count')
by_votes_q = by_votes_q [:limit]
by_oneliner_q = m.User.objects.values ("username", 'userprofile__country')
by_oneliner_q = by_oneliner_q.annotate (count = Count("oneliner"))
by_oneliner_q = by_oneliner_q.order_by ('-count')
by_oneliner_q = by_oneliner_q [:limit]
by_uploads_q = m.SongApprovals.objects.values ("uploaded_by__username", 'uploaded_by__userprofile__country')
by_uploads_q = by_uploads_q.annotate (count = Count("pk"))
by_uploads_q = by_uploads_q.order_by ('-count')
by_uploads_q = by_uploads_q [:limit]
by_tagging_q = m.TagHistory.objects.values ("user__username", 'user__userprofile__country')
by_tagging_q = by_tagging_q.annotate (count = Count("pk"))
by_tagging_q = by_tagging_q.order_by ('-count')
by_tagging_q = by_tagging_q [:limit]
by_requester_q = m.Queue.objects.values ("requested_by__username", 'requested_by__userprofile__country')
by_requester_q = by_requester_q.annotate (count = Count("pk"), avg = Avg ("song__rating"))
by_requester_q = by_requester_q.order_by ('-count')
by_requester_q = by_requester_q [:limit]
by_comments_q = m.SongComment.objects.values ("user__username", 'user__userprofile__country')
by_comments_q = by_comments_q.annotate (count = Count("pk"))
by_comments_q = by_comments_q.order_by ('-count')
by_comments_q = by_comments_q [:limit]
by_posts_q = fm.Post.objects.values ("author__username", 'author__userprofile__country')
by_posts_q = by_posts_q.annotate (count = Count("pk"))
by_posts_q = by_posts_q.order_by ('-count')
by_posts_q = by_posts_q [:limit]
        # We can return querysets since they are lazy; evaluation is expected to be cached at the HTML level
return {'by_votes_q' : by_votes_q,
'by_oneliner_q' : by_oneliner_q,
'by_requester_q' : by_requester_q,
'by_comments_q' : by_comments_q,
'by_posts_q' : by_posts_q,
'by_tagging_q' : by_tagging_q,
'by_uploads_q' : by_uploads_q,
'country_stats_q' : country_stats_q}
class RadioOverview (WebView):
    # This is meant to be cached both at the HTML level (to avoid rendering
    # overhead) and at the code level (to avoid set_context overhead)
template = "radio_overview.html"
@cached_method (key = "RadioOverview-get_total_played_length", timeout = 60)
def get_total_played (self):
q = m.Song.active.extra (
select = {"total_played_length" : "sum(song_length * times_played)",
"total_times_played" : "sum(times_played)"})
return list (q.values ("total_played_length", "total_times_played")) [0]
@cached_method (key = "RadioOverview-stats_by_status", timeout = 60)
def list_stats_by_status (self):
return self.__list_grouped_by (m.Song.objects, 'status')
@cached_method (key = "RadioOverview-votes_by_status", timeout = 60)
def list_votes_stats (self):
return self.__list_grouped_by (m.Song.active, 'rating_votes', limit = 6)
@cached_method (key = "RadioOverview-source_stats", timeout = 60)
def list_source_stats (self):
type_by_id = {None : m.Struct (title = "----------------")}
for type in m.SongType.objects.all():
type_by_id [type.id] = type
stats = self.__list_grouped_by (m.Song.active.filter (songmetadata__active = True),
'songmetadata__type')
for stat in stats:
stat ['source'] = type_by_id [stat['songmetadata__type']].title
return stats
@cached_method (key = "RadioOverview-country_stats", timeout = 86400)
def list_country_stats (self):
return self.__list_grouped_by (
m.Song.active.filter (songmetadata__active = True),
'songmetadata__artists__home_country',
order_by = ['-count', 'songmetadata__artists__home_country'])
@cached_method (key = "RadioOverview-set_context", timeout = 60)
def set_context (self):
# Overview
stats_by_status = self.list_stats_by_status ()
total_songs = 0
total_length = 0
unlocked_songs = 0
unlocked_length = 0
status_dict = dict (m.Song.STATUS_CHOICES)
for stat in stats_by_status:
stat ['status'] = status_dict [stat ['status']]
total_songs += stat ['count']
total_length += stat ['total_playtime']
unlocked_songs += stat ['unlocked_count']
unlocked_length += stat ['unlocked_playtime']
# Result
return {'vote_stats' : self.list_votes_stats (),
"stats_by_status" : stats_by_status,
"source_stats" : self.list_source_stats (),
"country_stats" : self.list_country_stats (),
'total_length' : total_length,
'total_songs' : total_songs,
'unlocked_length' : unlocked_length,
'unlocked_songs' : unlocked_songs,
'total_played' : self.get_total_played ()}
def __list_grouped_by (self, qmanager, field, limit = None, order_by = None):
# It is hard or impossible to write that with current django without issuing two queries
# because django doesn't support expressions in annotations...
def qfiltered (f = None):
q = qmanager
if f:
q = q.filter (f)
q = q.values (field)
q = q.annotate (count = Count("pk"), total_playtime = Sum('song_length'))
if order_by:
q = q.order_by (*order_by)
else:
q = q.order_by (field)
if limit:
return q [:limit]
else:
return q.all ()
# Get total
by_field = {}
stats = qfiltered ()
for stat in stats:
by_field [stat[field]] = stat
stat ['unlocked_count'] = 0
stat ['unlocked_playtime'] = 0
# Mix-in playable stats
for pstat in qfiltered (m.Song.unlocked_condition()):
fieldv = pstat [field]
if fieldv in by_field:
stat = by_field [fieldv]
stat ['unlocked_count'] = pstat ['count']
stat ['unlocked_playtime'] = pstat ['total_playtime']
# Force evaluation, otherwise django's cache doesn't cache it at all! :E
return list (stats)
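        # Each stat dict returned here is shaped roughly like (values illustrative):
        #   {<field>: <group value>, 'count': 1200, 'total_playtime': 360000,
        #    'unlocked_count': 1100, 'unlocked_playtime': 350000}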
class RadioStatus(WebView):
template = "stat_songs.html"
def list_favorites(self):
return m.Song.objects.order_by('-num_favorited')
def list_voted(self):
limit = getattr(settings, "RADIO_STATUS_VOTED_MIN_VOTES", 1)
return m.Song.objects.filter(rating_votes__gt = limit - 1).order_by('-rating','-rating_votes')
def list_leastvotes (self):
return m.Song.objects.filter (m.Song.unlocked_condition ()).order_by ('rating_votes', '?')[:100]
def list_forgotten (self):
q = m.Song.active.filter (m.Song.unlocked_condition ())
q = q.annotate (last_requested = Max("queue__requested"))
q = q.order_by ('last_requested')
q = q[:100]
return q
def list_random(self):
max_id = m.Song.objects.order_by('-id')[0].id
max_songs = m.Song.objects.filter(status="A").count()
num_songs = 100
num_songs = num_songs < max_songs and num_songs or max_songs
songlist = []
r_done = []
r = random.randint(0, max_id+1)
while len(songlist) < num_songs:
r_list = []
curr_count = (num_songs - len(songlist) + 2)
for x in range(curr_count):
while r in r_done:
r = random.randint(0, max_id+1)
r_list.append(r)
r_done.extend(r_list)
songlist.extend([s for s in m.Song.objects.filter(id__in=r_list, status="A")])
return songlist
def list_mostvotes(self):
return m.Song.objects.order_by('-rating_votes')
def list_queued2(self):
return m.Song.objects.filter(m.Song.unlocked_condition()).order_by('times_played', 'locked_until')
def list_queued(self):
return m.Song.objects.filter(status="A").order_by('-times_played')
def initialize(self):
self.stats = {
'random': ("A selection of random songs from the database!",
"rating_votes",
"# Votes",
self.list_random),
'leastvotes': ("Songs with the least number of votes in the database.",
"rating_votes",
"# Votes",
self.list_leastvotes),
'forgotten': ("Songs which have not been played in a long time (or not al all).",
"times_played",
"# Plays",
self.list_forgotten),
'favorites': ("Songs which appear on more users favourite lists.",
"num_favorited",
"# Faves",
self.list_favorites),
'voted': ("The highest rated songs in the database.",
"rating",
"Rating",
self.list_voted),
'queued': ("The most played songs in the database.",
"times_played",
"# Plays",
self.list_queued),
'unplayed': ("The least played songs in the database.",
"times_played",
"# Plays",
self.list_queued2),
'mostvotes': ("Songs with the highest number of votes cast.",
"rating_votes",
"# Votes",
self.list_mostvotes),
}
self.stattype = self.kwargs.get("stattype", "")
def set_context(self):
if self.stattype in self.stats.keys():
title, stat, name, songs = self.stats[self.stattype]
return {'songs': songs()[:100],
'title': title,
'numsongs': 100,
'stat': stat,
'name': name}
self.template = "radio_status.html"
return {'keys' : self.stats}
class HelpusWithArtists (ListArtists):
list_title = "Artists with incorrect/missing information"
letter_url_name = "dv-helpus-artist_letter"
all_url_name = "dv-helpus-artist"
condition = ~DQ (home_country__in = m.country_codes2, status = 'A')
condition |= DQ (artist_pic = '', status = 'A')
def get_objects (self):
return self.model.objects.filter (self.condition)
def desc_function (self, artist):
"""Describe what is wrong with an artist."""
problems = []
if artist.status == 'A':
country_lc = artist.home_country.lower()
if country_lc == "":
problems.append (_("no country"))
elif country_lc not in m.country_codes2:
problems.append (_("unknown country (" + artist.home_country + ")"))
if artist.artist_pic == "":
problems.append (_("no picture"))
if problems:
problems = ", ".join (problems)
problems = problems[0].upper() + problems[1:]
return " - " + problems + "."
else:
# WTF? why are we here then?
return ""
class HelpusWithSongs (ListSongs):
list_title = "Songs with problems"
letter_url_name = "dv-helpus-song_letter"
all_url_name = "dv-helpus-song"
# Kaput
condition = DQ (status = 'K')
# Active but no compilation
condition |= DQ (status = 'A',
compilationsonglist = None,
songmetadata__active = True,
songmetadata__type__compilation_expected = True)
# No source (song type)
condition |= DQ (status = 'A',
songmetadata__type = None,
songmetadata__active = True)
def get_objects (self):
q = self.model.objects.filter (self.condition)
q = q.annotate (comps_count = Count("compilationsonglist__pk"))
        # Ugly, but until we are on Django 1.4 we can't do better
q = q.extra (select = {'compilation_expected' : '`webview_songtype`.`compilation_expected`',
'songtype' : '`webview_songtype`.`id`'})
return q
def desc_function (self, song):
"""Describe what is wrong with an artist."""
problems = []
if song.status == 'K':
problems.append ("bad status")
if song.compilation_expected and song.comps_count == 0 and song.status == 'A':
problems.append ("no compilations")
if song.status == 'A' and song.songtype == None:
problems.append ("no source")
if problems:
problems = ", ".join (problems)
problems = problems[0].upper() + problems[1:]
return problems
else:
# WTF? why are we here then?
return ""
class HelpusWithComps (ListComilations):
list_title = "Compilations with problems"
letter_url_name = "dv-helpus-comp_letter"
all_url_name = "dv-helpus-comp"
def get_objects (self):
# That is the only way.. ;( Django's contenttype magic inserts content_type_id=29 into where clause
# making it impossible to filter screenshots=None, so we have to use inner join
active_and_with_image_q = self.model.objects.filter (status = 'A', screenshots__image__status = 'A')
# Active and without an image
condition = DQ (status = 'A') & ~DQ (pk__in = active_and_with_image_q)
# Active and no songs (messed up via admin interface or songs are deleted...)
condition |= DQ (status = 'A', songs = None)
q = self.model.objects.filter (condition)
q = q.annotate (screenshots_count = Count("screenshots"),
songs_count = Count ("songs"))
return q
def desc_function (self, comp):
"""Describe what is wrong with the compilation."""
problems = []
if comp.status == 'A':
if comp.screenshots_count == 0:
problems.append (_("no cover image"))
if comp.songs_count == 0:
problems.append (_("no songs"))
if problems:
problems = ", ".join (problems)
problems = problems[0].upper() + problems[1:]
return " - " + problems + "."
else:
# WTF? why are we here then?
return ""
class HelpusWithScreenshots (ListScreenshots):
list_title = "Images with problems"
letter_url_name = "dv-helpus-screenshot_letter"
all_url_name = "dv-helpus-screenshot"
# Connected to nothing
condition = DQ (status = 'A', screenshotobjectlink = None)
def get_objects (self):
q = self.model.objects.filter (self.condition)
q = q.annotate (slink_count = Count("screenshotobjectlink"))
return q
def desc_function (self, scr):
"""Describe what is wrong with the screenshot."""
problems = []
if scr.status == 'A':
if scr.slink_count == 0:
problems.append (_("connected to nothing"))
if problems:
problems = ", ".join (problems)
problems = problems[0].upper() + problems[1:]
return " - " + problems + "."
else:
# WTF? why are we here then?
return ""
class TagCloud(WebView):
template = "tag_cloud.html"
cache_key = "tag_cloud"
cache_duration = 24*60*60
def get_cache_key(self):
tag_id = cache.get("tagver", 0)
key = "tag_cloud_%s" % tag_id
return key
def set_cached_context(self):
min_count = getattr(settings, 'TAG_CLOUD_MIN_COUNT', 1)
tags = m.Song.tags.cloud(min_count=min_count)
return {'tags': tags}
class MuteOneliner(WebView):
template = "oneliner_mute.html"
forms = [
(f.MuteOnelinerForm, "banform"),
]
def check_permissions(self):
return self.request.user.has_perm("webview.add_mute_oneliner")
def POST(self):
if self.forms_valid:
data = self.context["banform"].cleaned_data
user = data["username"]
endtime = datetime.datetime.now() + datetime.timedelta(minutes=data["mute_minutes"])
entry = m.OnelinerMuted(
user=user,
muted_to=endtime,
reason=data["reason"],
added_by=self.request.user,
details=data["details"],
)
if data["ban_ip"]:
profile = user.get_profile()
if profile.last_ip:
entry.ip_ban = profile.last_ip
entry.save()
if getattr(m.settings, "BAN_ANNOUNCE", False):
m.send_notification("User '%s' have been silenced for %s minutes. Reason: %s" % (user.username,data["mute_minutes"], data["reason"]), None)
user.get_profile().log(self.request.user, "Silenced for %s minutes. Reason: %s" % (data["mute_minutes"], data["reason"]))
self.redirect("dv-muteoneliner")
def set_context(self):
active = m.OnelinerMuted.objects.filter(muted_to__gt=datetime.datetime.now())
history = m.OnelinerMuted.objects.filter(muted_to__lt=datetime.datetime.now())[:10]
return {"active": active, "history": history}
class TagDetail(WebView):
template = "tag_detail.html"
cache_duration = 24 * 60 * 60
def get_cache_key(self):
tag_id = cache.get ("tagver", 0)
key = "tagdetail_%s_%s" % (self.kwargs.get("tag", ""), tag_id)
return hashlib.md5(key).hexdigest()
def set_cached_context(self):
tag = self.kwargs.get ("tag", "")
songs = TaggedItem.objects.get_by_model (m.Song, tag)
related = m.quickly_get_related_tags (songs,
exclude_tags_str = tag,
limit_to_model = m.Song,
count = True)
related = tagging.utils.calculate_cloud (related)
return {'songs' : songs,
'related' : related,
'tag' : tag}
class TagEdit(SongView):
login_required=True
template = "tag_edit.html"
def POST(self):
t = self.request.POST.get('tags', "")
self.song.tags = re.sub(r'[^a-zA-Z0-9!_\-?& ]+', '', t)
self.song.log(self.request.user, "Edited tags")
self.song.save() # For updating the "last changed" value
m.TagHistory.objects.create(user=self.request.user, song=self.song, tags = self.request.POST['tags'])
try:
cache.incr("tagver")
except:
cache.set("tagver", 1)
return self.redirect(self.song)
def set_context(self):
tags = tagging.utils.edit_string_for_tags(self.song.tags)
changes = m.TagHistory.objects.filter(song=self.song).order_by('-id')[:5]
return {'tags': tags, 'changes': changes}
@login_required
def create_artist(request):
"""
    Simple form to allow registered users to create a new artist entry.
"""
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_ARTIST', 0)
links = LinkCheck("A")
if request.method == 'POST':
# Check to see if moderation settings allow for the check
if request.user.is_staff and auto_approve == 1:
# Automatically approved due to Moderator status
status = 'A'
else:
status = 'U'
a = m.Artist(created_by = request.user, status = status)
form = f.CreateArtistForm(request.POST, request.FILES, instance = a)
if form.is_valid() and links.is_valid(request.POST):
new_artist = form.save(commit=False)
new_artist.save()
form.save_m2m()
links.save(new_artist)
return HttpResponseRedirect(new_artist.get_absolute_url())
else:
form = f.CreateArtistForm()
return j2shim.r2r('webview/create_artist.html', \
{'form' : form, 'links': links }, \
request=request)
@permission_required('webview.change_artist')
def activate_artists(request):
"""
Shows the most recently added artists who have a 'U' status in their upload marker
"""
if "artist" in request.GET and "status" in request.GET:
artistid = int(request.GET['artist'])
status = request.GET['status']
artist = m.Artist.objects.get(id=artistid)
url = m.Site.objects.get_current() # Pull this into a variable
if status == 'A':
stat = "Accepted"
artist.log(request.user, "Activated artist")
artist.status = "A"
if status == 'R':
stat = "Rejected"
artist.log(request.user, "Rejected artist")
artist.status = 'R'
# Prepare a mail template to inform user of the status of their request
mail_tpl = loader.get_template('webview/email/artist_approval.txt')
c = Context({
'artist' : artist,
'site' : m.Site.objects.get_current(),
'stat' : stat,
'url' : url,
})
artist.save()
# Send the email to inform the user of their request status
        if (artist.created_by.get_profile().email_on_artist_add and status == 'A') or status == 'R':
artist.created_by.get_profile().send_message(sender = request.user,
message = mail_tpl.render(c),
subject = u"Artist %s : %s" % (artist.handle, stat)
)
artists = m.Artist.objects.filter(status = "U").order_by('last_updated')
return j2shim.r2r('webview/pending_artists.html', { 'artists': artists }, request=request)
@login_required
def create_group(request):
"""
    Simple form to allow registered users to create a new group entry.
"""
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_GROUP', 0)
links = LinkCheck("G")
    if request.method == 'POST':
        # Check to see if moderation settings allow for the check
        if request.user.is_staff and auto_approve == 1:
            # Automatically approved due to Moderator status
            status = 'A'
        else:
            status = 'U'
        g = m.Group(created_by = request.user, status = status)
        form = f.CreateGroupForm(request.POST, request.FILES, instance = g)
        if form.is_valid() and links.is_valid(request.POST):
            new_group = form.save(commit=False)
            new_group.save()
            form.save_m2m()
            links.save(new_group)
            return HttpResponseRedirect(new_group.get_absolute_url())
else:
form = f.CreateGroupForm()
return j2shim.r2r('webview/create_group.html', \
{'form' : form, 'links': links }, \
request=request)
@permission_required('webview.change_group')
def activate_groups(request):
"""
    Shows the most recently added groups that still have a 'U' (unapproved) status in their upload marker
"""
if "group" in request.GET and "status" in request.GET:
groupid = int(request.GET['group'])
status = request.GET['status']
group = m.Group.objects.get(id=groupid)
if status == 'A':
stat = "Accepted"
group.status = "A"
if status == 'R':
stat = "Rejected"
group.status = 'R'
# Prepare a mail template to inform user of the status of their request
mail_tpl = loader.get_template('webview/email/group_approval.txt')
c = Context({
'group' : group,
'site' : m.Site.objects.get_current(),
'stat' : stat,
})
group.save()
# Send the email to inform the user of their request status
        if group.created_by.get_profile().email_on_group_add and (status == 'A' or status == 'R'):
group.created_by.get_profile().send_message(
sender = request.user,
message = mail_tpl.render(c),
subject = "Group Request Status Changed To: %s" % stat
)
groups = m.Group.objects.filter(status = "U").order_by('last_updated')
return j2shim.r2r('webview/pending_groups.html', { 'groups': groups }, request=request)
@permission_required('webview.change_compilation')
def activate_compilations(request):
"""
    Shows the most recently added compilations that still have a 'U' (unapproved) status in their upload marker
"""
if "compilation" in request.GET and "status" in request.GET:
compilationid = int(request.GET['compilation'])
status = request.GET['status']
compilation = m.Compilation.objects.get(id=compilationid)
if status == 'A':
stat = "Accepted"
compilation.status = "A"
if status == 'R':
stat = "Rejected"
compilation.status = 'R'
# Prepare a mail template to inform user of the status of their request
mail_tpl = loader.get_template('webview/email/compilation_approval.txt')
c = Context({
'compilation' : compilation,
'site' : m.Site.objects.get_current(),
'stat' : stat,
})
compilation.save()
# Send the email to inform the user of their request status
        if compilation.created_by.get_profile().email_on_group_add and (status == 'A' or status == 'R'):
compilation.created_by.get_profile().send_message(
sender = request.user,
message = mail_tpl.render(c),
subject = "Compilation Request Status Changed To: %s" % stat
)
compilations = m.Compilation.objects.filter(status = "U").order_by('last_updated')
return j2shim.r2r('webview/pending_compilations.html', { 'compilations': compilations }, request=request)
@login_required
def create_label(request):
"""
    Simple form to allow registered users to create a new label entry.
"""
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_LABEL', 0)
links = LinkCheck("L")
    if request.method == 'POST':
        # Check to see if moderation settings allow for the check
        if request.user.is_staff and auto_approve == 1:
            # Automatically approved due to Moderator status
            status = 'A'
        else:
            status = 'U'
        l = m.Label(created_by = request.user, status = status)
        form = f.CreateLabelForm(request.POST, request.FILES, instance = l)
        if form.is_valid() and links.is_valid(request.POST):
            new_label = form.save(commit=False)
            new_label.save()
            form.save_m2m()
            links.save(new_label)
            return HttpResponseRedirect(new_label.get_absolute_url())
else:
form = f.CreateLabelForm()
return j2shim.r2r('webview/create_label.html', \
{'form' : form, 'links': links }, \
request=request)
@permission_required('webview.change_label')
def activate_labels(request):
"""
    Shows the most recently added labels that still have a 'U' (unapproved) status in their upload marker
"""
if "label" in request.GET and "status" in request.GET:
labelid = int(request.GET['label'])
status = request.GET['status']
this_label = m.Label.objects.get(id=labelid)
if status == 'A':
stat = "Accepted"
this_label.status = "A"
if status == 'R':
stat = "Rejected"
this_label.status = 'R'
# Prepare a mail template to inform user of the status of their request
mail_tpl = loader.get_template('webview/email/label_approval.txt')
c = Context({
'label' : this_label,
'site' : m.Site.objects.get_current(),
'stat' : stat,
})
this_label.save()
# Send the email to inform the user of their request status
        if this_label.created_by.get_profile().email_on_group_add and (status == 'A' or status == 'R'):
this_label.created_by.get_profile().send_message(
sender = request.user,
message = mail_tpl.render(c),
subject = "Label Request Status Changed To: %s" % stat
)
labels = m.Label.objects.filter(status = "U").order_by('last_updated')
return j2shim.r2r('webview/pending_labels.html', { 'labels': labels }, request=request)
@login_required
def create_screenshot(request, obj=None):
"""
    Simple form to allow registered users to create a new screenshot entry.
"""
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_SCREENSHOT', 0)
error=""
    if request.method == 'POST':
        # Check to see if moderation settings allow for the check
        if request.user.is_staff and auto_approve == 1:
            # Automatically approved due to Moderator status
            status = 'A'
        else:
            status = 'U'
        new_screenshot = None
        l = m.Screenshot(added_by = request.user, status = status)
        form = f.CreateScreenshotForm(request.POST, request.FILES, instance = l)
        form2 = f.GenericInfoForm(request.POST)
        if form2.is_valid():
            connectval = request.POST.get("connectto")
            ct = form2.cleaned_data['content_type']
            id = form2.cleaned_data['object_id']
            # User links existing screenshot instead of creating new.
            if connectval:
                try:
                    if connectval.isdigit():
                        new_screenshot = m.Screenshot.objects.get(id=connectval)
                    else:
                        new_screenshot = m.Screenshot.objects.get(name=connectval)
                    if not new_screenshot.is_active():
                        error = "'{0}' is not active! Get an admin to approve it.".format(connectval)
                        new_screenshot = None
                    else:
                        m.ScreenshotObjectLink.objects.create(content_type=ct, object_id=id, image=new_screenshot)
                        new_screenshot.save()
                except m.Screenshot.DoesNotExist:
                    error = "Screenshot not found!"
            if not connectval and form.is_valid():
                new_screenshot = form.save(commit=False)
                new_screenshot.save()
                form.save_m2m()
                m.ScreenshotObjectLink.objects.create(content_type=ct, object_id=id, image=new_screenshot)
                # Generate a request for the thumbnail
                new_screenshot.create_thumbnail()
                new_screenshot.save()
        # Leave this place :)
        if new_screenshot:
            return HttpResponseRedirect(new_screenshot.get_absolute_url())
else:
if obj:
ct = ContentType.objects.get_for_model(obj.__class__)
i = {'content_type': ct, 'object_id': obj.id }
else:
i = {}
form = f.CreateScreenshotForm()
form2 = f.GenericInfoForm(initial=i)
return j2shim.r2r('webview/create_screenshot.html', \
{'form' : form, 'form2': form2, "obj":obj, 'error':error }, \
request=request)
@permission_required('webview.change_screenshot')
def activate_screenshots(request):
"""
    Shows the most recently added screenshots that still have a 'U' (unapproved) status in their upload marker
"""
if "screenshot" in request.GET and "status" in request.GET:
screenshotid = int(request.GET['screenshot'])
status = request.GET['status']
this_screenshot = m.Screenshot.objects.get(id=screenshotid)
url = m.Site.objects.get_current()
if status == 'A':
stat = "Accepted"
this_screenshot.status = "A"
if status == 'R':
stat = "Rejected"
this_screenshot.status = 'R'
# Prepare a mail template to inform user of the status of their request
mail_tpl = loader.get_template('webview/email/screenshot_approval.txt')
c = Context({
'screenshot' : this_screenshot,
'site' : m.Site.objects.get_current(),
'stat' : stat,
'url' : url,
})
this_screenshot.save()
# Send the email to inform the user of their request status
        if this_screenshot.added_by.get_profile().email_on_group_add and (status == 'A' or status == 'R'):
this_screenshot.added_by.get_profile().send_message(
sender = request.user,
message = mail_tpl.render(c),
subject = "Screenshot Request Status Changed To: %s" % stat
)
screenshots = m.Screenshot.objects.filter(status = "U").order_by('last_updated')
return j2shim.r2r('webview/pending_screenshots.html', { 'screenshots': screenshots }, request=request)
@permission_required('webview.change_screenshot')
def rebuild_thumb(request, screenshot_id):
    screenshot = get_object_or_404(m.Screenshot, id=screenshot_id)
screenshot.create_thumbnail()
screenshot.save()
return j2shim.r2r('webview/screenshot_detail.html', { 'object' : screenshot }, request)
def users_online(request):
timefrom = datetime.datetime.now() - datetime.timedelta(minutes=5)
userlist = m.Userprofile.objects.filter(last_activity__gt=timefrom).order_by('user__username')
return j2shim.r2r('webview/online_users.html', {'userlist' : userlist}, request=request)
@login_required
def set_rating_autovote(request, song_id, user_rating):
"""
    Set a user's rating on a song, from 1 to 5
"""
int_vote = int(user_rating)
if int_vote <= 5 and int_vote > 0:
S = m.Song.objects.get(id = song_id)
S.set_vote(int_vote, request.user)
#add_event(event="nowplaying")
# Successful vote placed.
try:
refer = request.META['HTTP_REFERER']
return HttpResponseRedirect(refer)
        except KeyError:  # no HTTP_REFERER header was sent
return HttpResponseRedirect("/")
# If the user tries any funny business, we redirect to the queue. No messing!
return HttpResponseRedirect(reverse("dv-queue"))
@login_required
def set_rating(request, song_id):
"""
    Set a user's rating on a song, from 1 to 5
"""
if request.method == 'POST':
try:
R = int(request.POST['Rating'])
        except (KeyError, ValueError):  # missing or non-numeric 'Rating' field
return HttpResponseRedirect(reverse('dv-song', args=[song_id]))
if R <= 5 and R >= 1:
S = m.Song.objects.get(id = song_id)
S.set_vote(R, request.user)
return HttpResponseRedirect(S.get_absolute_url())
def link_category(request, slug):
"""
View all links associated with a specific link category slug
"""
link_cat = get_object_or_404(m.LinkCategory, id_slug = slug)
link_data_txt = m.Link.objects.filter(status="A").filter(link_type="T").filter(url_cat=link_cat) # See what linkage data we have
return j2shim.r2r('webview/links_category.html', \
{'links_txt' : link_data_txt, 'cat' : link_cat}, \
request=request)
@login_required
def link_create(request):
"""
    User-submitted links are queued via this form for moderators to approve. Once
    sent, the user is directed to a generic 'Thanks' page.
"""
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_LINK', 0)
if request.method == 'POST':
# Check to see if moderation settings allow for the check
if request.user.is_staff and auto_approve == 1:
# Automatically approved due to Moderator status
status = 'A'
else:
status = 'P'
l = m.Link(submitted_by = request.user, status = status)
form = f.CreateLinkForm(request.POST, request.FILES, instance = l)
if form.is_valid():
new_link = form.save(commit=False)
new_link.save()
form.save_m2m()
return j2shim.r2r('webview/link_added.html', request=request) # Redirect to 'Thanks!' screen!
else:
form = f.CreateLinkForm()
return j2shim.r2r('webview/create_link.html', { 'form' : form }, request=request)
@permission_required('webview.change_link')
def activate_links(request):
"""
Show all currently pending links in the system. Only the l33t may access.
"""
if "link" in request.GET and "status" in request.GET:
linkid = int(request.GET['link'])
status = request.GET['status']
this_link = m.Link.objects.get(id=linkid)
if status == 'A':
this_link.status = "A"
this_link.log(request.user, "Accepted link")
this_link.approved_by = request.user
if status == 'R':
this_link.status = "R"
this_link.log(request.user, "Rejected link")
this_link.approved_by = request.user
# Save this to the DB
this_link.save()
#links = Link.objects.filter(status = "P")
links_txt = m.Link.objects.filter(status="P").filter(link_type="T")
#links_but = Link.objects.filter(status="P").filter(link_type="U")
#links_ban = Link.objects.filter(status="P").filter(link_type="B")
return j2shim.r2r('webview/pending_links.html', { 'text_links' : links_txt }, request=request)
def site_links(request):
"""
Show all active links for this site
"""
link_cats = m.LinkCategory.objects.all() # All categories in the system
return j2shim.r2r('webview/site-links.html', { 'link_cats' : link_cats }, request=request)
def memcached_status(request):
try:
import memcache
except ImportError:
return HttpResponseRedirect("/")
if not (request.user.is_authenticated() and
request.user.is_staff):
return HttpResponseRedirect("/")
# get first memcached URI
match = re.match(
"memcached://([.\w]+:\d+)", settings.CACHE_BACKEND
)
if not match:
return HttpResponseRedirect("/")
host = memcache._Host(match.group(1))
host.connect()
host.send_cmd("stats")
class Stats:
pass
stats = Stats()
while 1:
line = host.readline().split(None, 2)
if line[0] == "END":
break
stat, key, value = line
try:
# convert to native type, if possible
value = int(value)
if key == "uptime":
value = datetime.timedelta(seconds=value)
elif key == "time":
value = datetime.datetime.fromtimestamp(value)
except ValueError:
pass
setattr(stats, key, value)
host.close_socket()
return j2shim.r2r(
'webview/memcached_status.html', dict(
stats=stats,
            hit_rate=100 * stats.get_hits / stats.cmd_get if stats.cmd_get else 0,  # avoid ZeroDivisionError on a fresh cache
time=datetime.datetime.now(), # server time
), request=request)
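# For reference, the raw reply parsed above is the standard memcached "stats"
# response: one "STAT <name> <value>" line per statistic, terminated by "END"
# (values below are illustrative):
#
#     STAT uptime 502
#     STAT time 1234567890
#     STAT cmd_get 105
#     STAT get_hits 80
#     END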
class LicenseList(WebView):
template = "licenselist.html"
def set_context(self):
licenses = m.SongLicense.objects.all()
return {'licenses': licenses}
class License(WebView):
template = "license.html"
def set_context(self):
id = self.kwargs.get("id")
license = m.SongLicense.objects.get(id=id)
return {'license': license}
class Login(MyBaseView):
template="registration/login.html"
MAX_FAILS_PER_HOUR = getattr(settings, "MAX_FAILED_LOGINS_PER_HOUR", 5)
def pre_view(self):
self.context['next'] = self.request.REQUEST.get("next", "")
self.context['username'] = self.request.REQUEST.get("username", "")
self.context['error'] = ""
def check_limit(self, keys):
for key in keys:
if cache.get(key, 0) > self.MAX_FAILS_PER_HOUR:
return True
return False
    def add_to_limit(self, keys):
        for key in keys:
            if cache.get(key) is None:
                cache.set(key, 1, 60*60)
            else:
                cache.incr(key)
def POST(self):
ip = self.request.META.get("REMOTE_ADDR")
username = self.request.POST.get('username', "")
password = self.request.POST.get('password', "")
key1 = hashlib.md5("loginfail" + username).hexdigest()
key2 = hashlib.md5("loginfail" + ip).hexdigest()
if self.check_limit((key1, key2)):
self.context['error'] = _("Too many failed logins. Please wait an hour before trying again.")
return False
next = self.request.POST.get("next", False)
if not username or not password:
self.context['error'] = _(u"You need to supply a username and password")
return
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(self.request, user)
return self.redirect(next or 'dv-root')
else:
                self.context['error'] = _(u"I'm sorry, your account has been disabled.")
else:
self.add_to_limit((key1, key2))
            self.context['error'] = _(u"I'm sorry, the username or password seems to be wrong.")
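# Rough behaviour of the throttle above, assuming Django's low-level cache API:
# failed attempts are counted per username and per IP under one-hour keys, and
# either counter passing MAX_FAILS_PER_HOUR (default 5) blocks further tries.
# Illustrative trace:
#
#     key = hashlib.md5("loginfail" + "alice").hexdigest()
#     cache.set(key, 1, 60*60)    # first failure
#     cache.incr(key)             # second failure -> 2
#     cache.get(key, 0) > 5      # once True, Login.POST refuses to authenticate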
def play_stream(request):
streamurl = getattr(settings, "FLASH_STREAM_URL", False)
if not streamurl:
surl = m.RadioStream.objects.filter(streamtype="M").order_by('?')
if surl:
streamurl = surl[0].url
else:
streamurl = "No MP3 Streams!"
return j2shim.r2r(
'webview/radioplay.html', dict(
streamurl=streamurl,
), request=request)
def upload_progress(request):
"""
Return JSON object with information about the progress of an upload.
"""
progress_id = ''
if 'X-Progress-ID' in request.GET:
progress_id = request.GET['X-Progress-ID']
elif 'X-Progress-ID' in request.META:
progress_id = request.META['X-Progress-ID']
if progress_id:
from django.utils import simplejson
cache_key = "%s_%s" % (request.META['REMOTE_ADDR'], progress_id)
data = cache.get(cache_key)
return HttpResponse(simplejson.dumps(data))
else:
return HttpResponseServerError('Server Error: You must provide X-Progress-ID header or query param.')
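# Hypothetical client for the view above (Python 2; the URL is a placeholder).
# The uploader picks a progress id, sends it with the upload, then polls this
# endpoint, which returns whatever JSON the upload handler cached under the
# "<ip>_<progress_id>" key:
#
#     import urllib2, uuid
#     progress_id = uuid.uuid4().hex
#     resp = urllib2.urlopen("http://example.com/upload_progress/?X-Progress-ID=" + progress_id)
#     print resp.read()  # e.g. '{"uploaded": 1024, "length": 4096}'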
authors: ["fishguy8765@gmail.com"]
author_id: fishguy8765@gmail.com

blob_id: f0338b1f24a90d5fbc5b99ebe5f32f64d18dd26f
directory_id: 34f1693e4bd6b85abc289725d535656b36fb5e72
path: /.file/hash/6.py
content_id: 64c612f22b34aebed2e2831867886eeed92feae8
detected_licenses: []
license_type: no_license
repo_name: mels595/termux-toolkit
snapshot_id: f15aeeb8f673082e2ee6cde50f72f6d40481eb61
revision_id: 872b9220e9fe857b65502ff775073e26fedbc0b9
branch_name: refs/heads/master
visit_date: 2022-11-17T18:35:09.892480
revision_date: 2020-07-11T18:46:29
committer_date: 2020-07-11T18:46:29
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 225
extension: py
import hashlib
text = raw_input("\033[00m[\033[1;31m+\033[00m] Text\033[1;31m: \033[0;36m")
m = hashlib.new('sha384')
m.update(text)
digest = m.hexdigest()  # hex digest of the SHA-384 hash (the original misleadingly called this "md4")
print "\033[00m[\033[1;32m+\033[00m] SHA384 \033[1;31m: \033[0;33m" + digest
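# Quick sanity check: for the input "abc" the script should print the standard
# SHA-384 test vector:
# cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7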
authors: ["bangaslanz@yahoo.com"]
author_id: bangaslanz@yahoo.com

blob_id: b5c1fff82ac0901d1ae985cd1826ca4b47c6f5af
directory_id: 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
path: /nodes/Bisong19Building/I_PartVIII/C_Chapter47/index.py
content_id: cce9e2225cec24eabc5302e3a2817b1a5b9cd72f
detected_licenses: []
license_type: no_license
repo_name: nimra/module_gen
snapshot_id: 8749c8d29beb700cac57132232861eba4eb82331
revision_id: 2e0a4452548af4fefd4cb30ab9d08d7662122cf4
branch_name: refs/heads/master
visit_date: 2022-03-04T09:35:12.443651
revision_date: 2019-10-26T04:40:49
committer_date: 2019-10-26T04:40:49
github_id: 213980247
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4830
extension: py
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
from .A_Overviewof.index import Overviewof as A_Overviewof
from .B_Createa.index import Createa as B_Createa
from .C_BuildContainers.index import BuildContainers as C_BuildContainers
from .D_Compilethe.index import Compilethe as D_Compilethe
from .E_Uploadand.index import Uploadand as E_Uploadand
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CHAPTER 47
#
#
#
# Deploying
# an End-to-End Machine
# Learning Solution
# on Kubeflow Pipelines
# A Kubeflow pipeline component is an implementation of a pipeline task. A component
# is a step in the workflow. Each task takes one or more artifacts as input and may produce
# one or more artifacts as output.
# Each component usually includes two parts:
#
# • Client code: The code that talks to endpoints to submit jobs, for
# example, code to connect with the Google Cloud Machine Learning
# Engine.
#
# • Runtime code: The code that does the actual job and usually runs in
# the cluster, for example, the code that prepares the model for training
# on Cloud MLE.
# A component consists of an interface (inputs/outputs), the implementation
# (a Docker container image and command-line arguments), and metadata (name,
# description).
#
#
#
#
# © Ekaba Bisong 2019
# E. Bisong, Building Machine Learning and Deep Learning Models on Google Cloud Platform,
# https://doi.org/10.1007/978-1-4842-4470-8_47
#
#
# Overview of a Simple End-to-End Solution Pipeline
# In this simple example, we will implement a deep neural regressor network to predict the
# closing prices of Bitcoin crypto-currency. The machine learning code itself is pretty basic
# as it is not the focus of this article. The goal here is to orchestrate a machine learning
# engineering solution using microservice architectures on Kubernetes with Kubeflow
# Pipelines. The code for this chapter is in the book code repository. Clone the repository
# from the GCP Cloud Shell.
# The pipeline consists of the following components:
#
# 1. Move raw data hosted on GitHub to a storage bucket.
#
# 2. Transform the dataset using Google Dataflow.
#
# 3. Carry out hyper-parameter training on Cloud Machine
# Learning Engine.
#
# 4. Train the model with the optimized hyper-parameters.
#
# 5. Deploy the model for serving on Cloud MLE.
#
#
#
# Create a Container Image for Each Component
# First, we’ll package the client and runtime code into a Docker image. This image
# also contains the secure service account key to authenticate against GCP. For example,
# the component to transform the dataset using Dataflow has the following files built into
# its image:
# • Dockerfile: Dockerfile to build the Docker image.
#
# • build.sh: Script to initiate the container build and upload to
#   Google Container Registry.
#
# • dataflow_transform.py: Code to run the beam pipeline on
#   Cloud Dataflow.
#
# • service_account.json: Secure key to authenticate container
#   on GCP.
#
# • local_test.sh: Script to run the image pipeline component
#   locally.
#
#
#
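# Not from the book's repository: a minimal sketch of how one such component
# could be declared with the (older) Kubeflow Pipelines SDK. The image names,
# bucket, and argument names are hypothetical placeholders; the real chapter
# packages each step as its own Docker image as described above.
#
#     import kfp.dsl as dsl
#
#     @dsl.pipeline(name='btc-closing-price', description='Sketch of the chapter pipeline')
#     def btc_pipeline(raw_data_url='https://github.com/example/data.csv'):
#         # Step 1: move raw data from GitHub into a storage bucket.
#         move = dsl.ContainerOp(
#             name='move-raw-data',
#             image='gcr.io/example-project/move-data:latest',
#             arguments=['--source', raw_data_url, '--bucket', 'gs://example-bucket'],
#         )
#         # Step 2: transform the dataset on Cloud Dataflow, after step 1.
#         transform = dsl.ContainerOp(
#             name='dataflow-transform',
#             image='gcr.io/example-project/dataflow-transform:latest',
#             arguments=['--bucket', 'gs://example-bucket'],
#         ).after(move)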
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
self.add(mbk("# Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Chapter47(HierNode):
def __init__(self):
super().__init__("Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines")
self.add(Content())
self.add(A_Overviewof())
self.add(B_Createa())
self.add(C_BuildContainers())
self.add(D_Compilethe())
self.add(E_Uploadand())
# eof
authors: ["lawrence.mcafee@gmail.com"]
author_id: lawrence.mcafee@gmail.com

blob_id: 9ca3d949f4eba7c4f5c4434c364d62be9b136a99
directory_id: aa4024b6a846d2f6032a9b79a89d2e29b67d0e49
path: /UMLRT2Kiltera_MM/graph_MT_post__Model.py
content_id: 3f264f3c35aea6264d6efa85f991b713f54237a9
detected_licenses: ["MIT"]
license_type: permissive
repo_name: levilucio/SyVOLT
snapshot_id: 41311743d23fdb0b569300df464709c4954b8300
revision_id: 0f88827a653f2e9d3bb7b839a5253e74d48379dc
branch_name: refs/heads/master
visit_date: 2023-08-11T22:14:01.998341
revision_date: 2023-07-21T13:33:36
committer_date: 2023-07-21T13:33:36
github_id: 36246850
star_events_count: 3
fork_events_count: 2
gha_license_id: MIT
gha_event_created_at: 2023-07-21T13:33:39
gha_created_at: 2015-05-25T18:15:26
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2610
extension: py
"""
__graph_MT_post__Model.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
___________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_post__Model(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 172, 82
graphEntity.__init__(self, x, y)
self.ChangesAtRunTime = 0
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
self.imageDict = self.getImageDict()
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
h = drawing.create_oval(self.translate([189.0, 62.0, 189.0, 62.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
self.connectors.append( h )
h = drawing.create_rectangle(self.translate([20.0, 20.0, 190.0, 100.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin')
self.gf4 = GraphicalForm(drawing, h, "gf4")
self.graphForms.append(self.gf4)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([110.0, 41.0, 110.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'MT_post__Model_S', width = '0', justify= 'left', stipple='' )
self.gf66 = GraphicalForm(drawing, h, 'gf66', fontObject=font)
self.graphForms.append(self.gf66)
helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
h = drawing.create_text(self.translate([-3, -3]), font=helv12,
tags = (self.tag, self.semanticObject.getClass()),
fill = "black",
text=self.semanticObject.MT_label__.toString())
self.attr_display["MT_label__"] = h
self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
self.graphForms.append(self.gf_label)
def postCondition( self, actionID, * params):
return None
def preCondition( self, actionID, * params):
return None
def getImageDict( self ):
imageDict = dict()
return imageDict
new_class = graph_MT_post__Model
authors: ["levi"]
author_id: levi