blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
52f3d10940eb5f5efd8b9faf107b3cfc0cebc8c1 | 8c836f4422d28b289d2e2e05cecbb3ad283fc32b | /matrix_generator.py | 8883efc42c92e51af6c081dfe3bee9a92871e251 | [] | no_license | J3FALL/MatrixEvolution | 75588bdd00ac4f242847f55c69f5808b6e69330d | 3a91708fda3c6b070ba3bac646af8e6cfd983cae | refs/heads/master | 2023-07-21T18:14:38.612502 | 2020-04-16T00:28:45 | 2020-04-16T00:28:45 | 219,976,301 | 1 | 0 | null | 2023-07-06T21:49:52 | 2019-11-06T10:59:48 | Python | UTF-8 | Python | false | false | 2,446 | py | from functools import partial
from math import (
cos, sin,
radians)
import numpy as np
from scipy.optimize import (
minimize)
def initial_diag_matrix(matrix_size, range_value):
    """Return a matrix_size x matrix_size diagonal matrix.

    The diagonal entries come from initial_diagonal_scaled, which scales a
    random integer/fraction vector relative to range_value.
    """
    return np.diag(initial_diagonal_scaled(size=matrix_size, range_value=range_value))
def rotate_matrix(source_matrix, axis, angle):
    """Left-multiply source_matrix by a plane (Givens) rotation.

    axis is a pair (i, j) of coordinate indices and angle is in degrees.
    """
    rot = rotation_matrix(size=source_matrix.shape[0], axis=axis, angle=angle)
    return np.dot(rot, source_matrix)
def rotation_matrix(size, axis, angle):
    """Return a size x size Givens rotation matrix.

    Rotates by `angle` degrees in the plane spanned by the two coordinate
    axes named in `axis`; every other coordinate is left unchanged.
    """
    i, j = axis
    theta = radians(angle)
    rot = np.eye(size)
    rot[i, i] = rot[j, j] = cos(theta)
    rot[i, j] = -sin(theta)
    rot[j, i] = sin(theta)
    return rot
def product_min(diag_values, range_value):
    """Objective for the optimizer: |range_value - prod(diag_values)|."""
    gap = range_value - np.prod(diag_values)
    return np.linalg.norm(gap)
def initial_diagonal_minimized(matrix_size, range_value, shuffle=True):
    """Find diagonal entries whose product approaches range_value via SLSQP.

    Starts from a random Gaussian guess, minimizes the product_min objective,
    optionally shuffles the solution vector, and returns it as a diagonal
    matrix.
    """
    guess = np.random.randn(matrix_size, 1)
    objective = partial(product_min, range_value=range_value)
    solution = minimize(objective, guess,
                        method='SLSQP',
                        options={'disp': False})
    diag_values = solution.x
    if shuffle:
        np.random.shuffle(diag_values)
    return np.diag(diag_values)
def initial_diagonal_scaled(size, range_value):
    """Build `size` diagonal entries from random integers and their inverses.

    Starting from size//2 random integers, the current minimum value v is
    repeatedly replaced by v*v while 1/v is appended, until `size` entries
    exist (this keeps the running product equal to the product of the
    original integers).  The vector is then divided element-wise by
    prod/range_value and shuffled.

    NOTE(review): dividing *every* entry by the scale makes the final product
    prod / scale**size, not range_value — possibly only one entry was meant
    to be scaled; confirm against callers.
    """
    # Dead code removed: the original drew an unused random `scaled_value`
    # (np.random.randint of ~1e7) that was never referenced.
    int_parts = random_integers(amount=int(size / 2))
    frac_parts = []
    while (len(int_parts) + len(frac_parts)) < size:
        value = int_parts.pop(int_parts.index(min(int_parts)))
        frac_parts.append(1.0 / value)
        int_parts.append(value * value)
    prod = np.prod(np.asarray(int_parts + frac_parts))
    scale = prod / range_value
    resulted = np.asarray(int_parts + frac_parts)
    resulted = resulted / scale
    np.random.shuffle(resulted)
    return resulted
def prime_factors(n):
    """Return the prime factorization of n as an ascending list (with multiplicity)."""
    factors = []
    candidate = 2
    while candidate * candidate <= n:
        while n % candidate == 0:
            n //= candidate
            factors.append(candidate)
        candidate += 1
    if n > 1:
        # whatever remains is itself prime
        factors.append(n)
    return factors
def random_integers(amount=10):
    """Draw `amount` integers uniformly (with replacement) from 1..99.

    Returns a plain Python list of numpy integers.
    """
    # Dead code removed: the original filtered `values_range != 0`, but
    # np.arange(1, 100) can never contain 0.
    values_range = np.arange(1, 100)
    values = list(np.random.choice(values_range, amount))
    return values
| [
"crisp.coad@gmail.com"
] | crisp.coad@gmail.com |
a3717cc3e36cf3503192398c17a60fe57613198c | e651496a8acb437be45130fdaf69eb73055c254b | /classification/perceptron_pacman.py | 79e3179f3af18e9dca62ea7cff1b7d4ba1d92ae1 | [] | no_license | ntp28459hteam/INT3401_9 | 40390dd9d4ff6e872c24ac07f1949e1327b87eb1 | 32b239771d45cb5cb5a7d6a53c456a26e85945ff | refs/heads/master | 2022-03-09T05:09:02.113812 | 2019-11-19T17:48:57 | 2019-11-19T17:48:57 | 208,970,229 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,571 | py | # perceptron_pacman.py
# --------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# Perceptron implementation for apprenticeship learning
import util
from perceptron import PerceptronClassifier
from pacman import GameState
PRINT = True
class PerceptronClassifierPacman(PerceptronClassifier):
    """Perceptron for apprenticeship learning in Pacman.

    Unlike the base classifier (one weight vector per label), a single shared
    weight Counter is kept; each legal move of a GameState is scored by the
    dot product of the weights with that move's feature Counter and the
    highest-scoring move is chosen.
    NOTE: this file uses Python 2 print statements.
    """

    def __init__(self, legalLabels, maxIterations):
        PerceptronClassifier.__init__(self, legalLabels, maxIterations)
        # Shared weight vector over features (overrides per-label weights).
        self.weights = util.Counter()

    def classify(self, data ):
        """
        Data contains a list of (datum, legal moves)
        Datum is a Counter representing the features of each GameState.
        legalMoves is a list of legal moves for that GameState.

        Returns the highest-scoring legal move for each (datum, legalMoves) pair.
        """
        guesses = []
        for datum, legalMoves in data:
            vectors = util.Counter()
            for l in legalMoves:
                # Score each move by weights . features(move).
                vectors[l] = self.weights * datum[l] #changed from datum to datum[l]
            guesses.append(vectors.argMax())
        return guesses

    def train( self, trainingData, trainingLabels, validationData, validationLabels ):
        """Structured-perceptron training.

        On each misclassified state, move the weights toward the features of
        the correct action and away from those of the predicted action.
        (self.max_iterations is presumably set by the base class — confirm.)
        """
        self.features = trainingData[0][0]['Stop'].keys() # could be useful later
        # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR
        # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.
        # for i in range(5):
        #     print (trainingData[i])
        #     print ("\n")
        # print (self.weights)
        # print (trainingLabels)
        for iteration in range(self.max_iterations):
            print "Starting iteration ", iteration, "..."
            for i in range(len(trainingData)):
                "*** YOUR CODE HERE ***"
                # util.raiseNotDefined()
                f = trainingData[i]
                ytrue = trainingLabels[i]
                ypred = self.classify([f])[0] # ypred = self.classify(f)[0] # Wrong!
                if ypred != ytrue:
                    # Standard structured-perceptron update.
                    self.weights+=f[0][ytrue]-f[0][ypred]
        # Final learned weights, for inspection.
        print (self.weights)
| [
"ntp28459hteam@gmail.com"
] | ntp28459hteam@gmail.com |
0418b9c1cbf29490405340e1e46c8ae785ffc414 | d11a49197ef92d23d5d77253b224a54380cbbe8d | /testapp.py | ccff800ae00094775a2a4e481a2de374c65bddc1 | [] | no_license | sockheadrps/BaxterAndFriends | 9bf8a7b8007fa34e28f8ddac0022be9efae09dda | 7f0d153d46d223a0028863d55deec43900d2d8e7 | refs/heads/master | 2022-08-04T22:35:20.267632 | 2020-05-31T02:00:23 | 2020-05-31T02:00:23 | 266,780,423 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,507 | py | import hug
import falcon
def look_for_current_request():
    """Return (user, song) parsed from the first line of songRequests.txt.

    Queue entries look like "{user: song title}".  When the queue is empty,
    a placeholder pair (".", "No song in queue!") is returned instead.
    """
    next_up = None
    with open('songRequests.txt', 'r+') as fin:
        data = fin.readlines()
        if data:
            next_up = data[0]
    # BUG FIX: the original used `next_up is not "\n"` — identity comparison
    # against a string literal, which is implementation-dependent.  Compare
    # by value instead.
    if next_up != "\n" and next_up is not None:
        user = next_up[:next_up.index(":")].replace("{", "")
        song = next_up[next_up.index(":"):].replace("}", "")
        song = song.replace(": ", "")
        song = song.rstrip("\n")
        return user, song
    else:
        user = "."
        song = "No song in queue!"
        return user, song
def look_for_next_request():
    """Return (user, song) parsed from the second line of songRequests.txt.

    Falls back to a "how to request" hint pair when fewer than two queue
    entries exist.
    """
    next_up = None
    with open('songRequests.txt', 'r+') as fin:
        data = fin.readlines()
        if len(data) > 1:
            next_up = data[1]
            print(next_up)  # debug trace of the raw queue line
    # BUG FIX: value comparison instead of the original's `is not "\n"`
    # identity comparison against a string literal.
    if next_up != "\n" and next_up is not None:
        user = next_up[:next_up.index(":")].replace("{", "")
        song = next_up[next_up.index(":"):].replace("}", "")
        song = song.replace(": ", "")
        song = song.rstrip("\n")
        return user, song
    else:
        user = "to request a song"
        song = "!sr SONG AND ARTIST NAME"
        return user, song
@hug.post("/add_song")
def add_song(data: hug.types.json):
print(data)
pass
@hug.get("/skip_song")
def skip_song():
print('Skip song')
pass
@hug.get("/music")
@hug.local()
def music():
"""Returns current song playing and user who requested it"""
user, song = look_for_current_request()
next_user, next_song = look_for_next_request()
user = user.capitalize()
song = song.title()
next_user = next_user.capitalize()
next_song = next_song.title()
return {'user': user, 'song': song, 'next_user': next_user, 'next_song': next_song}
@hug.get("/client", output=hug.output_format.file)
def client():
return "./Client/client.html"
@hug.get("/client_styles.css", output=hug.output_format.file)
def client_styles():
return "./Client/styles.css"
@hug.get("/client_scripts.js", output=hug.output_format.file)
def client_scripts():
return "./Client/scripts.js"
@hug.get("/favicon.ico")
def favicon():
raise falcon.HTTPForbidden("fuck you")
@hug.get("/music_thing", output=hug.output_format.file)
def music_thing():
return "./nowPlaying.html"
@hug.get("/styles.css", output=hug.output_format.file)
def music_thing_css():
return "./styles.css"
@hug.get("/scripts.js", output=hug.output_format.file)
def music_thing_js():
return "./scripts.js"
#
# receive_client_slider_data()
| [
"r.p.skiles@gmail.com"
] | r.p.skiles@gmail.com |
fd57d33c643143a4cd19384011907536cfa8de5d | 4864834342f99fff07f3c8b61c39f90228988ccf | /goldi_locks.py | acc88c342cb4f2fcc6722f6c6256ae7bb472caf4 | [] | no_license | Kyle628/dailyprogrammer | 6999d37d5449942e3a1a04800bf4999c2530d06b | 7985f6ecaf88d0e6d1247d38959c17e90256e1d4 | refs/heads/master | 2020-06-23T13:40:27.510734 | 2017-05-10T17:48:57 | 2017-05-10T17:48:57 | 74,647,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import sys
input_str = "100 80\n30 50\n130 75\n90 60\n150 85\n120 70\n200 200\n110 100"
lines = input_str.split('\n')
first_line = lines.pop(0).split(" ")
weight = int(first_line[0])
temp_tolerance = int(first_line[1])
for i,line in enumerate(lines):
line_arr = line.split(" ")
weight_capacity = int(line_arr[0])
soup_temp = int(line_arr[1])
if weight_capacity > weight and soup_temp < temp_tolerance:
sys.stdout.write(str(i + 1) + " ")
print ''
| [
"kyjoconn@ucsc.edu"
] | kyjoconn@ucsc.edu |
cf231e55c68e90e0cf6334ab13286bd140164514 | 7af496ea625b6fe21d71d980b744857bc53495bc | /Curso/ex039.py | 80a38c99007f804dd490cf98ddc0d4c5b48bdd6e | [] | no_license | DiogoM14/Python-Exercicios | 52fad65f271f8846f04d9317a29322a276009aef | 7ecf7615a649f229e54032ffaf419504a4d9c12f | refs/heads/main | 2023-02-03T08:12:25.454313 | 2020-12-17T23:40:31 | 2020-12-17T23:40:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | idade = int(input('Qual é a tua idade? '))
# BUG FIX: the original compared the int `idade` against the *string* '18'
# (`idade == '18'`), which is always False in Python 3, so an 18-year-old
# wrongly fell through to the last branch ("Já foste à inspeção há 0 anos!").
if idade == 18:
    print('Está na hora de ires à inspeção!')
elif idade < 18:
    # Years remaining until military inspection at age 18.
    n = 18 - idade
    print('Faltam {} anos para ires para a inspeção!'.format(n))
else:
    # Years elapsed since the inspection age.
    n2 = idade - 18
    print('Já foste à inspeção há {} anos!'.format(n2))
| [
"diogomartins200214@gmail.com"
] | diogomartins200214@gmail.com |
8c0ee08b61836fa5388ef5e53460488f7c307034 | 03afa9df5e088558fffdf1594344d609ab199720 | /model_full_no_stage2_1RIM.py | df200ad92466861e2e25ce5de6a6c2cb3cb04976 | [] | no_license | tungvd345/Deraining | 46489a376446c717914362ed36d997622df14c27 | 3dde575c620ddabca44341a4d078a34a9c67f6ea | refs/heads/master | 2023-03-18T13:45:27.630232 | 2021-02-19T06:26:11 | 2021-02-19T06:26:11 | 265,502,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,268 | py | import torch
import torch.nn as nn
from torch.nn import init
import torchvision.transforms as transforms
import torchvision.models as models
import functools
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from math import log10
class Deraining(nn.Module):
    """Top-level deraining network (stage-2 SR disabled in this variant).

    Pipeline: an SCA U-Net predicts a coarse clean image plus an intermediate
    feature map; from that feature map an additive-residual branch and a
    multiplicative (gating) branch produce two more candidate restorations,
    and a channel-attention module fuses the three candidates with learned
    per-candidate weights.

    NOTE(review): several sub-modules built in __init__ (upx2, up_feature,
    operation_layer, conv, rcan) are not used by forward() in this variant;
    they are kept so existing checkpoints still load.  `kpts` is likewise
    accepted but unused here.
    """
    def __init__(self,args):
        super(Deraining, self).__init__()
        self.args = args
        self.upsample = F.interpolate
        self.upx2 = nn.Upsample(scale_factor=2, mode = 'bilinear', align_corners=True)
        self.up_feature = up_feature(in_channels=16*16*3)
        self.ats_model = SCA_UNet(in_channel=3, out_channel=3)
        self.operation_layer = operation_layer(in_channels=3)
        self.add_layer = add_layer()
        self.mul_layer = mul_layer()
        self.relu = nn.LeakyReLU(0.2, True)
        self.sigmoid = nn.Sigmoid()
        self.conv = nn.Conv2d(in_channels=9, out_channels=3, kernel_size=3, padding=1)
        self.channel_att = channel_attention(in_channels=9)
        self.rcan = RCAN(args)

    def forward(self, x, kpts):
        # x: rainy input (B, 3, H, W); kpts is unused in this variant.
        b, c, height, width = x.size()
        # x = self.upsample1(x)
        # features_add = self.up_feature(kpts)
        # features_add = self.upsample(features_add, size=(height, width), mode='bilinear', align_corners=True)
        #
        # features_mul = self.up_feature(kpts)
        # features_mul = self.upsample(features_mul, size=(height, width), mode='bilinear', align_corners=True)
        # atm, trans, streak = self.ats_model(x)
        # clean = (x - (1-trans) * atm) / (trans + 0.0001) - streak
        # SCA U-Net returns a coarse clean estimate and a downsampled feature map.
        clean,feature = self.ats_model(x)
        # feature = self.ats_feature(x)
        # add_residual = self.operation_layer(features_add)
        # add_layer = x + add_residual
        # Additive branch: predict a residual, upsample it back to input size.
        add_residual = self.add_layer(feature)
        add_residual = self.upsample(add_residual, size=(height, width), mode='bilinear', align_corners=True)
        add_layer = x + add_residual
        # mul_residual = self.mul_layer(feature)
        # Multiplicative branch: predict a per-pixel gating mask in (0, 1).
        mul_residual = self.mul_layer(feature)
        mul_residual = self.upsample(mul_residual, size=(height, width), mode='bilinear', align_corners=True)
        mul_layer = x * (mul_residual)
        concatenates = torch.cat((clean, add_layer, mul_layer), dim=1)
        # concatenates = torch.cat((clean, mul_layer), dim=1)
        # w0 = self.channel_att(add_layer)
        # out_comb = w0 * add_layer
        # out_comb = self.conv(concatenates)
        # Fuse the three candidates with attention weights w0/w1/w2.
        w0, w1, w2 = self.channel_att(concatenates)
        out_comb = w0 * clean + w1 * add_layer + w2 * mul_layer
        # w1, w2 = self.channel_att(concatenates)
        # out_comb = w1 * clean + w2 * mul_layer
        # out_SR = self.rcan(out_comb)
        # Stage-2 super-resolution disabled: out_SR is just the fused image.
        out_SR = out_comb
        out_combine = out_comb
        return out_SR, out_combine, clean, add_layer, mul_layer, add_residual, mul_residual
        # return out_SR, out_combine, add_layer, add_layer, add_layer, add_residual, add_residual
        # return out_SR, out_combine, clean, clean, clean
class ATS_model(nn.Module):
    """Predict the A/T/S triple of the rain imaging model.

    A: global atmospheric light (predicted from a shared conv feature map),
    T: per-pixel transmission map (via a U-Net),
    S: rain-streak layer (via dense blocks).
    The commented-out formula at the bottom shows the intended composition:
    clean = (img_in - (1 - T) * A) / (T + eps) - S.
    """
    def __init__(self, args, in_channels):
        super(ATS_model, self).__init__()
        self.conv1 = nn.Conv2d(in_channels = in_channels, out_channels = 64, kernel_size = 3, padding = 1)
        # self.batch_norm = nn.BatchNorm2d(64)
        self.relu1 = nn.LeakyReLU(0.2, True)
        self.conv2 = nn.Conv2d(in_channels = 64, out_channels = 128, kernel_size = 3, padding = 1)
        # self.pooling = nn.AvgPool2d(kernel_size = (3,3))
        # self.fc = nn.Linear(in_features = in_channels * (args.patch_size//6) * (args.patch_size//4), out_features = 3) # (patch*3//2) //3 = patch // 2
        # self.sigmoid = nn.Sigmoid()
        self.predict_S = predict_S(in_channel=3, out_channel=3)
        self.predict_A = predict_A(128)
        self.predict_T = predict_T(in_channel=3, out_channel=3)
        # self.conv = nn.Conv2d(in_channels, out_channels=128, kernel_size=3, padding=1)

    def forward(self,x):
        # T = self.predict_T(x)
        # S and T are predicted directly from the input image.
        S = self.predict_S(x)
        T = self.predict_T(x)
        # A is predicted from a 128-channel conv feature map of the input.
        x = self.relu1(self.conv1(x))
        x = self.relu1(self.conv2(x))
        # conv_T = self.conv2(self.relu1(self.batch_norm(self.conv1(x))))
        # T = self.sigmoid(conv_T)
        # T = self.predict_A(x)
        # pooling = self.pooling(x)
        # b, c, h, w = pooling.size()
        # pooling = pooling.view(b,-1)
        # A = self.sigmoid(self.fc(pooling))
        # A = A.view(b,3,1,1)
        A = self.predict_A(x)
        # conv_S = self.conv2(self.relu1(self.batch_norm(self.conv1(x))))
        # S = self.sigmoid(conv_S)
        #clean = (img_in - (1 - T) * A) / (T + 0.0001) - S
        return A, T, S
class predict_S(nn.Module):
    """Predict the rain-streak layer S with two dense blocks.

    conv(3->32) -> dense_block(32) -> conv(32->64) -> dense_block(64)
    -> 1x1 reduction to out_channel.  Conv weights are Xavier-initialized.
    """
    def __init__(self, in_channel, out_channel=3):
        super(predict_S, self).__init__()
        self.conv1 = nn.Conv2d(in_channel, 32, kernel_size=3, padding=1)
        self.dense_block1 = dense_block(in_channel=32, up_channel=32)
        # self.dense_block = dense_block(in_channel=in_channel, out_channel=in_channel)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=1)
        self.dense_block2 = dense_block(in_channel=64, up_channel=64)
        self.relu = nn.ReLU()
        # Final channel reduction head (includes Dropout2d, active in training).
        sequence = [nn.Conv2d(64, 64 // 2, kernel_size=1),
                    nn.ReLU(True),
                    nn.Conv2d(64 // 2, out_channel, kernel_size=1),
                    nn.Dropout2d()
                    ]
        self.down_conv = nn.Sequential(*sequence)
        self.reset_params()

    def forward(self, x):
        # dense_block = self.dense_block(x)
        x = self.relu(self.conv1(x))
        dense_block1 = self.dense_block1(x)
        dense_block2 = self.relu(self.conv2(dense_block1))
        dense_block2 = self.dense_block2(dense_block2)
        streak = self.down_conv(dense_block2)
        return streak

    @staticmethod
    def weight_init(m):
        # Xavier init for conv weights only (biases untouched).
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            # init.constant(m.bias, 0)

    def reset_params(self):
        for i, m in enumerate(self.modules()):
            self.weight_init(m)
class predict_T(nn.Module):
    """Predict the transmission map T: a thin wrapper around TransUNet
    with Xavier-initialized conv weights."""
    def __init__(self, in_channel, out_channel=3):
        super(predict_T, self).__init__()
        self.trans_unet = TransUNet(in_channel, out_channel)
        self.reset_params()

    def forward(self, x):
        trans = self.trans_unet(x)
        return trans

    @staticmethod
    def weight_init(m):
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            # init.constant(m.bias, 0)

    def reset_params(self):
        for i, m in enumerate(self.modules()):
            self.weight_init(m)

class predict_A(nn.Module):
    """Predict the atmospheric light A.

    A pyramid of conv + adaptive-average-pool stages collapses the 128-channel
    feature map down to a single value per image; the result is reshaped to
    (B, 1, 1, 1) so it broadcasts over all channels and pixels.
    NOTE(review): this yields one scalar per image, not a per-channel (3-value)
    atmosphere — confirm that broadcasting over RGB is intended.
    """
    def __init__(self, in_channel):
        super(predict_A, self).__init__()
        self.conv1 = nn.Conv2d(in_channel, in_channel//4, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(in_channel//4, in_channel//4, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(in_channel//4, in_channel//16, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(in_channel//16, in_channel//16, kernel_size=3, padding=1)
        self.conv5 = nn.Conv2d(in_channel//16, in_channel//64, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.pooling1 = nn.AdaptiveAvgPool2d((128, 128))
        self.pooling2 = nn.AdaptiveAvgPool2d((64, 64))
        self.pooling3 = nn.AdaptiveAvgPool2d((32, 32))
        self.pooling4 = nn.AdaptiveAvgPool2d((16,16))
        self.pooling5 = nn.AdaptiveAvgPool2d((1,1))
        self.fc = nn.Linear(in_channel//64, 1)
        self.reset_params()

    def forward(self, x):
        b, c, h, w = x.size()
        # Progressive conv + spatial pooling: 128 -> 64 -> 32 -> 16 -> 1.
        atm1 = self.pooling1(self.relu(self.conv1(x)))
        atm2 = self.pooling2(self.relu(self.conv2(atm1)))
        atm3 = self.pooling3(self.relu(self.conv3(atm2)))
        atm4 = self.pooling4(self.relu(self.conv4(atm3)))
        atm5 = self.pooling5(self.relu(self.conv5(atm4)))
        atm5 = atm5.view(b, -1)
        atm = self.fc(atm5)
        # Broadcastable scalar per image.
        atm = atm.view(b, 1, 1, 1)
        return atm

    @staticmethod
    def weight_init(m):
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            # init.constant(m.bias, 0)

    def reset_params(self):
        for i, m in enumerate(self.modules()):
            self.weight_init(m)
##################################################################################
# dense_block use pretrained dense-net
##################################################################################
# class dense_block(nn.Module):
# def __init__(self, in_channel, out_channel):
# super(dense_block, self).__init__()
# model_dense_net = models.densenet121(pretrained=True)
# model_dense_net = list(model_dense_net.children())[:]
# self.dense_block = model_dense_net[0].denseblock1
# self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=64, kernel_size=7, padding=3)
# self.relu = nn.ReLU(True)
# # sequence = []
# sequence = [nn.Conv2d(256, 224, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(224, 192, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(192, 160, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(160, 128, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(128, 96, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(96, 64, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(64, 3, kernel_size = 1),
# nn.Dropout2d()]
# self.down_conv = nn.Sequential(*sequence)
#
# def forward(self, x):
# x = self.relu(self.conv1(x))
# dense_block = self.relu(self.dense_block(x))
# out = self.down_conv(dense_block)
#
# return out
##################################################################################
##################################################################################
# dense_block don't use pretrained
##################################################################################
class dense_block(nn.Module):
    """Residual dense block.

    Stacks `num_dense_layer` dense_layer stages (each grows the channel count
    by `up_channel`), then a 1x1 bottleneck head squeezes back to
    `in_channel` and the input is added residually.
    """
    def __init__(self, in_channel, up_channel=32, num_dense_layer=4):
        super(dense_block, self).__init__()
        in_chan = in_channel
        sequence_1 = []
        for i in range(num_dense_layer):
            sequence_1.append(dense_layer(in_chan, up_channel))
            in_chan += up_channel
        self.dense_block = nn.Sequential(*sequence_1)
        # Bottleneck back to in_channel (Dropout2d active in training mode).
        sequence_2 = [nn.Conv2d(in_chan, in_chan//2, kernel_size=1),
                      nn.ReLU(True),
                      nn.Conv2d(in_chan//2, in_channel, kernel_size = 1),
                      nn.Dropout2d()
                      ]
        self.down_conv = nn.Sequential(*sequence_2)

    def forward(self, x):
        dense_block = self.dense_block(x)
        out = self.down_conv(dense_block)
        # Residual connection around the whole block.
        out = out + x
        return out
class dense_layer(nn.Module):
    """One dense-connectivity step: 3x3 conv + ReLU, then concatenate the
    input back on, so the output has in_channel + up_channel channels."""
    def __init__(self, in_channel, up_channel):
        super(dense_layer, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channel, out_channels=up_channel, kernel_size=3, padding=1)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        grown = self.relu(self.conv(x))
        return torch.cat((x, grown), 1)
##################################################################################
##################################################################################
# Defines the Unet-transmission
##################################################################################
class TransUNet(nn.Module):
    """4-level U-Net used to predict the transmission map.

    Encoder halves the resolution four times (64/128/256/512/512 channels);
    the decoder upsamples with skip concatenation and a final 1x1 conv + tanh
    maps to `n_classes` output channels in (-1, 1).
    """
    def __init__(self, in_channel, n_classes):
        super(TransUNet, self).__init__()
        self.conv1x1 = nn.Conv2d(in_channel, in_channel, kernel_size=1, stride=1, padding=0)
        # self.inc = inconv(in_channel, 64)
        self.inconv = nn.Sequential(
            nn.Conv2d(in_channel, 32, 3, padding=1),
            # nn.InstanceNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, padding=1),
            # nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True)
        )
        self.down1 = down(64, 128)
        self.down2 = down(128, 256)
        self.down3 = down(256, 512)
        self.down4 = down(512, 512)
        self.up1 = up(1024, 256)
        self.up2 = up(512, 128)
        self.up3 = up(256, 64)
        self.up4 = up(128, 32)
        self.outconv = nn.Conv2d(32, n_classes, kernel_size=1)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

    def forward(self, x):
        x = self.conv1x1(x)
        # Encoder with saved skip activations x1..x4.
        x1 = self.inconv(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        # decoder
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        x = self.tanh(self.outconv(x))
        return x
class double_conv(nn.Module):
    """Two stacked 3x3 conv + ReLU stages; the first changes the channel
    count from in_ch to out_ch, the second keeps out_ch."""
    def __init__(self, in_ch, out_ch):
        super(double_conv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.conv(x)
class down(nn.Module):
    """Encoder downsampling step: stride-2 3x3 conv (halves the resolution)
    followed by a 1x1 conv, each with BatchNorm + ReLU."""
    def __init__(self, in_ch, out_ch):
        super(down, self).__init__()
        self.mpconv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(True),
            nn.Conv2d(out_ch, out_ch, kernel_size=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(True)
        )

    def forward(self, x):
        return self.mpconv(x)
class up(nn.Module):
    """Decoder upsampling step with skip connection.

    Upsamples x1 (ConvTranspose2d by default, bilinear optionally), pads it
    to match the skip tensor x2, concatenates them channel-wise, and applies
    a double_conv.
    """
    def __init__(self, in_ch, out_ch, bilinear=False):
        super(up, self).__init__()
        # Learned upsampling by default; bilinear is a cheaper, weight-free
        # alternative (original author note: transposed-conv weights are
        # memory-hungry at these sizes).
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
        self.conv = double_conv(in_ch, out_ch)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Pad the upsampled tensor so its spatial size matches the skip x2
        # (odd input sizes make the two differ by one pixel).
        diffX = x2.size()[2] - x1.size()[2]
        diffY = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, (0, diffY, 0, diffX))
        x = torch.cat([x2, x1], dim=1)
        x = self.conv(x)
        return x
##################################################################################
# Defines the SCA-clean - base on UNet
##################################################################################
class SCA_UNet(nn.Module):
    """U-Net with spatial + channel attention blocks (down_SCA / up_SCA).

    Returns both the restored image (no output activation) and the first
    encoder feature map x2 (128 channels at half resolution), which the
    add/mul branches of Deraining consume.
    """
    def __init__(self, in_channel, out_channel):
        super(SCA_UNet, self).__init__()
        self.conv1x1 = nn.Conv2d(in_channel, in_channel, kernel_size=1, stride=1, padding=0)
        # self.inc = inconv(in_channel, 64)
        self.inconv = nn.Sequential(
            nn.Conv2d(in_channel, 32, 3, padding=1),
            # nn.InstanceNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, padding=1),
            # nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True)
        )
        self.down1 = down_SCA(64, 128)
        self.down2 = down_SCA(128, 256)
        self.down3 = down_SCA(256, 512)
        self.down4 = down_SCA(512, 512)
        self.up1 = up_SCA(1024, 256)
        self.up2 = up_SCA(512, 128)
        self.up3 = up_SCA(256, 64)
        self.up4 = up_SCA(128, 32)
        self.outconv = nn.Conv2d(32, out_channel, kernel_size=1)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

    def forward(self, x):
        x = self.conv1x1(x)
        x1 = self.inconv(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        # decoder
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        # No tanh here (unlike TransUNet): raw conv output is returned.
        x = (self.outconv(x))
        # Expose the 128-channel half-resolution encoder feature for the
        # add/mul branches.
        in_feature = x2
        return x, in_feature

class SCA_feature(nn.Module):
    """Encoder-only variant of SCA_UNet: returns the 256-channel feature map
    after two down_SCA stages (quarter resolution).  Not used by the current
    Deraining.forward."""
    def __init__(self, in_channel, out_channel):
        super(SCA_feature, self).__init__()
        self.conv1x1 = nn.Conv2d(in_channel, in_channel, kernel_size=1, stride=1, padding=0)
        # self.inc = inconv(in_channel, 64)
        self.inconv = nn.Sequential(
            nn.Conv2d(in_channel, 32, 3, padding=1),
            # nn.InstanceNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, padding=1),
            # nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True)
        )
        self.down1 = down_SCA(64, 128)
        self.down2 = down_SCA(128, 256)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

    def forward(self, x):
        x = self.conv1x1(x)
        x1 = self.inconv(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        # x4 = self.down3(x3)
        # x5 = self.down4(x4)
        feature = x3
        return feature
class down_SCA(nn.Module):
    """Stride-2 downsampling block with spatial + channel attention.

    A stride-2 conv halves the resolution; a second conv extracts features;
    a third conv (applied twice with shared weights) builds a sigmoid-gated
    spatial map whose squeeze-and-excitation channel weights rescale the
    features.  The attended features are added back to the downsampled
    activation residually.  `out_chan` must be >= `reduce`.
    """
    def __init__(self, in_chan, out_chan, reduce=16):
        super(down_SCA, self).__init__()
        self.conv1 = nn.Conv2d(in_chan, out_chan, kernel_size=3, stride=2, padding=1)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.sigmoid = nn.Sigmoid()
        # Squeeze-and-excitation style channel attention.
        self.ca = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(out_chan, out_chan//reduce, kernel_size=1, padding=0),
            nn.ReLU(),
            nn.Conv2d(out_chan//reduce, out_chan, kernel_size=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x):
        downsampled = self.relu(self.conv1(x))
        features = self.relu(self.conv2(downsampled))
        # Spatial gate: conv3 output multiplied by its own sigmoid (shared weights).
        spatial_gate = self.conv3(features) * self.sigmoid(self.conv3(features))
        attended = self.ca(spatial_gate) * features
        return downsampled + attended
class up_SCA(nn.Module):
    """Decoder upsampling block with skip connection and spatial + channel
    attention (mirrors down_SCA)."""
    def __init__(self, in_chan, out_chan, reduce=16, bilinear=True):
        super(up_SCA, self).__init__()
        # Bilinear upsampling by default (no learned weights).
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_chan//2, in_chan//2, 2, stride=2)
        # self.conv = double_conv(in_ch, out_ch)
        self.conv1 = nn.Conv2d(in_chan, out_chan, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.sigmoid = nn.Sigmoid()
        # Squeeze-and-excitation style channel attention.
        self.ca = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(out_chan, out_chan // reduce, kernel_size=1, padding=0),
            nn.ReLU(),
            nn.Conv2d(out_chan // reduce, out_chan, kernel_size=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Pad the upsampled tensor to the skip tensor's spatial size.
        diffX = x2.size()[2] - x1.size()[2]
        diffY = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, (0, diffY, 0, diffX))
        x = torch.cat([x2, x1], dim=1)
        conv1 = self.relu(self.conv1(x))
        conv2 = self.relu(self.conv2(conv1))
        # Spatial gate built from conv3 applied twice with shared weights.
        conv3_1 = self.conv3(conv2)
        conv3_2 = self.sigmoid(self.conv3(conv2))
        spatial = conv3_1 * conv3_2
        channel = self.ca(spatial)
        sca = channel * conv2
        # Residual connection from the fused (pre-attention) features.
        out_layer = conv1 + sca
        # x = self.conv(x)
        return out_layer
# class outconv(nn.Module):
# def __init__(self, in_ch, out_ch):
# super(outconv, self).__init__()
# self.conv = nn.Conv2d(in_ch, out_ch, 1)
#
# def forward(self, x):
# x = self.conv(x)
# return x
##################################################################################
# class feature_extractor(nn.Module):
# def __init__(self, out_channels = 128):
# super(feature_extractor, self).__init__()
# resnet18 = models.resnet18(pretrained = True)
# num_ftrs = resnet18.fc.in_features
# layer = list(resnet18.children())[:-2]
# layer.append(nn.Conv2d(num_ftrs, out_channels, 1))
# self.feature_extractor = nn.Sequential(*layer)
# #print('feature extraction: \n',self.feature_extractor)
#
# def forward(self,x):
# feature = self.feature_extractor(x)
# return feature
##################################################################################Oct09-new add,mul layer
class operator_block(nn.Module):
    """Multi-scale feature block.

    A 1x1 conv feeds three parallel branches (7x7, 5x5, 3x3 kernels, each
    conv applied twice with shared weights and LeakyReLU), whose outputs are
    concatenated channel-wise, giving 3 * out_channels output channels.
    NOTE(review): `in_channels` is unused — the 1x1 conv assumes the input
    already has out_channels channels.
    """
    def __init__(self, in_channels, out_channels):
        super(operator_block, self).__init__()
        # self.conv0 = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.conv1 = nn.Conv2d(out_channels, out_channels, kernel_size=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=7, padding=3)
        self.conv3 = nn.Conv2d(out_channels, out_channels, kernel_size=5, padding=2)
        self.conv4 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.relu = nn.LeakyReLU(0.2, True)

    def forward(self, x):
        base = self.relu(self.conv1(x))
        branches = [
            self.relu(conv(self.relu(conv(base))))
            for conv in (self.conv2, self.conv3, self.conv4)
        ]
        return torch.cat(branches, dim=1)
class add_block(nn.Module):
    """Upsampling residual block for the additive branch.

    A 2x transposed conv upsamples the input; a multi-scale operator_block
    expands to 3*out_channels, a 1x1 conv reduces back, and the upsampled
    activation is added residually.
    """
    def __init__(self, in_channels, out_channels):
        super(add_block, self).__init__()
        self.conv0 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
        self.oper_blk = operator_block(in_channels=out_channels, out_channels=out_channels)
        self.conv = nn.Conv2d(in_channels=out_channels*3, out_channels=out_channels, kernel_size=1)
        self.relu = nn.LeakyReLU(0.2, True)

    def forward(self, x):
        conv0 = self.conv0(x)
        operator = self.oper_blk(conv0)
        conv = self.conv(operator)
        out = conv + conv0
        return out

class add_layer(nn.Module):
    """Additive-residual head: maps the 128-channel SCA_UNet feature to a
    3-channel residual image (no output activation, so values may be
    negative).  Commented lines document earlier multi-stage variants.
    """
    def __init__(self, pretrained=256, num_chan=64):
        super(add_layer, self).__init__()
        # self.conv1 = nn.Conv2d(3, num_chan, kernel_size=3, padding=1)
        # self.add_blk1 = add_block(in_channels=pretrained, out_channels=128)
        self.add_blk2 = add_block(in_channels=128, out_channels=num_chan)
        # self.add_blk3 = add_block(in_channels=num_chan, out_channels=num_chan)
        self.conv2 = nn.Conv2d(num_chan, 32, kernel_size=1)
        self.conv3 = nn.Conv2d(32, 3, kernel_size=1)
        self.relu = nn.LeakyReLU(0.2, True)

    def forward(self, x):
        # operator = self.conv1(x)
        # add1 = self.add_blk1(x)
        add2 = self.add_blk2(x)
        # add3 = self.add_blk3(add2)
        # add3 = operator + add1
        conv = self.relu(self.conv2(add2))
        out = (self.conv3(conv))
        return out
class mul_block(nn.Module):
    """Upsampling attention block for the multiplicative branch.

    A 2x transposed conv upsamples the input; a multi-scale operator_block
    feeds a squeeze-and-excitation channel attention whose sigmoid weights
    rescale the upsampled activation.
    """
    def __init__(self, in_channels, out_channels, reduce=16):
        super(mul_block, self).__init__()
        self.conv0 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
        self.oper_blk = operator_block(in_channels=out_channels, out_channels=out_channels)
        self.relu = nn.LeakyReLU(0.2, True)
        self.pooling = nn.AdaptiveAvgPool2d(1)
        self.conv1 = nn.Conv2d(3*out_channels, out_channels//reduce, kernel_size=1)
        self.conv2 = nn.Conv2d(out_channels//reduce, out_channels, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        conv0 = self.conv0(x)
        operator = self.oper_blk(conv0)
        # Channel attention over the multi-scale features.
        pooling = self.pooling(operator)
        conv_1 = self.relu(self.conv1(pooling))
        conv_2 = self.sigmoid(self.conv2(conv_1))
        out = conv_2 * conv0
        return out

class mul_layer(nn.Module):
    """Multiplicative-gating head: maps the 128-channel SCA_UNet feature to a
    3-channel sigmoid mask in (0, 1) that gates the input image pixel-wise.
    Commented lines document earlier multi-stage variants.
    """
    def __init__(self, num_pretrained=256, num_chan=64):
        super(mul_layer, self).__init__()
        # self.conv1 = nn.Conv2d(3, num_chan, kernel_size=3, padding=1)
        # self.mul_blk1 = mul_block(in_channels=num_pretrained, out_channels=128)
        self.mul_blk2 = mul_block(in_channels=128, out_channels=num_chan)
        # self.mul_blk3 = mul_block(in_channels=num_chan, out_channels=num_chan)
        self.conv2 = nn.Conv2d(num_chan, 32, kernel_size=1)
        self.conv3 = nn.Conv2d(32, 3, kernel_size=1)
        self.relu = nn.LeakyReLU(0.2, True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # operator = self.conv1(x)
        # mul1 = self.mul_blk1(x)
        mul2 = self.mul_blk2(x)
        # mul2 = mul1 + mul2
        # mul3 = self.mul_blk3(mul2)
        # mul3 = operator + mul1
        conv = self.relu(self.conv2(mul2))
        out = self.sigmoid(self.conv3(conv))
        return out
##################################################################################
class operation_layer(nn.Module):
    """Two-conv head (in_channels -> 64 -> 3) with instance normalisation."""
    def __init__(self, in_channels):
        super(operation_layer, self).__init__()
        # Attribute names kept (state_dict compatibility); "batch_norm" names
        # survive from an earlier BatchNorm2d version.
        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, padding=1)
        self.batch_norm1 = nn.InstanceNorm2d(64)
        self.relu1 = nn.LeakyReLU(0.2, True)
        self.conv2 = nn.Conv2d(64, 3, kernel_size=3, padding=1)
        self.batch_norm2 = nn.InstanceNorm2d(3)
        self.relu2 = nn.ReLU(True)  # NOTE(review): defined but never applied in forward — confirm

    def forward(self, x):
        hidden = self.relu1(self.batch_norm1(self.conv1(x)))
        return self.batch_norm2(self.conv2(hidden))
class up_feature(nn.Module):
    """Decoder that upsamples a feature map 16x spatially down to RGB-ish output."""
    def __init__(self, in_channels, out_channels=3):
        super(up_feature, self).__init__()
        # Layer order unchanged (fixes RNG-driven init); each transpose conv
        # (k=4, s=2, p=1) doubles the spatial resolution.
        layers = [
            nn.Conv2d(in_channels, 512, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.ConvTranspose2d(128, 32, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.ConvTranspose2d(32, 8, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(8, out_channels, kernel_size=1),
            nn.Dropout2d(0.5),
        ]
        self.sequence = nn.Sequential(*layers)

    def forward(self, x):
        return self.sequence(x)
class channel_attention(nn.Module):
    """Channel-reweighting block that splits its output into three 3-channel maps."""
    def __init__(self, in_channels=15):
        super(channel_attention, self).__init__()
        # model1: per-pixel channel mixer (construction order unchanged).
        stack = [
            nn.Conv2d(in_channels, 128, kernel_size=1),
            nn.BatchNorm2d(128), nn.LeakyReLU(0.2, True),
            nn.Conv2d(128, 64, kernel_size=1),
            nn.BatchNorm2d(64), nn.LeakyReLU(0.2, True),
            nn.Conv2d(64, 32, kernel_size=1),
            nn.BatchNorm2d(32), nn.LeakyReLU(0.2, True),
            nn.Conv2d(32, in_channels, kernel_size=1),
        ]
        self.model1 = nn.Sequential(*stack)
        # model2: squeeze-excite style channel gate.
        gate = [
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, in_channels // 4, kernel_size=1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(in_channels // 4, in_channels, kernel_size=1),
            nn.Sigmoid(),
        ]
        self.model2 = nn.Sequential(*gate)

    def forward(self, x):
        # NOTE(review): the gate is computed from model1's output, not the raw
        # input — confirm that is intentional.
        mixed = self.model1(x)
        scale = self.model2(mixed)
        fused = mixed * scale
        return fused[:, 0:3], fused[:, 3:6], fused[:, 6:9]
class SCA_block(nn.Module):
    """Spatial + channel attention residual block.

    forward: two relu-convs, a spatial self-gate (f * sigmoid(f)), a
    squeeze-excite channel gate, and a residual connection back to the input.
    `in_chan` must equal `out_chan` for the residual add to broadcast.
    """
    def __init__(self, in_chan, out_chan, reduce=16):
        super(SCA_block, self).__init__()
        self.conv1 = nn.Conv2d(in_chan, out_chan, kernel_size=3, padding=1)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.sigmoid = nn.Sigmoid()
        # Channel attention: global pool -> bottleneck (out_chan//reduce) -> gate.
        self.ca = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(out_chan, out_chan // reduce, kernel_size=1, padding=0),
            nn.ReLU(),
            nn.Conv2d(out_chan // reduce, out_chan, kernel_size=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x):
        conv1 = self.relu(self.conv1(x))
        conv2 = self.relu(self.conv2(conv1))
        # Fix: the original ran self.conv3(conv2) twice with identical input,
        # doubling the cost of this layer; compute it once and reuse it.
        spatial_feat = self.conv3(conv2)
        spatial = spatial_feat * self.sigmoid(spatial_feat)
        channel = self.ca(spatial)
        sca = channel * conv2
        return x + sca
class RCAN(nn.Module):
    """Attention network built from a stack of (weight-shared) SCA_blocks.

    `args` must expose `nchannel` (input channels) and `scale`.
    """
    def __init__(self, args):
        super(RCAN, self).__init__()
        nChannel = args.nchannel
        scale = args.scale  # NOTE(review): read but never used below — confirm
        self.args = args
        # Define Network
        # ===========================================
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(nChannel, 64, kernel_size=7, padding=3)
        # self.RG1 = residual_group(64, 64)
        # self.RG2 = residual_group(64, 64)
        # # self.RG3 = residual_group(64, 64)
        self.SCAB1 = SCA_block(64, 64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)  # NOTE(review): unused in forward
        self.conv3 = nn.Conv2d(64, 32, kernel_size=5, padding=2)
        self.conv4 = nn.Conv2d(32, 3, kernel_size=3, padding=1)
        # self.reset_params()
        # ===========================================
    def forward(self, x):
        # Make a Network path
        # ===========================================
        x = self.relu(self.conv1(x))
        # NOTE(review): the same SCAB1 module (shared weights) is applied five
        # times with skip connections — confirm weight sharing is intentional.
        sca1 = self.SCAB1(x)
        sca2 = self.SCAB1(sca1)
        sca3 = self.SCAB1(sca2)
        sca3 = sca3 + sca2
        sca4 = self.SCAB1(sca3)
        sca4 = sca4 + sca1
        sca5 = self.SCAB1(sca4)
        sca5 = sca5 + x
        x = self.relu(self.conv3(sca5))
        # x = self.pixel_shuffle(x)
        x = self.conv4(x)
        # ===========================================
        return x
# @staticmethod
# def weight_init(m):
# if isinstance(m, nn.Conv2d):
# init.xavier_normal_(m.weight)
# # init.constant(m.bias, 0)
#
# def reset_params(self):
# for i, m in enumerate(self.modules()):
# self.weight_init(m)
class residual_group(nn.Module):
    """Two stacked RCAB blocks with a long residual connection."""
    def __init__(self, in_channels, out_channels):
        super(residual_group, self).__init__()
        self.rca_block1 = RCAB(in_channels, 64)
        self.rca_block2 = RCAB(64, out_channels)

    def forward(self, x):
        refined = self.rca_block2(self.rca_block1(x))
        return x + refined
class RCAB(nn.Module):
    """Residual channel-attention block: conv-relu-conv, CA gate, skip add."""
    def __init__(self, in_channels, out_channels):
        super(RCAB, self).__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.ca_block = CA_block(64, out_channels)

    def forward(self, x):
        hidden = self.relu(self.conv1(x))
        hidden = self.conv2(hidden)
        return x + self.ca_block(hidden)
# @staticmethod
# def weight_init(m):
# if isinstance(m, nn.Conv2d):
# init.xavier_normal_(m.weight)
# # init.constant(m.bias, 0)
#
# def reset_params(self):
# for i, m in enumerate(self.modules()):
# self.weight_init(m)
class CA_block(nn.Module):
    """Channel attention: global average pool -> bottleneck convs -> sigmoid gate."""
    def __init__(self, in_channels, out_channels):
        super(CA_block, self).__init__()
        # Global spatial statistics per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Channel downscale then upscale to produce per-channel weights.
        self.conv_down_up = nn.Sequential(
            nn.Conv2d(in_channels, 16, kernel_size=1, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv2d(16, out_channels, kernel_size=1, padding=0),
            nn.Sigmoid(),
        )

    def forward(self, x):
        attention = self.conv_down_up(self.avg_pool(x))
        return x * attention
| [
"noreply@github.com"
] | noreply@github.com |
636022ef17714db27f131c08daa673606f4185d8 | 511b7b19ec49be34bec240ee7c7cf4178cd36ca3 | /gasolinestation/migrations/0013_auto_20200304_0909.py | fb6c52a8156995aa62443e5a937be261f2953067 | [] | no_license | francisguchie/360POS | 58de516fe52e83d6b99bd195d22c8aa902daee18 | 68f9e20ac263c75ec0c9b0fe75d7f648b8744ea8 | refs/heads/master | 2023-02-08T16:38:42.667538 | 2020-03-12T16:05:00 | 2020-03-12T16:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # Generated by Django 3.0.3 on 2020-03-04 09:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.3 (makemigrations); keep this file in sync
    # with gasolinestation.models.TransactionSales — do not hand-edit operations.

    dependencies = [
        ('gasolinestation', '0012_transactionsales'),
    ]

    operations = [
        # New nullable/blank decimal column for litres dispensed (7.2 digits).
        migrations.AddField(
            model_name='transactionsales',
            name='dispensed_liter',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
        # Relax `price` to allow NULL/blank with the same precision.
        migrations.AlterField(
            model_name='transactionsales',
            name='price',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
    ]
| [
"monde.lacanlalay@gmail.com"
] | monde.lacanlalay@gmail.com |
f20a1f49d564b9bb5bdee9d117e1c5832706526f | 639d66b4a667db97c2638132dd028b7f5b865ef0 | /splash_screen.py | 6e5e635b1b0dac9fae73c7f54c4e3271555746a6 | [] | no_license | liturreg/blackjack_pythonProject | d91d21494b21159667f48a683b919ea68401c56c | b88f15ac35db8fbeb8b00234084c5b114383d6cd | refs/heads/master | 2023-01-29T18:45:08.531471 | 2020-12-07T19:57:33 | 2020-12-07T19:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | def splash_screen():
print(r""" /$$$$$$$ /$$ /$$ /$$$$$$$$ /$$ /$$ /$$$$$$ /$$ /$$ /$$$$$$$ /$$ /$$$$$$ /$$$$$$ /$$ /$$ /$$$$$ /$$$$$$ /$$$$$$ /$$ /$$
| $$__ $$| $$ /$$/|__ $$__/| $$ | $$ /$$__ $$| $$$ | $$ | $$__ $$| $$ /$$__ $$ /$$__ $$| $$ /$$/ |__ $$ /$$__ $$ /$$__ $$| $$ /$$/
| $$ \ $$ \ $$ /$$/ | $$ | $$ | $$| $$ \ $$| $$$$| $$ | $$ \ $$| $$ | $$ \ $$| $$ \__/| $$ /$$/ | $$| $$ \ $$| $$ \__/| $$ /$$/
| $$$$$$$/ \ $$$$/ | $$ | $$$$$$$$| $$ | $$| $$ $$ $$ | $$$$$$$ | $$ | $$$$$$$$| $$ | $$$$$/ | $$| $$$$$$$$| $$ | $$$$$/
| $$____/ \ $$/ | $$ | $$__ $$| $$ | $$| $$ $$$$ | $$__ $$| $$ | $$__ $$| $$ | $$ $$ /$$ | $$| $$__ $$| $$ | $$ $$
| $$ | $$ | $$ | $$ | $$| $$ | $$| $$\ $$$ | $$ \ $$| $$ | $$ | $$| $$ $$| $$\ $$ | $$ | $$| $$ | $$| $$ $$| $$\ $$
| $$ | $$ | $$ | $$ | $$| $$$$$$/| $$ \ $$ | $$$$$$$/| $$$$$$$$| $$ | $$| $$$$$$/| $$ \ $$| $$$$$$/| $$ | $$| $$$$$$/| $$ \ $$
|__/ |__/ |__/ |__/ |__/ \______/ |__/ \__/ |_______/ |________/|__/ |__/ \______/ |__/ \__/ \______/ |__/ |__/ \______/ |__/ \__/""" + "\n")
| [
"nicolas.gasco92@gmail.com"
] | nicolas.gasco92@gmail.com |
b01ea9b981eaf809aed4db02cdf99add3ef4992e | a4753147801dbabfec45f6f9f47572cda77efb81 | /debugging-constructs/ibmfl/util/data_handlers/mnist_pytorch_data_handler.py | 29cc18afb938e575e71025d9007fd67f722221b9 | [
"MIT"
] | permissive | SEED-VT/FedDebug | e1ec1f798dab603bd208b286c4c094614bb8c71d | 64ffa2ee2e906b1bd6b3dd6aabcf6fc3de862608 | refs/heads/main | 2023-05-23T09:40:51.881998 | 2023-02-13T21:52:25 | 2023-02-13T21:52:25 | 584,879,212 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | """
Licensed Materials - Property of IBM
Restricted Materials of IBM
20221069
© Copyright IBM Corp. 2022 All Rights Reserved.
"""
import logging
import numpy as np
from ibmfl.data.data_handler import DataHandler
from ibmfl.util.datasets import load_mnist
logger = logging.getLogger(__name__)
class MnistPytorchDataHandler(DataHandler):
    """Data handler that serves MNIST in NCHW float32 form for PyTorch models."""

    def __init__(self, data_config=None):
        """Load and pre-process the dataset.

        :param data_config: optional mapping; if it contains 'npz_file', that
            local .npz archive is loaded instead of downloading MNIST.
        """
        super().__init__()
        self.file_name = None
        if data_config is not None and 'npz_file' in data_config:
            self.file_name = data_config['npz_file']

        # load then pre-process the datasets
        (self.x_train, self.y_train), (self.x_test, self.y_test) = self.load_dataset()
        self.preprocess()

    def get_data(self):
        """Return the pre-processed MNIST training and testing sets.

        :return: ((x_train, y_train), (x_test, y_test))
        :rtype: `tuple`
        """
        return (self.x_train, self.y_train), (self.x_test, self.y_test)

    def load_dataset(self, nb_points=500):
        """Load the training and testing datasets.

        If no local path was provided, the original MNIST dataset is
        downloaded and both splits are truncated to `nb_points` samples
        (this handler is intended for testing).

        :param nb_points: number of data points kept per split when no local
            dataset is provided.
        :type nb_points: `int`
        :return: ((x_train, y_train), (x_test, y_test))
        :rtype: `tuple`
        :raises IOError: if the configured npz file cannot be loaded.
        """
        if self.file_name is None:
            (x_train, y_train), (x_test, y_test) = load_mnist()
            x_train = x_train[:nb_points]
            y_train = y_train[:nb_points]
            x_test = x_test[:nb_points]
            y_test = y_test[:nb_points]
        else:
            try:
                data_train = np.load(self.file_name)
                x_train = data_train['x_train']
                y_train = data_train['y_train']
                x_test = data_train['x_test']
                y_test = data_train['y_test']
                # Log only after the load actually succeeded (the original
                # logged success before attempting the load).
                logger.info('Loaded training data from ' + str(self.file_name))
            except Exception as ex:
                # Chain the original failure for easier debugging.
                raise IOError('Unable to load training data from path '
                              'provided in config file: ' +
                              self.file_name) from ex
        return (x_train, y_train), (x_test, y_test)

    def preprocess(self):
        """Reshape images to (N, 1, 28, 28) float32 and cast labels to int64.

        :return: None
        """
        img_rows, img_cols = 28, 28
        self.x_train = self.x_train.astype('float32').reshape(self.x_train.shape[0], 1, img_rows, img_cols)
        self.x_test = self.x_test.astype('float32').reshape(self.x_test.shape[0], 1, img_rows, img_cols)
        self.y_train = self.y_train.astype('int64')
        self.y_test = self.y_test.astype('int64')
| [
"waris@vt.edu"
] | waris@vt.edu |
bc77e7a35dfac6f9b3eef8dfadff882bd5412e64 | d0452eb707f82f892c236c7e70a15f561968cc05 | /conftest.py | 459d2ee7f091611be0629d8501f9c21e4108703a | [
"Apache-2.0"
] | permissive | Treshch1/python_traning | 0ff28442ad559c7e3ed2dfcb5de0fc430ecb71cb | de796861b7227fab176d342b67cf47acbd2b166f | refs/heads/master | 2020-06-16T12:26:59.431595 | 2019-08-23T19:24:03 | 2019-08-23T19:24:03 | 195,573,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,811 | py | import pytest
import json
import os.path
import importlib
import jsonpickle
from fixture.application import Application
from fixture.db import DbFixture
from fixture.orm import ORMFixture
# Process-wide caches shared by the fixtures below.
fixture = None  # live Application instance, reused across tests
target = None   # lazily becomes a {file_path: parsed_config} cache


def load_config(file):
    """Load, parse and cache the JSON config at *file*.

    The cache is keyed by file name: the original implementation cached
    whichever file was loaded first and silently returned it for every
    subsequent call, even with a different `file` argument.
    """
    global target
    if target is None:
        target = {}
    if file not in target:
        config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
        with open(config_file) as f:
            target[file] = json.load(f)
    return target[file]
@pytest.fixture
def app(request):
    """Per-test Application fixture backed by a process-wide cache.

    The Application (browser session) is created only when the cached global
    `fixture` is missing or its session is no longer valid; login is
    re-ensured on every use.
    """
    global fixture
    browser = request.config.getoption("--browser")
    web_config = load_config(request.config.getoption("--target"))["web"]
    if fixture is None or not fixture.is_valid():
        fixture = Application(browser=browser, base_url=web_config["base_url"])
    fixture.session.ensure_login(username=web_config["username"], password=web_config["password"])
    return fixture
@pytest.fixture(scope='session')
def db(request):
    """Session-scoped raw-DB fixture; the connection is destroyed at teardown."""
    db_config = load_config(request.config.getoption("--target"))["db"]
    dbfixture = DbFixture(host=db_config["host"], name=db_config["name"],
                          username=db_config["username"], password=db_config["password"])

    def fin():
        # Close the DB connection once the whole test session ends.
        dbfixture.destroy()
    request.addfinalizer(fin)
    return dbfixture


@pytest.fixture(scope='session')
def orm(request):
    """Session-scoped ORM fixture (no explicit teardown is registered)."""
    db_config = load_config(request.config.getoption("--target"))["db"]
    ormfixture = ORMFixture(host=db_config["host"], name=db_config["name"],
                            username=db_config["username"], password=db_config["password"])
    return ormfixture


@pytest.fixture(scope='session', autouse=True)
def stop(request):
    """Autouse session fixture: logs out and destroys the cached Application.

    Relies on the global `fixture` having been populated by the `app` fixture.
    """
    def fin():
        fixture.session.ensure_logout()
        fixture.destroy()
    request.addfinalizer(fin)
    return fixture


@pytest.fixture
def check_ui(request):
    """Return True when the --check_ui flag was passed on the command line."""
    return request.config.getoption("--check_ui")


def pytest_addoption(parser):
    """Register the custom command-line options used by the fixtures above."""
    parser.addoption("--browser", action="store", default="firefox")
    parser.addoption("--target", action="store", default="target.json")
    parser.addoption("--check_ui", action="store_true")
def pytest_generate_tests(metafunc):
for fixture in metafunc.fixturenames:
if fixture.startswith("data_"):
test_data = load_from_module(fixture[5:])
metafunc.parametrize(fixture, test_data, ids=[str(x) for x in test_data])
elif fixture.startswith("json_"):
test_data = load_from_json(fixture[5:])
metafunc.parametrize(fixture, test_data, ids=[str(x) for x in test_data])
def load_from_module(module):
    """Import data.<module> and return its `test_data` attribute."""
    return getattr(importlib.import_module(f"data.{module}"), "test_data")
def load_from_json(file):
    """Decode data/<file>.json (jsonpickle format) relative to this conftest."""
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), f"data/{file}.json")
    with open(path) as fh:
        return jsonpickle.decode(fh.read())
| [
"vladislavtreshcheyko@gmail.com"
] | vladislavtreshcheyko@gmail.com |
088161084dc2576df5bc96e4eb1d6578c97c42f5 | 0f7127951bbaf9a7559365d13f562f7af8f1bba1 | /Python/5kyu/Find the smallest.py | 41f2f67a16f4fa96c6529b6bc7ac7fb1c180f3e9 | [
"BSD-2-Clause"
] | permissive | SamiaAitAyadGoncalves/codewars-1 | c5548640341b4453bca2bdf53193960b4026dc78 | 915149b052dc5ec7d2276f1a235076028a3ba997 | refs/heads/master | 2020-07-27T02:38:29.497964 | 2019-08-19T10:52:06 | 2019-08-19T10:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,033 | py | # https://www.codewars.com/kata/573992c724fc289553000e95
#
# You have a positive number n consisting of digits. You can do at most one
# operation: Choosing the index of a digit in the number, remove this digit
# at that index and insert it back to another place in the number.
#
# Doing so, find the smallest number you can get.
#
# #Task: Return an array or a tuple or a string depending on the language
# #(see "Sample Tests") with
#
# 1) the smallest number you got
# 2) the index i of the digit d you took, i as small as possible
# 3) the index j (as small as possible) where you insert this digit
# d to have the smallest number.
#
# Example:
#
# smallest(261235) --> [126235, 2, 0] or (126235, 2, 0) or "126235, 2, 0"
#
# 126235 is the smallest number gotten by taking 1 at index 2 and
# putting it at index 0
#
# smallest(209917) --> [29917, 0, 1] or ...
#
# [29917, 1, 0] could be a solution too but index `i` in [29917, 1, 0]
# is greater than
# index `i` in [29917, 0, 1].
#
# 29917 is the smallest number gotten by taking 2 at index 0 and putting
# it at index 1 which gave 029917 which is the number 29917.
#
# smallest(1000000) --> [1, 0, 6] or ...
#
# Note
#
# Have a look at "Sample Tests" to see the input and output in each language
def move(s, i, j):
    """Remove the character at index `i` from `s` and re-insert it at index `j`.

    Replaces the original character-by-character rebuild (quadratic string
    concatenation) with two slice operations.
    """
    rest = s[:i] + s[i + 1:]          # s without its i-th character
    return rest[:j] + s[i] + rest[j:]
def smallest(n):
    """Return [best, i, j]: the smallest number obtainable from `n` by moving
    one digit from index i to index j, preferring the smallest i, then j.

    All candidate strings have equal length, so lexicographic comparison of
    the digit strings matches numeric comparison; a single `min` with a
    (value, i, j) tuple key replaces the three redundant O(n^2) scan passes
    of the original.
    """
    s = str(n)
    candidates = [[s, 0, 0]]
    for i in range(len(s)):
        for j in range(len(s)):
            candidates.append([move(s, i, j), i, j])
    best = min(candidates, key=lambda c: (c[0], c[1], c[2]))
    best[0] = int(best[0])
    return best
| [
"noreply@github.com"
] | noreply@github.com |
f4eb8b1786097761faab44c8b7108bc2429527f2 | afa24f666ecbecd25d74baefe014127970e1e3c5 | /VideoUtils.py | 8eaaff3927e8302426296416e668b9d958007c64 | [] | no_license | shashank879/AI | 9a4edf80d53d0fc21b74eb4a6a75f9f15477d414 | a27868154194a84d655093440695a5389925bacf | refs/heads/master | 2020-09-21T04:19:39.583576 | 2016-12-15T14:35:21 | 2016-12-15T14:35:21 | 67,039,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,700 | py | import cv2
import re
from datetime import datetime
def time_in_range(start, end, curr):
    """Return True if `curr` lies in [start, end]; False for inverted ranges."""
    return start <= curr <= end if start <= end else False
def mm_ss_exclusions(file_path):
    """Parse "MM:SS - MM:SS" lines from a file into [start_ms, end_ms] pairs."""
    pattern = r'(\d+:\d+) - (\d+:\d+)'
    time_format = '%M:%S'
    origin = datetime.strptime('00:00', time_format)

    def to_ms(stamp):
        # Offset from 00:00, expressed in milliseconds.
        return (datetime.strptime(stamp, time_format) - origin).total_seconds() * 1000

    ranges = []
    with open(file_path) as fh:
        for line in fh:
            start, end = re.findall(pattern, line)[0]
            ranges.append([to_ms(start), to_ms(end)])
    return ranges
class VideoLoader:
    """Loads video frames in batches as training/test/validation data,
    skipping any batch that overlaps an annotated exclusion time range."""

    def __init__(self, vid_path, batch_size=21, exclude_ranges=None, square_crop=False, convert_to_gray=False):
        """Initialize the loader.

        Keyword Arguments:
        vid_path -- Path to video
        batch_size -- Number of frames to load in one batch
        exclude_ranges -- Iterable of [start_ms, end_ms] pairs to exclude
            (sorted, non-overlapping). None is treated as "no exclusions";
            the original crashed with TypeError in that case.
        square_crop -- Center-crop each frame to a square.
        convert_to_gray -- Convert each frame to grayscale.
        """
        self.vid_path = vid_path
        self.batch_size = batch_size
        self.vidcap = None
        # Normalise to a list so fetch_next_batch never has to special-case None.
        self.exclude_ranges = exclude_ranges if exclude_ranges is not None else []
        self.square_crop = square_crop
        self.convert_to_gray = convert_to_gray
        self.range_to_check = 0

    def __enter__(self):
        print("Opening video")
        self.vidcap = cv2.VideoCapture(self.vid_path)
        self.range_to_check = 0
        print("Video opened")

    def __exit__(self, exception_type, exception_value, traceback):
        print("Closing video")
        self.vidcap.release()

    def fetch_next_batch(self):
        """Return the next batch of frames (possibly empty at end of video).

        A batch is restarted whenever the current timestamp falls inside an
        exclusion range, so returned batches never straddle an excluded span.
        """
        frames = []
        i = 0
        while self.vidcap.isOpened() and i < self.batch_size:
            curr_time = self.vidcap.get(cv2.CAP_PROP_POS_MSEC)
            # Advance past ranges that have already ended. The original
            # indexed exclude_ranges BEFORE the bounds check and raised
            # IndexError once the last range had been passed.
            while (self.range_to_check < len(self.exclude_ranges)
                   and curr_time > self.exclude_ranges[self.range_to_check][1]):
                self.range_to_check += 1
            _, frame = self.vidcap.read()
            if self.range_to_check < len(self.exclude_ranges):
                start, end = self.exclude_ranges[self.range_to_check]
                if time_in_range(start, end, curr_time):
                    # Inside an excluded span: discard the partial batch and
                    # keep skipping until we leave the range.
                    frames = []
                    i = 0
                    print("Skipping frame...", curr_time)
                    continue
            if self.convert_to_gray:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if self.square_crop:
                h = frame.shape[0]
                w = frame.shape[1]
                d = int((w - h) / 2)
                # Crop the longer axis symmetrically to a square.
                frame = frame[:, d:(w - d)] if d > 0 else frame[-d:(w + d), :]
            frames.append(frame)
            i += 1
        return frames
# excl = mm_ss_exclusions('./data/traffic_junction/Annotations_cleaned.txt')
# vl = VideoLoader(vid_path='./data/traffic_junction/traffic-junction.avi', exclude_ranges=excl)
# with vl:
# while True:
# batch = vl.fetch_next_batch()
# if len(batch) is 0:
# break
| [
"shashank.879@gmail.com"
] | shashank.879@gmail.com |
044ef52da86b27d924c896886eba492a10f16d0d | c843ff5e1471dfac4e470de88784966473057079 | /keywords/ruby_keyword.py | d0214870befd0a44a5062b2b405dc6f809d51194 | [] | no_license | tkyaji/CocosRubyEditor | a48a3a3a504d0f359757589bc9c302bc32fe030b | 4fbb5674db5ad64bd49c6f1181fe7a3519236b64 | refs/heads/master | 2021-01-10T18:41:06.673502 | 2015-03-18T04:46:49 | 2015-03-18T04:46:49 | 32,151,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | import ruby_cocos2dx_3d_auto_keyword as cocos2dx_3d_auto
import ruby_cocos2dx_audioengine_auto_keyword as cocos2dx_audioengine_auto
import ruby_cocos2dx_auto_keyword as cocos2dx_auto
import ruby_cocos2dx_experimental_auto_keyword as cocos2dx_experimental_auto
import ruby_cocos2dx_experimental_video_auto_keyword as cocos2dx_experimental_video_auto
import ruby_cocos2dx_spine_auto_keyword as cocos2dx_spine_auto
import ruby_cocos2dx_ui_auto_keyword as cocos2dx_ui_auto
def get_keywords():
    """Merge the keyword dictionaries of every generated binding module.

    Returns a dict of shape {'tree': {...}, 'classes': {...}} where same-named
    namespaces from different modules are merged together.

    The original appended cocos2dx_3d_auto.get_keywords() twice (copy-paste
    duplicate); merging it a second time was a no-op, so dropping it preserves
    behaviour while avoiding the redundant work.
    """
    modules = [
        cocos2dx_3d_auto,
        cocos2dx_audioengine_auto,
        cocos2dx_auto,
        cocos2dx_experimental_auto,
        cocos2dx_experimental_video_auto,
        cocos2dx_spine_auto,
        cocos2dx_ui_auto,
    ]
    all_keyword_dict = {'tree': {}, 'classes': {}}
    for module in modules:
        keyword_dict = module.get_keywords()
        for ns, item in keyword_dict['tree'].items():
            if ns in all_keyword_dict['tree']:
                all_keyword_dict['tree'][ns].update(item)
            else:
                all_keyword_dict['tree'][ns] = item
        all_keyword_dict['classes'].update(keyword_dict['classes'])
    return all_keyword_dict
| [
"tkyaji@gmail.com"
] | tkyaji@gmail.com |
d3df0d4a64a56492d1ec603be919eb0c002e95f5 | ab9e12bad0ff511d1c2b4c5b42bb0ceddb2cf34f | /examples/echo_server.py | 30c55e1c650d7a9863ad3b509d85450d040a1fb4 | [
"BSD-3-Clause"
] | permissive | elijahr/aiolo | a6014eefb72a615d0c6a5bed843b79385779b230 | c5ec8a3a5df93d4d7a19ccc0181c8a7d8bad75a9 | refs/heads/master | 2023-08-08T19:50:17.593860 | 2020-07-23T04:24:06 | 2020-07-23T04:24:06 | 232,236,708 | 7 | 1 | BSD-3-Clause | 2023-08-03T14:14:05 | 2020-01-07T03:42:32 | Python | UTF-8 | Python | false | false | 818 | py | import asyncio
from aiolo import Address, Midi, Server
async def main():
    """Demo: start an aiolo OSC server, send it five messages plus a delayed
    /exit, and echo everything received until /exit arrives."""
    server = Server(port=12001)
    server.start()
    # Create endpoints
    # /foo accepts an int, a float, and a MIDI packet
    foo = server.route('/foo', [int, float, Midi])
    ex = server.route('/exit')
    address = Address(port=12001)
    for i in range(5):
        address.send(foo, i, float(i), Midi(i, i, i, i))
    # Notify subscriptions to exit in 1 sec
    address.delay(1, ex)
    # Subscribe to messages for any of the routes
    subs = foo.sub() | ex.sub()
    async for route, data in subs:
        print(f'echo_server: {str(route.path)} received {data}')
        if route == ex:
            # Unsubscribe and shut the server down; this ends the async-for.
            await subs.unsub()
            server.stop()


if __name__ == '__main__':
    # Drive the coroutine to completion on the default event loop.
    asyncio.get_event_loop().run_until_complete(main())
| [
"elijahr@gmail.com"
] | elijahr@gmail.com |
cf84225fbffedd219649f40d7ee33aca423ff344 | 0d9c0d0b0dedfa3da12f5850e8492b9554b8c383 | /tic_tac_toe_OOP.py | 50f996110a0b67cf69af408a649d2ce7b14f7e58 | [] | no_license | PCassiday88/CS506-Winter-21-TP | 7bdb61c850e6ae875b94049383120fe2659b9339 | 483b19e3afe5d3f2898b7e32791ef095d6ddbeae | refs/heads/main | 2023-03-21T09:41:12.428950 | 2021-03-13T06:32:50 | 2021-03-13T06:32:50 | 345,913,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,761 | py | #This version wis complete without AI
board = []
for square in range(10):
square = str(square)
board.append(square)
class Board:
def __init__(self):
pass
def show_Board(self, board):
print('-----------')
print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print('-----------')
print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print('-----------')
print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print('-----------')
class Human:
def __init__(self):
pass
def makeMove(self, position):
pos = int(position)
if pos >= 1 and pos <= 9:
if (board[pos] == 'X' or board[pos] == 'O'): #Check to see if space is occupied
print(" ") #For appearance
print("You skip your turn for trying to flip a taken square")
else:
board[pos] = "X" #Space isn't occupied and the pos is within range
else: # If you pick a number outside of the range, you are given a chance to pick the pos again
print("Lets try that again")
pos = input("This time pick an open space between 1-9 ")
print(" ")
self.makeMove(pos) # Calls itself with new pos and game continues
class AI:
    """Placeholder AI player: moves are entered manually, token is 'O'."""
    def __init__(self):
        pass

    def makeMove(self, position):
        """Place 'O' at `position` (1-9); re-prompt on out-of-range input."""
        spot = int(position)
        if not (1 <= spot <= 9):
            # Out of range: give the player another chance and retry.
            print("Lets try that again")
            retry = input("This time pick an open space between 1-9 ")
            print(" ")
            self.makeMove(retry)
        elif board[spot] == 'X' or board[spot] == 'O':
            print("You skip your turn for trying to flip a taken square")
        else:
            board[spot] = "O"
class Judge:
    """Adjudicates the game: win detection and end-of-game messaging."""
    def __init__(self):
        pass

    def gamePlay(self, t, movesMade):
        """Return True (and announce) if token `t` has won.

        With no winner, announce a tie once 9 moves are made; always returns
        False when there is no winner.
        """
        if self.checkWinner(t):
            print(t + "'s have Won!!")
            return True
        if movesMade >= 9:
            print("Tie Game!")
        return False

    def checkWinner(self, t):
        """True when token `t` occupies any full row, column or diagonal."""
        lines = (
            (1, 2, 3), (4, 5, 6), (7, 8, 9),  # rows
            (1, 4, 7), (2, 5, 8), (3, 6, 9),  # columns
            (1, 5, 9), (3, 5, 7),             # diagonals
        )
        return any(board[a] == t and board[b] == t and board[c] == t
                   for a, b, c in lines)
def main():
    """Run the interactive console game loop: human 'X' vs hand-driven 'O'.

    movesMade doubles as a state flag: 0-9 counts moves in the current game,
    -1 requests a board reset that skips the AI's next move, and -2 means
    "player declined a rematch" (prints the goodbye message).
    """
    #Any move between 0-9 reflects moves made during game
    # movesMade values of -1 and -2 are used to dictate messages and reset game play
    # before resetting movesMade back to zero and a new game begins with the human
    movesMade = 0
    #Creating the board and player objects for game play
    game = Board()
    player1 = Human()
    player2 = AI()
    judge = Judge()
    game.show_Board(board)
    while (movesMade < 9):
        move = input("Human Move ")
        player1.makeMove(move)
        game.show_Board(board)
        movesMade += 1
        # NOTE(review): gamePlay is called twice per turn (win check, then
        # tie check), so its win/tie messages can print twice — confirm.
        if (judge.gamePlay("X", movesMade) == True):
            decision = input("Would you like to play again? <Y/N> ").upper()
            if (decision == "Y"): #If player wants to play again we clean the board
                movesMade = -1 #Skips the AI move
                for square in range(10): #Resets board to original values
                    board[square] = str(square)
            else:
                movesMade = -2
        if (judge.gamePlay("X", movesMade) == False):
            if (movesMade == 9):
                decision = input("Would you like to play again? <Y/N> ").upper()
                if (decision == "Y"): #If player wants to play again we clean the board
                    for square in range(10):
                        board[square] = str(square)
                    movesMade = -1 #To skip the AI move
                else:
                    movesMade = -2 #To prompt the I am done with the game message
        print(" ")
        if (movesMade < 0):
            if (movesMade == -2):
                print("Thank you! Come play again weak human!") #Done with the game message
            else:
                print("Moves Made is: " + str(movesMade))
        print(" ")
        if (movesMade < 9 and movesMade >= 0): #Check to see if there are moves remaining
            move = input("AI Move ")
            player2.makeMove(move)
            game.show_Board(board)
            movesMade += 1
            if (judge.gamePlay("O", movesMade) == True):
                decision = input("Would you like to play again? <Y/N> ").upper()
                if (decision == "Y"): #If player wants to play again we clean the board
                    movesMade = 0
                    for square in range(10): #Resets board to original values
                        board[square] = str(square)
                else:
                    movesMade = -2
            # NOTE(review): this tie check runs gamePlay with "X" right after
            # the AI ("O") moved — presumably should be "O"; confirm.
            if (judge.gamePlay("X", movesMade) == False):
                if (movesMade == 9):
                    decision = input("Would you like to play again? <Y/N> ").upper()
                    if (decision == "Y"): #If player wants to play again we clean the board
                        for square in range(10):
                            board[square] = str(square)
                        movesMade = 0
                    else:
                        movesMade = -2 #To prompt the I am done with the game message
            print(" ")
            if (movesMade < 0):
                if (movesMade == -2):
                    print("Thank you! Come play again weak human!") #Done with the game message
                else:
                    print("Moves Made is: " + str(movesMade))
            print(" ")
        if (movesMade == -1):
            movesMade = 0 #Resets moves to zero and human starts new game
# Starts the interactive game immediately (note: runs even on import, since
# there is no `if __name__ == "__main__"` guard).
main()
# for j in range(len(board)): #This loop checks for moves that makes the AI win
# if board[j] == 'X' or board[j] == 'O' or board[j] == '0':
# posSquares.append(k)
# continue #Prevents us from considering squares that have a token or are the zero index
# else:
# posSquares.append(j) #filling container with all possible squares not filled with a player token
# board[j] = "O" #Temp set square
# if AI_judge.gamePlay("O", board, movesMade) == True: #Determine if that would make AI win
# return #If true, return because this move makes AI win
# if AI_judge.gamePlay("O", board, movesMade) == False:
# board[j] = str(j) #If move will not make AI win, set square to its previous value and keep looking
# for i in range(len(board)):
# #After checking for winning moves, check for moves that the AI needs to block or the human will win
# if board[i] == 'X' or board[i] == 'O' or board[i] == '0':
# continue
# else:
# board[i] = "X"
# if AI_judge.gamePlay("X", board, movesMade) == True:
# board[i] = "O" #If the move will result in a human win, mark the square with AI token
# return
# if AI_judge.gamePlay("X") == False:
# board[i] = str(i)
# else: #Likely inaccessible code but acts as a catch all if no if statement is entered somehow
# board[i] = str(i)
# #If a win or a block is not available, check to take a corner
# openCorners = []
# for i in range(len(board)):
# if board[i] == "1" or board[i] == "5" or board[i] == "21" or board[i] == "25":
# openCorners.append(i)
# if len(openCorners) > 0:
# self.randomSelection(openCorners, board)
# # board[move] = "O"
# # return
# return
# #If a win, block, or corner isn't available, take the center
# if 13 in board:
# move = 13
# board[move] = "O"
# return
#If none of the above options are available, take ant open edge
# posEdges = [2,3,4,6,11,16,10,15,20,22,23,24]
# openEdges = []
# for i in range(len(posSquares)):
# # for j in range(len(posEdges)):
# if board[j] == ' ':
# continue
# else:
# openEdges.append(j)
# if len(openEdges) > 0:
# self.randomSelection(openEdges, board)
# board[move] = "O"
# return
#If no edge is available, take any random open square
# if len(posSquares) > 0:
# self.randomSelection(posSquares)
# board[move] = "O"
# return | [
"patcassiday@gmail.com"
] | patcassiday@gmail.com |
cab72ee5c66639fea08c549b6600c6b7dc735786 | a8c6661e002eabd497e688cbbdaad197b7e9ba9f | /Embedded/Main/models.py | 74866d725c5d047aef60b897cb95ee49f05cc96e | [] | no_license | jyahn0623/Embedded-Lab-s-site | 35418ab5e987bdcd51c9400269c6957a54c5f812 | 58a025c17021e41264ac39123b1375e1b4708edf | refs/heads/master | 2020-12-04T17:55:41.505816 | 2019-07-02T00:34:04 | 2019-07-02T00:34:04 | 231,859,244 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
def save_pic_user(instance, name):
    """Return the storage path ``Profile/<name>`` for a user's picture.

    Django calls ``upload_to`` with the model instance and the original
    filename; the instance is not needed to build the path here.
    """
    return "Profile/{}".format(name)
class RankPicture(models.Model):
    """Image attached to a member rank, looked up by a short rank code."""
    # short code identifying the rank (max 5 characters)
    r_rankcode = models.CharField(max_length=5)
    # badge/picture displayed for that rank
    r_images = models.ImageField(null=True)
class Profile(models.Model):
    """Extra per-member data linked one-to-one to Django's built-in User."""
    # owning auth user; deleting the user deletes the profile
    p_user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    # member's display name ("이름" = name)
    p_name = models.CharField(max_length=5, verbose_name="이름")
    # school year as a single character ("학년" = grade)
    p_grade = models.CharField(max_length=1, verbose_name="학년")
    # membership status; choices are current student / alumni / lab head
    p_rank = models.CharField(default="재학생", max_length=5, verbose_name="신분", choices=(('재학생', '재학생'), ('졸업생', '졸업생'), ('실장', '실장')))
    # rank badge image; kept even if this profile goes away (DO_NOTHING)
    p_rank_pic = models.OneToOneField("Main.RankPicture", verbose_name="계급 사진", on_delete=models.DO_NOTHING, null=True)
    # date of birth ("생년월일")
    p_birth_date = models.DateField(verbose_name="생년월일", null=True)
    # profile photo, stored under Profile/<filename> via save_pic_user
    p_picture = models.ImageField(verbose_name="사진", null=True, upload_to=save_pic_user)
class Board(models.Model):
    """A bulletin-board post."""
    # post title ("제목")
    b_title = models.CharField(max_length=50, verbose_name="제목")
    # category: "정보" (information) or "기타" (other)
    b_category = models.CharField(max_length=5, choices=(('정보', '정보'), ('기타', '기타') ), verbose_name="분류", default="정보")
    # body text ("내용"), capped at 300 characters
    b_content = models.CharField(max_length=300, verbose_name="내용")
    # creation timestamp, set automatically on insert ("작성일")
    b_date=models.DateTimeField(auto_now_add=True, verbose_name="작성일", null=True)
    # author ("작성자"); posts are removed with their user
    b_user=models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="작성자", null=True)
    # whether the post is pinned as a notice ("공지 여부")
    b_isNotice = models.BooleanField(default=False, verbose_name="공지 여부")
class Schedule(models.Model):
    """A scheduled task/event; the user is stored by name, not as a FK."""
    # free-text user identifier
    s_user = models.CharField(max_length=50)
    # planned start time
    s_date = models.DateTimeField(null=True)
    # completion time, if finished
    s_finished_date = models.DateTimeField(null=True)
    # short description of the task
    s_content = models.CharField(max_length=50)
class BoardFiles(models.Model):
    """File attachment belonging to a Board post (removed with the post)."""
    b_board = models.ForeignKey(Board, on_delete=models.CASCADE, null=True)
    # uploaded under MEDIA_ROOT/Board/ ("파일" = file)
    b_file = models.FileField(verbose_name="파일", upload_to="Board")
class GuestBook(models.Model):
    """A public guest-book entry, including the visitor's IP address."""
    g_name = models.CharField(max_length=5)
    g_email = models.CharField(max_length=30)
    g_title = models.CharField(max_length=20)
    g_content = models.CharField(max_length=50)
    # set automatically when the entry is created
    g_date = models.DateTimeField(auto_now_add=True)
    # requester's IP, recorded by the view
    g_ip = models.CharField(max_length=20)
| [
"jyahn0623@gmail.com"
] | jyahn0623@gmail.com |
cc86bef58c7d155debfcde3377df2e4dbe6dfa6d | bf32d19e5cf9650099d4f3a05f24ad3f5d0332a3 | /datagen.py | 0b65a8b378b3d8b516ff0d137a9bda72e359cbef | [] | no_license | DableUTeeF/dictionaries | 5d01b7936ff567c6ca6aac45394370a8168ad816 | c529727325232291a4103d4b1bc72c888cec05f1 | refs/heads/master | 2023-04-11T05:46:31.548797 | 2021-04-22T06:06:55 | 2021-04-22T06:06:55 | 343,296,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,290 | py | from torch.utils.data import Dataset
from torchtext.vocab import Vocab, Counter
import torch
from nltk.corpus import wordnet as wn, stopwords
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import pandas as pd
import re
from transformers import AutoTokenizer
import collections
from bert.bpe_helper import BPE
import sentencepiece as spm
import sys
sys.path.extend(['/home/palm/PycharmProjects/sentence-transformers'])
from sentence_transformers import InputExample
__all__ = ['BertDataset', 'ThaiBertDataset', 'ThaiTokenizer', 'RoyinDataset', 'GPTDataset', 'SentenceDataset',
'SentenceTokenized']
def convert_to_unicode(text):
    """Return *text* as ``str``, decoding UTF-8 bytes (invalid bytes ignored).

    Raises ``ValueError`` for anything that is neither ``str`` nor ``bytes``.
    """
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    if isinstance(text, str):
        return text
    raise ValueError("Unsupported string type: %s" % (type(text)))
def load_vocab(vocab_file):
    """Load a vocabulary file into an OrderedDict mapping token -> index.

    The first whitespace-separated field of each line is taken as the
    token, which supports SentencePiece vocab files ("<token>\t<score>").
    Reading stops at the first empty token (EOF).
    """
    vocab = collections.OrderedDict()
    index = 0
    with open(vocab_file, "r") as reader:
        while True:
            token = reader.readline()
            # readline() returns '' only at EOF; ordinary lines keep their
            # trailing newline, so the emptiness check below detects EOF.
            if token.split():
                token = token.split()[0]  # to support SentencePiece vocab file
            token = convert_to_unicode(token)
            if not token:
                break
            token = token.strip()
            # NOTE(review): a whitespace-only line in the middle of the file is
            # not EOF — it strips to '' and is stored as an empty-string token.
            vocab[token] = index
            index += 1
    return vocab
def convert_by_vocab(vocab, items):
    """Map every element of *items* through the *vocab* dict, keeping order.

    Raises ``KeyError`` on unknown items, same as direct indexing.
    """
    return [vocab[item] for item in items]
class ThaiTokenizer(object):
    """Tokenizes Thai texts using BPE and SentencePiece models.

    For each input the shorter of the two segmentations (BPE vs
    SentencePiece) is used; out-of-vocabulary pieces become ``<unk>``.
    """
    def __init__(self, vocab_file, spm_file):
        # token -> id mapping and its inverse, plus both segmenters
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.bpe = BPE(vocab_file)
        self.s = spm.SentencePieceProcessor()
        self.s.Load(spm_file)
        self.vocab_size = len(self.vocab)
    def tokenize(self, text):
        """Segment *text* into in-vocabulary pieces (OOV -> '<unk>')."""
        bpe_tokens = self.bpe.encode(text).split(' ')
        spm_tokens = self.s.EncodeAsPieces(text)
        # prefer whichever segmentation produced fewer pieces
        tokens = bpe_tokens if len(bpe_tokens) < len(spm_tokens) else spm_tokens
        split_tokens = []
        for token in tokens:
            new_token = token
            # split an unknown '_'-prefixed piece into '_' + remainder
            if token.startswith('_') and token not in self.vocab:
                split_tokens.append('_')
                new_token = token[1:]
            if new_token not in self.vocab:
                split_tokens.append('<unk>')
            else:
                split_tokens.append(new_token)
        return split_tokens
    def __call__(self, text):
        # ids 1 and 2 bracket the sequence — presumably [CLS]/[SEP]
        # positions in the vocab file; confirm against the vocab.
        return [1] + self.convert_tokens_to_ids(self.tokenize(text)) + [2]
    def convert_tokens_to_ids(self, tokens):
        """Map token strings to their vocabulary ids."""
        return convert_by_vocab(self.vocab, tokens)
    def decode(self, ids):
        """Map vocabulary ids back to token strings."""
        return convert_by_vocab(self.inv_vocab, ids)
def generate_batch(batch):
    """Collate a list of 2-tuples of 1-D tensors into padded batches.

    Pads each position independently (sequence-first, the ``pad_sequence``
    default) and returns them in swapped order: (second, first).
    """
    firsts = pad_sequence([pair[0] for pair in batch])
    seconds = pad_sequence([pair[1] for pair in batch])
    return seconds, firsts
def generate_bert_batch(batch):
    """Collate (word_encoding, text_encoding) pairs of tokenizer outputs.

    Each entry holds dict-like encodings whose ``input_ids`` first row is a
    1-D id tensor; both sides are padded batch-first.
    """
    word_ids = [entry[0]['input_ids'][0] for entry in batch]
    text_ids = [entry[1]['input_ids'][0] for entry in batch]
    return (pad_sequence(word_ids, batch_first=True),
            pad_sequence(text_ids, batch_first=True))
def generate_triplet_batch(batch):
    """Collate (anchor word, positive text, negative text) triples.

    Each entry is a 3-tuple of 1-D tensors; every position is padded
    independently (sequence-first, the ``pad_sequence`` default).
    """
    word = [entry[0] for entry in batch]
    pos_text = [entry[1] for entry in batch]
    # BUG FIX: the negative sample is the THIRD element of each triple;
    # the original read entry[1] again, making neg identical to pos.
    neg_text = [entry[2] for entry in batch]
    word = pad_sequence(word)
    pos_text = pad_sequence(pos_text)
    neg_text = pad_sequence(neg_text)
    return word, pos_text, neg_text
class QuoraDataset(Dataset):
    """Quora question-pairs dataset (reads ``train.csv`` from the CWD).

    Builds a whitespace-token vocabulary over both question columns and
    yields (tokens1, tokens2, 1 - is_duplicate) triples, i.e. the label is
    1.0 for non-duplicates and 0.0 for duplicates.
    """
    def __init__(self):
        self.csv = pd.read_csv('train.csv')
        # missing questions become empty strings so .split() is safe
        self.csv.fillna('', inplace=True)
        counter = Counter()
        for _, row in self.csv.iterrows():
            q1 = row['question1']
            q2 = row['question2']
            counter.update(q1.split(' '))
            counter.update(q2.split(' '))
        self.vocab = Vocab(counter)
    def __len__(self):
        return len(self.csv)
    def __getitem__(self, index):
        row = self.csv.iloc[index]
        q1 = row['question1']
        q2 = row['question2']
        label = row['is_duplicate']
        # drop tokens that map to the UNK sentinel rather than keeping them
        token_ids_1 = list(filter(lambda x: x is not Vocab.UNK, [self.vocab[token]
                                                                 for token in q1.split(' ')]))
        token_ids_2 = list(filter(lambda x: x is not Vocab.UNK, [self.vocab[token]
                                                                 for token in q2.split(' ')]))
        token1 = torch.tensor(token_ids_1)
        token2 = torch.tensor(token_ids_2)
        return token1, token2, torch.ones(1) - label
class SynonymsDataset(Dataset):
    """WordNet word pairs: (anchor id, other id, label).

    Label is 1 when the other word is a synonym (same synset lemma) and
    0 when it is a randomly sampled non-synonym.
    """
    def __init__(self):
        self.words = list(set(i for i in wn.words()))
        counter = Counter()
        for word in self.words:
            counter.update([word])
            word = wn.synsets(word)
            for meaning in word:
                counter.update(meaning.definition().split(' '))
        self.vocab = Vocab(counter)
    def __len__(self):
        # first two vocab slots are special tokens, hence the -2 / +2 shifts
        return len(self.vocab.itos) - 2
    def __getitem__(self, index):
        word = self.vocab.itos[index + 2]
        ss = wn.synsets(word)
        # pick one sense at random and look at its lemma names
        lemmas = np.random.choice(ss).lemma_names()
        if len(lemmas) > 1 and np.random.rand() > 0.5:
            # positive pair: a synonym from the same synset
            other = np.random.choice(lemmas)
            label = torch.ones(1)
        else:
            # negative pair: rejection-sample a word that is not a lemma of
            # any of the anchor's synsets
            possible_lemmas = []
            for synset in ss:
                possible_lemmas.extend(synset.lemma_names())
            while True:
                idx = torch.randint(0, len(self), (1,))
                if self.vocab.itos[idx + 2] not in possible_lemmas:
                    break
            other = self.vocab.itos[idx + 2]
            label = torch.zeros(1)
        word = torch.tensor([self.vocab[word]])
        other = torch.tensor([self.vocab[other]])
        return word, other, label
class SentenceDataset(Dataset):
    """(word, definition) pairs for sentence-transformers training.

    Source is selected by *language*: 'thai' (Royal Institute dictionary
    TSV), 'eng' (WordNet definitions) or 'all' (Thai WordNet lemmas mapped
    to their English lemma).  ``__getitem__`` yields an ``InputExample``
    labelled 0.8 for a matching pair and 0.2 for a mismatched pair
    (mismatches disabled when *true_only* is set).
    """
    def __init__(self, language=None, words=None, indices=None, true_only=False):
        # regexes used to strip bracketed notes and HTML remnants from text
        self.patterns = [r'\([^)]*\)', r'\[[^)]*\]', r'&#[a-z\d]*;', r'<\/[a-z\d]{1,6}>', r'<[a-z\d]{1,6}>']
        self.language = language
        self.true_only = true_only
        if words is None:
            if language == 'thai':
                self.words, self.indices = self.thai()
            elif language == 'eng':
                self.words, self.indices = self.eng()
            elif language == 'all':
                self.words, self.indices = self.all()
            else:
                raise ValueError('Both `words` and `language` are `None`')
        else:
            # pre-built word list with an explicit index subset (train/val view)
            self.words = words
            self.indices = indices
    def train(self, train, seed=88):
        """Return a deterministic 80/20 split view: train=True -> 80% part."""
        dataset_size = len(self.words)
        indices = list(range(dataset_size))
        split = int(np.floor(0.2 * dataset_size))
        np.random.seed(seed)
        np.random.shuffle(indices)
        train_indices, val_indices = indices[split:], indices[:split]
        if train:
            return SentenceDataset(language=self.language, words=self.words, indices=train_indices, true_only=self.true_only)
        else:
            return SentenceDataset(language=self.language, words=self.words, indices=val_indices, true_only=self.true_only)
    def all(self):
        """Pairs of (Thai lemma, first English lemma) from Thai WordNet."""
        thai_wn_words = []
        thai_wn_lemmas = [x for x in wn.all_lemma_names(lang='tha')]
        for word in thai_wn_lemmas:
            meanings = wn.synsets(word, lang='tha')
            word = word.replace('_', ' ')
            for meaning in meanings:
                thai_wn_words.append((word, meaning.lemma_names()[0]))
        indices = list(range(len(thai_wn_words)))
        return thai_wn_words, indices
    def eng(self):
        """Pairs of (English word, WordNet definition)."""
        words = list(set(i for i in wn.words()))
        out_words = []
        for word in words:
            meanings = wn.synsets(word)
            word = word.replace('_', ' ')
            for meaning in meanings:
                out_words.append((word, meaning.definition()))
        indices = list(range(len(out_words)))
        return out_words, indices
    def thai(self):
        """Pairs of (Thai headword, cleaned definition) from the Royin TSV."""
        words = pd.read_csv('data/royin_dict_2542.tsv', sep='\t')
        out_words = []
        for _, row in words.iterrows():
            word = row.Word1.split(',')[0]
            text = row.Definition
            for pattern in self.patterns:
                text = re.sub(pattern, '', text).split('เช่น')[0].split(';')[0]  # todo: use ; to split between meanings
            # skip cross-reference entries — presumably 'ดู' = "see"; verify
            if text.startswith('ดู'):
                continue
            # drop a short leading part-of-speech marker before the first space
            if len(text) > 5:
                if text[2] == ' ':
                    text = text[3:]
                elif text[3] == ' ':
                    text = text[4:]
                elif text[4] == ' ':
                    text = text[5:]
                elif text[5] == ' ':
                    text = text[6:]
            out_words.append((word, text))
        indices = list(range(len(out_words)))
        return out_words, indices
    def __len__(self):
        return len(self.indices)
    def collate_fn(self, batch):
        """Collate InputExamples into (eng list, tha list, label tensor)."""
        engs = []
        thas = []
        labels = []
        for sample in batch:
            eng, tha = sample.texts
            engs.append(eng)
            thas.append(tha)
            labels.append(sample.label)
        return engs, thas, torch.tensor(labels)
    def __getitem__(self, index) -> InputExample:
        """
        0.5: match
            0.4: word - sentence
                0.: eng - eng: 100000+
                0.: thai - thai: 37706
                0.: thai - eng: 93045
                0.: eng - thai: 6310
            0.05: sentence - sentence: 6310 - match word from thai to eng then pick random sentences
            0.05: word - word: 80508
        0.5: not match
            0.: eng-eng
            0.: thai-thai
            0.: both
        """
        tha, eng = self.words[self.indices[index]]
        if np.random.rand() > 0.6 or self.true_only:
            # matching pair -> high similarity label
            out = InputExample(texts=[eng, tha], label=0.8)
        else:
            # mismatched pair: rejection-sample a different first element
            while True:
                idx = torch.randint(0, len(self), (1,))
                other_tha, _ = self.words[self.indices[idx]]
                if other_tha != tha:
                    break
            out = InputExample(texts=[eng, other_tha], label=0.2)
        return out
class SentenceTokenized(SentenceDataset):
    """SentenceDataset variant whose collate function tokenizes the meanings.

    *stage* controls collation: in stage 'second' the full meaning is
    tokenized and a third of its non-stopword tokens are appended to the
    word; otherwise only the leading third of the non-stopword tokens forms
    the meaning.
    """
    def __init__(self, tokenizer, stage, language=None, words=None, indices=None, true_only=False, bos='[CLS]', eos='[SEP]'):
        super().__init__(language=language, words=words, indices=indices, true_only=true_only)
        self.tokenizer = tokenizer
        self.vocab_size = self.tokenizer.vocab_size
        self.stage = stage
        # English stopwords used to filter definition tokens
        self.stops = set(stopwords.words("english"))
        # NOTE(review): vocab_size is assigned twice — the second line is redundant
        self.vocab_size = self.tokenizer.vocab_size
        self.cls = self.tokenizer.vocab[bos]
        self.sep = self.tokenizer.vocab[eos]
    def collate_fn(self, batch):
        """Collate into (word list, tokenized meanings, label tensor)."""
        words = []
        meanings = []
        labels = []
        for sample in batch:
            eng, tha = sample.texts
            if self.stage == 'second':
                meanings.append(eng)
            # drop stopwords from the definition unless the word itself is one
            if tha not in self.stops:
                splitted_eng = [w for w in eng.split() if w not in self.stops]
            else:
                splitted_eng = eng.split()
            if self.stage == 'second':
                # augment the word with a random third of the definition tokens
                tha = tha + ' '.join(np.random.choice(splitted_eng, 1+int(len(splitted_eng)/3)))
            else:
                # use only the leading third of the definition as the meaning
                meanings.append(' '.join(splitted_eng[:1+int(len(splitted_eng)/3)]))
            words.append(tha)
            labels.append(sample.label)
        return words, self.tokenizer(meanings, return_tensors='pt', padding=True), torch.tensor(labels)
class BertDataset(Dataset):
    """(word, WordNet definition) pairs tokenized with a BERT tokenizer.

    Only words that encode to exactly one wordpiece are kept (3 ids
    including the [CLS]/[SEP] specials).  With *reverse* set, only the
    first eligible word contributes its definitions.
    """
    def __init__(self, reverse=False, name='bert-base-uncased', bos='[CLS]', eos='[SEP]'):
        # words = list(set(i for i in wn.words()))
        self.tokenizer = AutoTokenizer.from_pretrained(name)
        # keep single-wordpiece words: [CLS] + piece + [SEP] == 3 ids
        words = [w for w in list(set(i for i in wn.words())) if len(self.tokenizer([w]).data['input_ids'][0]) == 3]
        self.words = []
        for word in words:
            meanings = wn.synsets(word)
            # word = word.replace('_', ' ')
            for meaning in meanings:
                self.words.append((word, meaning.definition()))
            if reverse:
                break
        self.vocab_size = self.tokenizer.vocab_size
        self.cls = self.tokenizer.vocab[bos]
        self.sep = self.tokenizer.vocab[eos]
    def __len__(self):
        return len(self.words)
    def __getitem__(self, index):
        word, text = self.words[index]
        return word, text
    def decode(self, text):
        """Decode a sequence of token ids back to a string."""
        return self.tokenizer.decode(text)
    def collate_fn(self, batch):
        """Tokenize and pad the words and definitions of a batch."""
        text = [entry[1] for entry in batch]
        word = [entry[0] for entry in batch]
        text = self.tokenizer(text, return_tensors='pt', padding=True)
        word = self.tokenizer(word, return_tensors='pt', padding=True)
        # text.data['attention_mask'][text.data['input_ids'] == 102] = 0
        # text.data['input_ids'][text.data['input_ids'] == 102] = 0
        # word.data['attention_mask'][word.data['input_ids'] == 102] = 0
        # word.data['input_ids'][word.data['input_ids'] == 102] = 0
        return word, text
class GPTDataset(Dataset):
    """(word, WordNet definition) pairs tokenized with the GPT-2 tokenizer.

    Only words that encode to exactly one GPT-2 token are kept.  PAD/CLS/SEP
    specials are added to the tokenizer, extending the vocab by two usable
    specials (hence ``vocab_size + 2``).
    """
    def __init__(self, reverse=False):
        # words = list(set(i for i in wn.words()))
        self.tokenizer = AutoTokenizer.from_pretrained('gpt2')
        # keep only words the tokenizer maps to a single id
        words = [w for w in list(set(i for i in wn.words())) if len(self.tokenizer([w]).data['input_ids'][0]) == 1]
        self.words = []
        for word in words:
            meanings = wn.synsets(word)
            # word = word.replace('_', ' ')
            for meaning in meanings:
                self.words.append((word, meaning.definition()))
            if reverse:
                break
        self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        self.tokenizer.add_special_tokens({'eos_token': '[SEP]'})
        self.tokenizer.add_special_tokens({'bos_token': '[CLS]'})
        self.vocab_size = self.tokenizer.vocab_size + 2
        self.cls = self.tokenizer.vocab['[CLS]']
        self.sep = self.tokenizer.vocab['[SEP]']
    def __len__(self):
        return len(self.words)
    def __getitem__(self, index):
        word, text = self.words[index]
        return word, text
    def decode(self, text):
        """Decode a sequence of token ids back to a string."""
        return self.tokenizer.decode(text)
    def collate_fn(self, batch):
        """Tokenize and pad the words and definitions of a batch.

        NOTE(review): ``['[CLS]', *word, '[SEP]']`` passes '[CLS]' and
        '[SEP]' as two EXTRA batch items (each list element is tokenized as
        its own sequence), not as per-sequence delimiters — confirm intent.
        """
        text = [entry[1] for entry in batch]
        word = [entry[0] for entry in batch]
        text = self.tokenizer(text, return_tensors='pt', padding=True)
        word = self.tokenizer(['[CLS]', *word, '[SEP]'], return_tensors='pt', padding=True)
        # text.data['attention_mask'][text.data['input_ids'] == 102] = 0
        # text.data['input_ids'][text.data['input_ids'] == 102] = 0
        # word.data['attention_mask'][word.data['input_ids'] == 102] = 0
        # word.data['input_ids'][word.data['input_ids'] == 102] = 0
        return word, text
class RoyinDataset(Dataset):
    """Royal Institute (2542) Thai dictionary dataset.

    Targets are the dictionary headwords (sorted, with PAD/CLS/SEP
    specials); inputs are definitions cleaned of bracketed notes/HTML and
    truncated to 512 characters, tokenized with ThaiTokenizer.
    """
    def __init__(self):
        # regexes used to strip bracketed notes and HTML remnants
        self.patterns = [r'\([^)]*\)', r'\[[^)]*\]', r'&#[a-z\d]*;', r'<\/[a-z\d]{1,6}>', r'<[a-z\d]{1,6}>']
        self.df = pd.read_csv('data/royin_dict_2542.tsv', sep='\t')
        self.tokenizer = ThaiTokenizer(vocab_file='data/th_wiki_bpe/th.wiki.bpe.op25000.vocab',
                                       spm_file='data/th_wiki_bpe/th.wiki.bpe.op25000.model')
        # output vocabulary = special tokens + first spelling of each headword
        self.target = {'[PAD]', '[CLS]', '[SEP]'}
        for word in self.df.Word1:
            w = word.split(',')[0]
            self.target.add(w)
        self.target = sorted(self.target)
        self.targetid = {k: v for v, k in enumerate(self.target)}
        self.vocab_size = len(self.targetid)
        # NOTE(review): cls/sep are hard-coded to 1/2 even though targetid is
        # built by sorting — confirm '[CLS]'/'[SEP]' really land at 1 and 2.
        self.cls = 1
        self.sep = 2
    def __len__(self):
        return len(self.df)
    def __getitem__(self, index):
        row = self.df.iloc[index]
        word = row.Word1.split(',')[0]
        text = row.Definition
        for pattern in self.patterns:
            text = re.sub(pattern, '', text)
        # cap definition length at 512 characters
        if len(text) > 512:
            text = text[:512]
        return word, text
    def collate_fn(self, batch):
        """Collate into padded (target word ids, definition token ids)."""
        text = [entry[1] for entry in batch]
        word = [entry[0] for entry in batch]
        # text = self.thai_tokenizer(text)
        # word = self.thai_tokenizer(word)
        text = pad_sequence([torch.tensor(self.tokenizer(t)) for t in text], True)
        word = pad_sequence([torch.tensor([1, self.targetid[w], 2]) for w in word], True)
        return word, text
class ThaiBertDataset(Dataset):
    """Thai-English dictionary dataset (``dictdb_th_en.csv``).

    Targets are the unique Thai headwords (``sentry``), offset by the
    PAD/CLS/SEP specials at ids 0/1/2; inputs are the definitions
    (``sdef``) tokenized with ThaiTokenizer.
    """
    def __init__(self):
        # regexes kept for parity with RoyinDataset; not used in __getitem__ here
        self.patterns = [r'\([^)]*\)', r'\[[^)]*\]', r'&#[a-z\d]*;', r'<\/[a-z\d]{1,6}>', r'<[a-z\d]{1,6}>']
        self.tokenizer = ThaiTokenizer(vocab_file='data/th_wiki_bpe/th.wiki.bpe.op25000.vocab',
                                       spm_file='data/th_wiki_bpe/th.wiki.bpe.op25000.model')
        self.df = pd.read_csv('data/dictdb_th_en.csv', sep=';')
        self.target = pd.unique(self.df.sentry)
        # headword ids start at 2 to leave room for the specials below
        self.targetid = {k: v + 2 for v, k in enumerate(self.target)}
        self.targetid['[PAD]'] = 0
        self.targetid['[CLS]'] = 1
        self.targetid['[SEP]'] = 2
        self.vocab_size = len(self.targetid)
        self.cls = 1
        self.sep = 2
    def __len__(self):
        return len(self.df)
    def __getitem__(self, index):
        row = self.df.iloc[index]
        word = row.sentry
        text = row.sdef
        return word, text
    def collate_fn(self, batch):
        """Collate into padded (target word ids, definition token ids)."""
        text = [entry[1] for entry in batch]
        word = [entry[0] for entry in batch]
        # text = self.thai_tokenizer(text)
        # word = self.thai_tokenizer(word)
        text = pad_sequence([torch.tensor(self.tokenizer(t)) for t in text], True)
        word = pad_sequence([torch.tensor([1, self.targetid[w], 2]) for w in word], True)
        return word, text
class WordDataset(Dataset):
    """WordNet (word, definition) pairs with separate input/output vocabs.

    The input vocab covers words plus all definition tokens; the output
    vocab keeps only words seen more than 3 times in the counter.  Items
    are (<sos> word <eos> ids, <sos> definition token ids <eos>).
    """
    def __init__(self):
        words = list(set(i for i in wn.words()))
        counter = Counter()
        self.max_len = 0
        for word in words:
            counter.update([word])
            word = wn.synsets(word)
            for meaning in word:
                # strip parenthesised notes from the definition
                definition = re.sub(r'\([^)]*\)', '', meaning.definition())
                if len(definition) == 0:
                    continue
                if definition[0] == ' ':
                    definition = definition[1:]
                # track the longest definition (in tokens) seen
                self.max_len = max(self.max_len, len(definition.split(' ')))
                counter.update(definition.split(' '))
        self.vocab = Vocab(counter, specials=('<unk>', '<pad>', '<sos>', '<eos>'))
        self.vocab_len = len(self.vocab)
        self.meanings = []
        out_counter = Counter()
        for word in words:
            # only frequent-enough words become prediction targets
            if counter[word] > 3:
                out_counter.update([word])
                self.meanings.extend([(word, i.definition()) for i in wn.synsets(word)])
        self.out_vocab = Vocab(out_counter, specials=('<unk>', '<pad>', '<sos>', '<eos>'))
        self.out_vocab_len = len(self.out_vocab)
    def __len__(self):
        return len(self.meanings)
    def collate_fn(self, batch):
        return generate_batch(batch)
    def __getitem__(self, index):
        word, tokens = self.meanings[index]
        # data = wn.synsets(word)
        # definition ids wrapped in <sos>/<eos>, dropping UNK sentinels
        token_ids = [self.vocab['<sos>']] + list(filter(lambda x: x is not Vocab.UNK, [self.vocab[token] for token in tokens.split(' ')])) + [self.vocab['<eos>']]
        tokens = torch.tensor(token_ids)
        word = torch.tensor([self.out_vocab['<sos>'], self.out_vocab[word], self.out_vocab['<eos>']])
        return word, tokens
class WordTriplet(WordDataset):
    """Triplet variant: (anchor word id, positive definition, negative definition).

    NOTE(review): ``WordDataset.__init__`` never assigns ``self.words``
    (it stores ``self.meanings``), so ``self.words[index]`` below raises
    AttributeError at runtime — confirm against the base class.
    """
    def __getitem__(self, index):
        word = self.words[index]
        data = wn.synsets(word)
        # positive: the definition of one random sense of the anchor word
        pos_tokens = np.random.choice(data).definition()
        pos_token_ids = list(
            filter(lambda x: x is not Vocab.UNK, [self.vocab[token] for token in pos_tokens.split(' ')]))
        # negative: the first definition of a different random word
        while True:
            idx = torch.randint(0, len(self), (1,))
            if idx != index:
                break
        diff_data = wn.synsets(self.words[idx])
        neg_tokens = diff_data[0].definition()
        neg_token_ids = list(filter(lambda x: x is not Vocab.UNK, [self.vocab[token] for token in neg_tokens.split(' ')]))
        neg_tokens = torch.tensor(neg_token_ids)
        pos_tokens = torch.tensor(pos_token_ids)
        word = torch.tensor([self.vocab[word]])
        # NOTE(review): the fixed-length *_out buffers below are built but never
        # returned, and word_out[0] indexes the vocab with the tensor `word`
        # (reassigned above) rather than the original string — confirm intent.
        neg_out = torch.zeros((self.max_len,), dtype=torch.long)
        neg_out[:neg_tokens.size(0)] = neg_tokens
        pos_out = torch.zeros((self.max_len,), dtype=torch.long)
        pos_out[:pos_tokens.size(0)] = pos_tokens
        word_out = torch.zeros((self.max_len,), dtype=torch.long)
        word_out[0] = self.vocab[word]
        return word, pos_tokens, neg_tokens
    def collate_fn(self, batch):
        return generate_triplet_batch(batch)
if __name__ == '__main__':
    # Smoke test: build the triplet dataset, split it 80/20 with a fixed
    # seed and iterate both loaders once, printing the last batch index.
    dataset = WordTriplet()
    x = dataset[0]
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(0.2 * dataset_size))
    np.random.seed(88)
    np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)
    # NOTE(review): collate_fn is generate_batch (pairs) although the dataset
    # yields triples — only the first two elements of each item are collated.
    train_loader = torch.utils.data.DataLoader(dataset, batch_size=32,
                                               sampler=train_sampler,
                                               num_workers=1,
                                               collate_fn=generate_batch)
    validation_loader = torch.utils.data.DataLoader(dataset, batch_size=32,
                                                    sampler=valid_sampler,
                                                    num_workers=1,
                                                    collate_fn=generate_batch)
    for idx, _ in enumerate(train_loader):
        pass
    # relies on the loop variable leaking out of the for statement
    print(idx)
    for idx, _ in enumerate(validation_loader):
        pass
    print(idx)
| [
"palm22180@gmail.com"
] | palm22180@gmail.com |
d4e78f9b2e39fc5020173a64a9f37e7020c532ed | 77f6ff840e2e62965d83b509118ea0e053753108 | /src/quicksort.py | 12938a1ab83d3569e69cfd8b9f7d33fd8498080e | [
"BSD-3-Clause"
] | permissive | phiysng/python-algs | 116af6d4ca9154129112e80933e61aa2c3545518 | 1448a252e48d0c005db5301fc7126c7e695ac034 | refs/heads/master | 2020-05-21T17:03:59.934343 | 2019-05-11T08:50:57 | 2019-05-11T08:50:57 | 186,115,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | # -*- coding: utf-8 -*-
'''
快排算法的简单实现
TODO:使用了O(n)的空间 两次遍历了数组
'''
def quicksort(arr: list) -> list:
    """Return a sorted copy of *arr* via recursive quicksort.

    The first element is the pivot; equal elements go to the right
    partition.  Uses O(n) auxiliary space per level and scans the
    remainder twice (known trade-off for readability).
    """
    if len(arr) <= 1:
        return arr
    pivot, rest = arr[0], arr[1:]
    smaller = quicksort([item for item in rest if item < pivot])
    not_smaller = quicksort([item for item in rest if item >= pivot])
    return smaller + [pivot] + not_smaller
| [
"wuyuanshou@protonmail.com"
] | wuyuanshou@protonmail.com |
3f38298a0205c6057eae97a9b96395d892ab83bc | 8c24de6b63f4a7d5280e9a993e2666239563d93e | /base/base_page.py | 024cde348fd65f2ad1d1592ea46f31f8e69a155b | [] | no_license | duanchaoxin/AppAutoTest | 0bc3ec8cc4b923a3f164220dfe47344273c42dc2 | e7099cecd1200931ecc992ee20758057b999f213 | refs/heads/master | 2022-05-24T00:39:55.256637 | 2020-04-14T08:36:40 | 2020-04-14T08:36:40 | 255,523,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from utils import DriverUtil
class BasePage(object):
    """Common base for page objects: wraps the shared driver with
    explicit-wait find, click, text-input and toast-lookup helpers."""
    def __init__(self):
        # shared driver instance provided by the project's DriverUtil
        self.driver = DriverUtil.get_driver()
    def base_find(self, location, timeout=5, poll=0.5):
        """Find one element; *location* is a (by, value) pair.

        Polls every *poll* seconds for up to *timeout* seconds before
        raising a TimeoutException.
        """
        return WebDriverWait(self.driver, timeout=timeout, poll_frequency=poll). \
            until(lambda x: x.find_element(location[0], location[1]))
    def base_click(self, ele):
        """Click the given element."""
        ele.click()
    def base_input_text(self, element, text):
        # type *text* into *element*, clearing any existing content first
        element.clear()
        element.send_keys(text)
    def get_toast(self, text):
        """Return the full text of a toast message containing *text*.

        BUG FIX: the XPath was '//*contains[@text,"{}"]', which is not
        valid XPath — contains() must appear as a predicate inside
        square brackets.
        """
        xpath = By.XPATH, '//*[contains(@text, "{}")]'.format(text)
        return self.base_find(xpath).text
| [
"1539436284@qq.com"
] | 1539436284@qq.com |
1a0673132ce3a3857c1937ecfc17012d78b7f11d | 6e840a384968c7711479637c1d9451ce75767cc5 | /music.py | f7e07d0602c8a5599a9ecdda48eb82d86fce1e88 | [] | no_license | mariafbarrera/music-recommender-final-project | 6ac751d96dcbee5d1e793eb6b6ba3d154d17a0c7 | efc5241d24c57686e6aff65cc506db31ad1fa91e | refs/heads/master | 2022-11-23T12:54:01.892203 | 2020-07-28T19:47:40 | 2020-07-28T19:47:40 | 283,001,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,954 | py | # MVP: Music Recommender
print(" _ _ _ _ _ ___ ___ _ ______ _ ")
print("| | | | | | | | | | | | | \/ | (_) | ___ \ | | ")
print("| | | | ___| | ___ ___ _ __ ___ ___ | |_ ___ | |_| |__ ___ | . . |_ _ ___ _ ___ | |_/ /___ ___ ___ ___ _ __ ___ ___ _ __ __| | ___ _ __ ")
print("| |/\| |/ _ \ |/ __/ _ \| '_ ` _ \ / _ \ | __/ _ \ | __| '_ \ / _ \ | |\/| | | | / __| |/ __| | // _ \/ __/ __/ _ \| '_ ` _ \ / _ \ '_ \ / _` |/ _ \ '__|")
print("\ /\ / __/ | (_| (_) | | | | | | __/ | || (_) | | |_| | | | __/ | | | | |_| \__ \ | (__ | |\ \ __/ (_| (_| (_) | | | | | | __/ | | | (_| | __/ | ")
print(" \/ \/ \___|_|\___\___/|_| |_| |_|\___| \__\___/ \__|_| |_|\___| \_| |_/\__,_|___/_|\___| \_| \_\___|\___\___\___/|_| |_| |_|\___|_| |_|\__,_|\___|_| ")
print("")
def music():
    """Prompt for a genre number (1-8) and print one song recommendation.

    Invalid input re-prompts recursively.  NOTE(review): after a nested
    music() call returns, the trailing again() below runs once per
    recursion frame, so a user can be asked "play again?" multiple times.
    """
    print("")
    print("")
    print("What type of music genre do you like?")
    print("")
    print("")
    print("Choose a number, based on what you like!")
    user_choice = input('''
    1. Hip Hop
    2. Rock
    3. Pop
    4. Jazz
    5. Classic
    6. K-pop
    7. Country
    8. Electronic
    ''')
    if user_choice == "1":
        print("I would suggest to listen to:")
        print("")
        print("Started From The Bottom - Drake\n")
    elif user_choice == "2":
        print("Why don't you listen to this?")
        print("")
        print("Don't Stop Believin - Journey\n ")
    elif user_choice == "3":
        print("I know you will like this song:")
        print("")
        print("Feeling Good - Michael Bublé\n")
    elif user_choice == "4":
        print("This a nice song!:")
        print("")
        print("The Girl From Ipanema - Stan Getz & Joao Gilberto\n ")
    elif user_choice == "5":
        print("Have you listened to this song?")
        print("")
        print("Gramophone Waltz - Eugen Doga\n")
    elif user_choice == "6":
        print("You may want to dance to this one!")
        print("")
        print("How You Like That - BLACKPINK\n")
    elif user_choice == "7":
        print("You may like this one:")
        print("")
        print("Wagon Wheel - Darius Rucker\n")
    elif user_choice == "8":
        print("What about this one?")
        print("")
        print("Animals - Martin Garrix\n")
    else:
        # unrecognised choice: ask again before offering a replay
        print("I don't think that is an option...")
        music()
    again()
def again():
    """Ask whether to show more recommendations; Y restarts, N exits.

    Any other answer re-prompts recursively.
    """
    cal_again = input("Would you like to see more recommendations? (Y/N)")
    if cal_again.upper() == "Y":
        music()
    elif cal_again.upper() == "N":
        print("See ya! I hope you enjoy the song")
    else:
        print("Please answer with a Y/N!")
        again()
# entry point: runs immediately on import/execution of this script
music()
| [
"mfbarrerab2@gmail.com"
] | mfbarrerab2@gmail.com |
86089daeedc71651ae0564812bf24553d130050a | f399fbac7e35dcc2c2f2ad4d3202b0839d9b7d48 | /user/send_mail.py | 0cb781b2301d5d6442e6f1cfdfd49aada05a621f | [] | no_license | AktanKasymaliev/django-toilets-service | 480f56b652a88e1422290de8906f0bb6d5693cff | 225d71b164c36bab5fded86390b17ce265694a17 | refs/heads/main | 2023-07-14T12:46:12.399114 | 2021-08-23T17:14:04 | 2021-08-23T17:14:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.mail import send_mail
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from decouple import config
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.utils.encoding import force_bytes, force_text
from .token import account_activation_token
def send_confirmation_email(request, user):
    """Send an account-activation email to *user*.

    Renders 'account/email.html' with an activation payload built from
    the current site's domain, the base64-encoded user pk and a one-time
    activation token, then sends it as an HTML email.  Delivery failures
    are swallowed (``fail_silently=True``).
    """
    context = {
        "small_text_detail": "Thank you for "
        "creating an account. "
        "Please verify your email "
        "address to set up your account.",
        "email": user.email,
        "domain":get_current_site(request).domain,
        # uid + token together form the activation link parameters
        "uid":urlsafe_base64_encode(force_bytes(user.pk)),
        "token":account_activation_token.make_token(user)
    }
    # NOTE(review): current_site and to_email are computed but never used
    # (the context and the recipient list access the values directly).
    current_site = get_current_site(request)
    mail_subject = 'Active your account'
    to_email = user.email
    message = render_to_string('account/email.html', context)
    email = EmailMultiAlternatives(
        mail_subject,
        message,
        from_email=config('EMAIL_HOST_USER'),
        to = [user.email],
    )
    # send the rendered template as HTML rather than plain text
    email.content_subtype = 'html'
    email.send(fail_silently=True)
    print("ВСЕ ПРОШЛО УСПЕШНО EMAIL SENT")
| [
"aktan.kasymaliev@icloud.com"
] | aktan.kasymaliev@icloud.com |
b08074bd44154944cfb6114db650296021f40ab8 | ec50e0f401cbcb0eddadf38d576f434688df1be6 | /loss/models.py | 98d8b41d6ab18803134814bb785f67561c1cbb6b | [] | no_license | ayush180/djangoProject2 | ecbd1d74843be87b56e6bac457797c51f9e2ca9a | 6cdc58592b6e8ee517a2f095557e16dfbce7e41f | refs/heads/master | 2023-06-20T11:10:31.028099 | 2021-07-17T11:20:42 | 2021-07-17T11:20:42 | 381,335,957 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | from django.db import models
from product.models import product
from datetime import datetime
from django.urls import reverse
# Create your models here.
class loss(models.Model):
    """A recorded stock-loss entry for a product."""
    # date of the loss; utcnow is passed as a callable, evaluated at save time
    date = models.DateField(default=datetime.utcnow)
    # the affected product; losses are removed with their product
    product = models.ForeignKey(product, on_delete=models.CASCADE, related_name="loss")
    price = models.IntegerField()
    rate = models.IntegerField()
    quantity = models.IntegerField()
    # optional free-text note
    remark = models.TextField(null=True, blank=True)
    def __str__(self):
        return f"{self.id}-{self.product}"
    def get_absolute_url(self):
        # redirect target after create/update views
        return reverse("loss-view")
"ayush180670107041@gmail.com"
] | ayush180670107041@gmail.com |
2ed87c256e5bf9f70115c96c9aec2798f8b5a5af | 14913a0fb7e1d17318a55a12f5a181dddad3c328 | /63.snake.py | 990234c17a8d9d056195b13ae470723aa887b84e | [] | no_license | Jesuisjavert/Algorithm | 6571836ec23ac3036565738c2bee94f416595f22 | 730549d19e66e20b3474a235a600958a8e036a0e | refs/heads/master | 2023-02-16T06:34:50.984529 | 2020-09-25T09:40:30 | 2020-09-25T09:40:30 | 330,849,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | import sys
sys.stdin = open('input.txt','r')
T = int(input())
for testcase in range(1,T+1):
arr = [list(map(int, input().split())) for _ in range(4)]
print(arr) | [
"jesuisjavert@gmail.com"
] | jesuisjavert@gmail.com |
d941801758acdae714ecb26fe79b3331a4daef77 | db21d9daf62e3ea31f30da152f86ecfbb0d4814a | /PDDI-Datasets/HIV-Insite-interactions/hiv_insite_extraction.py | 2b661e7e80a4d33688017d50391a8f4de6f70d8b | [] | no_license | axel-op/public-PDDI-analysis | 487b1ebd650a21aa2df03893b896d80f37b0516b | 8199ee66b60bcb337f777889a210dd0d72a96e8f | refs/heads/master | 2021-07-06T06:50:04.438330 | 2017-10-02T19:04:08 | 2017-10-02T19:04:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | # hiv_insite_extraction.py
# Read .html files in folder:
# HIV-Insite-interactions
# Retrieve 2 drug names, quality of evidence, summary, and description of ddi
# Consolidate all such DDI's from each folder as .tsv file
import glob
import os
import codecs
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from bs4 import BeautifulSoup
################### GLOBAL VARIABLES ###################
DEBUG = True
# INPUT FILE FOLDERS
HEP_DDI_PATH = "../HEP-drug-interactions"
HIV_DDI_PATH = "../HIV-drug-interactions"
HIV_INSITE_DDI_PATH = "../HIV-Insite-interactions"
# OUTPUT FILES
HEP_OUTFILE_NAME = "HEP-drug-interactions.tsv"
HIV_OUTFILE_NAME = "HIV-drug-interactions.tsv"
HIV_INSITE_OUTFILE_NAME = "HIV-Insite-interactions.tsv"
########################################################
outfile = codecs.open(HIV_INSITE_OUTFILE_NAME, encoding='utf-8', mode='w+')
outfile.write(u"Drug 1 Name\tDrug 2 Name\tSummary\tDescription\n")
os.chdir(HIV_INSITE_DDI_PATH)
for file in glob.glob("*.html"):
if DEBUG:
print(file)
f = codecs.open(file, encoding='utf-8', mode='r+')
soup = BeautifulSoup(f, "html.parser")
f.close()
outfile.close()
| [
"echou53@gmail.com"
] | echou53@gmail.com |
e7b26c237632de6eb32b9fcf15a30857b52e9551 | 940713a19ae36c48e8b6f83b8dad3f3aa2ca9562 | /remainder.py | 52999dc56308b74aa752074f2131b03f222b16d1 | [] | no_license | girishpillai17/class-work | cbc2a43ea4b077fac8b1a121de741eba09d165f5 | 7e14841d1b371f6791526fd35d03f0d779b8554d | refs/heads/master | 2021-09-08T03:18:18.666530 | 2018-03-06T17:17:16 | 2018-03-06T17:17:16 | 121,372,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | t = int(input())
for i in range(t):  # t = number of test cases (read from stdin above)
    # each line holds two integers; print a modulo b
    a, b = map(int, input().split())
    d = a % b
    print(d)
"noreply@github.com"
] | noreply@github.com |
be7975289ea7397570ae5a442d590aae139acd82 | 214dde26c268d1d0b7991318c5e2d43aa27af89b | /backlooking/order_analysis.py | c7b7acc13a43f9796ee1e1050048258fb6cc19ad | [] | no_license | hellobiek/smart_deal_tool | f1846903ac402257bbe92bd23f9552970937d50e | ba8aad0a37843362f5833526921c6f700fb881f1 | refs/heads/master | 2022-09-04T04:41:34.598164 | 2022-08-04T22:04:09 | 2022-08-04T22:04:09 | 88,258,362 | 36 | 14 | null | null | null | null | UTF-8 | Python | false | false | 3,559 | py | #-*- coding: utf-8 -*-
import os
import sys
from os.path import abspath, dirname
sys.path.insert(0, dirname(dirname(abspath(__file__))))
import datetime
import const as ct
import pandas as pd
from futu import TrdEnv
from datetime import datetime
from base.cdate import get_dates_array
from tools.markdown_table import MarkdownTable
from tools.markdown_writer import MarkdownWriter
from algotrade.broker.futu.fututrader import FutuTrader
pd.options.mode.chained_assignment = None
def get_total_profit(orders):
buy_orders = orders.loc[orders.trd_side == 'BUY']
buy_orders = buy_orders.reset_index(drop = True)
sell_orders = orders.loc[orders.trd_side == 'SELL']
sell_orders = sell_orders.reset_index(drop = True)
total_sell_value = (sell_orders['dealt_qty'] * sell_orders['dealt_avg_price']).sum()
total_buy_value = (buy_orders['dealt_qty'] * buy_orders['dealt_avg_price']).sum()
return total_sell_value - total_buy_value
def generate(orders, date_arrary, dirname, start, end):
filename = 'form_%s_to_%s_tading_review.md' % (start, end)
os.makedirs(dirname, exist_ok = True)
fullfilepath = os.path.join(dirname, filename)
orders = orders[['code', 'trd_side', 'dealt_qty', 'dealt_avg_price', 'create_time', 'updated_time']]
total_profit = get_total_profit(orders)
md = MarkdownWriter()
md.addTitle("%s_%s_交割单" % (start, end), passwd = '909897')
md.addHeader("交割单分析", 1)
md.addHeader("总收益分析", 2)
t_index = MarkdownTable(headers = ["总收益"])
t_index.addRow(["%s" % total_profit])
md.addTable(t_index)
md.addHeader("交割单复盘", 2)
for cdate in date_arrary:
md.addHeader("%s_交割单" % cdate, 3)
order_info = orders.loc[orders['create_time'].str.startswith(cdate)]
order_info.at[:, 'create_time'] = order_info.loc[:, 'create_time'].str.split().str[1].str[0:8]
order_info = order_info.reset_index(drop = True)
t_index = MarkdownTable(headers = ["名称", "方向", "数量", "价格", "创建时间", "完成时间", "对错", "分析"])
for index in range(len(order_info)):
data_list = order_info.loc[index].tolist()
content_list = [data_list[0], data_list[1], int(data_list[2]), round(data_list[3], 2), data_list[4], data_list[5].split(' ')[1].strip()[0:8], '', '']
content_list = [str(i) for i in content_list]
t_index.addRow(content_list)
md.addTable(t_index)
md.addHeader("本周总结", 2)
md.addHeader("优点", 3)
md.addHeader("缺点", 3)
md.addHeader("心得", 3)
with open(fullfilepath, "w+") as f:
f.write(md.getStream())
def main():
#dirname = '/Volumes/data/quant/stock/data/docs/blog/hellobiek.github.io/source/_posts'
dirname = '/Users/hellobiek/Documents/workspace/blog/blog/source/_posts'
unlock_path = "/Users/hellobiek/Documents/workspace/python/quant/smart_deal_tool/configure/follow_trend.json"
key_path = "/Users/hellobiek/Documents/workspace/python/quant/smart_deal_tool/configure/key.pri"
futuTrader = FutuTrader(host = ct.FUTU_HOST_LOCAL, port = ct.FUTU_PORT, trd_env = TrdEnv.REAL, market = ct.US_MARKET_SYMBOL, unlock_path = unlock_path, key_path = key_path)
start = '2020-08-11'
end = '2020-08-12'
orders = futuTrader.get_history_orders(start = start, end = end)
date_arrary = get_dates_array(start, end, dformat = "%Y-%m-%d", asending = True)
generate(orders, date_arrary, dirname, start, end)
if __name__ == "__main__":
main()
| [
"hellobiek@gmail.com"
] | hellobiek@gmail.com |
ba7f120c0d5551658bacbd572127dbb325214ffa | 11b420a9e6dbe371167227f41ef8e344e3382612 | /ConvNets/Comparison_Plots/Pooled_Images/Pooled_Images.py | 15a23b6ae92fc9bdfccb8654ccf3350027e0953e | [
"MIT"
] | permissive | tarek-ullah/Active-Learning-Bayesian-Convolutional-Neural-Networks | 7092386758b68dc922efaa2c2eba055930bf2896 | f8b68038bd3b97c473e9c1de6b6cdee4538021f4 | refs/heads/master | 2021-01-13T06:57:19.343775 | 2016-11-02T12:22:16 | 2016-11-02T12:22:16 | 81,338,773 | 1 | 0 | null | 2017-02-08T14:34:15 | 2017-02-08T14:34:15 | null | UTF-8 | Python | false | false | 3,650 | py | from __future__ import print_function
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad, Adam
from keras.utils import np_utils, generic_utils
from six.moves import range
import numpy as np
import scipy as sp
from keras import backend as K
import random
import scipy.io
import matplotlib.pyplot as plt
from keras.regularizers import l2, activity_l2
from scipy.stats import mode
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between tran and test sets
(X_train_All, y_train_All), (X_test, y_test) = mnist.load_data()
X_train_All = X_train_All.reshape(X_train_All.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_Pool = X_train_All[5000:15000, :, :, :]
y_Pool = y_train_All[5000:15000]
Total_Pooled_Images = 400
Bald_Pool = np.load('Bald_Pool.npy')
print('Pooling Dropout Bald Images')
#saving pooled images
for im in range(Total_Pooled_Images):
Image = X_Pool[Bald_Pool[1+im], :, :, :]
img = Image.reshape((28,28))
sp.misc.imsave('/Users/Riashat/Documents/Cambridge_THESIS/Code/Experiments/keras/RESULTS/Cluster_Experiment_Results/2nd/Pooled_Images/Bald_Pool_Images/'+'Pooled'+'_Image_'+str(im)+'.jpg', img)
Dropout_Max_Entropy_Pool = np.load('Dropout_Max_Entropy_Pool.npy')
print('Pooling Dropout Max Entropy Images')
#saving pooled images
for im in range(Total_Pooled_Images):
Image = X_Pool[Dropout_Max_Entropy_Pool[1+im], :, :, :]
img = Image.reshape((28,28))
sp.misc.imsave('/Users/Riashat/Documents/Cambridge_THESIS/Code/Experiments/keras/RESULTS/Cluster_Experiment_Results/2nd/Pooled_Images/Dropout_Max_Entropy_Images/'+'Pooled'+'_Image_'+str(im)+'.jpg', img)
# Segnet_Pool = np.load('Segnet_Pool.npy')
# print('Pooling Bayes Segnet Images')
# #saving pooled images
# for im in range(Total_Pooled_Images):
# Image = X_Pool[Segnet_Pool[im], :, :, :]
# img = Image.reshape((28,28))
# sp.misc.imsave('/Users/Riashat/Documents/Cambridge_THESIS/Code/Experiments/keras/RESULTS/Cluster_Experiment_Results/2nd/Pooled_Images/Segnet_Pool_Images/'+'Pooled'+'_Image_'+str(im)+'.jpg', img)
Variation_Ratio_Pool = np.load('Variation_Ratio_Pool.npy')
print('Pooling Variation Ratio Images')
#saving pooled images
for im in range(Total_Pooled_Images):
Image = X_Pool[Variation_Ratio_Pool[1+im], :, :, :]
img = Image.reshape((28,28))
sp.misc.imsave('/Users/Riashat/Documents/Cambridge_THESIS/Code/Experiments/keras/RESULTS/Cluster_Experiment_Results/2nd/Pooled_Images/Variation_Ratio_Images/'+'Pooled'+'_Image_'+str(im)+'.jpg', img)
Max_Entropy_Pool = np.load('Max_Entropy_Pool.npy')
print('Pooling Max Entropy Images')
#saving pooled images
for im in range(Total_Pooled_Images):
Image = X_Pool[Max_Entropy_Pool[1+im], :, :, :]
img = Image.reshape((28,28))
sp.misc.imsave('/Users/Riashat/Documents/Cambridge_THESIS/Code/Experiments/keras/RESULTS/Cluster_Experiment_Results/2nd/Pooled_Images/Max_Entropy_Images/'+'Pooled'+'_Image_'+str(im)+'.jpg', img)
Random_Pool = np.load('Random_Pool.npy')
print('Pooling Random Acquisition Images')
#saving pooled images
for im in range(Total_Pooled_Images):
Image = X_Pool[Random_Pool[1+im], :, :, :]
img = Image.reshape((28,28))
sp.misc.imsave('/Users/Riashat/Documents/Cambridge_THESIS/Code/Experiments/keras/RESULTS/Cluster_Experiment_Results/2nd/Pooled_Images/Random_Images/'+'Pooled'+'_Image_'+str(im)+'.jpg', img)
| [
"riashat.islam.93@gmail.com"
] | riashat.islam.93@gmail.com |
ee4fe39711fc77d9a0fb62f3b590008b142f4b3b | 3e2d2175b3d5407a71f8e423105c7b3a9d55666a | /online_gobang/urls.py | 59a866cdb8ce5cd0c614a87f9ba88d021ac0f743 | [] | no_license | slgu/online_gobang | cb040918fc6048fd413d8d573fe25f95dec3f062 | 51418e18a85d9a47ddd2cb931c25e7df12357a2b | refs/heads/master | 2020-05-03T05:32:59.331652 | 2015-08-05T07:27:58 | 2015-08-05T07:27:58 | 39,301,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'online_gobang.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url("", include('django_socketio.urls')),
url(r'^gobang/static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/Users/gsl/PycharmProjects/online_gobang/static'}),
url(r'^gobang/', include('gobang.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/Users/gsl/PycharmProjects/online_gobang/static'}),
) | [
"blackhero98@gmail.com"
] | blackhero98@gmail.com |
72655e0d239fb7752d956948112e58f2ba5f52b8 | 3637fe729395dac153f7abc3024dcc69e17f4e81 | /reference/ucmdb/discovery/os_platform_discoverer.py | 02d93f540190842835fd968afa055cc09e7172c3 | [] | no_license | madmonkyang/cda-record | daced6846c2456f20dddce7f9720602d1583a02a | c431e809e8d0f82e1bca7e3429dd0245560b5680 | refs/heads/master | 2023-06-15T08:16:46.230569 | 2021-07-15T16:27:36 | 2021-07-15T16:27:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,272 | py | # coding=utf-8
'''
Created on Dec 27, 2013
@author: ekondrashev
'''
import logger
import entity
import command
import flow
import post_import_hooks
import service_loader
from service_loader import load_service_providers_by_file_pattern
class Platform(entity.Immutable):
def __init__(self, name):
self.name = name
def __eq__(self, other):
if isinstance(other, Platform):
return self.name.lower() == other.name.lower()
elif isinstance(other, basestring):
return self.name.lower() == other.lower()
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __key__(self):
return (self.name, )
def __hash__(self):
return hash(self.__key__())
def __repr__(self):
cls = self.__class__
return '%s(%s)' % (cls, repr(self.name),)
class __PlatformsEnum(entity.Immutable):
def __init__(self, **platforms):
self.__platforms = platforms
def __getattr__(self, name):
value = self.__platforms.get(name)
if value:
return value
raise AttributeError
def values(self):
return self.__platforms.values()
def by_name(self, name):
for platform in self.values():
if platform == name:
return platform
def merge(self, **platforms):
self.__platforms.update(platforms)
enum = __PlatformsEnum()
class Discoverer(object):
def is_applicable(self, shell):
r'''
Returns if current discoverer implementation can be applied againt the
shell passed.
@types: shellutils.Shell-> bool
'''
raise NotImplementedError('is_applicable')
def get_platform(self, shell):
r'shellutils.Shell -> os_platform_discoverer.Platform'
raise NotImplementedError('get_platform')
def find_discoverer_by_shell(shell):
r'''
@types: shellutils.Shell -> os_platform_discoverer.Discoverer
@raise ValueError: if shell is not passed
@raise flow.DiscoveryException: if no os platform discoverer found
'''
if not shell:
raise ValueError('Invalid shell')
discoverers = service_loader.global_lookup[Discoverer]
for discoverer in discoverers:
if discoverer.is_applicable(shell):
return discoverer
raise flow.DiscoveryException('No os platform discoverer '
'implementation found')
def discover_platform_by_shell(shell):
r'''
@types: shellutils.Shell -> os_platform_discoverer.Platform
@raise ValueError: if shell is not passed
@raise flow.DiscoveryException: if no os platform discoverer found
or on platform discovery error
'''
discoverer = find_discoverer_by_shell(shell)
try:
return discoverer.get_platform(shell)
except command.ExecuteException, e:
raise flow.DiscoveryException(e)
@post_import_hooks.invoke_when_loaded(__name__)
def __load_plugins(module):
logger.debug('Loading os platforms')
load_service_providers_by_file_pattern('*_os_platform_discoverer.py')
logger.debug('Finished loading platforms: %s' % enum.values())
| [
"silentbalanceyh@126.com"
] | silentbalanceyh@126.com |
f0b2769ee9e69bd66465e108cb54e7908f6174e4 | 826c50246d5a09f794509155465f77c43e179967 | /04_evaluation/06_Hourly_to_Daily_SW_Conversion.py | d3d88d941f13e86bf5f887a40c42693f59bf8125 | [] | no_license | JohnnyRyan1/clouds | 38bb767360aef68f45a6123944901135fa48ae89 | afdadaeb6a61c675275f4a5c932f5e61e51879e8 | refs/heads/main | 2023-04-11T03:05:31.188286 | 2022-07-12T22:52:50 | 2022-07-12T22:52:50 | 481,300,745 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,851 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
DESCRIPTION
Compute hourly to daily correction for SW from ERA5.
"""
# Import modules
import numpy as np
import pandas as pd
import netCDF4
from datetime import timedelta, datetime
import pyresample
# Define destination
dest = '/home/johnny/Documents/Clouds/Data/ERA5/'
# Import ERA5 data
era = netCDF4.Dataset('/home/johnny/Documents/Clouds/Data/ERA5/era_ssrd_2006_2016.nc')
era_lon = era.variables['longitude'][:]
era_lat = era.variables['latitude'][:]
era_xx, era_yy = np.meshgrid(era_lon, era_lat)
# Define ice sheet grid
ismip = netCDF4.Dataset('/home/johnny/Documents/Clouds/Data/Masks/1km-ISMIP6.nc')
ismip_lon = ismip.variables['lon'][:]
ismip_lat = ismip.variables['lat'][:]
# Get time
base = datetime(1900,1,1)
era_time = pd.DataFrame(era.variables['time'][:], columns=['hours'])
era_time['datetime'] = era_time['hours'].apply(lambda x: base + timedelta(hours=x))
era_time['hour'] = era_time['datetime'].dt.hour
# Get daily average
clrsky_daily_mean = np.nanmean(era.variables['ssrdc'][:], axis=0) / 3600
allsky_daily_mean = np.nanmean(era.variables['ssrd'][:], axis=0) / 3600
# Get hourly average
unique_hours = np.unique(era_time['hour'].values)
allsky_hourly = np.zeros(era_xx.shape)
clrsky_hourly = np.zeros(era_xx.shape)
for i in unique_hours:
# Get index of hour
idx = era_time.loc[era_time['hour'] == i].index
# Sample ERA5 data
ssrdc = np.nanmean(era.variables['ssrdc'][idx, :, :], axis=0) / 3600
ssrd = np.nanmean(era.variables['ssrd'][idx, :, :], axis=0) / 3600
# Stack
allsky_hourly = np.dstack((allsky_hourly, ssrd))
clrsky_hourly = np.dstack((clrsky_hourly, ssrdc))
# Remove first layer
allsky_hourly = allsky_hourly[:, :, 1:]
clrsky_hourly = clrsky_hourly[:, :, 1:]
# Regrid to ISMIP grid
swath_def = pyresample.geometry.SwathDefinition(lons=era_xx, lats=era_yy)
swath_con = pyresample.geometry.GridDefinition(lons=ismip_lon, lats=ismip_lat)
# Determine nearest (w.r.t. great circle distance) neighbour in the grid.
allsky_hourly_data = pyresample.kd_tree.resample_nearest(source_geo_def=swath_def,
target_geo_def=swath_con,
data=allsky_hourly,
radius_of_influence=50000)
allsky_daily_data = pyresample.kd_tree.resample_nearest(source_geo_def=swath_def,
target_geo_def=swath_con,
data=allsky_daily_mean,
radius_of_influence=50000)
clrsky_hourly_data = pyresample.kd_tree.resample_nearest(source_geo_def=swath_def,
target_geo_def=swath_con,
data=clrsky_hourly,
radius_of_influence=50000)
clrsky_daily_data = pyresample.kd_tree.resample_nearest(source_geo_def=swath_def,
target_geo_def=swath_con,
data=clrsky_daily_mean,
radius_of_influence=50000)
# Set some values to NaN
allsky_daily_data[allsky_daily_data == 0] = np.nan
allsky_hourly_data[allsky_hourly_data == 0] = np.nan
clrsky_daily_data[clrsky_daily_data == 0] = np.nan
clrsky_hourly_data[clrsky_hourly_data == 0] = np.nan
# Make final calculation
allsky_hourly_multiply = allsky_daily_data[..., np.newaxis] / allsky_hourly_data
clrsky_hourly_multiply = clrsky_daily_data[..., np.newaxis] / clrsky_hourly_data
allsky_hourly_add = allsky_daily_data[..., np.newaxis] - allsky_hourly_data
clrsky_hourly_add = clrsky_daily_data[..., np.newaxis] - clrsky_hourly_data
###############################################################################
# Save 1 km dataset to NetCDF
###############################################################################
dataset = netCDF4.Dataset(dest + 'ERA5_SW_Conversions.nc',
'w', format='NETCDF4_CLASSIC')
print('Creating... %s' % dest + 'ERA5_SW_Conversions.nc')
dataset.Title = "Conversions from hourly to daily from ERA5"
import time
dataset.History = "Created " + time.ctime(time.time())
dataset.Projection = "WGS 84"
dataset.Reference = "Ryan, J. C., Smith, L. C., et al. (unpublished)"
dataset.Contact = "jonathan_ryan@brown.edu"
# Create new dimensions
lat_dim = dataset.createDimension('y', ismip_lat.shape[0])
lon_dim = dataset.createDimension('x', ismip_lat.shape[1])
data_dim = dataset.createDimension('z', 24)
# Define variable types
Y = dataset.createVariable('latitude', np.float32, ('y','x'))
X = dataset.createVariable('longitude', np.float32, ('y','x'))
y = dataset.createVariable('y', np.float32, ('y'))
x = dataset.createVariable('x', np.float32, ('x'))
# Define units
Y.units = "degrees"
X.units = "degrees"
# Create the actual 3D variable
correction_allsky_multiply_nc = dataset.createVariable('correction_allsky_multiply', np.float32, ('y','x','z'))
correction_clrsky_multiply_nc = dataset.createVariable('correction_clrsky_multiply', np.float32, ('y','x','z'))
correction_allsky_add_nc = dataset.createVariable('correction_allsky_add', np.float32, ('y','x','z'))
correction_clrsky_add_nc = dataset.createVariable('correction_clrsky_add', np.float32, ('y','x','z'))
# Write data to layers
Y[:] = ismip_lat
X[:] = ismip_lon
correction_allsky_multiply_nc[:] = allsky_hourly_multiply
correction_clrsky_multiply_nc[:] = clrsky_hourly_multiply
correction_allsky_add_nc[:] = allsky_hourly_add
correction_clrsky_add_nc[:] = clrsky_hourly_add
print('Writing data to %s' % dest + 'ERA5_SW_Conversions.nc')
# Close dataset
dataset.close()
| [
"jryan4@uoregon.edu"
] | jryan4@uoregon.edu |
8201d7cd3b6d9595dbf87a0a429d27b71306562b | 3b6bae8f1cd3f703bd15c55b8d6a327b0b19d39b | /scopes.py | d910c23a17244efad9205633acd5b925310397f3 | [] | no_license | AndreyNaidenko/GmailToVK | 721e00215961fd69e00ba1ad248886d37bee7500 | 73f2e62d98ad228cda7475860308f5cd57eada49 | refs/heads/master | 2020-04-13T10:39:37.391729 | 2019-02-26T18:06:49 | 2019-02-26T18:06:49 | 163,148,109 | 1 | 3 | null | 2019-02-27T13:19:17 | 2018-12-26T07:04:12 | Python | UTF-8 | Python | false | false | 90 | py | SCOPES = [
'https://www.googleapis.com/auth/gmail.readonly',
'https://mail.google.com/'
]
| [
"37630193+MedvedevDenis@users.noreply.github.com"
] | 37630193+MedvedevDenis@users.noreply.github.com |
d898539884fba4fbbea36f5cffcaef951b97aa96 | 7303a3e219352011ed210eaa522283c9f436f127 | /main.py | dbaa72367593b4498701dc685e2a1363ff70c720 | [] | no_license | Harish1503/6.-Swapping-of-two-numbers-using-temporary-variable. | a3e4627bcc5090a8916b58d495aca0849cd11ce2 | 11dc909bf772d9602041577b30ae5bb54d087082 | refs/heads/master | 2023-01-16T00:03:18.748183 | 2020-12-01T08:33:24 | 2020-12-01T08:33:24 | 317,475,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | a = int(input("Enter first number: "))
b = int(input("Enter second number: "))
print("Before Swapping")
print("a is", a)
print("b is", b)
temp = a
a = b
b = temp
print("After Swapping")
print("a is", a)
print("b is", b) | [
"n.harishkumar1503@gmail.com"
] | n.harishkumar1503@gmail.com |
beed14a3c1aff89d035020396a37556f4cf88ed1 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /wicd/rev519-537/right-branch-537/wicd/backends/be-wireless/threadedwirelessinterface.py | ab1a5d1e45f9fa860b190118e1d14d918ce5832a | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | from misc import WicdError
from baseinterface import needsidle
from encryptablewirelessinterface import EncryptableWirelessInterface
from asyncrunner import AsyncManager, AsyncError
class ThreadedWirelessInterface(EncryptableWirelessInterface):
def __init__(self, interface_name):
EncryptableWirelessInterface.__init__(self, interface_name)
self.__async_manager = AsyncManager()
def scan(self, finished_callback):
''' Performs a scan. Scanning is done asynchronously. '''
def _do_scan(abort_if_needed, self):
return EncryptableWirelessInterface._do_scan(self)
def finish_up(result):
print 'scan finished', result
self.networks = result
finished_callback()
self.__async_manager.run(_do_scan, finish_up, self)
def connect(self, finished_callback):
''' Attempts to connect. Connecting is done asynchronously.'''
def _do_connect(abort_if_needed, interface, network):
print 'connecting...'
print interface
print network
import time
while True:
time.sleep(10)
print 'in connecting thread...'
abort_if_needed()
print 'done connecting'
def finish_up(result):
finished_callback()
self.__async_manager.run(_do_connect, finish_up, self,
self.current_network,
name='connect')
def cancel_connection_attempt(self):
''' Cancel the current attempt to connect to the network. '''
self.__async_manager.stop('connect')
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
eee47352250b1354c790e2f7624fae5c7205dbdd | d45b87ba22649cb9c0f003479112c50a7ce09ba0 | /Counting Sort 3.py | 65bd53aba0bb44a886e5ed534ec574b1d9fdc902 | [] | no_license | chishui/HackerRankAlgorithmsChallenge | 7458f6553f52846b9de5b68c0f692f72be13dfa8 | 611096a0c362675ce68598065ea3fe0abbbe5b99 | refs/heads/master | 2020-12-24T13:35:43.829308 | 2014-09-02T10:36:57 | 2014-09-02T10:36:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | #https://www.hackerrank.com/challenges/countingsort3
N = input()
li = [int(raw_input().strip().split()[0]) for i in range(0, N)]
li.sort()
last = -1
index = 0
out = []
for i in range(0, 100):
while index < len(li) and i >= li[index] :
index = index + 1
out.append(index)
print ' '.join(map(str, out))
| [
"chishui2@gmail.com"
] | chishui2@gmail.com |
08fbb51cd24de98123d6393d2ea88e54c2531a02 | 5035dc491ad5d8e786944a4eb1625590254f0494 | /GoogleMapi.py | 944eca4ac912778f4115388d82eccb7d3be92c5f | [] | no_license | Ltunstall92/Personal-Projects | 383083180d0abf8569effd5cd74a520ee527c80d | 261ed4c69452971094321a89637efc3b57da89b3 | refs/heads/master | 2020-04-03T13:43:41.696705 | 2018-11-05T23:31:30 | 2018-11-05T23:31:30 | 155,295,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,694 | py | import googlemaps
import json
import string
import time
gmaps = googlemaps.Client(key='KEY')#@@@@@@@@REPLACE WITH API KEY!!!!@@@@@
s_num = 1 #step number
origin = None
print("For input below please follow the format: City, State Code /n Example: Boston, MA")
if origin == None:
origin = input("Enter a place of origin:")
#if ("," not in origin): google client accepts other formats
#origin = input("Enter a place of origin following format above:")
destination = None
if destination == None:
destination = input("Please enter the city would you like directions to:")
#if ("," not in destination): google client accepts other formats
#origin = input("Where you would like to go? Please follow format above:")
trans_mode = ""
while trans_mode.lower() != "driving" and trans_mode.lower() != "bicycling" and trans_mode.lower() != "walking" and trans_mode.lower() != "transit":
print("Please enter ond of the following options below: Driving, Bicycling, Walking, or Transit")
trans_mode = input("Preferred mode of transprotation from above:").lower()
#goog_directions = "https://maps.googleapis.com/maps/api/directions/json?origin=Boston,+MA&destination=Santa+Carla,+CA&mode=driving&key=AIzaSyAg1ZGxNcWd_pefV6ZG3i_uJ30g-5TTY5c"
#directions from link used for reference to loate items in googlemaps library module
google_directions = gmaps.directions(origin, destination, mode=trans_mode.lower())
#for x in range(len(google_directions)): #Prints out what the url populates use .directions to set list used for comparison to link
#print(google_directions[x])
for leg in google_directions[0]['legs']: #stays within google definded legs
end = leg['end_address']
begin = leg['start_address']
total_distance = leg['distance']['text']
total_time = leg['duration']['text']
print("Getting directions from %s to %s" % (begin,end))
time.sleep(1.2)
print('You will travel approximately %s and your trip will take approximately %s' % (total_distance,total_time))
time.sleep(1.2)
for i in leg['steps']: #stays within legs within steps i = index
html_instructions = i['html_instructions']
html_instructions = html_instructions.replace("<b>","").replace("</div>","").replace("</b>","")
html_instructions = html_instructions.replace('<div style="font-size:0.9em">'," ").replace(" ","")
turn_distance= i['distance']['text']
time_till_turn=i['duration']['text']
print("step %s:" % s_num)
print("In " + turn_distance + ", " + html_instructions + ". This should take you " + time_till_turn)
s_num = s_num + 1
time.sleep(.2)
print("You have arrived at %s" % end) | [
"noreply@github.com"
] | noreply@github.com |
e534714face0022b5bcd0add8b639c65f3106c87 | 85832b772afde033c77770a8eb268404bc96545e | /lib/operate_web/exceptions.py | b414fea64c9174c202222488c585ae500f072c25 | [] | no_license | yesseecity/light-novel-parser | ce8fdec66b55e8ae7c484402c57f2bbda7926f9b | 0c58c24360ab85585ba4a6230d90d8d32e6b57c7 | refs/heads/master | 2022-11-10T06:55:37.906276 | 2022-10-17T16:41:35 | 2022-10-17T16:41:35 | 234,101,534 | 0 | 0 | null | 2022-06-11T14:36:15 | 2020-01-15T14:44:56 | Python | UTF-8 | Python | false | false | 491 | py | class ElementNotFoundException(Exception):
def __init__(self, dom_id=None, class_name=None, xpath=None, msg=None, mode=None, wait_time=None):
self.dom_id = dom_id
self.class_name = class_name
self.xpath = xpath
self.mode = mode
self.wait_time = wait_time
self.msg = msg
# TODO 找不到元件時的 error追蹤
def __str__(self):
exception_msg = "Error: 元件 %s 找不到\n" % self.msg
return exception_msg
| [
"yesseecity@yahoo.com.tw"
] | yesseecity@yahoo.com.tw |
5ef08391fa1fa480f963802b9e34f6077002e815 | d12b59b33df5c467abf081d48e043dac70cc5a9c | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/interface_ddc6ed85db77afb3c40ee4824b274478.py | 7eb7f7341abf23ea8aaaf6c991a7897c8183a845 | [
"MIT"
] | permissive | ajbalogh/ixnetwork_restpy | 59ce20b88c1f99f95a980ff01106bda8f4ad5a0f | 60a107e84fd8c1a32e24500259738e11740069fd | refs/heads/master | 2023-04-02T22:01:51.088515 | 2021-04-09T18:39:28 | 2021-04-09T18:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,852 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Interface(Base):
    """This object represents a network interface.
    The Interface class encapsulates a list of interface resources that are managed by the user.
    A list of resources can be retrieved from the server using the Interface.find() method.
    The list can be managed by using the Interface.add() and Interface.remove() methods.
    """
    # No per-instance attributes beyond what Base manages.
    __slots__ = ()
    # Name of this resource node in the server-side data model (SDM).
    _SDM_NAME = 'interface'
    # Maps the Python-facing property names to the server-side REST attribute names.
    _SDM_ATT_MAP = {
        'CircuitAuthType': 'circuitAuthType',
        'CircuitReceivedPasswordList': 'circuitReceivedPasswordList',
        'CircuitTransmitPassword': 'circuitTransmitPassword',
        'ConfiguredHoldTime': 'configuredHoldTime',
        'Enable3WayHandshake': 'enable3WayHandshake',
        'EnableAutoAdjustArea': 'enableAutoAdjustArea',
        'EnableAutoAdjustMtu': 'enableAutoAdjustMtu',
        'EnableAutoAdjustProtocolsSupported': 'enableAutoAdjustProtocolsSupported',
        'EnableBfdRegistration': 'enableBfdRegistration',
        'EnableConfiguredHoldTime': 'enableConfiguredHoldTime',
        'EnableConnectedToDut': 'enableConnectedToDut',
        'Enabled': 'enabled',
        'ExtendedCircuitId': 'extendedCircuitId',
        'InterfaceId': 'interfaceId',
        'InterfaceIp': 'interfaceIp',
        'InterfaceIpMask': 'interfaceIpMask',
        'Ipv6MtMetric': 'ipv6MtMetric',
        'Level': 'level',
        'Level1DeadTime': 'level1DeadTime',
        'Level1HelloTime': 'level1HelloTime',
        'Level2DeadTime': 'level2DeadTime',
        'Level2HelloTime': 'level2HelloTime',
        'Metric': 'metric',
        'NetworkType': 'networkType',
        'PriorityLevel1': 'priorityLevel1',
        'PriorityLevel2': 'priorityLevel2',
        'TeAdminGroup': 'teAdminGroup',
        'TeMaxBandwidth': 'teMaxBandwidth',
        'TeMetricLevel': 'teMetricLevel',
        'TeResMaxBandwidth': 'teResMaxBandwidth',
        'TeUnreservedBwPriority': 'teUnreservedBwPriority',
    }
    def __init__(self, parent):
        """Attach this Interface resource under *parent* in the resource hierarchy."""
        super(Interface, self).__init__(parent)
@property
def CircuitAuthType(self):
"""
Returns
-------
- str(none | password | md5): The type of Circuit Authentication to be used for this emulated ISIS router.
"""
return self._get_attribute(self._SDM_ATT_MAP['CircuitAuthType'])
@CircuitAuthType.setter
def CircuitAuthType(self, value):
self._set_attribute(self._SDM_ATT_MAP['CircuitAuthType'], value)
@property
def CircuitReceivedPasswordList(self):
"""
Returns
-------
- list(str): The Receive Password List is used only for Cleartext Password authentication. MD5 Authentication requires that both of the neighbors have the same MD5 key for the packets to be accepted.
"""
return self._get_attribute(self._SDM_ATT_MAP['CircuitReceivedPasswordList'])
@CircuitReceivedPasswordList.setter
def CircuitReceivedPasswordList(self, value):
self._set_attribute(self._SDM_ATT_MAP['CircuitReceivedPasswordList'], value)
@property
def CircuitTransmitPassword(self):
"""
Returns
-------
- str: If circuitAuthType is isisAuthTypePassword, then this is the password (or MD5Key) that will be sent with transmitted IIHs.
"""
return self._get_attribute(self._SDM_ATT_MAP['CircuitTransmitPassword'])
@CircuitTransmitPassword.setter
def CircuitTransmitPassword(self, value):
self._set_attribute(self._SDM_ATT_MAP['CircuitTransmitPassword'], value)
@property
def ConfiguredHoldTime(self):
"""
Returns
-------
- number: The configured hold time for the interface. This value is only used if enableConfiguredHoldTime is set to true.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConfiguredHoldTime'])
@ConfiguredHoldTime.setter
def ConfiguredHoldTime(self, value):
self._set_attribute(self._SDM_ATT_MAP['ConfiguredHoldTime'], value)
@property
def Enable3WayHandshake(self):
"""
Returns
-------
- bool: If true, Ixia emulated point-to-point circuit will include 3-way TLV in its P2P IIH and attempt to establish the adjacency as specified in RFC 5303.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enable3WayHandshake'])
@Enable3WayHandshake.setter
def Enable3WayHandshake(self, value):
self._set_attribute(self._SDM_ATT_MAP['Enable3WayHandshake'], value)
@property
def EnableAutoAdjustArea(self):
"""
Returns
-------
- bool: If set, and a HELLO message is received which contains a protocols TLV, then the interfaces protocols will be adjusted to match the received TLV.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableAutoAdjustArea'])
@EnableAutoAdjustArea.setter
def EnableAutoAdjustArea(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableAutoAdjustArea'], value)
@property
def EnableAutoAdjustMtu(self):
    """bool: If set, and a padded HELLO message is received on the interface, the interface's MTU is adjusted to match the packet length of the received HELLO message."""
    return self._get_attribute(self._SDM_ATT_MAP['EnableAutoAdjustMtu'])

@EnableAutoAdjustMtu.setter
def EnableAutoAdjustMtu(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['EnableAutoAdjustMtu'], new_value)

@property
def EnableAutoAdjustProtocolsSupported(self):
    """bool: If set, and a HELLO message is received which contains a protocols TLV, the interface's protocols are adjusted to match the received TLV."""
    return self._get_attribute(self._SDM_ATT_MAP['EnableAutoAdjustProtocolsSupported'])

@EnableAutoAdjustProtocolsSupported.setter
def EnableAutoAdjustProtocolsSupported(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['EnableAutoAdjustProtocolsSupported'], new_value)

@property
def EnableBfdRegistration(self):
    """bool: Whether a BFD session is created to the ISIS peer IP address once the ISIS session is established, letting ISIS use BFD to maintain IPv4 connectivity with the peer."""
    return self._get_attribute(self._SDM_ATT_MAP['EnableBfdRegistration'])

@EnableBfdRegistration.setter
def EnableBfdRegistration(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['EnableBfdRegistration'], new_value)

@property
def EnableConfiguredHoldTime(self):
    """bool: If true, enables a hold time for the created interfaces, based on the value in configuredHoldTime."""
    return self._get_attribute(self._SDM_ATT_MAP['EnableConfiguredHoldTime'])

@EnableConfiguredHoldTime.setter
def EnableConfiguredHoldTime(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['EnableConfiguredHoldTime'], new_value)
@property
def EnableConnectedToDut(self):
    """bool: If enabled, this ISIS interface is directly connected to the DUT."""
    return self._get_attribute(self._SDM_ATT_MAP['EnableConnectedToDut'])

@EnableConnectedToDut.setter
def EnableConnectedToDut(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['EnableConnectedToDut'], new_value)

@property
def Enabled(self):
    """bool: Enables the use of this interface for the simulated router."""
    return self._get_attribute(self._SDM_ATT_MAP['Enabled'])

@Enabled.setter
def Enabled(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['Enabled'], new_value)

@property
def ExtendedCircuitId(self):
    """number: The integer value of the local circuit ID."""
    return self._get_attribute(self._SDM_ATT_MAP['ExtendedCircuitId'])

@ExtendedCircuitId.setter
def ExtendedCircuitId(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['ExtendedCircuitId'], new_value)

@property
def InterfaceId(self):
    """str(None | /api/v1/sessions/1/ixnetwork/vport/.../interface): The OSI interface ID for this interface."""
    return self._get_attribute(self._SDM_ATT_MAP['InterfaceId'])

@InterfaceId.setter
def InterfaceId(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['InterfaceId'], new_value)
@property
def InterfaceIp(self):
    """str: The IP address for this interface."""
    return self._get_attribute(self._SDM_ATT_MAP['InterfaceIp'])

@InterfaceIp.setter
def InterfaceIp(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['InterfaceIp'], new_value)

@property
def InterfaceIpMask(self):
    """str: Available only when Interface Connected to DUT is disabled. The mask used with the IPv4 address for this virtual interface on the emulated ISIS router, used to connect to virtual ISIS Network Ranges behind it."""
    return self._get_attribute(self._SDM_ATT_MAP['InterfaceIpMask'])

@InterfaceIpMask.setter
def InterfaceIpMask(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['InterfaceIpMask'], new_value)

@property
def Ipv6MtMetric(self):
    """number: This metric is the same as the Interface Metric; if true, it allows data entry."""
    return self._get_attribute(self._SDM_ATT_MAP['Ipv6MtMetric'])

@Ipv6MtMetric.setter
def Ipv6MtMetric(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['Ipv6MtMetric'], new_value)

@property
def Level(self):
    """str(level1 | level2 | level1Level2): The IS-IS level associated with the interface."""
    return self._get_attribute(self._SDM_ATT_MAP['Level'])

@Level.setter
def Level(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['Level'], new_value)
@property
def Level1DeadTime(self):
    """number: The dead (holding time) interval for level 1 hello messages, in seconds. An ISIS router sending L1 hellos that is not heard from within this period is considered down."""
    return self._get_attribute(self._SDM_ATT_MAP['Level1DeadTime'])

@Level1DeadTime.setter
def Level1DeadTime(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['Level1DeadTime'], new_value)

@property
def Level1HelloTime(self):
    """number: The hello interval for level 1 hello messages, in seconds."""
    return self._get_attribute(self._SDM_ATT_MAP['Level1HelloTime'])

@Level1HelloTime.setter
def Level1HelloTime(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['Level1HelloTime'], new_value)

@property
def Level2DeadTime(self):
    """number: The dead (holding time) interval for level 2 hello messages, in seconds. An ISIS router sending L2 hellos that is not heard from within this period is considered down."""
    return self._get_attribute(self._SDM_ATT_MAP['Level2DeadTime'])

@Level2DeadTime.setter
def Level2DeadTime(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['Level2DeadTime'], new_value)

@property
def Level2HelloTime(self):
    """number: The hello interval for level 2 hello messages, in seconds."""
    return self._get_attribute(self._SDM_ATT_MAP['Level2HelloTime'])

@Level2HelloTime.setter
def Level2HelloTime(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['Level2HelloTime'], new_value)
@property
def Metric(self):
    """number: The cost metric associated with the route."""
    return self._get_attribute(self._SDM_ATT_MAP['Metric'])

@Metric.setter
def Metric(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['Metric'], new_value)

@property
def NetworkType(self):
    """str(pointToPoint | broadcast | pointToMultipoint): The type of network attached to the interface."""
    return self._get_attribute(self._SDM_ATT_MAP['NetworkType'])

@NetworkType.setter
def NetworkType(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['NetworkType'], new_value)

@property
def PriorityLevel1(self):
    """number: Indicates the priority level 1."""
    return self._get_attribute(self._SDM_ATT_MAP['PriorityLevel1'])

@PriorityLevel1.setter
def PriorityLevel1(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['PriorityLevel1'], new_value)

@property
def PriorityLevel2(self):
    """number: Indicates the priority level 2."""
    return self._get_attribute(self._SDM_ATT_MAP['PriorityLevel2'])

@PriorityLevel2.setter
def PriorityLevel2(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['PriorityLevel2'], new_value)
@property
def TeAdminGroup(self):
    """str: The traffic engineering administrative group associated with the interface. (default = {00 00 00 00})"""
    return self._get_attribute(self._SDM_ATT_MAP['TeAdminGroup'])

@TeAdminGroup.setter
def TeAdminGroup(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['TeAdminGroup'], new_value)

@property
def TeMaxBandwidth(self):
    """number: The maximum link bandwidth (sub-TLV 9) allowed for this link in this direction, as a 32-bit IEEE float in bytes/sec. Default 0."""
    return self._get_attribute(self._SDM_ATT_MAP['TeMaxBandwidth'])

@TeMaxBandwidth.setter
def TeMaxBandwidth(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['TeMaxBandwidth'], new_value)

@property
def TeMetricLevel(self):
    """number: A user-defined metric for this TE path."""
    return self._get_attribute(self._SDM_ATT_MAP['TeMetricLevel'])

@TeMetricLevel.setter
def TeMetricLevel(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['TeMetricLevel'], new_value)

@property
def TeResMaxBandwidth(self):
    """number: The maximum reservable link bandwidth (sub-TLV 10) for this link in this direction, as a 32-bit IEEE float in bytes/sec. Default 0."""
    return self._get_attribute(self._SDM_ATT_MAP['TeResMaxBandwidth'])

@TeResMaxBandwidth.setter
def TeResMaxBandwidth(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['TeResMaxBandwidth'], new_value)
@property
def TeUnreservedBwPriority(self):
    """list(number): The traffic engineering unreserved bandwidth advertised for each of the eight priorities. (default = 0.0)"""
    return self._get_attribute(self._SDM_ATT_MAP['TeUnreservedBwPriority'])

@TeUnreservedBwPriority.setter
def TeUnreservedBwPriority(self, new_value):
    self._set_attribute(self._SDM_ATT_MAP['TeUnreservedBwPriority'], new_value)
def update(self, CircuitAuthType=None, CircuitReceivedPasswordList=None, CircuitTransmitPassword=None, ConfiguredHoldTime=None, Enable3WayHandshake=None, EnableAutoAdjustArea=None, EnableAutoAdjustMtu=None, EnableAutoAdjustProtocolsSupported=None, EnableBfdRegistration=None, EnableConfiguredHoldTime=None, EnableConnectedToDut=None, Enabled=None, ExtendedCircuitId=None, InterfaceId=None, InterfaceIp=None, InterfaceIpMask=None, Ipv6MtMetric=None, Level=None, Level1DeadTime=None, Level1HelloTime=None, Level2DeadTime=None, Level2HelloTime=None, Metric=None, NetworkType=None, PriorityLevel1=None, PriorityLevel2=None, TeAdminGroup=None, TeMaxBandwidth=None, TeMetricLevel=None, TeResMaxBandwidth=None, TeUnreservedBwPriority=None):
    """Updates this interface resource on the server.

    Each keyword argument mirrors the writable property of the same name on
    this class (see the individual property docstrings for details); arguments
    left as None are not modified.

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, CircuitAuthType=None, CircuitReceivedPasswordList=None, CircuitTransmitPassword=None, ConfiguredHoldTime=None, Enable3WayHandshake=None, EnableAutoAdjustArea=None, EnableAutoAdjustMtu=None, EnableAutoAdjustProtocolsSupported=None, EnableBfdRegistration=None, EnableConfiguredHoldTime=None, EnableConnectedToDut=None, Enabled=None, ExtendedCircuitId=None, InterfaceId=None, InterfaceIp=None, InterfaceIpMask=None, Ipv6MtMetric=None, Level=None, Level1DeadTime=None, Level1HelloTime=None, Level2DeadTime=None, Level2HelloTime=None, Metric=None, NetworkType=None, PriorityLevel1=None, PriorityLevel2=None, TeAdminGroup=None, TeMaxBandwidth=None, TeMetricLevel=None, TeResMaxBandwidth=None, TeUnreservedBwPriority=None):
    """Adds a new interface resource on the server and appends it to this container.

    Each keyword argument mirrors the writable property of the same name on
    this class (see the individual property docstrings for details).

    Returns
    -------
    - self: This instance with all currently retrieved interface resources using find and the newly added interface resources available through an iterator or index

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
    """Deletes all the contained interface resources in this instance from the server.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    self._delete()
def find(self, CircuitAuthType=None, CircuitReceivedPasswordList=None, CircuitTransmitPassword=None, ConfiguredHoldTime=None, Enable3WayHandshake=None, EnableAutoAdjustArea=None, EnableAutoAdjustMtu=None, EnableAutoAdjustProtocolsSupported=None, EnableBfdRegistration=None, EnableConfiguredHoldTime=None, EnableConnectedToDut=None, Enabled=None, ExtendedCircuitId=None, InterfaceId=None, InterfaceIp=None, InterfaceIpMask=None, Ipv6MtMetric=None, Level=None, Level1DeadTime=None, Level1HelloTime=None, Level2DeadTime=None, Level2HelloTime=None, Metric=None, NetworkType=None, PriorityLevel1=None, PriorityLevel2=None, TeAdminGroup=None, TeMaxBandwidth=None, TeMetricLevel=None, TeResMaxBandwidth=None, TeUnreservedBwPriority=None):
    """Finds and retrieves interface resources from the server.

    All named parameters are evaluated on the server using regex, so they can
    be used to selectively retrieve interface resources; each parameter mirrors
    the property of the same name on this class. To retrieve an exact match
    ensure the parameter value starts with ^ and ends with $. With no
    parameters, all interface resources are retrieved.

    Returns
    -------
    - self: This instance with matching interface resources retrieved from the server available through an iterator or index

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
    """Retrieves a single instance of interface data from the server.

    Args
    ----
    - href (str): An href to the instance to be retrieved

    Returns
    -------
    - self: This instance with the interface resources from the server available through an iterator or index

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    return self._read(href)
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
68d3a28305f521b755b7fb016365e9e81bc67736 | 902b066e8b8b869d0a6eaac82ca26a0d7b42d8b7 | /subjectsapp/models.py | 60cb959e19222a67c094b13a620cbc09a4f10a67 | [] | no_license | manojb7996/rscpro | 4f6b613119a5b8257796e7b4535b027b0d60b770 | 3f2a1a183844968cf7af8170803a57d9c87fa49b | refs/heads/master | 2021-05-04T21:59:03.151471 | 2018-02-02T14:52:35 | 2018-02-02T14:52:35 | 119,992,911 | 0 | 0 | null | 2018-02-02T14:52:36 | 2018-02-02T14:42:04 | HTML | UTF-8 | Python | false | false | 1,329 | py | from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
# Create your models here.
# Model holding every subject/technology an exam can cover.
class TechType(models.Model):
    """A technology (subject) that exams are created for, e.g. python or mysql."""

    subject_name = models.CharField(max_length=30, help_text='Add the technology type ..like python , jave and mysql etc..')
    # Activation flag for the subject; new subjects start disabled.
    status = models.BooleanField(default=False)
    # NOTE(review): default='null' stores the literal string 'null', not SQL NULL —
    # this looks like it was meant to be default=None; confirm before changing
    # (a change here affects migrations and existing rows).
    user = models.ForeignKey(User, default='null', null=True)

    def __str__(self):
        return '{0}'.format(self.subject_name)

    def get_absolute_url(self):
        return reverse('subjectapp:editsubject', kwargs={'pk': self.pk})
# Model describing an individual exam paper and its difficulty mode.
# Valid difficulty modes for an exam paper.
mode_choices = (('B', 'Beginner'), ('I', 'Intermediate'), ('A', 'Advanced'),)


class Paperstype(models.Model):
    """One exam paper, identified by subject, difficulty mode and paper number."""

    subject_name = models.ForeignKey(TechType)
    mode = models.CharField(max_length=2, choices=mode_choices, help_text='Mode of exam')
    papernumber = models.PositiveSmallIntegerField(help_text='Number of paper for exam')
    status = models.BooleanField(help_text='Activation status')

    def __str__(self):
        # BUG FIX: the original format string was '{0}-{1}-{3}' with only three
        # arguments, which raises IndexError whenever the object is displayed;
        # the third positional field is index 2.
        return '{0}-{1}-{2}'.format(self.subject_name, self.mode, self.papernumber)

    @property
    def displaymode(self):
        # Raw single-letter mode code ('B', 'I' or 'A').
        return '{0}'.format(self.mode)

    def get_absolute_url(self):
        return reverse('subjectapp:editpapertype', kwargs={'pk': self.pk})
| [
"santhoshcusat17@gmail.com"
] | santhoshcusat17@gmail.com |
90e640949012d95c3e5daa5e794cbd2d4fcecc7d | ca504fc52c635860ac141a21e453be5f721f4fe7 | /fb_router.py | 7370718b8f52f1c8d0bf0ca4dfda9643586b2a66 | [] | no_license | baconcho/baconchoproject | 844a92842937f63615d689cf3973b59d540a2ef5 | 57e4165419d8e5eca76c5c9ec00108055b2c9cf5 | refs/heads/master | 2020-05-18T20:38:38.477516 | 2011-09-30T07:10:36 | 2011-09-30T07:10:36 | 2,448,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | #this file define all the route related to the facebook application
import Util
from Canvas import CanvasHandler
from CreateCoverHandler import CreateCoverHandler
routes = [(r'/', CanvasHandler),
(r'/fb/', CanvasHandler),
(r'/fb/canvas/', CanvasHandler),
(r'/fb/createcover', CreateCoverHandler)]
Util.run_webapp(routes); | [
"truongducthang@gmail.com"
] | truongducthang@gmail.com |
2fbbd183b02fc3f961d32a9f9a9bf87a801392d7 | c5f2e56d7adcdab9474c78ebbc14e199d20ef95f | /examples/dahua/chatServer.py | fee1e2b1bf6713472da62d8a56389d67011ead5b | [] | no_license | nullzhengwx/pythonStudy | ad4131edaef41c700ec6490096f557292d370cc6 | be59e5b4f19013fff0291bcd13ccb02d49ccba30 | refs/heads/master | 2020-03-26T08:09:54.985328 | 2018-07-27T03:31:07 | 2018-07-27T03:31:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,073 | py | from asyncore import dispatcher
from asynchat import async_chat
import socket, asyncore
PORT = 5005  # TCP port the chat server listens on
NAME = 'TestChat'  # Server name used in the login greeting
class CommandHandler:
    """Simple command dispatcher, in the spirit of cmd.Cmd from the standard library."""

    def unknow(self, session, cmd):
        """Respond to an unrecognized command."""
        session.push(b'Unknow command:' + str.encode(cmd))

    def handle(self, session, line):
        """Decode one received line and dispatch it to the matching do_<cmd> method."""
        text = bytes.decode(line)
        if not text.strip():
            return
        # First word is the command, the remainder (if any) its argument.
        cmd, _, rest = text.partition(' ')
        rest = rest.strip()
        # Look up a handler method and assume it is callable.
        meth = getattr(self, 'do_' + cmd, None)
        try:
            meth(session, rest)
        except TypeError:
            # meth was None (no such handler): report an unknown command.
            self.unknow(session, cmd)
class EndSession(Exception):
    """Raised to signal that the current session should be terminated."""
class Room(CommandHandler):
    """Generic environment that may contain one or more users (sessions).

    Responsible for basic command handling and broadcasting.
    """

    def __init__(self, server):
        self.server = server
        self.sessions = []

    def add(self, session):
        """A session (user) has entered the room."""
        self.sessions.append(session)

    def remove(self, session):
        """A session (user) has left the room."""
        self.sessions.remove(session)

    def broadcast(self, line):
        """Send one line to every session currently in the room."""
        for member in self.sessions:
            member.push(line)

    def do_logout(self, session, line):
        """Respond to the logout command by ending the session."""
        raise EndSession
class LoginRoom(Room):
    """Room for users that have just connected and not yet logged in."""

    def add(self, session):
        Room.add(self, session)
        # Greet the user on arrival.
        self.broadcast(b'Welcome to ' + str.encode(self.server.name))

    def unknow(self, session, cmd):
        # Everything except login/logout earns a reminder to log in first.
        session.push(b'Please log in\nUse "login <nick>"\r\n')

    def do_login(self, session, line):
        name = line.strip()
        if not name:
            # The user must supply a non-empty name.
            session.push(b'Please enter a name\r\n')
        elif name in self.server.users:
            # Reject names already in use.
            session.push(b'The name ' + str.encode(name) + b' is taken.\r\n')
            session.push(b'Please try again.\r\n')
        else:
            # Name accepted: remember it and move the user to the main room.
            session.name = name
            session.enter(self.server.main_room)
class ChatRoom(Room):
    """Room in which multiple users chat with each other."""

    def add(self, session):
        # Announce the newcomer to everyone already present.
        self.broadcast(str.encode(session.name) + b' has entered the room.\r\n')
        self.server.users[session.name] = session
        Room.add(self, session)

    def remove(self, session):
        Room.remove(self, session)
        # Announce the departure to those who remain.
        self.broadcast(str.encode(session.name) + b' has left the room.\r\n')

    def do_say(self, session, line):
        self.broadcast(str.encode(session.name) + b': ' + str.encode(line) + b'\r\n')

    def do_look(self, session, line):
        """Handle the look command: list who is in this room."""
        session.push(b'The following are in this room:\r\n')
        for other in self.sessions:
            session.push(str.encode(other.name) + b'\r\n')

    def do_who(self, session, line):
        """Handle the who command: list who is logged in to the server."""
        session.push(b'The following are logged in:\r\n')
        for name in self.server.users:
            session.push(str.encode(name) + b'\r\n')
class LogoutRoom(Room):
    """
    Simple one-user room whose only job is removing the user's name
    from the server's registry.
    """

    def add(self, session):
        # The session may never have logged in, so the name can be absent;
        # pop() with a default mirrors the original del/except-KeyError.
        self.server.users.pop(session.name, None)
class ChatSession(async_chat):
    """
    A single session, responsible for communicating with one user.
    """

    def __init__(self, server, sock):
        async_chat.__init__(self, sock)
        self.server = server
        # Line terminator ('\n' on Linux, '\r\n' on Windows, '\r' on old Mac).
        self.set_terminator(b"\r\n")
        self.data = []
        self.name = None
        # Every session starts out in its own LoginRoom:
        self.enter(LoginRoom(server))

    def enter(self, room):
        # Leave the current room (if any), then join the new one.
        previous = getattr(self, 'room', None)
        if previous is not None:
            previous.remove(self)
        self.room = room
        room.add(self)

    def collect_incoming_data(self, data):
        self.data.append(data)

    def found_terminator(self):
        # A complete line has arrived; hand it to the current room.
        line = b"".join(self.data)
        self.data = []
        try:
            self.room.handle(self, line)
        except EndSession:
            self.handle_close()

    def handle_close(self):
        async_chat.handle_close(self)
        self.enter(LogoutRoom(self.server))
class ChatServer(dispatcher):
    """
    Chat server with a single main chat room.
    """

    def __init__(self, port, name):
        dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        # try to re-use a server port if possible
        self.set_reuse_addr()
        self.bind(('', port))
        self.listen(5)
        self.name = name
        self.users = {}
        self.main_room = ChatRoom(self)

    def handle_accept(self):
        # A client connected: wrap the new socket in a ChatSession.
        sock, _addr = self.accept()
        ChatSession(self, sock)
if __name__=='__main__':
s = ChatServer(PORT, NAME)
try:
asyncore.loop()
except KeyboardInterrupt:
print | [
"173601321@qq.com"
] | 173601321@qq.com |
663a5aa9df2a0620bc5e0f0dfe063853cd4c8171 | d429431864b3699029f52ca1a232a152c21fb9e6 | /codekata/pali.py | ed6ade1a7df282d0e83a461cdff5b2049294a74d | [] | no_license | kirubah18/guvi | 32e1baeae3371dc14cb95b61421ddac269e3f56d | 5a642b07b6defaf344847e29c5f6bee574aa7c4e | refs/heads/master | 2020-06-03T00:09:17.428442 | 2019-06-24T05:04:21 | 2019-06-24T05:04:21 | 191,355,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | a1=input()
def is_palindrome_number(number):
    """Return True if the integer reads the same with its digits reversed.

    Negative numbers are never palindromes (this matches the original
    script, whose reversed digits could never equal a negative input).
    """
    if number < 0:
        return False
    original = number
    reversed_digits = 0
    # Peel off the last digit and push it onto the reversed value.
    # Integer floor division replaces the original's float-truncating
    # int(a / 10), which loses precision for very large inputs.
    while number != 0:
        reversed_digits = reversed_digits * 10 + number % 10
        number //= 10
    return original == reversed_digits


if __name__ == '__main__':
    # a1 is read from stdin earlier in the script.
    if is_palindrome_number(int(a1)):
        print("yes")
    else:
        print("no")
| [
"noreply@github.com"
] | noreply@github.com |
191db6f8ca5bb50f81b0a602940e6003d3f27b1b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/4/usersdata/112/1803/submittedfiles/swamee.py | 0993e51e919e7a77d0aa3da61db48e12e1ca660d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # -*- coding: utf-8 -*-
import math


def swamee_jain(f, L, Q, delta_h, v, g=9.81, e=0.000002):
    """Size a pipe and evaluate its flow regime.

    Parameters
    ----------
    f : float        Darcy friction factor.
    L : float        Pipe length (m).
    Q : float        Flow rate (m^3/s).
    delta_h : float  Head loss over the pipe (m).
    v : float        Kinematic viscosity (m^2/s).
    g : float        Gravitational acceleration (m/s^2).
    e : float        Absolute roughness (m).

    Returns
    -------
    (D, Rey, K) : diameter, Reynolds number, Swamee-Jain friction factor.
    """
    # Diameter from the Darcy-Weisbach head-loss relation,
    # D = (8 f L Q^2 / (pi^2 g dH))^(1/5).
    # The original wrote (8*f*L*Q)*(2/5) / (pi**2(g*dH)), which is a
    # TypeError (the integer 2 is "called") and also drops Q^2 and the
    # 1/5 exponent.
    D = ((8 * f * L * Q ** 2) / (math.pi ** 2 * g * delta_h)) ** 0.2
    # Reynolds number for circular pipe flow: Re = 4 Q / (pi D v).
    Rey = (4 * Q) / (math.pi * D * v)
    # Swamee-Jain explicit friction factor; the roughness term is the
    # *relative* roughness e/(3.7*D) — the original omitted the /D.
    K = 0.25 / (math.log10(e / (3.7 * D) + 5.74 / Rey ** 0.9)) ** 2
    return D, Rey, K


if __name__ == '__main__':
    # Python 3: convert the text input explicitly (the original relied on
    # Python 2's evaluating input(); `from __future__ import division` is
    # no longer needed).
    f = float(input('Digite o valor de f:'))
    L = float(input('Digite o valor de L:'))
    Q = float(input('Digite o valor de Q:'))
    DeltaH = float(input('Digite o valor de Delta H'))
    v = float(input('Digite o valor de v'))
    D, Rey, K = swamee_jain(f, L, Q, DeltaH, v)
    print('D=%.4f' % D)
    print('Rey=%.4f' % Rey)
    print('K=%.4f' % K)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e493ea3032e1051ad60fa7c44027e35dbaecaf29 | f537166c2daa186eab63eb89742d848cfb83ac89 | /colorhexasearch.py | 9b4ac54da7ee63c2f98ca490c53a793668bd6e0f | [
"Apache-2.0"
] | permissive | evaneliasyoung/ColorHexa-Search | e4ec130a9fd875d25ed179439c991bf13693cf28 | e6c6d2813921d073002032f1900d0e88e56ddd19 | refs/heads/master | 2021-06-27T15:48:47.662339 | 2017-09-15T14:53:36 | 2017-09-15T14:53:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py | import sublime, sublime_plugin
import re
from sys import platform
from os import startfile
from subprocess import Popen
__version__ = "1.0.0"
zeroPad = lambda txt: txt[::-1]+"0"*(2-len(txt))
def getFmt(txt):
    """Determine the format of a color string.

    Arguments:
        txt {str} -- the (lower-cased) color text to classify

    Returns:
        dict -- flags: isHex, isHexMN (shorthand hex needing expansion,
                only present when isHex is True), isRGB, isRGBA
    """
    fmt = {}
    # The original character classes [0-f] and [.-9] spanned raw ASCII
    # ranges, so punctuation such as ':' ';' '/' and uppercase letters also
    # matched; restrict them to real hex digits / decimal characters.
    fmt["isHex"] = re.match(r"#?[0-9a-f]{3,6}", txt) is not None
    if fmt["isHex"]:
        # Shorthand flag: set when the value is NOT a full 6-digit code.
        fmt["isHexMN"] = re.match(r"#?[0-9a-f]{6}", txt) is None
    fmt["isRGB"] = re.match(r"rgb\([0-9]{1,3},\s?[0-9]{1,3},\s?[0-9]{1,3}\)", txt) is not None
    fmt["isRGBA"] = re.match(r"rgba\([0-9]{1,3},\s?[0-9]{1,3},\s?[0-9]{1,3},\s?[0-9.]*\)", txt) is not None
    return fmt
def toHex(txt, fmt):
    """Convert the color described by *txt*/*fmt* to a bare hex string.

    Arguments:
        txt {str} -- the raw color text
        fmt {dict} -- format flags as produced by getFmt

    Returns:
        str -- the hex color without a leading '#' (None if the format
               flags match neither hex nor rgb/rgba)
    """
    if fmt["isHex"]:
        stripped = txt.replace("#", "")
        if fmt["isHexMN"]:
            # Expand shorthand: every digit is doubled (#abc -> aabbcc).
            return "".join(ch * 2 for ch in stripped)
        return stripped
    if fmt["isRGB"] or fmt["isRGBA"]:
        # Keep only digits and commas, then take the first three channels.
        channels = re.sub("[^,0-9]", "", txt).split(",")
        return "".join(zeroPad(str(hex(int(ch)))[2:]) for ch in channels[:3])
def openSite(url):
    """Open *url* with the OS default handler (Windows only).

    Arguments:
        url {str} -- the URL of the webpage
    """
    if platform != "win32":
        return
    startfile(url)
class ColorHexaSearch(sublime_plugin.TextCommand):
    """Sublime Text command: look up the selected color on colorhexa.com."""

    def run(self, edit):
        """Classify the first selection, convert it to hex, open the page."""
        selection = self.view.sel()[0]
        raw = self.view.substr(selection).lower()
        fmt = getFmt(raw)
        hex_color = toHex(raw, fmt)
        openSite("http://www.colorhexa.com/{}".format(hex_color))
"DocCodes@users.noreply.github.com"
] | DocCodes@users.noreply.github.com |
8f789582668896d0476c9662231bb5fbf4edd209 | d6bd031f28dc317208e2a41d24ed520e0e2846d3 | /code/RSA/data_source.py | 302a387641495235746e381e45e10c53341cf4d1 | [] | no_license | oliver-contier/rirsa_tmp | 29e8584c03263f3cb471dcd954a2727c66bf5d7c | 8aa718ef690b6123209fde6aa55b06b44a68bd5f | refs/heads/master | 2020-08-23T06:38:48.369288 | 2019-10-21T12:30:38 | 2019-10-21T12:30:38 | 216,562,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,862 | py | #!/usr/bin/env python
import copy
import csv
from os.path import join as pjoin
import numpy as np
import pandas as pd
from mvpa2.tutorial_suite import fmri_dataset
def get_training_stims(sub_id,
sess_id,
behav_data_dir='/home/my_data/exppsy/oliver/RIRSA/raw/my_data/behav_data',
fmri_logs_dir='/home/my_data/exppsy/oliver/RIRSA/raw/my_data/fmri_logs',
sort_lists=True):
"""
Get the set of trained and untrained stimuli objects from the behavioral results csv file
for a given subject.
"""
# get trained and untrained object IDs
behav_fname = pjoin(behav_data_dir, 'sub{}_behav.csv'.format(sub_id))
behav_df = pd.read_csv(behav_fname)
fmrilog_fname = pjoin(fmri_logs_dir, 'sub{}_session{}_rionly_run1_fmri.csv'.format(sub_id, sess_id))
fmrilog_df = pd.read_csv(fmrilog_fname)
trained_obs = [obj for obj in behav_df.object_id.unique()]
all_obs = fmrilog_df.object_id.unique()
untrained_obs = [obj for obj in all_obs if obj not in trained_obs]
# add rotations
rotations = range(0, 316, 45) # from 0 to 315 in 45 degree steps
trained_obs = np.array([str(obj) + '_r{}'.format(str(rot)) for rot in rotations for obj in trained_obs])
untrained_obs = np.array([str(obj) + '_r{}'.format(str(rot)) for rot in rotations for obj in untrained_obs])
assert len(trained_obs) == len(untrained_obs) and len(trained_obs) == 8 * 8
if sort_lists:
trained_obs.sort()
untrained_obs.sort()
return trained_obs, untrained_obs
def get_events_all(sub,
ses,
run,
bids_dir='/home/my_data/exppsy/oliver/RIRSA/scratch/BIDS'):
"""
Get all stimulus events for a given subject, session, and run.
"""
events_tsv = pjoin(bids_dir, 'sub-{}'.format(sub), 'ses-{}'.format(ses), 'func',
'sub-{}_ses-{}_task-ri_run-{}_events.tsv'.format(sub, ses, run))
with open(events_tsv) as f:
csvreader = csv.reader(f, delimiter='\t')
tsv_content = [row for row in csvreader]
events = [
{'onset': float(row[0]), 'duration': float(row[1]), 'condition': row[2]}
for row in tsv_content[1:]]
return events
def get_events_for_object_ids(sub,
ses,
run,
object_ids,
bids_dir='/home/my_data/exppsy/oliver/RIRSA/scratch/BIDS'):
"""
Get a pymvpa-compatible dict with events for a set ob object_ids
given a subject, session, and run.
"""
events_tsv = pjoin(bids_dir, 'sub-{}'.format(sub), 'ses-{}'.format(ses), 'func',
'sub-{}_ses-{}_task-ri_run-{}_events.tsv'.format(sub, ses, run))
with open(events_tsv) as f:
csvreader = csv.reader(f, delimiter='\t')
tsv_content = [row for row in csvreader]
events = [
{'onset': float(row[0]), 'duration': float(row[1]), 'condition': row[2]}
for row in tsv_content[1:] if row[2] in object_ids]
return events
def get_conf_regs(sub,
ses,
run,
conf_names=('a_comp_cor_00', 'a_comp_cor_01', 'a_comp_cor_02',
'a_comp_cor_03', 'a_comp_cor_04', 'trans_x', 'trans_y',
'trans_z', 'rot_x', 'rot_y', 'rot_z'),
fmriprep_outdir='/home/my_data/exppsy/oliver/RIRSA/scratch/fmriprep/fmriprep_out'):
"""
Grab confound regressors from fmriprep output.
"""
confound_tsv = pjoin(fmriprep_outdir, 'fmriprep', 'sub-{}'.format(sub), 'ses-{}'.format(ses), 'func',
'sub-{}_ses-{}_task-ri_run-{}_desc-confounds_regressors.tsv'.format(sub, ses, run))
with open(confound_tsv) as f:
csvreader = csv.reader(f, delimiter='\t')
tsv_content = [row for row in csvreader]
reg_idxs = [tsv_content[0].index(confname) for confname in conf_names]
conf_regs = np.array(tsv_content[1:])[:, reg_idxs].astype('float')
assert np.shape(conf_regs) == (175, len(conf_names))
return list(conf_names), conf_regs
def get_bold_with_mask(sub,
ses,
run,
fmriprep_outdir='/home/my_data/exppsy/oliver/RIRSA/scratch/fmriprep/fmriprep_out'):
"""
load preprocessed bold image together with mask image (both returned from fmriprep) into pymvpa dataset
"""
mask_path = pjoin(fmriprep_outdir, 'fmriprep', 'sub-{}'.format(sub), 'ses-{}'.format(ses), 'func',
'sub-{}_ses-{}_task-ri_run-{}_space-T1w_desc-brain_mask.nii.gz'.format(sub, ses, run))
bold_path = pjoin(fmriprep_outdir, 'fmriprep', 'sub-{}'.format(sub), 'ses-{}'.format(ses), 'func',
'sub-{}_ses-{}_task-ri_run-{}_space-T1w_desc-preproc_bold.nii.gz'.format(sub, ses, run))
fds = fmri_dataset(bold_path, mask=mask_path)
return fds
def _find_beta_indices(beta_ds,
trained_objects,
untrained_objects):
"""
find indices for trained / untrained stimuli in beta_ds.
used for seperating beta maps in function seperate_betas_by_training
"""
trained_indices, untrained_indices = [], []
for i in range(len(beta_ds.sa.condition)):
if beta_ds.sa.condition[i] == 'catch':
continue
elif beta_ds.sa.condition[i] in trained_objects:
trained_indices.append(i)
elif beta_ds.sa.condition[i] in untrained_objects:
untrained_indices.append(i)
else:
raise RuntimeError('couldnt figure out object {}'.format(beta_ds.sa.condition[i]))
assert len(trained_indices) == len(trained_objects) and len(untrained_indices) == len(untrained_objects)
return np.array(trained_indices), np.array(untrained_indices)
def seperate_betas_by_training(beta_ds,
trained_objects,
untrained_objects):
"""
Given a dataset of beta values obtained via fit_event_hrf_model,
create two new ones for trained and untrained stimulus responses respectively.
"""
# find indices of trained vs. untrained_stimuli
trained_indices, untrained_indices = _find_beta_indices(beta_ds, trained_objects, untrained_objects)
# make copies of beta_ds without regressors and condition (because they throw errors due to shape)
# but keep all the rest
keep_sa = []
keep_fa = ['voxel_indices']
keep_a = ['mapper', 'imgaffine', 'imghdr', 'voxel_dim', 'imgtype', 'voxel_eldim']
trained_betas = beta_ds.copy(deep=False, sa=keep_sa, fa=keep_fa, a=keep_a)
untrained_betas = beta_ds.copy(deep=False, sa=keep_sa, fa=keep_fa, a=keep_a)
# add selected samples (i.e. beta values), regressor and condition info
for indices, betamap in zip([trained_indices, untrained_indices],
[trained_betas, untrained_betas]):
betamap.samples = beta_ds.samples[indices, :]
betamap.sa.regressors = beta_ds.sa.regressors[indices, :]
betamap.sa.condition = beta_ds.sa.condition[indices]
return trained_betas, untrained_betas
def split_sl_results_it_sem_cat(sl_results):
# initiate empty my_data sets
# TODO: used in rsa regression and probably not working in this state
it_results, sem_results, cat_results = copy.deepcopy(sl_results), \
copy.deepcopy(sl_results), \
copy.deepcopy(sl_results)
# add correct parameter estimates to them plus some attributes
for idx, resultsmap in enumerate([it_results, sem_results, cat_results]):
resultsmap.samples = sl_results.samples[idx, :]
# resultsmap.sa['coefs'] = sl_results.sa.coefs[idx]
# resultsmap.fa['center_ids'] = sl_results.fa.center_ids
# resultsmap.a['mapper'] = mapper_from.a.mapper
return it_results, sem_results, cat_results
def split_slcorrr_results(slresults, boldds):
"""
Take the results from running a RSA-correlation searchlight analysis,
split rho and p maps into seperate data sets,
and save it with image information taken from the original bold dataset on which the analysis was run.
"""
# make copies
rho_ds = slresults.copy(deep=False)
p_ds = slresults.copy(deep=False)
# take only subset of samples (for selecting rho and p values)
for idx, target_ds in enumerate([rho_ds, p_ds]):
target_ds.samples = slresults.samples[idx, :]
# add general image attributes from bold dataset (for saving in correct space)
for copy_attr in ['mapper', 'imgaffine', 'imghdr', 'voxel_dim', 'imgtype', 'voxel_eldim']:
target_ds.a[copy_attr] = boldds.a[copy_attr]
return rho_ds, p_ds
| [
"oliver.contier@ovgu.de"
] | oliver.contier@ovgu.de |
7ec51630d622901b2d88dfdd17df2eebdafa22be | 9d58caaabce0289738d485135cda2e178575c9a4 | /kotti_bootswatch_theme/tests/test_resources.py | 97392a7f785c0a224f73154dd874981b9e8057c1 | [
"BSD-3-Clause-Modification"
] | permissive | mete0r/kotti_bootswatch_theme | 61202aeccbb729df5380ce540633d9a555225b78 | 4ee6fbf24501b89d4076dbee4323c70c72542e7e | refs/heads/master | 2022-11-24T07:59:07.560943 | 2020-05-05T10:36:56 | 2020-05-05T10:36:56 | 27,492,192 | 0 | 0 | NOASSERTION | 2022-11-22T05:06:19 | 2014-12-03T14:56:56 | CSS | UTF-8 | Python | false | false | 521 | py | # -*- coding: utf-8 -*-
"""
Created on 2014-12-03
:author: mete0r (mete0r@sarangbang.or.kr)
"""
from pytest import raises
def test_model(root, db_session):
    # `root` and `db_session` are pytest fixtures (Kotti test setup).
    from kotti_bootswatch_theme.resources import CustomContent
    # Default value of the custom attribute is None.
    cc = CustomContent()
    assert cc.custom_attribute is None
    # The attribute can be set through the constructor.
    cc = CustomContent(custom_attribute=u'Foo')
    assert cc.custom_attribute == u'Foo'
    # Adding the content to the site root assigns its name from the key.
    root['cc'] = cc = CustomContent()
    assert cc.name == 'cc'
    # Unknown constructor keywords are rejected.
    with raises(TypeError):
        cc = CustomContent(doesnotexist=u'Foo')
| [
"mete0r@sarangbang.or.kr"
] | mete0r@sarangbang.or.kr |
27fac4f1aaf8414c571f63b38f3416535871b864 | e7fcc1d64cd95805918ab1b5786bf81a92f973ef | /2020/day06/test_day06.py | dcfa4fa5d4d7f186a72866d92f905fc5c31bff00 | [] | no_license | trolen/advent-of-code | 8145c1e36fea04e53d4b7a885efcc2da71fbfe57 | 0a4e022a6a810d86e044a15036a2f5778f0d38af | refs/heads/master | 2023-02-26T13:11:58.341006 | 2023-02-20T23:22:27 | 2023-02-20T23:22:27 | 54,579,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | #! /usr/bin/env python3
import unittest
import day06
class TestDay06(unittest.TestCase):
    """Tests for Advent of Code 2020 day 6 against the sample input."""

    def setUp(self):
        # Same sample groups as the puzzle description; empty strings
        # separate the groups.
        self.raw_data = 'abc||a|b|c||ab|ac||a|a|a|a||b'.split('|')
        self.groups = day06.parse_data(self.raw_data)

    def test_unique_chars(self):
        self.assertEqual('abc', day06.get_unique_chars(['ab', 'ac']))

    def test_common_chars(self):
        self.assertEqual('a', day06.get_common_chars(['ab', 'ac']))

    def test_part1(self):
        self.assertEqual(11, day06.do_part1(self.groups))

    def test_part2(self):
        self.assertEqual(6, day06.do_part2(self.groups))
if __name__ == '__main__':
    # Allow running this test module directly: `python test_day06.py`.
    unittest.main()
| [
"timothy.rolen@gmail.com"
] | timothy.rolen@gmail.com |
ae840301c016aef5a128d64fcf9aea85c19267c3 | bf70b30f3a9f91c2994dda2f130e73eddf87161d | /ldpred/util.py | c19a9271059ed61303bd3fe1a00ac885697b5004 | [
"MIT"
] | permissive | choishingwan/ldpred | 892fd5fb168fdf3d9ace8ad1ce3dc1cf9c767888 | f7f2648dd7aebe9fcc919875823f379de309896b | refs/heads/master | 2021-01-20T07:50:50.155301 | 2019-02-19T14:14:39 | 2019-02-19T14:14:39 | 90,049,563 | 0 | 0 | MIT | 2019-02-04T16:02:48 | 2017-05-02T15:35:46 | Python | UTF-8 | Python | false | false | 3,632 | py | """
Various general utility functions.
"""
import scipy as sp
from scipy import stats
# LDpred currently ignores the Y and MT chromosomes.
ok_chromosomes = ['%d' % (x) for x in range(1, 23)]
ok_chromosomes.append('X')
chromosomes_list = ['chrom_%s' % (chrom) for chrom in ok_chromosomes]
#Various auxiliary variables
ambig_nts = set([('A', 'T'), ('T', 'A'), ('G', 'C'), ('C', 'G')])
opp_strand_dict = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G'}
valid_nts = set(['A', 'T', 'C', 'G'])
lc_CAPs_dict = {'a': 'A', 'c': 'C', 'g': 'G', 't': 'T'}
# LDpred currently ignores the Y and MT chromosomes.
valid_chromosomes = ['%d' % (x) for x in range(1, 24)]
valid_chromosomes.append('X')
chromosomes_list = ['chrom_%s' % (chrom) for chrom in valid_chromosomes]
#Conversion sizes for strings (necessary for using h5py and python 3)
fids_dtype = '|S64'
iids_dtype = '|S64'
sids_dtype = "|S30"
nts_dtype = "|S1"
sids_u_dtype = '<U30'
nts_u_dtype = '<U1'
def check_chromosomes(missing_chromosomes):
    """Print a warning listing any chromosomes that were skipped."""
    if not missing_chromosomes:
        return
    print('Ignored chromosomes:', ','.join(list(missing_chromosomes)))
    print('Please note that only data on chromosomes 1-23, and X is parsed.')
def calc_auc(y_true, y_hat, show_plot=False):
    """
    Calculate the Area Under the ROC Curve (AUC) for a predicted and
    observed case-control phenotype.

    Parameters
    ----------
    y_true : array-like
        Observed phenotype.  A dichotomous phenotype (exactly two distinct
        values) is recoded to 0/1; a quantitative one is dichotomized at
        its mean (with a warning).
    y_hat : array-like
        Predicted score; higher values mean "more case-like".
    show_plot : bool
        If True, plot the ROC curve with pylab.

    Returns
    -------
    float
        AUC, accumulated with the rectangle rule while sweeping a
        threshold over all predicted values from high to low.
    """
    # Modern SciPy removed its top-level NumPy aliases (sp.copy,
    # sp.unique, sp.mean, sp.sum), so use NumPy directly.
    import numpy as np

    y_true = np.copy(y_true)
    if len(np.unique(y_true)) == 2:
        # Dichotomous trait: recode the two observed values to 0/1.
        y_min = y_true.min()
        y_max = y_true.max()
        if y_min != 0 or y_max != 1:
            print('Transforming back to a dichotomous trait')
            y_true[y_true == y_min] = 0
            y_true[y_true == y_max] = 1
    else:
        print('Warning: Calculating AUC for a quantitative phenotype.')
        # Quantitative trait: dichotomize at the mean.
        y_mean = np.mean(y_true)
        zero_filter = y_true <= y_mean
        one_filter = y_true > y_mean
        y_true[zero_filter] = 0
        y_true[one_filter] = 1

    num_cases = np.sum(y_true == 1)
    num_controls = np.sum(y_true == 0)
    assert num_cases + num_controls == len(y_true), 'The phenotype is not defined as expected. It is not binary (0 1 case-control status).'
    print('%d cases, %d controls' % (num_cases, num_controls))

    tot_num_pos = float(np.sum(y_true))
    tot_num_neg = float(len(y_true)) - tot_num_pos

    # Sweep the threshold over every predicted value, from high to low,
    # accumulating the area under the (FPR, TPR) curve.
    l = y_hat.tolist()
    l.sort(reverse=True)
    roc_x = []
    roc_y = []
    auc = 0.0
    prev_fpr = 0.0
    for thres in l:
        thres_filter = y_hat >= thres
        y_t = y_true[thres_filter]
        n = len(y_t)
        tp = np.sum(y_t)
        fp = n - tp
        fpr = fp / tot_num_neg
        tpr = tp / tot_num_pos
        roc_x.append(fpr)
        roc_y.append(tpr)
        auc += tpr * (fpr - prev_fpr)
        prev_fpr = fpr
    print('AUC: %0.4f' % auc)
    if show_plot:
        import pylab
        pylab.plot(roc_x, roc_y)
        pylab.show()
    return auc
def obs_h2_to_liab(R2_osb, K=0.01, P=0.5):
    """
    Transform a heritability estimate from the observed to the liability
    scale (Lee et al., AJHG 2011 conversion; heritability only).

    :param R2_osb: heritability on the observed scale
    :param K: population prevalence
    :param P: case proportion in the sample
    """
    threshold = stats.norm.ppf(1 - K)
    density = stats.norm.pdf(threshold)
    # Scaling constant c = P(1-P) z^2 / (K^2 (1-K)^2).
    scale = P * (1 - P) * density ** 2 / (K ** 2 * (1 - K) ** 2)
    return R2_osb / scale
def obs_r2_to_liab(R2_osb, K=0.01, P=0.5):
    """
    Transform an R2 estimate from the observed to the liability scale
    (Lee et al., Genetic Epidemiology 2012 conversion; for R2 only).

    :param R2_osb: R2 on the observed scale
    :param K: population prevalence
    :param P: case proportion in the sample
    """
    threshold = stats.norm.ppf(K)
    density = stats.norm.pdf(threshold)
    mean_liab = density / K
    C = (K * (1 - K)) ** 2 / ((density ** 2) * (P * (1 - P)))
    d = mean_liab * ((P - K) / (1 - K))
    theta = d * d - d * threshold
    return (R2_osb * C) / (1 + (R2_osb * C * theta))
return R2_liab_cc
| [
"bjarni.vilhjalmsson@gmail.com"
] | bjarni.vilhjalmsson@gmail.com |
cdb896df7dafbf9b574f7853ffe03b2a0ab849e0 | 5c4cc78698a8cdadb10c45799a67c95ca17a4d5a | /custom_components/usage.py | f93d2655364330efe4fac2599f2b0bc5244848ee | [] | no_license | gitumarkk/dash-custom-components-blog | fb044f14735d686bbf0c3e07b863c0eb39830c6b | 3a94e3fd7e3047eb082be901f2c2962b42b27964 | refs/heads/main | 2023-05-31T06:40:33.337975 | 2021-06-11T06:22:31 | 2021-06-11T06:22:31 | 375,925,178 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | import custom_components
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
# Create the Dash application; __name__ lets Dash locate static assets.
app = dash.Dash(__name__)

# Page layout: the custom React component plus an empty div that the
# callback below fills with the entered value.
app.layout = html.Div([
    custom_components.MyCustomComponent(
        id='input',
        value='my-value',
        label='my-label'
    ),
    html.Div(id='output')
])
@app.callback(Output('output', 'children'), [Input('input', 'value')])
def display_output(value):
    """Echo the custom component's current value into the output div."""
    return 'You have entered %s' % (value,)
if __name__ == '__main__':
    # Run the Dash development server (debug enables hot reloading).
    app.run_server(debug=True)
| [
"gitumarkk@gmail.com"
] | gitumarkk@gmail.com |
9f9452d0846ce2ffe12f31dd25cd16d638492f05 | ddeb775900fc389b2f05b5cc990bf123bd60272f | /Aula17/Ex80.py | eb39c792d6f1130c24605495ffffe703211c28a9 | [] | no_license | rexayt/Meu-Aprendizado-Python | f61e1559c43ff76fb2d10ae1e8634ed82176bedf | 2571d26ab91f45d3440193dda32a487a0ad7bb46 | refs/heads/main | 2023-04-13T15:40:36.056624 | 2021-04-22T18:21:08 | 2021-04-22T18:21:08 | 360,639,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | lista = []
for c in range(0, 5):
n = int(input('Digite um número: '))
if c == 0 or n > lista[-1]:
lista.append(n)
else:
pos = 0
while pos < len(lista):
if n <= lista[pos]:
lista.insert(pos, n)
break
pos+=1
print(lista)
| [
"noreply@github.com"
] | noreply@github.com |
2a9046c5ee3b7dd7ead91f9a86e5601efaf67688 | 15d936f6f300e744dbd4d826a95740b1c155f9da | /libarsdkctrl/python/internal/device_handler.py | 2034195ea3d734a8d8c1158af706d460161dfa14 | [] | no_license | Parrot-Developers/arsdk-ng | 783aff771eccb295f2e5ca7057309b6026b062fd | e30d41eb1c7871139a933a04c953aa284fe61308 | refs/heads/master | 2023-05-06T06:34:40.269861 | 2023-04-19T13:45:04 | 2023-04-19T13:45:04 | 183,210,114 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,171 | py | # Copyright (c) 2020 Parrot Drones SAS
import ctypes
import logging
import libarsdk
from libarsdkctrl_bindings import (
arsdk_device_connect,
arsdk_device_create_cmd_itf,
arsdk_device_disconnect,
arsdk_device_get_info,
struct_arsdk_cmd_itf,
struct_arsdk_cmd_itf_cbs,
struct_arsdk_device_conn_cbs,
struct_arsdk_device_conn_cfg,
struct_arsdk_device_info,
struct_pomp_loop,
)
from .utils import string_cast
class DeviceHandler:
    """Manages a single arsdk device: connection lifecycle, command
    interface creation, sending commands, and forwarding received
    commands to an observer."""

    def __init__(self, loop, observer,
                 connected_cb=None, disconnected_cb=None):
        # loop: pomp_loop pointer driving the arsdk event loop.
        # observer: object with a notify(command) method, invoked for every
        #   command received from the device.
        # connected_cb / disconnected_cb: optional user callbacks fired when
        #   the command interface is ready / when the device disconnects.
        self._log = logging.getLogger("Arsdk.DeviceHandler")
        # Recast pomp_loop pointer to prevent ctypes errors
        self._loop = ctypes.cast(loop, ctypes.POINTER(struct_pomp_loop))
        self._observer = observer
        self._connected_user_cb = connected_cb
        self._disconnected_user_cb = disconnected_cb
        # Bind the C connection callbacks to the methods below; the binding
        # object is kept on self so it stays alive while arsdk may call it.
        self._device_cbs = struct_arsdk_device_conn_cbs.bind({
            "connecting": self._connecting_cb,
            "connected": self._connected_cb,
            "disconnected": self._disconnected_cb,
            "canceled": self._canceled_cb,
            "link_status": self._link_status_cb,
        })
        self._device = None
        # Output pointer filled in by arsdk_device_create_cmd_itf on connect.
        self._cmd_itf = ctypes.POINTER(struct_arsdk_cmd_itf)()

    def add_device(self, device):
        """Log info about a discovered device and request a connection.

        Raises RuntimeError if the device info cannot be read or the
        connection request fails.
        """
        # Log device info
        info = ctypes.POINTER(struct_arsdk_device_info)()
        res = arsdk_device_get_info(device, ctypes.byref(info))
        if res < 0:
            raise RuntimeError(f"arsdk_device_get_info: {res}")
        self._log.info("Discovered device: %s (%s:%d)",
                       string_cast(info.contents.name),
                       string_cast(info.contents.addr),
                       info.contents.port)

        # Connect device
        cfg = struct_arsdk_device_conn_cfg(
            ctypes.create_string_buffer(b"arsdkctrl"),  # name
            ctypes.create_string_buffer(b"python"),  # type
            ctypes.create_string_buffer(b""),  # id
            ctypes.create_string_buffer(b"{}")  # json
        )
        res = arsdk_device_connect(
            device, cfg, self._device_cbs, self._loop)
        if res < 0:
            raise RuntimeError(f"arsdk_device_connect: {res}")

    def get_cmd_itf(self):
        """Return the command-interface pointer (valid after connection)."""
        return self._cmd_itf

    def send_command(self, command):
        """Send an arsdk command to the connected device.

        Raises TypeError for a wrong argument type and RuntimeError when
        the underlying send fails.
        """
        if not isinstance(command, libarsdk.struct_arsdk_cmd):
            raise TypeError("DeviceHandler.send(): wrong argument type")
        # Need to recast to type from correct python binding
        cmd_itf = ctypes.cast(self._cmd_itf, ctypes.POINTER(
            libarsdk.struct_arsdk_cmd_itf))
        send_status = libarsdk.arsdk_cmd_itf_cmd_send_status_cb_t()
        res = libarsdk.arsdk_cmd_itf_send(cmd_itf, command, send_status, None)
        if res < 0:
            raise RuntimeError(f"arsdk_cmd_itf_send: {res}")

    def remove_device(self, device):
        """Request disconnection; raises RuntimeError on failure."""
        res = arsdk_device_disconnect(device)
        if res < 0:
            raise RuntimeError(f"arsdk_device_disconnect: {res}")

    def _connecting_cb(self, device, device_info, user_data):
        # Connection in progress; nothing to do.
        pass

    def _connected_cb(self, device, device_info, user_data):
        # Device connected: create the command interface, then notify the
        # user callback (if any).
        self._device = device
        self._cmd_itf_cbs = struct_arsdk_cmd_itf_cbs.bind({
            'dispose': self._dispose_cb,
            'recv_cmd': self._recv_cmd_cb,
        })
        res = arsdk_device_create_cmd_itf(
            self._device, self._cmd_itf_cbs, ctypes.pointer(self._cmd_itf))
        if res < 0:
            raise RuntimeError(f"arsdk_device_create_cmd_itf: {res}")
        if self._connected_user_cb:
            self._connected_user_cb()

    def _disconnected_cb(self, device, device_info, user_data):
        # Device disconnected: drop the reference, notify the user callback.
        self._device = None
        if self._disconnected_user_cb:
            self._disconnected_user_cb()

    def _canceled_cb(self, device, device_info, reason, user_data):
        # Connection attempt canceled before completing.
        self._device = None

    def _link_status_cb(self, device, device_info, status, user_data):
        # Link status changes are ignored.
        pass

    def _recv_cmd_cb(self, itf, command, user_data):
        # Forward every received command to the observer.
        self._observer.notify(command)

    def _dispose_cb(self, itf, user_data):
        # Command interface disposed; nothing to clean up here.
        pass
| [
"nicolas.brulez@parrot.com"
] | nicolas.brulez@parrot.com |
54c162b29e9001c19ac7d6dca301deb9f0baaeda | 69e318f2b60175108bc74ee669bfe16287a71cb6 | /plugins/modules/fortios_firewall_vipgrp46.py | c17473a84beae56fcf18f1d94f460da32776abe1 | [] | no_license | chillancezen/ansible-galaxy-fortios-collection | 5268a5fd97fb4594772349b8d89cb818ec54b3bd | 66a331cd4493d1b0f49798d5c2cd6ef5aeba84d3 | refs/heads/master | 2022-04-09T19:20:59.073193 | 2020-03-26T07:17:09 | 2020-03-26T07:17:09 | 250,185,374 | 0 | 0 | null | 2020-03-26T07:06:16 | 2020-03-26T07:06:16 | null | UTF-8 | Python | false | false | 11,486 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_vipgrp46
short_description: Configure IPv4 to IPv6 virtual IP groups in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and vipgrp46 category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_vipgrp46:
description:
- Configure IPv4 to IPv6 virtual IP groups.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
color:
description:
- Integer value to determine the color of the icon in the GUI (range 1 to 32).
type: int
comments:
description:
- Comment.
type: str
member:
description:
- Member VIP objects of the group (Separate multiple objects with a space).
type: list
suboptions:
name:
description:
- VIP46 name. Source firewall.vip46.name.
required: true
type: str
name:
description:
- VIP46 group name.
required: true
type: str
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPv4 to IPv6 virtual IP groups.
fortios_firewall_vipgrp46:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_vipgrp46:
color: "3"
comments: "<your_own_value>"
member:
-
name: "default_name_6 (source firewall.vip46.name)"
name: "default_name_7"
uuid: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a FortiOS session using the connection settings in *data*."""
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # HTTPS stays enabled unless it was explicitly disabled.
    fos.https('off' if 'https' in data and not data['https'] else 'on')
    fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_vipgrp46_data(json):
    """Keep only the keys belonging to the firewall vipgrp46 schema,
    dropping absent and None-valued entries."""
    option_list = ['color', 'comments', 'member',
                   'name', 'uuid']
    return dict((key, json[key])
                for key in option_list
                if key in json and json[key] is not None)
def underscore_to_hyphen(data):
    """Recursively rename dict keys from snake_case to hyphen-case.

    FortiOS expects hyphenated attribute names while Ansible arguments use
    underscores; nested dicts and lists are converted recursively.
    """
    if isinstance(data, list):
        # Build a converted list.  The original code rebound the loop
        # variable only (`elem = underscore_to_hyphen(elem)`), so dicts
        # inside lists were silently left unconverted.
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return dict((k.replace('_', '-'), underscore_to_hyphen(v))
                    for k, v in data.items())
    return data
def firewall_vipgrp46(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_vipgrp46'] and data['firewall_vipgrp46']:
state = data['firewall_vipgrp46']['state']
else:
state = True
firewall_vipgrp46_data = data['firewall_vipgrp46']
filtered_data = underscore_to_hyphen(filter_firewall_vipgrp46_data(firewall_vipgrp46_data))
if state == "present":
return fos.set('firewall',
'vipgrp46',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'vipgrp46',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_vipgrp46']:
resp = firewall_vipgrp46(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_vipgrp46": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"color": {"required": False, "type": "int"},
"comments": {"required": False, "type": "str"},
"member": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"name": {"required": True, "type": "str"},
"uuid": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"fshen01@fortinet.com"
] | fshen01@fortinet.com |
79e89da491df1b01cf2db1375aa85bf04472dfce | f29a31354a66798e2c398fc2a01bc285b6e35dfb | /NeuralNetworks/l-IntroToNeuralNetworks/Perceptrons.py | 8b97e96224a7febd95bb5ca02c32f3a2c2cb5e9d | [] | no_license | ajpiter/UdacityDeepLearning | 2fd8b6ba7f29aa03ab9dfdd557dbdcc692e7ada0 | eb343a8be223f4bcc15a87483f7945023c2c9a0e | refs/heads/master | 2021-01-02T09:00:34.221125 | 2017-08-28T16:32:45 | 2017-08-28T16:32:45 | 99,121,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | #Perceptrons
#Also known as neurons
#Inputs
#Weights
#Start out as random values, then as the neural network learns more about the input data and results the network adjusts the weights
#The process of adjusting the weights is called training the neural network
#The higher the weight the more important it is in determining the output
# 'W' represents a matrix of weights
# 'w' represents an indivdual weight
#Linear combination
#Multiple weights times inputs and sum them
#Start at i = 1
#Evaluate (w1 * x1) and remember the results
#move to i = 2
#Evaluate (w2 * x2) and add these results to (w1 * x1)
#Continue repeating that process until i = mi where m is the number of inputs
#Example, if we had two inputs, (w1 * x1) + (w2 * x2)
#Output signal
#Done by feeding the linear combination into an activation function
#Activation functions are functions that decide, given the inputs to the node what should be the nodes outputs.
#The output layer is referred to as activations
#Heaviside step function
#An activation function that returns a 0 if the linear combination is less than 0.
#It returns a 1 if the linear combination is positive or equal to zero.
#Think of 1 as yes and 0 as no or True/False
#Bias
#one way to get a function to return 1 for more inputs is to add a value to the results of the linear combination
#Bias is represented in equations as b
#Similar to weights the bias can be updated and changed by the neural network durning training
#weights and bias are initially assigned a random value and then they are updated using a learning algorithm like gradient descent.
#The weights and biases change so that the next training example is more accurate and patterns are learned by the neural network.
| [
"noreply@github.com"
] | noreply@github.com |
930727de4fc9a819568fa813409d808229907aeb | 3da246077831519b40aa45ce6bb6ac63fc1a17fe | /models/regularizers.py | e6a7729c1644ee207936c4e77bc1e6134614ea4c | [] | no_license | jamesoneill12/SupervisedMetaEmbedding | b4d9728ce501e4661f1d9f9191affb7dbe8daab3 | 9d379d7bb9049e475bffb78f2e1df2d0ed72684d | refs/heads/master | 2022-11-12T23:28:17.058219 | 2020-07-04T00:47:18 | 2020-07-04T00:47:18 | 277,012,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,967 | py | import torch
from torch.nn import Parameter
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
"""module, weights, dropout=0, variational=False"""
def dropconnect(module, weights, p=None, dim=None, method='standard'):
"""module, weights, dropout=0, variational=False"""
if method == 'standard':
return DropConnect(module, weights, p, method)
elif method == 'gaussian':
return GaussianDropConnect(p/(1-p))
elif method == 'variational':
return VariationalDropConnect(p/(1-p), dim)
elif method == 'concrete':
# takes layer, input_shape
return ConcreteDropConnect
class DropConnect(torch.nn.Module):
def __init__(self, module, weights, dropout=0, method='standard'):
super(DropConnect, self).__init__()
self.module = module
self.weights = weights
self.dropout = dropout
self.method = method
self._setup()
def widget_demagnetizer_y2k_edition(*args, **kwargs):
# We need to replace flatten_parameters with a nothing function
# It must be a function rather than a lambda as otherwise pickling explodes
# We can't write boring code though, so ... WIDGET DEMAGNETIZER Y2K EDITION!
# (╯°□°)╯︵ ┻━┻
return
def _setup(self):
# Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
if issubclass(type(self.module), torch.nn.RNNBase):
self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition
for name_w in self.weights:
print('Applying weight drop of {} to {}'.format(self.dropout, name_w))
w = getattr(self.module, name_w)
del self.module._parameters[name_w]
self.module.register_parameter(name_w + '_raw', Parameter(w.data))
def _setweights(self):
for name_w in self.weights:
raw_w = getattr(self.module, name_w + '_raw')
w = None
if self.method == 'variational':
mask = torch.autograd.Variable(torch.ones(raw_w.size(0), 1))
if raw_w.is_cuda: mask = mask.cuda()
mask = torch.nn.functional.dropout(mask, p=self.dropout, training=True)
w = mask.expand_as(raw_w) * raw_w
elif self.method == 'concrete':
pass
elif self.method == 'locked':
pass
elif self.method == 'curriculum':
pass
else:
w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training)
setattr(self.module, name_w, w)
def forward(self, *args):
self._setweights()
return self.module.forward(*args)
class LockedDropConnect(nn.Module):
"""https://github.com/j-min/Dropouts/blob/master/Gaussian_Variational_Dropout.ipynb"""
def __init__(self):
super().__init__()
def forward(self, x, dropout=0.5):
if not self.training or not dropout:
return x
m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)
mask = Variable(m, requires_grad=False) / (1 - dropout)
mask = mask.expand_as(x)
return mask * x
class GaussianDropConnect(nn.Module):
def __init__(self, alpha=1.0):
super(GaussianDropConnect, self).__init__()
self.alpha = torch.Tensor([alpha])
def forward(self, x):
"""
Sample noise e ~ N(1, alpha)
Multiply noise h = h_ * e
"""
if self.train():
# N(1, alpha)
epsilon = torch.randn(x.size()) * self.alpha + 1
epsilon = Variable(epsilon)
if x.is_cuda:
epsilon = epsilon.cuda()
return x * epsilon
else:
return x
class VariationalDropConnect(nn.Module):
def __init__(self, alpha=1.0, dim=None):
super(VariationalDropConnect, self).__init__()
self.dim = dim
self.max_alpha = alpha
# Initial alpha
log_alpha = (torch.ones(dim) * alpha).log()
self.log_alpha = nn.Parameter(log_alpha)
def kl(self):
c1 = 1.16145124
c2 = -1.50204118
c3 = 0.58629921
alpha = self.log_alpha.exp()
negative_kl = 0.5 * self.log_alpha + c1 * alpha + c2 * alpha ** 2 + c3 * alpha ** 3
kl = -negative_kl
return kl.mean()
def forward(self, x):
"""
Sample noise e ~ N(1, alpha)
Multiply noise h = h_ * e
"""
if self.train():
# N(0,1)
epsilon = Variable(torch.randn(x.size()))
if x.is_cuda:
epsilon = epsilon.cuda()
# Clip alpha
self.log_alpha.data = torch.clamp(self.log_alpha.data, max=self.max_alpha)
alpha = self.log_alpha.exp()
# N(1, alpha)
epsilon = epsilon * alpha
return x * epsilon
else:
return x
class ConcreteDropConnect(nn.Module):
def __init__(self, layer, input_shape, weight_regularizer=1e-6,
dropout_regularizer=1e-5, init_min=0.1, init_max=0.1):
super(ConcreteDropConnect, self).__init__()
# Post drop out layer
self.layer = layer
# Input dim for regularisation scaling
self.input_dim = np.prod(input_shape[1:])
# Regularisation hyper-parameters
self.weight_regularizer = weight_regularizer
self.dropout_regularizer = dropout_regularizer
# Initialise p_logit
init_min = np.log(init_min) - np.log(1. - init_min)
init_max = np.log(init_max) - np.log(1. - init_max)
self.p_logit = nn.Parameter(torch.Tensor(1))
nn.init.uniform(self.p_logit, a=init_min, b=init_max)
def forward(self, x):
return self.layer(self._concrete_dropout(x))
def regularisation(self):
"""Computes weights and dropout regularisation for the layer, has to be
extracted for each layer within the model and added to the total loss
"""
weights_regularizer = self.weight_regularizer * self.sum_n_square() / (1 - self.p)
dropout_regularizer = self.p * torch.log(self.p)
dropout_regularizer += (1. - self.p) * torch.log(1. - self.p)
dropout_regularizer *= self.dropout_regularizer * self.input_dim
regularizer = weights_regularizer + dropout_regularizer
return regularizer
def _concrete_dropout(self, x):
"""Forward pass for dropout layer
"""
eps = 1e-7
temp = 0.1
self.p = nn.functional.sigmoid(self.p_logit)
# Check if batch size is the same as unif_noise, if not take care
unif_noise = Variable(torch.FloatTensor(np.random.uniform(size=tuple(x.size())))).cuda()
drop_prob = (torch.log(self.p + eps)
- torch.log(1 - self.p + eps)
+ torch.log(unif_noise + eps)
- torch.log(1 - unif_noise + eps))
drop_prob = nn.functional.sigmoid(drop_prob / temp)
random_tensor = 1 - drop_prob
retain_prob = 1 - self.p
x = torch.mul(x, random_tensor)
x /= retain_prob
return x
def sum_n_square(self):
"""Helper function for paramater regularisation
"""
sum_of_square = 0
for param in self.layer.parameters():
sum_of_square += torch.sum(torch.pow(param, 2))
return sum_of_square
class Linear_relu(nn.Module):
def __init__(self, inp, out):
super(Linear_relu, self).__init__()
self.model = nn.Sequential(nn.Linear(inp, out), nn.ReLU())
def forward(self, x):
return self.model(x)
class CurriculumDropConnect(nn.Module):
"""
:param
gamma : temperature I think ??
p : scheduled probability throughout training, reust ss_prob func
"""
def __init__(self):
super(CurriculumDropConnect, self).__init__()
def forward(self, x, gamma, p):
return (1.-p) * np.exp(-gamma*x) + p
def get_dropout(drop_position, drop_rate, drop_dim, drop_method, fixed_dropout=False):
if drop_position == 1 or drop_position == 3:
drop_in = dropout(drop_rate, drop_dim,
drop_method, fixed=fixed_dropout)
else:
drop_in = False
if drop_position == 2 or drop_position == 3:
drop_out = dropout(drop_rate, drop_dim,
drop_method, fixed=fixed_dropout)
else:
drop_out = False
return drop_in, drop_out
def dropout(p=None, dim=None, method='standard', fixed=False):
if method == 'standard':
return LockedDropout(p) if fixed else nn.Dropout(p)
elif method == 'gaussian':
return GaussianDropout(p/(1-p), fixed=fixed)
elif method == 'locked':
return LockedDropout(p)
elif method == 'variational':
"""This is specifically gaussian variational dropout
and doesn't converge for either fixed time steps or non-fixed"""
return VariationalDropout(p/(1-p), dim, locked=fixed)
elif method == 'concrete':
# takes layer, input_shape
return ConcreteDropout
# elif method == 'zoneout':
# return Zoneout(p, fixed)
elif method == 'curriculum':
"""Not required, can just change nn.Dropout() param p"""
# return CurriculumDropout()
return nn.Dropout(p)
class LockedDropout(nn.Module):
def __init__(self, dropout=0.5):
super().__init__()
self.p = dropout
def forward(self, x, p=None):
if p is not None:
self.p = p
if not self.training or not p:
return x
m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.p)
mask = Variable(m, requires_grad=False) / (1 - self.p)
mask = mask.expand_as(x)
return mask * x
class GaussianDropout(nn.Module):
def __init__(self, alpha=1.0, fixed=False):
super(GaussianDropout, self).__init__()
self.alpha = torch.Tensor([alpha])
self.fixed = fixed
def forward(self, x):
"""
Sample noise e ~ N(1, alpha)
Multiply noise h = h_ * e
"""
if self.train():
# N(1, alpha)
if self.fixed:
epsilon = torch.randn((1, x.size(1), x.size(2))) * self.alpha + 1
else:
epsilon = torch.randn(x.size()) * self.alpha + 1
epsilon = Variable(epsilon)
if x.is_cuda:
epsilon = epsilon.cuda()
return x * epsilon
else:
return x
class VariationalDropout(nn.Module):
"""
Variational Gaussian Dropout is not Bayesian so read this paper:
https://arxiv.org/abs/1711.02989
"""
# max alpha is used for clamping and should be small
def __init__(self, alpha=0.01, dim=None, locked=True):
super(VariationalDropout, self).__init__()
self.dim = dim
self.max_alpha = alpha
self.locked = locked
# Initial alpha
log_alpha = (torch.ones(dim) * alpha).log()
self.log_alpha = nn.Parameter(log_alpha)
def kl(self):
c1 = 1.16145124
c2 = -1.50204118
c3 = 0.58629921
alpha = self.log_alpha.exp()
negative_kl = 0.5 * self.log_alpha + c1 * alpha + c2 * alpha ** 2 + c3 * alpha ** 3
kl = -negative_kl
return kl.mean()
def forward(self, x):
"""Sample noise e ~ N(1, alpha)
Multiply noise h = h_ * e"""
if self.train():
# N(0,1)
if self.locked:
epsilon = Variable(torch.randn(size=(x.size(0), x.size(2))))
epsilon = torch.cat([epsilon] * x.size(1)).view(x.size())
else:
epsilon = Variable(torch.randn(x.size()))
if x.is_cuda:
epsilon = epsilon.cuda()
# Clip alpha
self.log_alpha.data = torch.clamp(self.log_alpha.data, max=self.max_alpha)
alpha = self.log_alpha.exp()
# N(1, alpha)
epsilon = epsilon * alpha
return x * epsilon
else:
return x
"""https://github.com/yaringal/ConcreteDropout/blob/master/concrete-dropout-pytorch.ipynb"""
class ConcreteDropout(nn.Module):
def __init__(self, layer, input_shape, weight_regularizer=1e-6, locked = True,
dropout_regularizer=1e-5, init_min=0.1, init_max=0.1):
super(ConcreteDropout, self).__init__()
# Post drop out layer
self.layer = layer
# Input dim for regularisation scaling
self.input_dim = np.prod(input_shape[1:])
# Regularisation hyper-parameters
self.weight_regularizer = weight_regularizer
self.dropout_regularizer = dropout_regularizer
self.locked = locked
# Initialise p_logit
init_min = np.log(init_min) - np.log(1. - init_min)
init_max = np.log(init_max) - np.log(1. - init_max)
self.p_logit = nn.Parameter(torch.Tensor(1))
nn.init.uniform(self.p_logit, a=init_min, b=init_max)
def forward(self, x):
return self.layer(self._concrete_dropout(x))
def regularisation(self):
"""Computes weights and dropout regularisation for the layer, has to be
extracted for each layer within the model and added to the total loss
"""
weights_regularizer = self.weight_regularizer * self.sum_n_square() / (1 - self.p)
dropout_regularizer = self.p * torch.log(self.p)
dropout_regularizer += (1. - self.p) * torch.log(1. - self.p)
dropout_regularizer *= self.dropout_regularizer * self.input_dim
regularizer = weights_regularizer + dropout_regularizer
return regularizer
def _concrete_dropout(self, x):
"""Forward pass for dropout layer
"""
eps = 1e-7
temp = 0.1
self.p = nn.functional.sigmoid(self.p_logit)
# Check if batch size is the same as unif_noise, if not take care
if self.locked:
noise = np.random.uniform(size=(x.size(0), x.size(2)))
noise = np.repeat(noise[:, np.newaxis, :], x.size(1), axis=1)
else:
noise = np.random.uniform(size=tuple(x.size()))
unif_noise = Variable(torch.FloatTensor(noise)).cuda()
drop_prob = (torch.log(self.p + eps)
- torch.log(1 - self.p + eps)
+ torch.log(unif_noise + eps)
- torch.log(1 - unif_noise + eps))
drop_prob = nn.functional.sigmoid(drop_prob / temp)
random_tensor = 1 - drop_prob
retain_prob = 1 - self.p
x = torch.mul(x, random_tensor)
x /= retain_prob
return x
def sum_n_square(self):
"""Helper function for paramater regularisation
"""
sum_of_square = 0
for param in self.layer.parameters():
sum_of_square += torch.sum(torch.pow(param, 2))
return sum_of_square
class Linear_relu(nn.Module):
def __init__(self, inp, out):
super(Linear_relu, self).__init__()
self.model = nn.Sequential(nn.Linear(inp, out), nn.ReLU())
def forward(self, x):
return self.model(x)
class CurriculumDropout(nn.Module):
"""
:param
gamma : temperature
p : scheduled probability throughout training, reust ss_prob func
"""
def __init__(self):
super(CurriculumDropout, self).__init__()
def forward(self, x, gamma, p):
if self.train():
return (1.-p) * np.exp(-gamma * x) + p
else:
return x
def show_drop_probs(model, dropout_position):
if dropout_position == 1:
print("drop-in {}".format(model.drop_in.p))
elif dropout_position == 2:
print("drop-out {}".format(model.drop_out.p))
elif dropout_position == 3:
print("drop-in {} \t drop-out {}".format(model.drop_in.p, model.drop_out.p)) | [
"james.oneill@insight-centre.org"
] | james.oneill@insight-centre.org |
b31b2e1e03da902449ac1d6c84e33183f1d07823 | 7e372094fde657891fdfad0e1fe2f3d9693bdf64 | /instapi/exceptions.py | 097eba4057636202fd4eb326edc7e7e743aaa204 | [
"MIT"
] | permissive | uriyyo/instapi | b92c95a8adc2cdb73fc563649d3d14752c1e42c4 | f15b45727a08fd9b203c71b2a8c9d103478a50dc | refs/heads/develop | 2023-06-30T17:46:25.307092 | 2023-06-26T08:43:57 | 2023-06-26T08:43:57 | 203,244,044 | 37 | 4 | MIT | 2023-09-08T03:08:27 | 2019-08-19T20:17:36 | Python | UTF-8 | Python | false | false | 100 | py | class ClientNotInitedException(Exception):
pass
__all__ = [
"ClientNotInitedException",
]
| [
"1998uriyyo@gmail.com"
] | 1998uriyyo@gmail.com |
91df374bba45a92207655510d73d15b4b347d239 | 1b77a59afaf8f525160de213e902357ba543ea14 | /demo/webSpider/web_spider.py | 4c84af38f1eb5bef81082c57146dc6997539fe17 | [] | no_license | Miracleslop/python_daily | 8e7711194b96538144581163f4cc3f09f0560638 | 11b66dff63dad01bea858aadc828e450e5470cc4 | refs/heads/master | 2020-03-19T01:07:41.982764 | 2018-09-21T10:01:10 | 2018-09-21T10:01:10 | 135,524,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,858 | py | from urllib import request
from urllib import parse
from urllib import error
from commun.logger import clog
from demo.webSpider.htmlity import HtmlTagStart
from demo.webSpider.htmlity import HtmlTagEnd
import io
lg = clog.Logger("web_spider")
class FilterStrategy(object):
def __init__(self, text: str):
"""
:param text: 待过滤的文本
"""
self.__text = text
self.__len = len(text)
# 过滤选项,key统一为前4个字符,value为HtmlTag的子类对象
self.__switch = {
'<scr': HtmlTagStart('<script', '</script>'),
'<!--': HtmlTagEnd('<!--', '-->'),
'<lin': HtmlTagEnd('<link', '/>'),
'<cod': HtmlTagStart('<code', '</code>')
}
@clog.log('filter script content ! ', lg)
def filter(self):
"""
过滤主体
:return: 返回过滤后的文本:str
"""
# 存储过滤后的文本
io_text = io.StringIO()
# 记录需要跳过部分的起始id
old_i = 0
# 遍历文本的下标
i = 0
while i < self.__len:
# 判断是否是过滤文本的开头
if self.__text[i] == '<':
# 取出长度为4的字符串判断是否存在于过滤选项中
sel = self.__switch.get(self.__text[i:i + 4])
if sel is not None:
sel_len = sel.start_len
if self.__text[i:i + sel_len] == sel.start_sign:
# 存在过滤选项中,开始过滤
# 存储过滤文本之前的字符串内容到内存中
io_text.write(self.__text[old_i:i])
# i调转到<script后面第一个字符的下标
i += sel_len
# 开始遍历直到结束标记为止
i = sel.skip_content(i, self.__text)
# 此时i已经跳到结束标记的后一个字符,并将此处记为old_i
old_i = i
# 此时i已经在新的字符上,所以直接continue
continue
i += 1
res_text = io_text.getvalue()
io_text.close()
return res_text
class Demo(object):
@clog.log('downloading...', lg)
def _load_page(self, req_url: str):
"""
加载页面,爬去返回页面文本并返回
:param req_url: 请求
:return: 返回解码后的文本
"""
head = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134 "
}
rqs = request.Request(req_url, headers=head)
rqs.add_header("Connection", "keep-alive")
rps = request.urlopen(rqs)
html_byte = rps.read()
# 解析读取后的字节文件为字符串
html_str = html_byte.decode('utf-8')
return html_str
@clog.log('writing...', lg)
def _write_file(self, content: str, path):
"""
作用:把str写入相应路径的文件
:param content:需要写入的str
:param path: 写入文件的路径
:return:写入结束不返回值
"""
with open(path, 'w') as f:
f.write(content)
@clog.log('spidering...', lg)
def tieba_spider(self, req_url, begin_page, end_page):
"""
作用:负责处理url,分配每个url去发送请求
:param req_url: 需要处理的第一个url
:param begin_page: 爬虫执行的起始页面
:param end_page: 爬虫执行的截止页面
:return:
"""
for page in range(begin_page, end_page + 1):
pn = (page - 1) * 50
filename = "page " + str(page) + ".html"
full_url = '%s&pn=%d' % (req_url, pn)
# 爬去指定url的页面,并返回未解析的html信息
html_str = self._load_page(full_url)
# 创建过滤对象
fs = FilterStrategy(html_str)
# 过滤并将返回值转成字符串
html_fil = str(fs.filter())
# 将过滤后的内容写入文本文件
self._write_file(html_fil, filename)
if __name__ == "__main__":
kw = "lol"
beginPage = 1
endPage = 1
url = "http://tieba.baidu.com/f?"
key = parse.urlencode({"kw": kw})
url += str(key)
try:
demo = Demo()
demo.tieba_spider(url, beginPage, endPage)
except error.ContentTooShortError:
lg.error('downloaded size does not match content-length')
except error.HTTPError:
lg.error('occurs HTTP error')
except error.URLError:
lg.error('sub-type of OSError')
| [
"windous999@hotmail.com"
] | windous999@hotmail.com |
c6c9c9fd3add6b71c82d0a67b2ad1afdaeaf38b8 | 5ae9609559fd3352e894ebc21e072cbc7e7efa89 | /imsteg.py | 52f545d9b40775a5cd27aa0f997dac4ead519489 | [] | no_license | TheLonelyNull/imsteg | d65d3391b5d2ed79ce67777e1add67ca9d4aa0c4 | 2f6425b8cac02d6cfd2be2a9d7259bb016ee2ba9 | refs/heads/master | 2020-12-05T01:33:28.073073 | 2019-12-24T11:48:32 | 2019-12-24T11:48:32 | 231,968,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,383 | py | """A basic steganography program to hide text in an image file"""
from PIL import Image
import readline
import getpass
from random import seed, random, randint
# TODO make autocomplete work
readline.parse_and_bind("tab: complete")
def get_mode_and_exec():
selection = input("Mode(Select 1 or 2)\n1. Encode\n2. Decode\n")
if (selection == "1"):
image = get_image_from_path()
text = get_text_to_encode()
password = get_password()
out_file = get_output_file_name()
encode(image, text, password)
write_to_output(out_file, image)
elif (selection == "2"):
image = get_image_from_path()
password = get_password()
decode(image, password)
else:
print(selection + " is not a valid option.")
exit(1)
def get_image_from_path():
path = input("Enter the path to image: ")
image = None
try:
# TODO make path point to home directory
image = Image.open(path)
except:
print("Could not find an image at " + path)
exit(1)
return image
def get_text_to_encode():
selection = input("Text to embed(Select 1 or 2)\n1. From file\n2. Manual input\n")
text = ""
if (selection == "1"):
text = get_text_from_file()
elif (selection == "2"):
text = get_text_from_input()
else:
print(selection + " is not a valid option.")
exit(1)
return text
def get_text_from_file():
path = input("Enter the path to file: ")
file = None
try:
# TODO make path point to home directory
file = open(path)
except:
print("Could not find an file at " + path)
exit(1)
text = file.read()
return text
def get_text_from_input():
text = input("Text to encode: ")
if len(text) == 0:
print("No text enter")
exit(1)
return text
def get_password():
password = getpass.getpass("Enter a password(No output will be displayed):")
return password
def get_output_file_name():
output_path = input("Enter a name for the output file: ")
if len(output_path) == 0:
print("No output path specified")
exit(1)
return output_path
def encode(image, text, password):
width, height = image.size
upper_bound = width * height
seed(password)
termination_pixels = [(randint(0, 255), randint(0, 255), randint(0, 255)),
(randint(0, 255), randint(0, 255), randint(0, 255))]
already_seen = set()
pixels = image.load()
# encode text
for char in text:
r_num = randint(0, upper_bound)
while r_num in set():
r_num = randint(0, upper_bound)
already_seen.add(r_num)
x = r_num % width
y = r_num // width
cur_pixel = pixels[x, y]
pixels[x, y] = (ord(char), cur_pixel[1], cur_pixel[2])
# encode termination bits
for i in range(2):
r_num = randint(0, upper_bound)
while r_num in set():
r_num = randint(0, upper_bound)
already_seen.add(r_num)
x = r_num % width
y = r_num // width
pixels[x, y] = termination_pixels[i]
def decode(image, password):
width, height = image.size
upper_bound = width * height
seed(password)
termination_pixels = [(randint(0, 255), randint(0, 255), randint(0, 255)),
(randint(0, 255), randint(0, 255), randint(0, 255))]
already_seen = set()
pixels = image.load()
prev_pixel = None
cur_pixel = None
count = 0
message = []
while prev_pixel != termination_pixels[0] and cur_pixel != termination_pixels[1] and count < upper_bound:
r_num = randint(0, upper_bound)
count += 1
while r_num in set():
count += 1
r_num = randint(0, upper_bound)
already_seen.add(r_num)
x = r_num % width
y = r_num // width
prev_pixel = cur_pixel
cur_pixel = pixels[x, y]
message.append(chr(pixels[x, y][0]))
outstring = ""
for c in message[:-2]:
outstring += c
print(outstring)
def write_to_output(output_file_name, encoded_image):
# todo make sure that no compression happens when the file is saved. Probably needs modification of library code.
encoded_image.save(output_file_name)
if __name__ == '__main__':
mode = get_mode_and_exec()
| [
"chris.rossouw5@gmail.com"
] | chris.rossouw5@gmail.com |
5c9ae93b8c8b6056f60afc3d451483318bba5335 | 53548996d9dce7f6127126f2df8b1ed43ce96e0a | /tweetsearch.py | 3e450cdf5952a3bcd332b4bb60fca070210bb8bc | [] | no_license | keivahn/W205__Assignment2 | 35eb46cb8a9f772480e9dd82e57e46cf9da09b66 | f5c1bb6abda62547534ca9c1431fe370a8b12e16 | refs/heads/master | 2020-04-20T03:51:26.584638 | 2015-07-11T02:39:38 | 2015-07-11T02:39:38 | 38,150,147 | 0 | 0 | null | 2015-07-11T02:39:38 | 2015-06-27T06:38:41 | null | UTF-8 | Python | false | false | 2,472 | py | # Alex Smith
# MIDS - W205
# This program searches for tweets within a given date range
# for all tweets containing two queries.
import sys # import the system module, read command line arguments
import urllib # import module to interact with web
import datetime # import module to manipulate date and time formats
import tweetacquire # import tweetacquire class to search for tweets
import tweetdictionary # import tweetdictionary class to create a dictionary of our tweets
import tweetcount # import tweetcount class to count the instances of each query
import histogram # import histogram class to create plot of word counts
import tweetS3 # import tweetS3 class to send output to AWS S3
# get today's date and set range of 1 week for pulling relevant tweets
TODAYS_DATE = datetime.date.today()
ACCEPTABLE_RANGE = datetime.timedelta(days=6)
START = TODAYS_DATE - ACCEPTABLE_RANGE
END = TODAYS_DATE
# define two hashtags (queries) that we want to find
QUERY1 = "#NBAFinals2015"
QUERY2 = "#Warriors"
# name the output file for storing our tweets and later retrieval
OUTPUT_FILE = "tweet_output.p"
if __name__ == "__main__":
# check to see if this function is the main function being called
# call our acquire function and use it to search the queries
acquire = tweetacquire.TweetAcquire()
query1 = urllib.quote_plus(QUERY1)
query2 = urllib.quote_plus(QUERY2)
begin = START
end = END
acquire.search(query1,begin,end)
acquire.search(query2,begin,end)
# create a dictionary to store all our tweets by tweet id
mytweetdictionary = tweetdictionary.TweetDictionary().create(query1,query2,begin,end,OUTPUT_FILE)
# call our count function on our dictionary to count the number
# of instances of each query alone and the number of instances of
# both queries together; we use non-urllib queries because when we are
# searching the original text
tweetcount.TweetCount().count(mytweetdictionary,QUERY1,QUERY2)
# create a histogram of words, write each word and its frequency
# to a csv file; creates 3 csv files, one for tweets with only the
# first query, one for tweets with only the second query, and one
# for tweets with both queries
histogram.Histogram().create(mytweetdictionary,QUERY1,QUERY2)
# send tweet output to Amazon S3 for others to use
tweetS3.TweetS3().upload(OUTPUT_FILE)
| [
"Alex@Alexs-MacBook-Pro.local"
] | Alex@Alexs-MacBook-Pro.local |
cbf272103a5b17ce360b512e13f10fb16434d0cb | 8b82bb7488961596a5f868c96ac61a5e4ca05c42 | /max_value.py | 01d0aad6279fa48a1bd0f51e705a10750ac9b837 | [] | no_license | ashasuj/Python-newby | cb400bb00cfdba04891fdd339fc771b83993182f | 6f907d93ba2610bf4edc7c9f3f1feca76d9096e7 | refs/heads/master | 2020-04-09T22:17:13.304147 | 2019-11-24T01:15:11 | 2019-11-24T01:15:11 | 160,624,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | numbers = [22,43,14,10,1]
max_value = numbers[0]
for item in numbers :
if item > max_value :
max_value = item
print(max_value)
| [
"noreply@github.com"
] | noreply@github.com |
6e8afb0f588cd9bd417b3f790e1acf42db700d9e | 22f83d2dd088b31ce32a394cfcf5013ad72e2cbc | /网络爬虫/ip池/Improve/DataBase.py | a9e131e995a27ca1067bf249184d8bb28eba40d1 | [] | no_license | Lawlighty/Python | 375908669373be2ffb38c148f6ba523058dd74b8 | 757261ab74e5aac14a5e29a7781b7581cfd223b9 | refs/heads/master | 2020-05-04T16:39:39.640378 | 2019-11-16T04:37:11 | 2019-11-16T04:37:11 | 179,284,076 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,382 | py | import config
import pymysql
#数据库操作
#初始化
def init():
try:
db = pymysql.connect(config.mysql_localhost, config.mysql_root, config.mysql_psw, config.mysql_dbname )
cursor = db.cursor()
sql = 'CREATE TABLE IF NOT EXISTS {}(ip_port varchar(30) primary key not null)'.format(config.mysql_tableName)
cursor.execute(sql)
except Exception as e:
print(e)
db.rollback()
finally:
db.close()
#插入ip数据
#逐条插入
def insert_ip(ip_port):
try:
db = pymysql.connect(config.mysql_localhost, config.mysql_root, config.mysql_psw, config.mysql_dbname)
cursor = db.cursor()
#忽略数据插入
sql = 'insert ignore into {} values("{}")'.format(config.mysql_tableName, ip_port)
cursor.execute(sql)
db.commit()
except Exception as e:
print(e)
db.rollback()
finally:
db.close()
#列表插入
def insert_ip_list(ip_list):
try:
db = pymysql.connect(config.mysql_localhost, config.mysql_root, config.mysql_psw, config.mysql_dbname)
cursor = db.cursor()
for i in ip_list:
sql = 'REPLACE into {} values("{}")'.format(config.mysql_tableName, i)
cursor.execute(sql)
db.commit()
except Exception as e:
print(e)
db.rollback()
finally:
db.close()
def get_ip_list():
    """Return every stored "ip:port" string as a list (empty list on failure)."""
    ip_list = []
    db = None  # guard: connect() may raise before db is bound
    try:
        db = pymysql.connect(config.mysql_localhost, config.mysql_root, config.mysql_psw, config.mysql_dbname)
        cursor = db.cursor()
        sql = 'select * from {}'.format(config.mysql_tableName)
        cursor.execute(sql)
        # fetchall() yields one single-column tuple per stored proxy.
        result = cursor.fetchall()
        for row in result:
            ip_list.append(row[0])
    except Exception as e:
        print(e)
        # nothing to roll back for a SELECT, but keep the connection tidy
        if db is not None:
            db.rollback()
    finally:
        if db is not None:
            db.close()
    return ip_list
def dropTable():
    """Drop the proxy-pool table if it exists (best-effort; errors printed)."""
    db = None  # guard: connect() may raise before db is bound
    try:
        db = pymysql.connect(config.mysql_localhost, config.mysql_root, config.mysql_psw, config.mysql_dbname)
        cursor = db.cursor()
        sql = 'drop table if exists {}'.format(config.mysql_tableName)
        cursor.execute(sql)
    except Exception as e:
        print(e)
        if db is not None:
            db.rollback()
    finally:
        if db is not None:
            db.close()
| [
"noreply@github.com"
] | noreply@github.com |
69d50785ab8f1b14cf54f382e8aae15f81e7eab9 | 816f1b2efd6c33da1177694a1bacc65c6dd7309c | /views/predict.py | 9fa1aef52df8f288d1735ae3febd85835c38aa96 | [] | no_license | harbaouiiii/etyp | 7ea2f411100b0f50fb44523cd8445336e3aa4ea4 | 1b0f51caf80005a01d97cf6cc4645d2714352f4f | refs/heads/main | 2023-06-03T00:16:24.021112 | 2021-06-20T16:27:24 | 2021-06-20T16:27:24 | 378,690,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,178 | py | from flask import Flask, render_template, request, jsonify, json, Blueprint, redirect, url_for, session
from flask_sqlalchemy import SQLAlchemy
from wtforms import SelectField, Form
from flask_wtf import FlaskForm
from flask_login import current_user
from views.forms import Form
from models import City, Municipality
import numpy as np
import pickle
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
import sklearn
import joblib
model = pickle.load(open('model2.pkl', 'rb'))
#model = joblib.load('model.joblib')
#sc_X = joblib.load("data_transformer.joblib")
pred = Blueprint('pred', __name__, template_folder='templates')
cities0 = ['Agba',
'Akouda',
'Ariana Ville',
'Aïn Zaghouan',
'Bizerte Nord',
'Borj Cedria',
'Borj Louzir',
'Boumhel',
'Béni Khiar',
'Carthage',
'Centre Urbain Nord',
'Centre Ville - Lafayette',
'Chebba',
'Chotrana',
'Cité El Khadra',
'Cité Olympique',
'Dar Châabane El Fehri',
'Denden',
'Djebel Jelloud',
'Djerba - Midoun',
'Djerba-Houmt Souk',
'Douar Hicher',
'El Haouaria',
'El Mida',
'El Mourouj',
'El Omrane supérieur',
'El Ouardia',
'Ennasr',
'Ettadhamen',
'Ettahrir',
'Ezzahra',
'Ezzouhour',
'Fouchana',
'Gammarth',
'Ghazela',
'Grombalia',
'Hammam Chott',
'Hammam Ghezèze',
'Hammam Lif',
'Hammam Sousse',
'Hammamet',
'Hammamet Centre',
'Hammamet Nord',
'Hammamet Sud',
'Hergla',
'Jardin De Carthage',
"Jardins D'el Menzah",
'Kalaâ Kebira',
'Kalaâ Sghira',
'Kalaât El Andalous',
'Kantaoui',
'Korba',
'Kélibia',
"L'aouina",
'La Goulette',
'La Marsa',
'La Soukra',
'Le Bardo',
'Le Kram',
'Les Berges Du Lac',
"M'saken",
'Mahdia',
'Manar',
'Manouba Ville',
'Medina Jedida',
'Menzah',
'Menzel Temime',
'Mnihla',
'Mohamedia',
'Monastir',
'Mornag',
'Mornaguia',
'Mrezga',
'Mutuelleville',
'Médina',
'Nabeul',
'Oued Ellil',
'Radès',
'Raoued',
'Ras Jebel',
'Sahloul',
'Sidi Daoud',
'Sidi El Béchir',
'Sousse Jawhara',
'Sousse Medina',
'Sousse Riadh',
'Sousse Sidi Abdelhamid',
'Zaouit-Ksibat Thrayett']
cities1 = ['Agba',
'Ain Draham',
'Akouda',
'Ariana Ville',
'Aïn Zaghouan',
'Bekalta',
'Bembla',
'Ben Gardane',
'Bizerte Nord',
'Borj Cedria',
'Borj El Amri',
'Borj Louzir',
'Bou Salem',
'Bouficha',
'Boumhel',
'Béni Khalled',
'Béni Khiar',
'Carthage',
'Centre Urbain Nord',
'Centre Ville - Lafayette',
'Chebba',
'Chotrana',
'Cité El Khadra',
'Dar Châabane El Fehri',
'Denden',
'Djebel Jelloud',
'Djedeida',
'Djerba - Midoun',
'Djerba-Houmt Souk',
'Douar Hicher',
'El Battan',
'El Guettar',
'El Haouaria',
'El Kabaria',
'El Mourouj',
'El Omrane',
'El Omrane supérieur',
'El Ouardia',
'Enfidha',
'Ennasr',
'Ettadhamen',
'Ettahrir',
'Ezzahra',
'Ezzouhour',
'Fouchana',
'Gafsa Nord',
'Gafsa Sud',
'Gammarth',
'Ghar El Melh',
'Grombalia',
'Hammam Chott',
'Hammam Ghezèze',
'Hammam Lif',
'Hammam Sousse',
'Hammamet',
'Hammamet Centre',
'Hammamet Nord',
'Hammamet Sud',
'Hergla',
'Hraïria',
'Jardin De Carthage',
"Jardins D'el Menzah",
'Jemmal',
'Jendouba',
'Kairouan Nord',
'Kairouan Sud',
'Kalaâ Kebira',
'Kalaâ Sghira',
'Kalaât El Andalous',
'Kantaoui',
'Korba',
'Ksar Hellal',
'Ksibet el-Médiouni',
'Ksour Essef',
'Kélibia',
"L'aouina",
'La Goulette',
'La Marsa',
'La Soukra',
'Le Bardo',
'Le Kram',
'Les Berges Du Lac',
"M'saken",
'Mahdia',
'Manar',
'Manouba Ville',
'Mateur',
'Medina Jedida',
'Menzah',
'Menzel Bourguiba',
'Menzel Bouzelfa',
'Menzel Jemil',
'Menzel Temime',
'Mnihla',
'Mohamedia',
'Moknine',
'Monastir',
'Mornag',
'Mornaguia',
'Mrezga',
'Mutuelleville',
'Médenine Nord',
'Médenine Sud',
'Médina',
'Nabeul',
'Nasrallah',
'Oued Ellil',
'Ouerdanine',
'Radès',
'Raoued',
'Ras Jebel',
'Sahline',
'Sahloul',
'Sidi Bou Said',
'Sidi Daoud',
'Sidi El Béchir',
'Sidi Hassine',
'Sidi Thabet',
'Soliman',
'Sousse Jawhara',
'Sousse Medina',
'Sousse Riadh',
'Sousse Sidi Abdelhamid',
'Tabarka',
'Tataouine Nord',
'Tataouine Sud',
'Tebourba',
'Tinja',
'Téboulba',
'Utique',
'Zaouit-Ksibat Thrayett',
'Zarzis',
'Zarzouna']
price_m1 = [[3.0, 2001.0282752929813],
[11.0, 1583.4590601917334],
[21.0, 2103.945814066818],
[39.0, 2177.397335841727],
[40.0, 1013.4458509142054],
[61.0, 3464.285714285714],
[68.0, 2715.7894736842104],
[78.0, 2552.680519809534],
[84.0, 2697.8942652329747],
[88.0, 1775.5050505050503],
[93.0, 1297.5851683802186],
[109.0, 1491.300563236047],
[117.0, 2200.0],
[9.0, 1878.6426930806774],
[14.0, 1946.2264150943397],
[34.0, 1970.03367003367],
[42.0, 1924.7902889754507],
[44.0, 1718.1318681318685],
[50.0, 1317.5335775335775],
[52.0, 1321.2817833507488],
[87.0, 1439.8974190694953],
[94.0, 974.0939742867431],
[97.0, 1617.2144522144522],
[108.0, 1265.76686059039],
[8.0, 1489.9641879081526],
[48.0, 1173.015873015873],
[86.0, 1316.6666666666665],
[89.0, 3000.0],
[91.0, 892.7521755501632],
[110.0, 2849.0873015873017],
[127.0, 2170.7729468599036],
[129.0, 2684.722222222222],
[132.0, 1260.8695652173913],
[31.0, 800.0],
[45.0, 1369.2307692307693],
[46.0, 1101.3976240391335],
[1.0, 418.1818181818182],
[12.0, 740.7407407407408],
[63.0, 1031.25],
[123.0, 1132.3484848484848],
[64.0, 1013.2434745400087],
[65.0, 797.4296536796537],
[105.0, 375.0],
[20.0, 1164.3815201192251],
[73.0, 1220.18341307815],
[83.0, 1595.2802466350854],
[10.0, 1380.7854137447405],
[24.0, 2170.823989419815],
[26.0, 2200.0],
[29.0, 1400.0],
[30.0, 1188.118811881188],
[85.0, 2337.5913398339494],
[98.0, 1824.2926949959794],
[106.0, 1310.8641975308642],
[126.0, 2007.703081232493],
[7.0, 5454.545454545455],
[27.0, 2192.5225411088695],
[28.0, 2537.125850340136],
[101.0, 1102.9411764705883],
[102.0, 968.915950479314],
[131.0, 2016.5199161425578],
[5.0, 682.3529411764706],
[6.0, 803.5714285714286],
[62.0, 1592.5925925925928],
[71.0, 1776.0141093474429],
[72.0, 2250.0],
[95.0, 1536.5461847389558],
[96.0, 2222.042520855991],
[107.0, 1600.0],
[111.0, 1.6666666666666667],
[128.0, 2173.913043478261],
[15.0, 910.8698992133726],
[16.0, 1284.6687030075188],
[23.0, 1526.9138755980864],
[32.0, 1230.3485162180816],
[49.0, 1561.9622777641644],
[51.0, 1427.8734801122835],
[54.0, 2267.875084816535],
[55.0, 2115.951319212189],
[56.0, 2228.5930735930733],
[57.0, 2199.551414768806],
[74.0, 2270.392557295212],
[70.0, 1690.2702702702704],
[90.0, 1385.8823529411766],
[92.0, 1385.2810457516339],
[99.0, 2132.0502645502647],
[104.0, 1676.1867442901926],
[118.0, 1468.3023040866176],
[2.0, 1629.5164016200604],
[13.0, 1500.0],
[38.0, 622.2222222222222],
[53.0, 1448.516169446402],
[58.0, 3153.3521303258144],
[66.0, 1692.3304473304474],
[67.0, 1789.8601398601397],
[69.0, 2539.815099890531],
[82.0, 1760.5743243243244],
[112.0, 2475.9127214249165],
[119.0, 1913.175853018373],
[120.0, 2072.840786048333],
[121.0, 1213.5381285381286],
[122.0, 1441.6267942583731],
[130.0, 843.121868552903],
[124.0, 1088.4496753246754],
[125.0, 853.2051282051282],
[0.0, 764.7058823529412],
[4.0, 2160.2916235558723],
[17.0, 3119.3608039938317],
[18.0, 3448.2758620689656],
[19.0, 1653.6675771447785],
[22.0, 1984.1269841269843],
[25.0, 1369.1487589538565],
[33.0, 1266.6666666666667],
[35.0, 1243.9977282745058],
[36.0, 1793.3035714285716],
[37.0, 1102.2222222222222],
[41.0, 1058.3333333333335],
[43.0, 1263.4188034188032],
[47.0, 1779.69696969697],
[59.0, 1285.0678733031675],
[60.0, 2641.6666666666665],
[75.0, 2862.2784262369755],
[76.0, 2272.7272727272725],
[77.0, 3129.5168067226887],
[79.0, 2169.68524251806],
[80.0, 2709.3253968253966],
[81.0, 2314.814814814815],
[103.0, 1587.9407051282053],
[100.0, 1666.6666666666667],
[113.0, 2000.0],
[114.0, 1524.8419150858174],
[115.0, 1745.5555555555554],
[116.0, 1734.6642246642248]]
price_m0 = [[2.0, 2177.485928705441],
[6.0, 1956.4183964183965],
[13.0, 2284.9325337331334],
[27.0, 2568.5892468313828],
[28.0, 930.2325581395348],
[34.0, 1657.7665010435467],
[46.0, 2903.9523172053296],
[49.0, 1916.6666666666665],
[56.0, 2433.8275973204727],
[62.0, 2086.2107803700724],
[65.0, 2387.6906262419807],
[67.0, 1232.528659611993],
[78.0, 1509.501174489208],
[5.0, 1709.1715793358671],
[7.0, 1950.6336320989594],
[24.0, 1767.6495577722476],
[30.0, 1629.8114334071288],
[32.0, 1132.5046468401488],
[36.0, 1563.270666875279],
[38.0, 1294.4444444444443],
[64.0, 1617.869766768072],
[68.0, 1040.0],
[70.0, 1534.920634920635],
[77.0, 1512.549588776004],
[4.0, 1951.654795581599],
[79.0, 1368.0225054448656],
[12.0, 1290.3225806451612],
[61.0, 1145.1670682258919],
[17.0, 1651.600592390066],
[21.0, 1135.1362683438156],
[63.0, 1717.6909074574328],
[71.0, 1700.4856988667027],
[76.0, 1075.2314814814815],
[19.0, 1715.1651651651648],
[20.0, 2089.9150268336316],
[69.0, 1310.3889106583072],
[8.0, 1075.619351408825],
[16.0, 1835.5978260869567],
[22.0, 1375.9398496240601],
[23.0, 1500.0],
[35.0, 1500.0],
[37.0, 1898.8095238095239],
[40.0, 2434.032258823164],
[41.0, 2616.2190442880983],
[42.0, 2695.653565826853],
[43.0, 2423.745372274784],
[52.0, 1219.9366515837105],
[51.0, 1231.3229340319124],
[66.0, 1500.0],
[72.0, 2243.5195106937117],
[75.0, 2505.7875886844267],
[1.0, 2230.5059523809523],
[39.0, 1634.9772834861087],
[44.0, 2301.7857142857147],
[47.0, 1000.0],
[48.0, 1969.6969696969697],
[50.0, 2093.965334741536],
[60.0, 1000.0],
[80.0, 2132.540373170058],
[83.0, 1999.4467979942588],
[84.0, 2295.959595959596],
[85.0, 2005.9537935197654],
[86.0, 1022.6034858387799],
[87.0, 1500.0],
[0.0, 1340.088996763754],
[3.0, 2290.1785714285716],
[9.0, 2411.098329303308],
[10.0, 2755.9523809523807],
[11.0, 1819.873921999064],
[14.0, 1594.6759259259259],
[15.0, 1589.9585921325051],
[18.0, 941.1764705882352],
[25.0, 1841.9753086419753],
[26.0, 1202.0531400966183],
[29.0, 1635.758047262472],
[31.0, 923.0769230769231],
[33.0, 1810.441176470588],
[45.0, 3077.772461456672],
[53.0, 1844.9460286609913],
[54.0, 2601.3888888888887],
[55.0, 2730.526028170007],
[57.0, 2271.0597826086955],
[58.0, 1681.950397616516],
[59.0, 3402.377430044415],
[74.0, 1969.1876750700283],
[73.0, 1438.8888888888887],
[81.0, 2551.7905773420475],
[82.0, 1771.7640692640693]]
@pred.route('/predict', methods=['GET', 'POST'])
def predict():
    """Render the prediction form (GET) or run the price model (POST).

    Unauthenticated visitors are redirected to login with session['predict']
    set so they can be sent back afterwards.  On POST, the selected city is
    mapped to its index in the cities0/cities1 tables and to an average
    price-per-m2 (price_m0/price_m1) before calling the pickled model.
    """
    if 'logged_in' in session:
        if not session['logged_in']:
            session['predict'] = True
            return redirect(url_for('auth.login'))
        else:
            form = Form()
            form.municipality.choices = [
                (municipality.id_mun, municipality.municipality) for municipality in Municipality.query.all()]
            form.municipality.default = '1'
            if request.method == 'POST':
                municipality = Municipality.query.filter_by(
                    municipality=form.municipality.data).first()
                cit = City.query.filter_by(city=form.city.data).first()
                # NOTE(review): this strips a fixed prefix/suffix from str(cit),
                # so it silently depends on City's exact __str__/__repr__ format.
                city = str(cit)[6:-1]
                area = int(form.area.data)
                roomNumber = int(form.roomNumber.data)
                category = int(form.category.data)
                # category 1 uses the cities0/price_m0 tables, anything else
                # uses cities1/price_m1 (semantics of the categories are not
                # visible here — presumably property type; verify upstream).
                if(category == 1):
                    if city in cities0:
                        city_pos = cities0.index(city)
                    else:
                        city_pos = cities1.index(city)
                    for e in price_m0:
                        if int(e[0]) == city_pos:
                            pr_m = e[1]
                else:
                    if city in cities1:
                        city_pos = cities1.index(city)
                    else:
                        city_pos = cities0.index(city)
                    for e in price_m1:
                        if int(e[0]) == city_pos:
                            pr_m = e[1]
                # NOTE(review): if no table row matches city_pos, pr_m is never
                # bound and the next line raises NameError — TODO add a default.
                int_features = [[city_pos, category, roomNumber, area, pr_m]]
                prediction = model.predict(int_features)
                return render_template('price.html', price=int(prediction[0]), city=city, category=category, room=roomNumber, area=area)
    else:
        session['predict'] = True
        return redirect(url_for('auth.login'))
    # Logged-in GET request: show the empty prediction form.
    return render_template('predict.html', form=form)
@pred.route('/city/<get_city>')
def citybymunicipality(get_city):
    """Return, as JSON, the names of all cities belonging to municipality
    ``get_city`` — used to populate the dependent city dropdown."""
    matches = City.query.filter_by(id_mun=get_city).all()
    payload = [{'name': match.city} for match in matches]
    return jsonify({'citymunicipality': payload})
| [
"harbaouiiii"
] | harbaouiiii |
8c98bff38d91ed5a44a71a4aa02dfad006406174 | 049c6607e64e89bb4ddfe43d86a4bd41c9f1b1e1 | /overay_noscale.py | b81839eeee1f8a839c49f43b1b00ba2c94595d62 | [] | no_license | sverrirbjarna/Python | 4cf5058f3895f12eb1239d036f36ec8317048662 | 0f1500b9939a395f7c7240d037ef16dd67870301 | refs/heads/master | 2023-02-12T10:30:17.629585 | 2021-01-11T02:22:02 | 2021-01-11T02:22:02 | 252,490,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | from __future__ import print_function
from PIL import Image
import cv2 as cv
# Blend an RGB image with its depth map using a fixed 20/80 weighting.
# The interactive alpha prompt is commented out, so alpha is hard-coded.
alpha = 0.2
try:
    raw_input # Python 2
except NameError:
    raw_input = input # Python 3
print(''' Simple Linear Blender
-----------------------
* Enter alpha [0.0-1.0]: ''')
#input_alpha = float(raw_input().strip())
#if 0 <= alpha <= 1:
# alpha = input_alpha
# [load]
src1 = cv.imread(cv.samples.findFile('/home/sverrir/Documents/Python/rgb_26.png'))
src2 = cv.imread(cv.samples.findFile('/home/sverrir/Documents/Python/depth_26.png'))
# [load]
# cv.imread returns None (rather than raising) when the file is missing.
if src1 is None:
    print("Error loading src1")
    exit(-1)
elif src2 is None:
    print("Error loading src2")
    exit(-1)
# [blend_images]
# dst = alpha*src1 + beta*src2; weights sum to 1 for a plain cross-fade.
beta = (1.0 - alpha)
dst = cv.addWeighted(src1, alpha, src2, beta, 0.0)
# [blend_images]
# [display]
# Show the result until a key is pressed, then clean up the window.
cv.imshow('dst', dst)
cv.waitKey(0)
# [display]
cv.destroyAllWindows()
| [
"sverrirbjarnason@gmail.com"
] | sverrirbjarnason@gmail.com |
62011be163fcd16e0c89f50f2b772060031a4830 | 103c0ab0aa8645890af45a9825b9130fa498fb15 | /travelapp/models.py | 06ec2f405122e5136dc941c3f616cf4071642702 | [] | no_license | JohnDBlaze/travel | fcf23f4f42ebdefba2c9a5af6d9a6c36b7befadd | 19e10e19d2cfdc9bcd95f72742e2f531b813f8d1 | refs/heads/master | 2023-08-14T18:35:53.132105 | 2021-10-10T13:11:53 | 2021-10-10T13:11:53 | 415,590,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from django.db import models
# Create your models here.
class place(models.Model):
    """A travel destination listed on the site.

    NOTE(review): PEP 8 / Django convention would name this ``Place``;
    left unchanged because migrations and templates reference it.
    """
    name = models.CharField(max_length=100)
    # Uploaded to MEDIA_ROOT/picture/
    img = models.ImageField(upload_to='picture')
    desc = models.TextField()
    price = models.IntegerField()
    # True when the destination currently has a promotional offer.
    offer = models.BooleanField(default=False)
class blog(models.Model):
    """A blog post shown on the travel site.

    NOTE(review): ``date`` is free-form text, not a DateField — the format
    is whatever the admin types in; consider a DateField if sorting is needed.
    """
    date = models.TextField()
    # Uploaded to MEDIA_ROOT/pic/
    img = models.ImageField(upload_to='pic')
    head = models.CharField(max_length=50)
    desc = models.TextField()
| [
"abhilashfrancis88@gmail.com"
] | abhilashfrancis88@gmail.com |
dd983b475450134d57eea146369f3c3af92487a9 | c3c9f799c6163162d31916968eab9bc827f6fd7c | /trainlesk.py | d52736afb1a27dbb54ae839f4758993e52da67c0 | [] | no_license | Sayan-m90/Thats-Punderful | b52faba8574a37bed5059572c29f306605a81bf3 | 3885553d98575df003bc4a13173d4f17784384e5 | refs/heads/master | 2021-09-05T17:45:45.565902 | 2018-01-30T01:46:18 | 2018-01-30T01:46:18 | 119,462,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,892 | py | from __future__ import division
from nltk.corpus import wordnet as wn
from nltk.stem import PorterStemmer
from itertools import chain
from nltk.corpus import stopwords
import csv
from sklearn import tree
import pdb
import string
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
#bank_sents = [
#'The batteries were given out free of charge',
#'I went to the bank to deposit my money',
#'The river bank was full of dead fishes']
#plant_sents = ['A few boxers were standing in a line. That\'s the punch line!',
#'A bicycle cannot stand alone; it is two tired.',
#'The workers at the industrial plant were overworked',
#'The plant had no flowers or leaves']
ps = PorterStemmer()
#reader = csv.reader(f, delimiter=" ")
# text_file = open("training_set.ods", "r")
# list1 = text_file.readlines()
# listtext = []
# for item in list1:
# listtext.append(str(item))
# print item
def lesk(context_sentence, ambiguous_word, pos=None, stem=True, hyperhypo=True):
    """Simplified-Lesk word-sense ranking for ``ambiguous_word``.

    For every WordNet synset of the word, a signature is built from its
    definition, lemma names and (optionally) hyper/hyponym lemma names, and
    scored by overlap with the context tokens.

    Arguments:
        context_sentence -- the sentence (a string, or a pre-split token list)
        ambiguous_word   -- the word to disambiguate
        pos              -- optional WordNet POS tag to restrict senses to
        stem             -- Porter-stem both signature and context before matching
        hyperhypo        -- include hyper/hyponym lemma names in the signature

    Returns:
        [best, str(best_overlap), second, str(second_overlap),
         third, str(third_overlap)] -- senses are None where overlap is 0.
    """
    # Fix: tokenize the context ONCE.  The original iterated the raw string,
    # so with a str argument each "token" was a single character, and it also
    # re-stemmed the context on every synset iteration.
    if isinstance(context_sentence, str):
        context_tokens = context_sentence.split()
    else:
        context_tokens = list(context_sentence)
    if stem == True:
        context_tokens = [ps.stem(i) for i in context_tokens]
    context_set = set(context_tokens)

    scored = []
    for ss in wn.synsets(ambiguous_word):
        # If POS is specified, skip senses with a different part of speech.
        # (!= instead of the original identity test `is not` on strings.)
        if pos and ss.pos() != pos:
            continue
        lesk_dictionary = []
        # Includes definition.
        lesk_dictionary += ss.definition().split()
        # Includes lemma_names.
        lesk_dictionary += ss.lemma_names()
        # Optional: includes lemma_names of hypernyms and hyponyms.
        if hyperhypo == True:
            lesk_dictionary += list(chain(*[i.lemma_names() for i in ss.hypernyms() + ss.hyponyms()]))
        if stem == True:  # Matching exact words causes sparsity, so match stems.
            lesk_dictionary = [ps.stem(i) for i in lesk_dictionary]
        scored.append((len(set(lesk_dictionary).intersection(context_set)), ss))

    # Fix: rank properly.  The original elif-cascade never demoted a displaced
    # best sense into second/third place.  Stable sort keeps WordNet order on ties.
    scored.sort(key=lambda pair: pair[0], reverse=True)
    while len(scored) < 3:
        scored.append((0, None))

    lesk = []
    for overlap, sense in scored[:3]:
        # Preserve original contract: a 0-overlap slot reports sense None.
        lesk.append(sense if overlap > 0 else None)
        lesk.append(str(overlap))
    return lesk
# Build a per-word feature matrix from the pun training set (Python 2 script).
# Each TSV row is: sentence, ..., comma-separated indices of the pun word(s);
# negative indices count from the end of the sentence.
wordtrain = []
label = []
#pdb.set_trace()
with open('training_set.tsv') as f:
    reader = csv.reader(f, delimiter="\t")
    for lidx, line in enumerate(reader):
        #print lidx
        sent = line[0]
        # NOTE(review): str.translate(None, ...) is the Python-2-only API;
        # under Python 3 this raises TypeError.
        sent = sent.translate(None, string.punctuation)
        if len(line) == 2:
            print "Quit on line: {0}".format(lidx)
            break
        # Last column holds the pun-word indices.
        punidx = line[len(line)-1]
        punidxs = punidx.split(",")
        # NOTE(review): `is 0` should be `== 0`, and split(",") never yields
        # an empty list anyway, so this guard can never fire.
        if len(punidxs) is 0:
            print "Quit on line: {0}".format(lidx)
        punidxs = [int(x) for x in punidxs]
        # Normalize negative indices to absolute positions in the sentence.
        newpunidxs = []
        for i in punidxs:
            if i < 0:
                newpunidxs += [len(sent.split(" "))+i]
            else:
                newpunidxs += [i]
        punidxs = newpunidxs
        #test if started with any negatives
        #x = [1 for i in punidxs if int(i) < 0]
        #if 1 in x:
        #    pdb.set_trace()
        trainset = []
        # Features per word: [#synsets, |2nd-best - best| Lesk overlap];
        # answer[1]/answer[3] are the stringified overlap counts from lesk().
        for idx, ew in enumerate(sent.split()):
            if int(idx) in punidxs:
                answer = lesk(sent,ew)
                c = len(wn.synsets(ew))
                l = []
                l.append(c)
                l.append(abs(int(answer[3])-int(answer[1])))
                trainset.append(l)
                label.append(1)
            else:
                answer = lesk(sent,ew)
                l = []
                c = len(wn.synsets(ew))
                l.append(c)
                l.append(abs(int(answer[3])-int(answer[1])))
                #l.append(int(answer[3]))
                trainset.append(l)
                label.append(0)
        wordtrain.append(trainset)
for i in wordtrain:
    print i
    print "\n"
#svc = tree.DecisionTreeClassifier()
#params = [{'kernel':['linear','rbf'],'random_state':[1,2,5,10],'gamma':[1e0, 1e-1, 1e-3, 1e-5],'C':[1,5,10,100]}]
#clf = GridSearchCV(svc,params,cv=5)
#clf = tree.DecisionTreeClassif)
#svc.fit(trainset, label)
#trainpreds = svc.predict(trainset)
#cm = confusion_matrix(trainpreds, label)
#print(cm)
#mzip = zip(trainpreds,label)
#corrects = [1 for x in mzip if x[0]==x[1]]
#accuracy = sum(corrects) / len(label)
#print accuracy
#print trainpreds
#clf.predict_proba([[2, 2]])
#y_pred = clf.predict([[2,2]])
#print(y_pred)
| [
"noreply@github.com"
] | noreply@github.com |
4cd27ec2a6296f942376e8e41767de6dd42b8cba | 3efe7813bb3c4f48f508c464f24b78acaccd827b | /actions.py | 475586444fb2906bb7612bd32f78d38310d0ef12 | [] | no_license | SamButers/Le-Chat | 05110b2b9fa0484380b214abbcec590635a6ed27 | af0af4512456dceeb364abe80b4e9169be1fc46e | refs/heads/master | 2021-07-03T01:54:23.995599 | 2019-04-06T05:10:30 | 2019-04-06T05:10:30 | 178,958,398 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | from rasa_core_sdk import Action
from rasa_core_sdk.events import SlotSet
import requests, json
class ActionInformCat(Action):
    """Rasa custom action: look up a cat breed on TheCatAPI and reply with
    its description.  The breed name comes from the 'cat' slot."""
    def name(self):
        # Identifier referenced from the Rasa domain file.
        return "action_inform_cat";
    def run(self, dispatcher, tracker, domain):
        cat = tracker.get_slot('cat');
        # NOTE(review): the slot value is concatenated unescaped into the
        # query string — consider requests' params= for proper URL encoding.
        cat_url = 'https://api.thecatapi.com/v1/breeds/search?q=' + cat;
        response = requests.get(cat_url);
        cat_content = response.content.decode();
        cat_json = json.loads(cat_content);
        # An empty result list makes cat_json[0] raise -> fall back to the pun.
        try:
            answer = 'This is the information about {} I could find for you: {}'.format(cat_json[0]['name'], cat_json[0]['description']);
        except:
            answer = "Pawrdon me, human. I haven't been able to find infurrmation about your desired cat.";
        dispatcher.utter_message(answer);
        # Keep the slot set so follow-up questions can reuse it.
        return [SlotSet('cat', cat)];
"sambuters@gmail.com"
] | sambuters@gmail.com |
0ae0b7cc11a68a67ca5c8f5f511bf81476008878 | 54c850a8d5f02295020ca5caf1f0b74167b3adf5 | /3 - Infection Transmission/calculate_rdmst.py | 810d0ffdd376ce9551a74570e9cceb61a7424459 | [] | no_license | ThisIsRoy/Algorithmic-Thinking-Projects | 9706e00d308bb9ffb0d2f2e4de10258d4f68b460 | 70215fcb14f6f0e2d4c6b3d1ee7325d50814b687 | refs/heads/master | 2021-04-04T06:16:27.985614 | 2018-04-30T05:15:12 | 2018-04-30T05:15:12 | 125,116,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,726 | py | from collections import *
from copy import *
def compute_rdmst_helper(graph, root):
    """
    Computes the RDMST of a weighted digraph rooted at node root.
    It is assumed that:
    (1) root is a node in graph, and
    (2) every other node in graph is reachable from root.
    Arguments:
    graph -- a weighted digraph in standard dictionary representation.
    root -- a node in graph.
    Returns:
    An RDMST of graph rooted at root. The weights of the RDMST
    do not have to be the original weights.

    This is essentially the Chu-Liu/Edmonds recursion: pick the cheapest
    incoming edge per node, and if that creates a cycle, contract it and
    recurse on the smaller graph.
    """
    # reverse the representation of graph (rgraph[v] maps head -> weight of
    # each edge INTO v, which makes per-node incoming-edge work easy)
    rgraph = reverse_digraph_representation(graph)
    # Step 1 of the algorithm: normalize so each node's cheapest incoming
    # edge costs 0 (mutates rgraph in place)
    modify_edge_weights(rgraph, root)
    # Step 2 of the algorithm: keep one zero-cost incoming edge per node
    rdst_candidate = compute_rdst_candidate(rgraph, root)
    # compute a cycle in rdst_candidate
    cycle = compute_cycle(rdst_candidate)
    # Step 3 of the algorithm: acyclic candidate IS the RDMST (in modified weights)
    if not cycle:
        return reverse_digraph_representation(rdst_candidate)
    else:
        # Step 4 of the algorithm: contract the cycle into supernode cstar
        g_copy = deepcopy(rgraph)
        g_copy = reverse_digraph_representation(g_copy)
        # Step 4(a) of the algorithm
        (contracted_g, cstar) = contract_cycle(g_copy, cycle)
        # cstar = max(contracted_g.keys())
        # Step 4(b) of the algorithm: recurse on the strictly smaller graph
        new_rdst_candidate = compute_rdmst_helper(contracted_g, root)
        # Step 4(c) of the algorithm: splice the cycle back in place of cstar
        rdmst = expand_graph(reverse_digraph_representation(rgraph), new_rdst_candidate, cycle, cstar)
        return rdmst
def bfs(graph, startnode):
    """Breadth-first search over digraph ``graph`` from ``startnode``.

    Arguments:
        graph -- digraph in standard dict-of-dicts representation
        startnode -- node to start from

    Returns:
        dict mapping every node to its hop distance from startnode;
        unreachable nodes keep a distance of float('inf').
    """
    distances = {node: float('inf') for node in graph}
    distances[startnode] = 0
    frontier = deque([startnode])
    while frontier:
        current = frontier.popleft()
        for neighbor in graph[current]:
            # First time we see a node is also the shortest hop count to it.
            if distances[neighbor] == float('inf'):
                distances[neighbor] = distances[current] + 1
                frontier.append(neighbor)
    return distances
def reverse_digraph_representation(graph):
    """Flip every edge of ``graph``, keeping weights.

    In the returned mapping, flipped[v][u] == graph[u][v]: each node maps
    to the weights of its INCOMING edges.  Every node of the input appears
    as a key even if it ends up with no entries.
    """
    flipped = {node: {} for node in graph}
    for head, outgoing in graph.items():
        for tail, weight in outgoing.items():
            flipped.setdefault(tail, {})[head] = weight
    return flipped
# Test cases:
# test_g = {1: {2: 3, 3: 1}, 2: {1: 4, 3: 5}, 3: {1: 6, 2: 9}}
# print reverse_digraph_representation(test_g)
# test_g2 = g = {0: {1: 20, 2: 4, 3: 20}, 1: {2: 2, 5: 16}, 2: {3: 8, 4: 20}, 3: {4: 4, 5: 8},
# 4: {1: 4}, 5: {}}
# print reverse_digraph_representation(test_g2)
def modify_edge_weights(rgraph, root):
    """Normalize incoming-edge weights in place (reversed representation).

    For every node except ``root``, subtract the node's cheapest incoming
    weight from all of its incoming edges, so each non-root node ends up
    with at least one zero-cost incoming edge.

    Returns the (mutated) input graph for convenience.
    """
    for node, incoming in rgraph.items():
        if node == root or not incoming:
            continue
        cheapest = min(incoming.values())
        for head in incoming:
            incoming[head] -= cheapest
    return rgraph
# Test cases:
# fig3 = {0: {1: 20, 2: 4, 3: 20}, 1: {2: 2, 5: 16}, 2: {3: 8, 4: 20}, 3: {4: 4, 5: 8}, 4: {1: 4}, 5: {}}
# reverse_fig3 = reverse_digraph_representation(fig3)
# print reverse_digraph_representation(modify_edge_weights(reverse_fig3, 0))
def compute_rdst_candidate(rgraph, root):
    """Pick one cheapest incoming edge per non-root node.

    ``rgraph`` is in reversed representation.  For every node other than
    ``root`` that has incoming edges, keep exactly one edge of minimum
    weight (the first such edge encountered, matching dict order).

    Returns the candidate in reversed representation.
    """
    candidate = defaultdict(dict)
    for node, incoming in rgraph.items():
        if node == root or not incoming:
            continue
        best = min(incoming.values())
        for head, weight in incoming.items():
            if weight == best:
                candidate[node][head] = weight
                break  # only one incoming edge per node
    return candidate
# Test cases:
# reverse_fig3 = {0: {}, 1: {0: 20, 4: 4}, 2: {0: 4, 1: 2}, 3: {0: 20, 2: 8}, 4: {2: 20, 3: 4}}
# reverse_mod_fig3 = modify_edge_weights(reverse_fig3, 0)
# print reverse_digraph_representation(compute_rdst_candidate(reverse_mod_fig3, 0))
def compute_cycle(rdst_candidate):
    """Find a cycle in an RDST candidate, or None if it is acyclic.

    ``rdst_candidate`` is in reversed representation.  A depth-first walk
    (list used as a stack) is started from every node; hitting an
    already-visited node is treated as closing a cycle, which is then
    reconstructed by walking the parent pointers back.

    NOTE(review): this is only valid because every node of an RDST candidate
    has at most one incoming edge, so a revisit necessarily closes a cycle;
    on arbitrary digraphs a revisited cross-edge would be misreported.

    Returns:
        tuple of the cycle's nodes, or None when no cycle exists.
    """
    normal_rdst = reverse_digraph_representation(rdst_candidate)
    visited = dict()
    nodes = normal_rdst.keys()
    for node in nodes:
        visited[node] = False
    for node in nodes:
        queue = []
        # fresh visited map per start node so each walk is independent
        visited_copy = dict(visited)
        parent = dict()
        queue.append(node)
        visited_copy[node] = True
        while queue:
            curr_node = queue.pop()
            for neighbor in normal_rdst[curr_node].keys():
                if not visited_copy[neighbor]:
                    visited_copy[neighbor] = True
                    queue.append(neighbor)
                    parent[neighbor] = curr_node
                else:
                    # neighbor seen before: rebuild the cycle via parents
                    parent[neighbor] = curr_node
                    cycle = [neighbor]
                    cycle_node = curr_node
                    while cycle_node != neighbor:
                        cycle.insert(1, cycle_node)
                        cycle_node = parent[cycle_node]
                    return tuple(cycle)
    return None
# Test cases:
# test_g1 = {0: {1: 1}, 1: {2: 3}, 2: {3: 5}, 3: {1: 3}}
# print compute_cycle(test_g1)
# test_g2 = {1: {2: 1, 3: 1}, 2: dict(), 3: {4: 1, 5: 1}, 4: dict(), 5: {6: 1}, 6: {1: 1}}
# print compute_cycle(test_g2)
def contract_cycle(graph, cycle):
    """Contract ``cycle`` into a single supernode cstar (standard representation).

    All edges between a cycle node and the outside are rerouted through
    cstar, keeping only the cheapest edge in each direction per outside node.

    NOTE(review): assumes node labels are numeric (cstar = max key + 1) and
    that the cycle has at least one incoming edge from outside — otherwise
    incoming_head stays None and graph_cont[None] raises KeyError.

    Returns:
        (contracted graph, cstar label)
    """
    r_graph = reverse_digraph_representation(graph)
    graph_cont = deepcopy(graph)
    cstar = max(graph.keys()) + 1
    graph_cont[cstar] = dict()
    # default float('inf') so the first real weight always wins the minimum
    unique_cdict = defaultdict(lambda: float('inf'))
    # find and add all outgoing edges from c-star
    for cycle_node in cycle:
        for tail, weight in graph[cycle_node].items():
            if tail not in cycle and weight < unique_cdict[tail]:
                unique_cdict[tail] = weight
    graph_cont[cstar] = dict(unique_cdict)
    # compile all incoming edges to cycle to edge_in (keep only the cheapest)
    incoming_head = None
    incoming_weight = float('inf')
    for cycle_node in cycle:
        for head, weight in r_graph[cycle_node].items():
            if head not in cycle and weight < incoming_weight:
                incoming_head = head
                incoming_weight = weight
    graph_cont[incoming_head][cstar] = incoming_weight
    # remove all cycle nodes and outgoing edges
    for node in cycle:
        graph_cont.pop(node)
    # remove all incoming edges to cycle nodes
    # NOTE(review): popping while iterating .keys() is only safe on Python 2,
    # where keys() returns a list; Python 3 would need list(edges.keys()).
    for head, edges in graph_cont.items():
        for tail in edges.keys():
            if tail in cycle:
                graph_cont[head].pop(tail)
    return graph_cont, cstar
# Test cases:
# fig_3a = {0: {1: 20, 2: 4, 3: 20}, 1: {2: 2, 5: 16}, 2: {3: 8, 4: 20}, 3: {4: 4, 5: 8}, 4: {1 : 4}, 5: {}}
# print contract_cycle(fig_3a, (1, 2, 3, 4))
# test2 = {0: {1: 3, 2: 2}, 1: {2: 0}, 2: {1: 0}}
# print contract_cycle(test2, (1, 2))
def expand_graph(graph, rdst_candidate, cycle, cstar):
    """Replace supernode ``cstar`` in ``rdst_candidate`` by the original cycle.

    ``graph`` is the pre-contraction graph (standard representation),
    ``rdst_candidate`` the RDMST of the contracted graph, ``cycle`` the tuple
    of contracted nodes.  The single edge into cstar is rerouted to the
    cheapest real cycle node it represented; edges out of cstar are rerouted
    from the cheapest real cycle node; all cycle edges are restored except
    the one entering the node where the tree attaches (breaking the cycle).

    NOTE(review): reverse_rdst[cstar].keys()[0] indexes a keys() view —
    Python 2 only; Python 3 would need next(iter(...)).

    Returns the expanded RDMST (standard representation).
    """
    # make expanded graph a nested dict copy of the rdst
    r_graph = reverse_digraph_representation(graph)
    expanded_graph = defaultdict(dict)
    for head, edges in rdst_candidate.items():
        if edges == dict():
            expanded_graph[head] = dict()
        else:
            for tail, weight in edges.items():
                expanded_graph[head][tail] = weight
    if cstar in expanded_graph.keys():
        expanded_graph.pop(cstar)
    reverse_rdst = reverse_digraph_representation(rdst_candidate)
    # find minimum weight incoming edge (the real endpoint behind head->cstar)
    incoming_head = reverse_rdst[cstar].keys()[0]
    incoming_tail = None
    incoming_weight = float('inf')
    for tail, weight in graph[incoming_head].items():
        if tail in cycle and weight < incoming_weight:
            incoming_tail = tail
            incoming_weight = weight
    # replace outgoing edges: each cstar->tail edge came from some cycle node
    cstar_tails = rdst_candidate[cstar].keys()
    for tail in cstar_tails:
        outgoing_weight = float('inf')
        outgoing_head = None
        for head, weight in r_graph[tail].items():
            if head in cycle and weight < outgoing_weight:
                outgoing_weight = weight
                outgoing_head = head
        expanded_graph[outgoing_head][tail] = outgoing_weight
    expanded_graph[incoming_head].pop(cstar)
    expanded_graph[incoming_head][incoming_tail] = incoming_weight
    # expand outgoing cycle edges and cycle edges except for vstar
    # (skipping the edge into incoming_tail is what breaks the cycle)
    for cycle_node in cycle:
        for tail, weight in graph[cycle_node].items():
            if tail in cycle and tail != incoming_tail:
                expanded_graph[cycle_node][tail] = weight
    return expanded_graph
# Test cases:
# fig_3a = {0: {1: 20, 2: 4, 3: 20}, 1: {2: 2, 5: 16}, 2: {3: 8, 4: 20}, 3: {4: 4, 5: 8}, 4: {1 : 4}, 5: {}}
# reverse_fig_3a = reverse_digraph_representation(fig_3a)
# mod_fig_3a = reverse_digraph_representation(modify_edge_weights(reverse_fig_3a, 0))
# rdst_candidate = {0: {6: 2}, 6: {5: 0}, 5: {}}
# cycle = (1, 2, 3, 4)
# cstar = 6
# print expand_graph(mod_fig_3a, rdst_candidate, cycle, cstar)
def compute_rdmst(graph, root):
    """
    This function checks if:
    (1) root is a node in digraph graph, and
    (2) every node, other than root, is reachable from root
    If both conditions are satisfied, it calls compute_rdmst_helper
    on (graph, root).
    Since compute_rdmst_helper modifies the edge weights as it computes,
    this function reassigns the original weights to the RDMST.
    Arguments:
    graph -- a weighted digraph in standard dictionary representation.
    root -- a node id.
    Returns:
    An RDMST of graph rooted at r and its weight, if one exists;
    otherwise, nothing.
    """
    # Validation failures print a message and implicitly return None.
    # (print statements: this module is Python 2.)
    if root not in graph:
        print "The root node does not exist"
        return
    # BFS distance of inf means a node is unreachable from root.
    distances = bfs(graph, root)
    for node in graph:
        if distances[node] == float('inf'):
            print "The root does not reach every other node in the graph"
            return
    rdmst = compute_rdmst_helper(graph, root)
    # reassign the original edge weights to the RDMST and computes the total
    # weight of the RDMST (the helper worked on normalized weights)
    rdmst_weight = 0
    for node in rdmst:
        for nbr in rdmst[node]:
            rdmst[node][nbr] = graph[node][nbr]
            rdmst_weight += rdmst[node][nbr]
    return (rdmst, rdmst_weight)
# Test cases:
# g0 = {0: {1: 2, 2: 2, 3: 2}, 1: {2: 2, 5: 2}, 2: {3: 2, 4: 2}, 3: {4: 2, 5: 2}, 4: {1: 2}, 5: {}}
# print compute_rdmst(g0, 0)
# g1 = {0: {1: 20, 2: 4, 3: 20}, 1: {2: 2, 5: 16}, 2: {3: 8, 4: 20}, 3: {4: 4, 5: 8}, 4: {1: 4}, 5: {}}
# print compute_rdmst(g1, 0)
# g2 = {0: {1: 5, 2: 4}, 1: {2: 2}, 2: {1: 2}}
# print compute_rdmst(g2, 0)
# g3 = {1: {2: 2.1, 3: 1.0, 4: 9.1, 5: 1.1}, 2: {1: 2.1, 3: 1.0, 4: 17.0, 5: 1.0}, 3: {1: 1.0, 2: 1.0, 4: 16.0, 5: 0.0}, 4: {1: 9.1, 2: 17.1, 3: 16.0, 5: 16.0}, 5: {1: 1.1, 2: 1.0, 3: 0.0, 4: 16.0}}
# print compute_rdmst(g3, 1)
# g4 = {1: {2: 2.1, 3: 1.0, 4: 9.1, 5: 1.1, 6: 10.1, 7: 10.1, 8: 6.1, 9: 11.0, 10: 10.1}, 2: {1: 2.1, 3: 1.0, 4: 17.0, 5: 1.0, 6: 18.1, 7: 18.1, 8: 14.1, 9: 19.1, 10: 18.0}, 3: {1: 1.0, 2: 1.0, 4: 16.0, 5: 0.0, 6: 17.0, 7: 17.0, 8: 13.1, 9: 18.1, 10: 17.0}, 4: {1: 9.1, 2: 17.1, 3: 16.0, 5: 16.0, 6: 5.1, 7: 5.1, 8: 15.1, 9: 6.1, 10: 5.0}, 5: {1: 1.1, 2: 1.0, 3: 0.0, 4: 16.0, 6: 17.1, 7: 17.1, 8: 13.1, 9: 18.1, 10: 17.0}, 6: {1: 10.1, 2: 18.1, 3: 17.0, 4: 5.1, 5: 17.1, 7: 0.0, 8: 16.1, 9: 7.1, 10: 0.0}, 7: {1: 10.1, 2: 18.1, 3: 17.0, 4: 5.1, 5: 17.1, 6: 0.0, 8: 16.0, 9: 7.1, 10: 0.0}, 8: {1: 6.1, 2: 14.1, 3: 13.1, 4: 15.1, 5: 13.1, 6: 16.1, 7: 16.0, 9: 17.1, 10: 16.1}, 9: {1: 11.1, 2: 19.1, 3: 18.1, 4: 6.1, 5: 18.1, 6: 7.1, 7: 7.1, 8: 17.1, 10: 7.0}, 10: {1: 10.1, 2: 18.1, 3: 17.1, 4: 5.1, 5: 17.0, 6: 0.0, 7: 0.0, 8: 16.1, 9: 7.0}}
# print compute_rdmst(g4, 1)
| [
"rs58@rice.edu"
] | rs58@rice.edu |
5216a13d8d872dec242e77027299a6e3e1ff4e34 | 4e711136a2cbf3a16ee50f2076b0d992561b7557 | /users/models.py | 5e93afb21dfd1a4ae766e593f211401d2bf3e1cc | [] | no_license | SATYENDRARANJAN/YOURPHYSIO | 30e324f65352bf3890df019023782f13732a42e6 | 829245309e2f1800cf8127b37df6134dcf3ced20 | refs/heads/main | 2023-06-25T14:14:52.112859 | 2021-07-30T00:01:24 | 2021-07-30T00:01:24 | 390,877,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | from datetime import datetime
from django.db import models
# Create your models here.
class Patient(models.Model):
name = models.CharField(max_length=40,null=True)
phone = models.IntegerField(max_length=10)
email = models.EmailField()
created_at=models.DateTimeField(default=datetime.now)
updated_at=models.DateTimeField(default=datetime.now)
class Profile(models.Model):
patient= models.OneToOneField(Patient,related_name='profile',on_delete=models.CASCADE)
address = models.CharField(max_length=200)
#other personal data
class HealthQuestions(models.Model):
QTYPES=(('multiple_choice','multiple_choice'),('text_input','text_input'),('numeric_input','numeric_input'),('upload_file','upload_file'))
qcode = models.CharField(max_length=10)
qtext = models.CharField(max_length=100)
order = models.IntegerField()
qtype = models.CharField(max_length=40,choices=QTYPES)
class SubQuestions(models.Model):
question = models.ForeignKey(HealthQuestions,related_name='subquestions',on_delete=models.CASCADE)
subqtext = models.CharField(max_length=100)
order= models.IntegerField()
class HealthInfoRecords(models.Model):
patient = models.ForeignKey(Patient,on_delete=models.CASCADE,related_name='records')
created_at = models.DateTimeField(default=datetime.now)
updated_at = models.DateTimeField(default=datetime.now)
session_count = models.IntegerField(default=0)
class PatientGoals(models.Model):
pass
| [
"satya@getvital.com"
] | satya@getvital.com |
312460ac8bca6af84350206ee751570d59d027a3 | 1a639d185f9c883b7bebf33c577c58b22ac93c7e | /graphics/moving_pan.py | 37ad05954d8428b128a6f418e6a6d7232f4c8994 | [] | no_license | gofr1/python-learning | bd09da5b5850b1533a88b858690ed4380b55d33e | 19343c985f368770dc01ce415506506d62a23285 | refs/heads/master | 2023-09-02T15:42:27.442735 | 2021-11-12T10:17:13 | 2021-11-12T10:17:13 | 237,828,887 | 0 | 0 | null | 2021-11-12T10:17:14 | 2020-02-02T20:03:42 | Python | UTF-8 | Python | false | false | 767 | py | from superwires import games
path_to_images = '../../Pictures/img/'
games.init(screen_width = 640, screen_height = 480, fps = 50)
class Pan(games.Sprite):
'''Pan moving with mouse'''
def update(self):
'''Move object to mouse position'''
self.x = games.mouse.x
self.y = games.mouse.y
def main():
wall_image = games.load_image(path_to_images + "wall.jpg", transparent=False)
games.screen.background = wall_image
pan_image = games.load_image(path_to_images + "PizzaPan.png")
the_pan = Pan(
image = pan_image,
x = games.mouse.x,
y = games.mouse.y
)
games.screen.add(the_pan)
games.mouse.is_visible = False # mouse pointer is invisible
games.screen.mainloop()
# go!
main()
| [
"gofr.one@gmail.com"
] | gofr.one@gmail.com |
3cd88b7155f943721061204a47fa62099cc6acd0 | bf4209c0846a1c237dd996267c8eadd7717ae19e | /timer_tracker.py | 0eab39fcd28105062a931c59184961366d0cb19f | [] | no_license | bulhakovolexiy/movieParser | f312fb8c0037536dea8ca15f79ec280fdcbdc129 | 72235789a26dc828ab605f63de0e7370310393ba | refs/heads/master | 2023-08-15T04:02:19.478610 | 2021-10-13T19:24:49 | 2021-10-13T19:24:49 | 415,060,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py |
class TimerTracker(object):
def __init__(self):
pass
# def find_main_beep_freq(source):
# fft_spectrum = np.fft.rfft(source)
#
# freq = np.fft.rfftfreq(source.size, d=1. / sampFreq)
#
# freq_coords = [int(x) for x in range(0, len(freq)) if 1500 <= freq[x] <= 3250]
# fft_spectrum = fft_spectrum[freq_coords]
#
# fft_spectrum_abs = np.abs(fft_spectrum)
#
# max_amplitude = max(fft_spectrum_abs)
# max_amplitude_coord = fft_spectrum_abs.tolist().index(max_amplitude)
# return freq[freq_coords[max_amplitude_coord]]
#
# def find_highest_peak(source):
# main_beep_freq = find_main_beep_freq(source)
# butter_filter = signal.butter(10, [main_beep_freq - 40, main_beep_freq + 40], btype='bandpass', output='sos',
# fs=sampFreq)
# source = signal.sosfilt(butter_filter, source)
#
# return max(source)
#
# def find_highest_peak_coord(source, time_window, offset, samp_freq):
# practical_time_window = int(time_window*samp_freq)
# practical_offset = int(offset*samp_freq)
# source_len = len(source)
# highest_peak = 0
# highest_peak_coord = 0
# i = 0
# while i < source_len:
# j = i+practical_time_window
# if j > source_len:
# break
#
# peak = find_highest_peak(source[i:j])
#
# if peak > highest_peak:
# highest_peak = peak
# highest_peak_coord = i;
# i += practical_offset
# return highest_peak_coord, highest_peak
| [
"dzot161995@gmail.com"
] | dzot161995@gmail.com |
b18d91a68a5d42b0f2f054ca6b2d456eab45041a | 9ed3770babe66fd7f1ba86b1245978cf86f77d1b | /client.py | 19c8e91e4b992d25d18bc220a89f1fc0fc842aa4 | [] | no_license | lwx940710/TCP-Concurrent-Upload-Download | ff0065fb0c5190e51c0b53ebb75edf6e4313dcad | cf402e1d5498a740becdc1210d6ea7f5e185e533 | refs/heads/master | 2021-01-16T18:56:42.928643 | 2017-08-12T19:22:44 | 2017-08-12T19:22:44 | 100,133,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | #!/usr/bin/env python3
from struct import pack, unpack
import socket, sys, os, io, math, time
# terminate server: client <host> <port> F
# download: client <host> <port> G<key> <file name> <recv size>
# upload: client <host> <port> P<key> <file name> <send size> <wait time>
def tryInt(s):
try:
int(s)
return True
except ValueError:
return False
HOST, PORT, instruction = sys.argv[1:4]
PORT = int(PORT)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
if (instruction[0] == "F"):
instruction = instruction + "\0\0\0\0\0\0\0\0"
s.send(str.encode(instruction))
elif (instruction[0] == "G"):
while len(instruction) < 9:
instruction = instruction + "\0"
s.send(str.encode(instruction))
fileName, payloadSize = sys.argv[4:]
payloadSize = int(payloadSize)
fd = open(fileName, "wb+")
while True:
pkt = s.recv(payloadSize)
if not pkt: break
fd.write(pkt)
fd.close()
elif (instruction[0] == "P"):
while len(instruction) < 9:
instruction = instruction + "\0"
s.send(str.encode(instruction))
fileName, payloadSize, waitTime = sys.argv[4:]
payloadSize, waitTime = int(payloadSize), int(waitTime)
if (tryInt(fileName)):
fileSize = int(fileName)
fd = io.BytesIO(bytes(fileSize))
else:
fd = open(fileName, "rb")
fileSize = os.path.getsize(fileName)
while True:
chunk = fd.read(payloadSize)
if not chunk: break
s.send(bytes(chunk))
time.sleep(waitTime / 1000.0)
fd.close()
| [
"w254li@uwaterloo.ca"
] | w254li@uwaterloo.ca |
a1bdcdb481696a4cde9e5264a2cddcb522103db0 | 02081cb12f02c0481701c53efabd0a73e889f42d | /model/CR.py | 4f4731c1ec7cbfa88a34e74d8c0ed8463765a42d | [] | no_license | zr1048028670/AECR-Net | 900484711dd9c1e1d0e54157c9662d437b9f0a4a | d2eb15b9fd98d6f1bedd64b72d49548ebfadeedb | refs/heads/main | 2023-08-12T11:53:02.742030 | 2021-10-13T02:16:30 | 2021-10-13T02:16:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,271 | py | import torch.nn as nn
import torch
from torch.nn import functional as F
import torch.nn.functional as fnn
from torch.autograd import Variable
import numpy as np
from torchvision import models
class Vgg19(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg19, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
return [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
class ContrastLoss(nn.Module):
def __init__(self, ablation=False):
super(ContrastLoss, self).__init__()
self.vgg = Vgg19().cuda()
self.l1 = nn.L1Loss()
self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
self.ab = ablation
def forward(self, a, p, n):
a_vgg, p_vgg, n_vgg = self.vgg(a), self.vgg(p), self.vgg(n)
loss = 0
d_ap, d_an = 0, 0
for i in range(len(a_vgg)):
d_ap = self.l1(a_vgg[i], p_vgg[i].detach())
if not self.ab:
d_an = self.l1(a_vgg[i], n_vgg[i].detach())
contrastive = d_ap / (d_an + 1e-7)
else:
contrastive = d_ap
loss += self.weights[i] * contrastive
return loss
| [
"704289013@qq.com"
] | 704289013@qq.com |
6f44e55609bce00b517938584610a93d2b86c024 | 8b21142cccab05509cad8fe0dd7af867cd7fa6ff | /pybullet/noros/solo.py | 31a42a840a9716da3cedd786f09ce26694f6665d | [
"MIT"
] | permissive | WPI-MMR/simulation_playground | 2c7763b5d5b4ee388d0d101af1d6f4235307bcd0 | b5ebfc58feed6d380c92fb65988032cbab5b32bc | refs/heads/master | 2022-12-22T09:36:50.376379 | 2020-10-04T21:32:25 | 2020-10-04T21:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,788 | py | import functools
import math
import util
import os
from typing import List, Tuple
from pinocchio import utils as se3util
from pinocchio import robot_wrapper as se3robot_wrapper
import pinocchio as se3
class BaseSolo(object):
"""The base solo class
Args:
model_path (str): The path to the root of the model folder
robot_name (str): Name of the robot, typically the name of the urdf file
yaml_config (str): Name of the yaml configuration
motor_inertia (float): Motor innertia constant
motor_gear_ration (float): Motor gear ratios
Returns:
"""
# PID gains
kp: float = 5.0
kd: float = 0.1
ki: float = 0.0
# The Kt constant of the motor [Nm/A]: tau = I * Kt.
motor_torque_constant: float = 0.025
# Control time period.
control_period: float = 0.001
dt: float = control_period
# Maximum control one can send, here the control is the current.
# MaxCurrent = 12 # Ampers
max_current: float = 2.0
max_control: float = max_current
max_qref: float = math.pi
base_link_name: str = 'base_link'
end_effector_names: List[str] = ['HL_ANKLE', 'HR_ANKLE', 'FL_ANKLE',
'FR_ANKLE']
def __init__(self, model_path: str, robot_name: str, yaml_config: str,
motor_inertia: float, motor_gear_ration: float):
self.model_path = model_path
self.urdf_path, self.mesh_path, self.yaml_path = util.model_to_subpaths(
model_path, robot_name, yaml_config)
self.motor_inertia = motor_inertia
self.motor_gear_ration = motor_gear_ration
self.mass: float = None
self.base_name: str = None
self.nb_joints: int = None
self.map_joint_name_to_id: Dict[str, int] = None
self.map_joint_limits: Dict[int, Tuple[float, float]] = None
@property
def max_torque(self):
return self.motor_torque_constant * self.max_current
def _build_robot(self):
"""_build_robot Create a pinocchio/pybullet robot wrapper """
# Rebuild the robot wrapper instead of using an existing model to also load
# the visuals
print('mesh path')
print(self.mesh_path)
robot = se3robot_wrapper.RobotWrapper.BuildFromURDF(
self.urdf_path, self.mesh_path, se3.JointModelFreeFlyer())
robot.rotorInertia[6:] = self.motor_inertia
robot.rotorGearRatio[6:] = self.motor_gear_ration
self.mass = np.sum([i.mass for i in robot.inertias])
self.base_name = robot.frames[2].name
# The number of motors, here they are the same as there are only revolute
# joints.
self.nb_joints = robot.nv - 6
self.map_joint_name_to_id = {}
self.map_joint_limits = {}
for i, (name, lower, upper) in enumerate(zip(robot.names[1:],
robot.lowerPositionLimit,
robot.upperPositionLimit)):
self.map_joint_name_to_id[name] = i
self.map_joint_limits[i] = (float(lower), float(upper))
return robot
class Solo8Vanilla(BaseSolo):
robot_name: str = 'solo'
yaml_config: str = 'dmg_parameters_solo8.yaml'
# The inertia of a single blmc_motor.
motor_inertia: float = 0.0000045
# The motor gear ratio.
motor_gear_ration: int = 9.
# Define the initial state.
initial_configuration = [0.2,0,0.4, 0,0,0,1] + 4*[0.8,-1.6]
initial_velocity = (8+6)*[0,]
def __init__(self, model_path: str):
self.q0 = None
self.v0 = None
self.a0 = None
super().__init__(model_path, self.robot_name, self.yaml_config,
self.motor_inertia, self.motor_gear_ration)
def build_robot_wrapper(self):
robot = self._build_robot()
self.q0 = se3util.zero(robot.nq)
self.q0[:] = self.initial_configuration
self.v0 = se3util.zero(robot.nv)
self.a0 = se3util.zero(robot.nv)
return robot | [
"rukna1000@gmail.com"
] | rukna1000@gmail.com |
0a9ceb15dff10ffe4f727308fc88a2b199140f7d | e6bbdf531b4c6452be6d007829d9ff8da5a5c42a | /tests/test_local_planner.py | 586bc8b27cb87b34577f017a06b2fda1fd93921d | [] | no_license | DevotionZhu/simple_scenario_validation | e3c3e61a72c3d4774514c3a116088bfef88c470c | b84a9f0d6c5023171526f37fa8b36edb4308f714 | refs/heads/master | 2022-01-15T06:52:15.312433 | 2019-07-17T13:35:46 | 2019-07-17T13:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | import carla
from utils.local_planner import LocalPlanner
from utils.in_memory_map import InMemoryMap
ip = "10.51.6.52"
port = 2006
client = carla.Client(ip, port)
world = client.get_world()
world_map = world.get_map()
local_map = InMemoryMap(world_map)
local_map.setup()
vehicle = world.get_actors().filter('vehicle.*')[0]
local_planner = LocalPlanner(vehicle, local_map, target_velocity=20)
while True:
throttle, steer = local_planner.update()
control = carla.VehicleControl()
control.throttle = throttle
control.steer = steer
vehicle.apply_control(control)
print('Throttle : ', throttle, 'Steer : ', steer)
| [
"pravinblaze@hotmail.com"
] | pravinblaze@hotmail.com |
c1064fc49e796bcf0aefae3556c05fc32fd0952f | d4d16e48357eeb759d979c1777bf93b437f8d69a | /page/__init__.py | 5ce10e61df1804de6a73f86870f1834d93850e60 | [] | no_license | jianxiaohui18/xuechebu_project | abc2f32f8688fa47ed87d041dfafd3151cdb213a | 974ffb10d648873151cb586fa153d4dd491b30f9 | refs/heads/master | 2020-12-13T16:23:27.643807 | 2020-01-17T10:37:21 | 2020-01-17T10:37:21 | 234,470,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | from selenium.webdriver.common.by import By
#首页
mine_btn = By.XPATH,'//*[contains(@text,"我的")]' #我的按钮
#我的页面
log_reg_btn = By.XPATH,'//*[contains(@text,"登录")]' #登录/注册按钮
username_text = By.XPATH,'//*[contains(@text,"用户")]'
#登录页面
username_box = By.ID,'com.bjcsxq.chat.carfriend:id/login_phone_et' #用户名输入框
password_box = By.ID,'com.bjcsxq.chat.carfriend:id/login_pwd_et' #密码输入
login_btn = By.ID,'com.bjcsxq.chat.carfriend:id/login_btn' #登录按钮
alter_btn = By.ID,'com.bjcsxq.chat.carfriend:id/btn_neg' #弹窗按钮
| [
"624767241@qq.com"
] | 624767241@qq.com |
bdf68b56c5eb18f09a7ad8ccc33d711cf58a4939 | bebc8db5f0da1a0e44bc99a5d4922243a7d5ccdf | /LAB_0_MFAA.py | 83cf2930c9c789a554455f994156dd1a25528fab | [] | no_license | MaferAnso/LAB_0_MFAA | 3d29825fa112a950a0e40599daf28b9a4b714d2d | 502a69c20df4768b57ee28af2d35e8874e639125 | refs/heads/master | 2020-12-23T17:26:02.477897 | 2020-02-24T14:15:29 | 2020-02-24T14:15:29 | 237,217,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,134 | py | # -- ------------------------------------------------------------------------------------ -- #
# -- Proyecto: Repaso de python y análisis de precios en OANDA -- #
# -- Codigo: Laboratorio_0 -- #
# -- Rep: https://github.com/MaferAnso/LAB_0_MFAA -- #
# -- Autor: MaferAnso -- #
# -- ------------------------------------------------------------------------------------ -- #
# -- ------------------------------------------------------------- Importar con funciones -- #
import funciones as fn # Para procesamiento de datos
import visualizaciones as vs # Para visualizacion de datos
import pandas as pd # Procesamiento de datos
from datos import OA_Ak # Importar token para API de OANDA
# -- --------------------------------------------------------- Descargar precios de OANDA -- #
# token de OANDA
OA_In = "EUR_USD" # Instrumento
OA_Gn = "D" # Granularidad de velas
fini = pd.to_datetime("2019-07-06 00:00:00").tz_localize('GMT') # Fecha inicial
ffin = pd.to_datetime("2019-12-06 00:00:00").tz_localize('GMT') # Fecha final
# Descargar precios masivos
df_pe = fn.f_precios_masivos(p0_fini=fini, p1_ffin=ffin, p2_gran=OA_Gn,
p3_inst=OA_In, p4_oatk=OA_Ak, p5_ginc=4900)
# -- --------------------------------------------------------------- Graficar OHLC plotly -- #
vs_grafica1 = vs.g_velas(p0_de=df_pe.iloc[0:120, :])
vs_grafica1.show()
# -- ------------------------------------------------------------------- Conteno de velas -- #
# multiplicador de precios
pip_mult = 10000
# -- 0A.1: Hora
df_pe['hora'] = [df_pe['TimeStamp'][i].hour for i in range(0, len(df_pe['TimeStamp']))]
# -- 0A.2: Dia de la semana.
df_pe['dia'] = [df_pe['TimeStamp'][i].weekday() for i in range(0, len(df_pe['TimeStamp']))]
# -- 0B: Boxplot de amplitud de velas (close - open).
df_pe['co'] = (df_pe['Close'] - df_pe['Open'])*pip_mult
# -- ------------------------------------------------------------ Graficar Boxplot plotly -- #
vs_grafica2 = vs.g_boxplot_varios(p0_data=df_pe[['co']], p1_norm=False)
vs_grafica2.show()
# -- ------------------------------------------------------------- Conteo de Velas -- #
# -- 01: Mes de la vela.
df_pe['Mes'] = [df_pe['TimeStamp'][i].month for i in range(0, len(df_pe['TimeStamp']))]
# -- 02: Sesion de la vela.
asia = ['00', '01', '02', '03', '04', '05', '06', '07', '22', '23']
asia_europa = ['08']
europa = ['09', '10', '11', '12']
europa_america= ['13', '14', '15', '16'']
america = ['17', '18', '19', '20', '21']
asi = [x+'']
# -- 03: Amplitud OC esperada de vela para cualquier dia de la semana (Dist de Freq).
# -- 04: Amplitud HL esperada de vela para cualquier dia de la semana (Dist de Freq).
# -- 05: Evolucion de velas consecutivas (1: Alcistas, 0: Bajistas).
# -- 06: Maxima evolucion esperada de velas consecutivas (Dist Acum de Freq).
# -- 07: Calculo + Grafica autopropuesta. | [
"mafer.ansoleaga@gmail.com"
] | mafer.ansoleaga@gmail.com |
81a9468e822101750e73217c4b2e6d17f02e75b2 | 7a3fc3ea3dd71e4ec85ac73e0af57ae976777513 | /.history/flaskblog_20210524215327.py | 3dc704c3d526e4ab9cbfa1dbab452f4fb649c5bb | [] | no_license | khanhdk0000/first_proj | 72e9d2bbd788d6f52bff8dc5375ca7f75c0f9dd0 | bec0525353f98c65c3943b6d42727e3248ecfe22 | refs/heads/main | 2023-05-12T10:36:08.026143 | 2021-06-05T15:35:22 | 2021-06-05T15:35:22 | 374,148,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
@app.route('/home')
def hello_world():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('home.html')
if __name__ == '__main__':
app.run(debug=True) | [
"khanhtran28092000@gmail.com"
] | khanhtran28092000@gmail.com |
53f22c793897b67ba555bf2fec3f3bd3b6be2bb4 | 343885d6a1901a045c79132d25fad91f21be3626 | /cs224n/a5/model_embeddings.py | 0992b20a7cb2eece2be4925cd100e48b9d2ddd8c | [] | no_license | DeCinB/nlp_learning | 04acf0c2e6c2b89e7496013338623106505bf497 | 251387484d09add52d7a0f24ac89deb8b6633fd9 | refs/heads/master | 2023-01-31T03:17:06.442021 | 2020-12-09T15:12:27 | 2020-12-09T15:12:27 | 319,951,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,752 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2019-20: Homework 5
model_embeddings.py: Embeddings for the NMT model
Pencheng Yin <pcyin@cs.cmu.edu>
Sahil Chopra <schopra8@stanford.edu>
Anand Dhoot <anandd@stanford.edu>
Michael Hahn <mhahn2@stanford.edu>
"""
import torch.nn as nn
# Do not change these imports; your module names should be
# `CNN` in the file `cnn.py`
# `Highway` in the file `highway.py`
# Uncomment the following two imports once you're ready to run part 1(j)
from cnn import CNN
from highway import Highway
# End "do not change"
class ModelEmbeddings(nn.Module):
"""
Class that converts input words to their CNN-based embeddings.
"""
def __init__(self, word_embed_size, vocab):
"""
Init the Embedding layer for one language
@param word_embed_size (int): Embedding size (dimensionality) for the output word输出的词向量维度
@param vocab (VocabEntry): VocabEntry object. See vocab.py for documentation.#应该是字符表
Hints: - You may find len(self.vocab.char2id) useful when create the embedding
"""
super(ModelEmbeddings, self).__init__()
### YOUR CODE HERE for part 1h
self.word_embed_size=word_embed_size
char_embed_size=50#字符的embedding维度
max_word_length=21#一个单词最长的字符数
self.char_embedding=nn.Embedding(len(vocab.char2id),
embedding_dim=char_embed_size,
padding_idx=vocab.char2id['<pad>'])#字符嵌入
self.cnn=CNN(k=5,f=self.word_embed_size,emb_size=char_embed_size,m_word=max_word_length)
self.highway=Highway(self.word_embed_size)
self.dropout=nn.Dropout(0.3)
### END YOUR CODE
def forward(self, input):
"""
Looks up character-based CNN embeddings for the words in a batch of sentences.
@param input: Tensor of integers of shape (sentence_length, batch_size, max_word_length) where
each integer is an index into the character vocabulary
@param output: Tensor of shape (sentence_length, batch_size, word_embed_size), containing the
CNN-based embeddings for each word of the sentences in the batch
"""
### YOUR CODE HERE for part 1h
X=self.char_embedding(input)
sents_len,batch_size,max_word_length,char_embed_size=X.shape
view_shape=(sents_len*batch_size,max_word_length,char_embed_size)
X_reshape=X.view(view_shape).transpose(1,2)
X_conv_out=self.cnn(X_reshape)
X_highway=self.highway(X_conv_out)
X_word_emb=self.dropout(X_highway)
X_word_emb=X_word_emb.view(sents_len,batch_size,self.word_embed_size)
return X_word_emb
### END YOUR CODE
| [
"1481410986@qq.com"
] | 1481410986@qq.com |
e11d12473f0b5708c3e7481974b91923f8e075f3 | c021c17736f8839d66d2f875cc37b48c319b74f3 | /disabler.py | 5a9752613cf1e1f81f1899be366f28e4b2c2a6f2 | [] | no_license | bianfusia/WinKeyDisabler | 310363a2b85aac36e9bbd897e9bd65e35c64c55d | c5e6b4689ef59d0c2c866b984aa1967d02eab626 | refs/heads/master | 2022-12-02T07:01:46.145850 | 2020-08-11T15:00:22 | 2020-08-11T15:00:22 | 267,564,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import keyboard
keyboard.block_key('left')
input("this is included here to make a persistent console to ensure script stays alive.")
| [
"noreply@github.com"
] | noreply@github.com |
abbfb2d604c1dd871e29b62445be0039b0e26849 | f64e9de2a5f8cd677180172591256a651fe20cbc | /examples/list1.py | 8a18ad28e39f8a252c7df3374926d0a7605803f6 | [
"MIT"
] | permissive | schlichtanders/pyparsing-2.0.3-OrderedDict | c85455a65ea610a959a41d035175912ba3762e11 | 50bb1a10b63ac623ef58ffa3ee59bb08be172ff4 | refs/heads/master | 2021-01-10T04:07:10.044763 | 2015-10-23T11:03:14 | 2015-10-23T11:03:14 | 44,807,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,660 | py | from pyparsingOD import *
# first pass
lbrack = Literal("[")
rbrack = Literal("]")
integer = Word(nums).setName("integer")
real = Combine(Optional(oneOf("+ -")) + Word(nums) + "." +
Optional(Word(nums))).setName("real")
listItem = real | integer | quotedString
listStr = lbrack + delimitedList(listItem) + rbrack
test = "['a', 100, 3.14]"
print(listStr.parseString(test))
# second pass, cleanup and add converters
lbrack = Literal("[").suppress()
rbrack = Literal("]").suppress()
cvtInt = lambda s,l,toks: int(toks[0])
integer = Word(nums).setName("integer").setParseAction( cvtInt )
cvtReal = lambda s,l,toks: float(toks[0])
real = Combine(Optional(oneOf("+ -")) + Word(nums) + "." +
Optional(Word(nums))).setName("real").setParseAction( cvtReal )
listItem = real | integer | quotedString.setParseAction( removeQuotes )
listStr = lbrack + delimitedList(listItem) + rbrack
test = "['a', 100, 3.14]"
print(listStr.parseString(test))
# third pass, add nested list support
cvtInt = lambda s,l,toks: int(toks[0])
cvtReal = lambda s,l,toks: float(toks[0])
lbrack = Literal("[").suppress()
rbrack = Literal("]").suppress()
integer = Word(nums).setName("integer").setParseAction( cvtInt )
real = Combine(Optional(oneOf("+ -")) + Word(nums) + "." +
Optional(Word(nums))).setName("real").setParseAction( cvtReal )
listStr = Forward()
listItem = real | integer | quotedString.setParseAction(removeQuotes) | Group(listStr)
listStr << lbrack + delimitedList(listItem) + rbrack
test = "['a', 100, 3.14, [ +2.718, 'xyzzy', -1.414] ]"
print(listStr.parseString(test)) | [
"Stephan.Sahm@gmx.de"
] | Stephan.Sahm@gmx.de |
a904615bd096feb7bf4831dd78ed92d7eebcda1f | 8293eea2e15ea057ffcc496f528dddd48b0ee2cd | /script/luatex_node_inspect.py | d3183f62449dbc293ec5e7d33bcdcc83ed793fa6 | [] | no_license | xziyue/luatex-node-inspect | 5a0c162a4c50eb47e8935872b46cb433f16253d9 | 7ba1e80f60dadb337bb231a4c92f3831e256a915 | refs/heads/master | 2023-01-24T16:25:21.295622 | 2020-12-15T16:25:50 | 2020-12-15T16:25:50 | 321,552,792 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,315 | py | import copy
import re
import os
import json
__node_common_field = [
'id', 'prev', 'next', 'subtype'
]
__node_id = {
0: 'hlist', 1: 'vlist', 2: 'rule', 3: 'ins', 4: 'mark', 5: 'adjust', 6: 'boundary', 7: 'disc', 8: 'whatsit',
9: 'local_par', 10: 'dir', 11: 'math', 12: 'glue', 13: 'kern', 14: 'penalty', 15: 'unset', 16: 'style',
17: 'choice', 18: 'noad', 19: 'radical', 20: 'fraction', 21: 'accent', 22: 'fence', 23: 'math_char', 24: 'sub_box',
25: 'sub_mlist', 26: 'math_text_char', 27: 'delim', 28: 'margin_kern', 29: 'glyph', 30: 'align_record',
31: 'pseudo_file', 32: 'pseudo_line', 33: 'page_insert', 34: 'split_insert', 35: 'expr_stack', 36: 'nested_list',
37: 'span', 38: 'attribute', 39: 'glue_spec', 40: 'attribute_list', 41: 'temp', 42: 'align_stack',
43: 'movement_stack', 44: 'if_stack', 45: 'unhyphenated', 46: 'hyphenated', 47: 'delta', 48: 'passive', 49: 'shape'
}
_hlist_field = [
'attr', 'width', 'height', 'depth', 'shift', 'glue_order', 'glue_set', 'glue_sign',
'head', 'list', 'dir'
]
hlist_subtype = {
0: 'unknown', 1: 'line', 2: 'box', 3: 'indent', 4: 'alignment', 5: 'cell', 6: 'equation', 7: 'equationnumber',
8: 'math', 9: 'mathchar', 10: 'hextensible', 11: 'vextensible', 12: 'hdelimiter', 13: 'vdelimiter',
14: 'overdelimiter', 15: 'underdelimiter', 16: 'numerator', 17: 'denominator', 18: 'limits', 19: 'fraction',
20: 'nucleus', 21: 'sup', 22: 'sub', 23: 'degree', 24: 'scripts', 25: 'over', 26: 'under', 27: 'accent',
28: 'radical'
}
hlist_glue_sign = {
0: 'normal',
1: 'stretching',
2: 'shrinking'
}
_vlist_field = copy.copy(_hlist_field)
vlist_subtype = {
0: 'unknown',
4: 'alignment',
5: 'cell'
}
_rule_field = [
'attr', 'width', 'height', 'depth',
'left', 'right', 'dir', 'index', 'transform'
]
rule_subtype = {
0: 'normal', 1: 'box', 2: 'image', 3: 'empty', 4: 'user', 5: 'over', 6: 'under', 7: 'fraction', 8: 'radical',
9: 'outline'
}
_ins_field = [
'attr', 'cost', 'height', 'depth', 'head', 'list'
]
_mark_field = [
'attr', 'class', 'mark'
]
_adjust_field = [
'attr', 'head', 'list'
]
adjust_subtype = {
0: 'normal',
1: 'pre'
}
_disc_field = [
'attr', 'pre', 'post', 'replace', 'penalty'
]
disc_subtype = {
0: 'discretionary', 1: 'explicit', 2: 'automatic', 3: 'regular', 4: 'first', 5: 'second'
}
_math_field = [
'attr', 'surround'
]
math_subtype = {
0: 'beginmath', 1: 'endmath'
}
_glue_field = [
'width', 'stretch', 'stretch_over', 'shrink', 'shrink_over', 'attr', 'leader'
]
glue_subtype = {
0: 'userskip', 1: 'lineskip', 2: 'baselineskip', 3: 'parskip', 4: 'abovedisplayskip', 5: 'belowdisplayskip',
6: 'abovedisplayshortskip', 7: 'belowdisplayshortskip', 8: 'leftskip', 9: 'rightskip', 10: 'topskip',
11: 'splittopskip', 12: 'tabskip', 13: 'spaceskip', 14: 'xspaceskip', 15: 'parfillskip', 16: 'mathskip',
17: 'thinmuskip', 18: 'medmuskip', 19: 'thickmuskip', 98: 'conditionalmathskip', 99: 'muglue', 100: 'leaders',
101: 'cleaders', 102: 'xleaders', 103: 'gleaders'
}
_kern_field = [
'attr', 'kern'
]
kern_subtype = {
0: 'footkern', 1: 'userkern', 2: 'accentkern', 3: 'italiccorrection'
}
_penalty_field = [
'attr', 'penalty'
]
penalty_subtype = {
0: 'userpenalty', 1: 'linebreakpenalty', 2: 'linepenalty', 3: 'wordpenalty', 4: 'finalpenalty', 5: 'noadpenalty',
6: 'beforedisplaypenalty', 7: 'afterdisplaypenalty', 8: 'equationnumberpenalty'
}
_glyph_field = [
'attr', 'char', 'font', 'lang', 'left', 'right', 'uchyph',
'components', 'xoffset', 'yoffset', 'width', 'height', 'depth',
'expansion_factor', 'data'
]
glyph_subtype = {
0: '', 1: 'character', 2: 'ligature', 3: 'ligature+character', 4: 'ghost', 5: 'ghost+character',
6: 'ghost+ligature', 7: 'ghost+ligature+character', 8: 'left', 9: 'left+character', 10: 'left+ligature',
11: 'left+ligature+character', 12: 'left+ghost', 13: 'left+ghost+character', 14: 'left+ghost+ligature',
15: 'left+ghost+ligature+character', 16: 'right', 17: 'right+character', 18: 'right+ligature',
19: 'right+ligature+character', 20: 'right+ghost', 21: 'right+ghost+character', 22: 'right+ghost+ligature',
23: 'right+ghost+ligature+character', 24: 'right+left', 25: 'right+left+character', 26: 'right+left+ligature',
27: 'right+left+ligature+character', 28: 'right+left+ghost', 29: 'right+left+ghost+character',
30: 'right+left+ghost+ligature', 31: 'right+left+ghost+ligature+character'
}
_boundary_field = [
'attr', 'value'
]
_local_par_field = [
'attr', 'pen_inter', 'pen_broken', 'dir', 'box_left', 'box_left_width',
'box_right', 'box_right_width'
]
_dir_field = [
'attr', 'dir', 'level'
]
_marginkern_field = [
'attr', 'width', 'glyph'
]
marginkern_subtype = {
0: 'left', 1: 'right'
}
_math_char_field = [
'attr', 'char', 'fam'
]
_math_text_char_field = copy.copy(_math_char_field)
_sub_box_field = [
'attr', 'head', 'list'
]
_sub_mlist_field = copy.copy(_sub_box_field)
_delim_field = [
'attr', 'small_char', 'small_fam', 'large_char', 'large_fam'
]
__math_options = {8: 'set/internal', 9: 'internal', 10: 'axis', 12: 'no axis', 24: 'exact', 25: 'left', 26: 'middle',
28: 'right', 41: 'no sub script', 42: 'no super script', 43: 'no script'}
_noad_field = [
'attr', 'nucleus', 'sub', 'sup', 'options'
]
noad_subtype = {
0: 'ord', 1: 'opdisplaylimits', 2: 'oplimits', 3: 'opnolimits', 4: 'bin', 5: 'rel', 6: 'open', 7: 'close',
8: 'punct', 9: 'inner', 10: 'under', 11: 'over', 12: 'vcenter'
}
_accent_field = [
'nucleus', 'sub', 'sup', 'accent', 'bot_accent', 'fraction'
]
accent_subtype = {
0: 'bothflexible', 1: 'fixedtop', 2: 'fixedbottom', 3: 'fixedboth'
}
_style_field = [
'style'
]
_choice_field = [
'attr', 'display', 'text', 'script', 'scriptscript'
]
_radical_field = [
'attr', 'nucleus', 'sub', 'sup', 'left', 'degree', 'width', 'options'
]
radical_subtype = {
0: 'radical', 1: 'uradical', 2: 'uroot', 3: 'uunderdelimiter', 4: 'uoverdelimiter', 5: 'udelimiterunder',
6: 'udelimiterover'
}
_fraction_field = [
'attr', 'width', 'num', 'denom', 'left', 'right', 'middle', 'options'
]
_fence_field = [
'attr', 'delim', 'italic', 'height', 'depth', 'options', 'class'
]
__whatsit_id = {'0': 'open', '1': 'write', '2': 'close', '3': 'special', '6': 'save_pos', '7': 'late_lua',
'8': 'user_defined', '16': 'pdf_literal', '17': 'pdf_refobj', '18': 'pdf_annot', '19': 'pdf_start_link',
'20': 'pdf_end_link', '21': 'pdf_dest', '22': 'pdf_action', '23': 'pdf_thread',
'24': 'pdf_start_thread', '25': 'pdf_end_thread', '26': 'pdf_thread_data', '27': 'pdf_link_data',
'28': 'pdf_colorstack', '29': 'pdf_setmatrix', '30': 'pdf_save', '31': 'pdf_restore'}
_whatsit_open_field = [
'attr', 'stream', 'name', 'ext', 'area'
]
_whatsit_write_field = [
'attr', 'stream', 'data'
]
_whatsit_close_field = [
'attr', 'stream'
]
_whatsit_user_defined_field = [
'attr', 'user_id', 'type', 'value'
]
whatsit_user_defined_type = {
97: 'attribute list',
100: 'Lua number',
108: 'Lua value',
110: 'node list',
115: 'Lua string',
116: 'Lua token list'
}
_whatsit_save_pos_field = [
'attr'
]
_whatsit_late_lua_field = [
'attr', 'data', 'token', 'name'
]
_whatsit_special_field = [
'attr', 'data'
]
_whatsit_pdf_literal_field = [
'attr', 'mode', 'data', 'token'
]
whatsit_pdf_literal_mode = {
0: 'origin',
1: 'page',
2: 'direct',
3: 'raw',
4: 'text'
}
_whatsit_pdf_refobj_field = [
'attr', 'objnum'
]
_whatsit_pdf_annot_field = [
'attr', 'width', 'height', 'depth', 'objnum', 'data'
]
_whatsit_pdf_start_link_field = [
'attr', 'width', 'height', 'depth', 'objnum', 'link_attr', 'action'
]
_whatsit_pdf_end_link_field = [
'attr'
]
_whatsit_pdf_dest_field = [
'attr', 'width', 'height', 'depth', 'named_id', 'dest_id', 'dest_type', 'xyz_zoom', 'objnum'
]
_whatsit_pdf_action_field = [
'action_type', 'action_id', 'named_id', 'file', 'new_window', 'data'
]
whatsit_pdf_action_action_type = {
0: 'page',
1: 'goto',
2: 'thread',
3: 'user'
}
whatsit_pdf_action_new_window = {
0: 'notset',
1: 'new',
2: 'nonew'
}
_whatsit_pdf_thread_field = [
'attr', 'width', 'height', 'depth', 'named_id', 'thread_id', 'thread_attr'
]
_whatsit_pdf_start_thread_field = [
'attr', 'width', 'height', 'depth', 'named_id', 'thread_id', 'thread_attr'
]
_whatsit_pdf_end_thread_field = [
'attr'
]
_whatsit_pdf_colorstack_field = [
'attr', 'stack', 'command', 'data'
]
_whatsit_pdf_setmatrix_field = [
'attr', 'data'
]
_whatsit_pdf_save_field = [
'attr'
]
_whatsit_pdf_restore_field = [
'attr'
]
__luatex_variables = []
__global_keys = list(globals().keys())
for key in __global_keys:
val = globals()[key]
if isinstance(val, (dict, list)):
start_match = re.match('(_+).*', key)
if start_match is None or len(start_match.group(1)) < 2:
__luatex_variables.append(key)
# construct output json
__defined_nodes = []
for key in __luatex_variables:
match = re.match('_(.*?)_field', key)
if match is not None:
old_length = len(globals()[key])
globals()[key] = __node_common_field + globals()[key]
# integrity check
# print(key)
# print(len(set(globals()[key])), old_length + len(__node_common_field))
assert isinstance(globals()[key], list)
assert len(set(globals()[key])) == old_length + len(__node_common_field)
__defined_nodes.append(match.group(1))
print(__defined_nodes)
__output_dict = {}
__output_dict['_node_id'] = __node_id
__output_dict['_math_options'] = __math_options
__output_dict['_whatsit_id'] = __whatsit_id
for node in __defined_nodes:
__output_dict[node] = dict()
__output_dict[node]['_field'] = globals()['_' + node + '_field']
for var_name in __luatex_variables:
if var_name.startswith(node):
key_name = var_name[len(node) + 1:]
__output_dict[node][key_name] = globals()[var_name]
with open(os.path.join('..', 'luatex-node.json'), 'w') as outfile:
json.dump(__output_dict, outfile, indent=2)
| [
"xziyue@qq.com"
] | xziyue@qq.com |
c185379f5cef1cca71aa3cc90ae80bd7a5f7afef | f03621c7ad9ee423439e55fba23f6ffa53fc8599 | /src/python/07-test-grapy.py | 46459f0870aaecebf3c37cf739658f6e778485c2 | [] | no_license | kimkangbo/mlforhighschool | 313c059db83350f5f30efedb22e9ae0225e55b08 | 9fc8fbc5574a8dbf973bdddaf8130eea1c49fe59 | refs/heads/master | 2021-06-03T16:26:40.529569 | 2020-05-17T15:56:22 | 2020-05-17T15:56:22 | 135,787,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,631 | py | # Example of solving simple linear (y(x) = mx + b) regression in Python.
#
# Uses only Numpy, with Matplotlib for plotting.
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
from matplotlib import cm
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
#from timer import Timer
def generate_data(n, m=2.25, b=6.0, stddev=1.5):
    """Create n noisy samples of the line y = m*x + b.

    n: number of points, spread evenly over [-2.0, 2.0].
    m, b: slope and intercept of the underlying line.
    stddev: standard deviation of the Gaussian noise added to y.
    Returns a pair (x, y) of arrays of length n.
    """
    xs = np.linspace(-2.0, 2.0, n)
    noise = np.random.normal(loc=0, scale=stddev, size=n)
    ys = m * xs + b + noise
    return xs, ys
def plot_data_scatterplot(x, y, mb_history=None):
    """Plot the data: y as a function of x, in a scatterplot.
    x, y: arrays of data.
    mb_history:
      if provided, it's a sequence of (m, b) pairs that are used to draw
      animated lines on top of the scatterplot.
    """
    fig, ax = plt.subplots()
    fig.set_tight_layout(True)
    fig.set_size_inches((8, 6))
    save_dpi = 80
    ax.scatter(x, y, marker='x')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    if mb_history:
        # Seed the animated fit line with the first (m, b) pair.
        m0, b0 = mb_history[0]
        line, = ax.plot(x, x * m0 + b0, 'r-', linewidth=2.0)
        # Downsampling mb_history by 2 (the commented-out lines below) would
        # halve the number of animation frames; currently every pair is shown.
        def update(frame_i):
            # mi, bi = mb_history[frame_i * 2]
            mi, bi = mb_history[frame_i]
            line.set_ydata(x * mi + bi)
            # NOTE(review): the title still multiplies by 2 from the old
            # downsampled version, so the displayed iteration number is twice
            # the actual frame index — confirm whether that is intended.
            ax.set_title('Fit at iteration {0}'.format(frame_i * 2))
            return [line]
        # anim = FuncAnimation(fig, update, frames=range(len(mb_history) // 2),
        anim = FuncAnimation(fig, update, frames=range(len(mb_history)),
                             # interval=200)
                             interval=2)
        # anim.save('regressionfit.gif', dpi=save_dpi, writer='imagemagick')
    # else:
    #    fig.savefig('linreg-data.png', dpi=save_dpi)
    plt.show()
def compute_cost(x, y, m, b):
    """Compute the MSE cost of the prediction y^ = m*x + b.

    x: inputs array.
    y: observed outputs array.
    m, b: regression parameters.
    Returns: a scalar cost.
    """
    residuals = (m * x + b) - y
    # A dot product gives the sum of squared residuals in one vectorized step.
    mse = np.dot(residuals.T, residuals) / float(x.shape[0])
    # np.dot may hand back a 1x1/0-d result; .flat[0] extracts the scalar.
    return mse.flat[0]
def gradient_descent(x, y, nsteps, learning_rate=0.1):
    """Fit the line y^ = m*x + b to (x, y) with gradient descent.

    x, y: input data and observed outputs, as arrays.
    nsteps: number of optimization steps to run.
    learning_rate: step size applied to each gradient update.

    Yields nsteps + 1 triplets (m, b, cost): first the initial guess
    (m = b = 0) with its cost, then the parameters and cost after each
    of the nsteps update steps.
    """
    n = x.shape[0]
    m = b = 0
    yield m, b, compute_cost(x, y, m, b)
    for _ in range(nsteps):
        residuals = (m * x + b) - y
        # Gradients of the MSE cost with respect to m and b.
        grad_m = learning_rate * (residuals * x).sum() * 2 / n
        grad_b = learning_rate * residuals.sum() * 2 / n
        m = m - grad_m
        b = b - grad_b
        yield m, b, compute_cost(x, y, m, b)
def plot_cost_3D(x, y, costfunc, mb_history=None):
    """Plot cost as 3D and contour.
    x, y: arrays of data.
    costfunc: cost function with signature like compute_cost.
    mb_history:
      if provided, it's a sequence of (m, b) pairs that are added as
      crosshairs markers on top of the contour plot.
    """
    lim = 10.0
    N = 250
    ms = np.linspace(-lim, lim, N)
    bs = np.linspace(-lim, lim, N)
    cost = np.zeros((N, N))
    # Evaluate the cost on an N x N grid of (m, b) candidates.
    for m_idx in range(N):
        for b_idx in range(N):
            cost[m_idx, b_idx] = costfunc(x, y, ms[m_idx], bs[b_idx])
    # Configure 3D plot.
    fig = plt.figure()
    fig.set_tight_layout(True)
    ax1 = fig.add_subplot(1, 2, 1, projection='3d')
    ax1.set_xlabel('b')
    ax1.set_ylabel('m')
    msgrid, bsgrid = np.meshgrid(ms, bs)
    surf = ax1.plot_surface(msgrid, bsgrid, cost, cmap=cm.coolwarm)
    # Configure contour plot.
    ax2 = fig.add_subplot(1, 2, 2)
    ax2.contour(msgrid, bsgrid, cost)
    ax2.set_xlabel('b')
    ax2.set_ylabel('m')
    if mb_history:
        # Overlay the gradient-descent trajectory as red crosses on the
        # contour plot (note: this rebinds ms/bs from grid axes to history).
        ms, bs = zip(*mb_history)
        plt.plot(bs, ms, 'rx', mew=3, ms=5)
    plt.show()
def plot_cost_vs_step(costs):
    """Given an array of costs, plots them vs. index (training diagnostic)."""
    # For a converging fit this curve should decrease toward a plateau.
    plt.plot(range(len(costs)), costs)
    plt.show()
def compute_mb_analytic(x, y):
    """Return the closed-form least-squares (m, b) for arrays x, y."""
    mean_x = np.average(x)
    mean_y = np.average(y)
    # Least-squares slope written via means: cov(x, y) / var(x).
    slope = (mean_x * mean_y - np.average(x * y)) / (mean_x ** 2 - np.average(x ** 2))
    intercept = mean_y - slope * mean_x
    return slope, intercept
def compute_rsquared(x, y, m, b):
    """Return R^2, the coefficient of determination, for the fit y^ = m*x + b.

    x, y: arrays of inputs and observed outputs.
    m, b: regression parameters.
    """
    residuals = (m * x + b) - y
    sse_line = np.dot(residuals.T, residuals)
    # Total squared deviation of y from its mean equals n * var(y).
    sse_total = len(y) * y.var()
    return 1 - sse_line / sse_total
if __name__ == '__main__':
# Follow through the code here to see how the functions are used. No
# plotting is done by default. Uncomment relevant lines to produce plots.
# For reproducibility.
np.random.seed(42)
# Generate some pseudo-random data we're goign to fit with linear
# regression.
N = 500
x, y = generate_data(N)
print('Generated {0} data points'.format(N))
# Run gradient descent.
NSTEPS = 100
mbcost = list(gradient_descent(x, y, NSTEPS))
mb_history = [(m, b) for m, b, _ in mbcost]
print('Final m={0}, b={1}; cost={2}'.format(mbcost[-1][0], mbcost[-1][1],
mbcost[-1][2]))
# Plot the data in a scatterplot, with an animated line fit.
#plot_data_scatterplot(x, y, mb_history)
# Plot the cost function in 3D and as contours; add markers for the costs
# values returned by the gradient descent procedure.
plot_cost_3D(x, y, compute_cost, mb_history)
m, b = compute_mb_analytic(x, y)
print('Analytic: m={0}, b={1}'.format(m, b))
rsquared = compute_rsquared(x, y, m, b)
print('Rsquared:', rsquared)
| [
"kimkangbo@gmail.com"
] | kimkangbo@gmail.com |
40801f7d40c23e8ef9fd71aef06229192814b53d | 738b6d6ec4572f5848940b6adc58907a03bda6fb | /tests/pymcell4_positive/3000_reports_check/model.py | 03fdbc184e2cc0fea4c8bae504524b1bb53ba093 | [
"MIT",
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | mcellteam/mcell_tests | 09cd1010a356e0e07c88d7e044a73c5606c6e51a | 34d2d967b75d56edbae999bf0090641850f4f4fe | refs/heads/master | 2021-12-24T02:36:24.987085 | 2021-09-24T14:19:41 | 2021-09-24T14:19:41 | 174,733,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | #!/usr/bin/env python3
import sys
import os
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
sys.path.append(os.path.join(MCELL_PATH, 'lib'))
else:
print("Error: variable MCELL_PATH that is used to find the mcell library was not set.")
sys.exit(1)
import mcell as m
params = m.bngl_utils.load_bngl_parameters('test.bngl')
ITERATIONS = int(params['ITERATIONS'])
# ---- load bngl file ----
model = m.Model()
if 'MCELL_DEFAULT_COMPARTMENT_VOLUME' in params:
MCELL_DEFAULT_COMPARTMENT_VOLUME = params['MCELL_DEFAULT_COMPARTMENT_VOLUME']
MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH = MCELL_DEFAULT_COMPARTMENT_VOLUME**(1.0/3.0)
default_compartment = m.geometry_utils.create_box(
'default_compartment', MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH
)
model.add_geometry_object(default_compartment)
else:
MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH = 1
default_compartment = None
model.load_bngl('test.bngl', './react_data/seed_' + str(1).zfill(5) + '/', default_compartment)
# ---- configuration ----
model.config.total_iterations = ITERATIONS
model.notifications.rxn_and_species_report = True
model.initialize()
model.run_iterations(ITERATIONS)
model.end_simulation()
# check that reports exist
assert os.path.exists(os.path.join('reports', 'rxn_report_00001.txt'))
assert os.path.exists(os.path.join('reports', 'species_report_00001.txt'))
assert os.path.exists(os.path.join('reports', 'warnings_report_00001.txt'))
| [
"ahusar@salk.edu"
] | ahusar@salk.edu |
87610080866d9cad3191923528acbeeed82d6233 | 547548a6ae8db52b1b183d6f3ba3ad63f4247962 | /train/gen/kl/paths.py | 5bc1abac26c206f66572dc7988a9abdd8620b8c0 | [
"MIT"
] | permissive | jeffkrupa/SubtLeNet | 21870c8cc88080c101edffb414832d863c299455 | e0e74b7a0a1c76fd6d6e21c80ce57302a2cd6b6f | refs/heads/master | 2022-06-25T16:33:36.427635 | 2022-06-10T16:15:53 | 2022-06-10T16:15:53 | 187,670,116 | 0 | 2 | MIT | 2019-08-02T20:26:20 | 2019-05-20T15:44:13 | Python | UTF-8 | Python | false | false | 288 | py | #basedir = '/fastscratch/snarayan/genarrays/v_deepgen_3/'
#figsdir = '/home/snarayan/public_html/figs/deepgen/v3/'
basedir = '/data/t3serv014/snarayan/deep//v_deepgen_4_small/'
figsdir = '/home/snarayan/public_html/figs/deepgen/v4_kl/'
from os import system
system('mkdir -p '+figsdir)
| [
"sidn@mit.edu"
] | sidn@mit.edu |
be2a2cffd88039d7e883315a60934503019a8713 | c313288fe7a31f75e562afec42ef82ce79001cae | /tests/test_main.py | c04ef37849bff81f033be8c44f2bf8b67abd37fb | [] | no_license | 3slab/pygisapi | 1ba4138da13152c9152cac3f7e34462529f7d810 | ccead89919a9ef342d36bedfc528b906bfd9930d | refs/heads/master | 2022-12-12T14:40:27.985938 | 2020-10-02T14:45:01 | 2020-10-02T14:45:01 | 221,058,634 | 1 | 0 | null | 2022-12-08T10:54:47 | 2019-11-11T19:53:50 | Python | UTF-8 | Python | false | false | 188 | py | # -*- coding: utf-8 -*-
from . import client
def test_home():
response = client.get("/")
assert response.status_code == 404
assert response.json() == {'detail': 'Not Found'}
| [
"jonathan.bouzekri@gmail.com"
] | jonathan.bouzekri@gmail.com |
0cb8793177ffef90d8390d28c3b577deb8cb9e29 | 370e95125f5bd6ba4103a04aa9492035f7dc5994 | /lab4/patterns/pattern06.py | 25bb9512a6546ac451f992bf709f8bdda9aba59e | [] | no_license | TariniS/lab4_CS | 1c074a89b5be32676cb6ce56603ce849f98f87cc | 17e36d490a557db9492a36f19a7c37b66e3923bc | refs/heads/main | 2023-02-04T01:37:14.565019 | 2020-12-21T22:09:09 | 2020-12-21T22:09:09 | 323,458,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | import driver
def letter(row, col):
    """Return 'X' when the cell lies on the main diagonal (row == col) or on
    the anti-diagonal where row + col == 6; return 'O' otherwise."""
    on_main_diagonal = row == col
    on_anti_diagonal = row + col == 6
    return 'X' if on_main_diagonal or on_anti_diagonal else 'O'
if __name__ == '__main__':
driver.comparePatterns(letter)
| [
"noreply@github.com"
] | noreply@github.com |
19c958c9aeb6d3f7038d4699459b22ccbfeb451c | 693a6631f854e04775f546cef91af3e93d6c6f27 | /blog/models.py | 02091040fa991fedd038588cfb640164121cbb65 | [] | no_license | edsonjnior/my-first-blog | ff9c2d90466ac177acc9652b6b7f8f1c93327444 | f245216761ff69face9ee5bfb25684d8f2c03017 | refs/heads/master | 2020-03-22T11:55:50.819475 | 2018-08-20T23:29:48 | 2018-08-20T23:29:48 | 140,005,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post authored by a site user."""

    # Deleting the author also deletes their posts (CASCADE).
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # Stays unset (NULL) until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)

    def publish(self):
        """Stamp the post as published now and persist the change."""
        self.published_date = timezone.now()
        # Bug fix: `self.save` merely referenced the bound method without
        # calling it, so the publish date was never written to the database.
        self.save()

    def __str__(self):
        return self.title
| [
"edsonjnior@gmail.com"
] | edsonjnior@gmail.com |
44bccce4581a9be8d40ef02613ab79ebd72ed817 | ba98e2171d38c85ddc122cb35b400df30c952cd6 | /translifyapp/migrations/0001_initial.py | 8969cb9d1c1107c49d17b532ffb9a14f60b156c3 | [] | no_license | dbribe/DemoANPR | 5f760b39aaa324be2539e3366bddd5b3516550e7 | 4cbc761b97d67685faa6586b2e671aa57e49621b | refs/heads/master | 2020-03-21T09:50:34.174266 | 2018-06-25T16:49:54 | 2018-06-25T16:49:54 | 137,877,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,901 | py | from django.contrib.postgres.operations import CITextExtension
import django.contrib.postgres.fields.citext
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import translifyapp.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('localization', '0002_country'),
]
operations = [
CITextExtension(),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False)),
('email', models.EmailField(max_length=254, unique=True)),
('first_name', models.CharField(blank=True, max_length=30)),
('last_name', models.CharField(blank=True, max_length=30)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now)),
('username', django.contrib.postgres.fields.citext.CICharField(blank=True, error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and ./_ only.', max_length=30, null=True, unique=True, validators=[django.core.validators.RegexValidator(code='invalid', message='Enter a valid username. This value may contain at most one consecutive separator( . or _ characters).', regex='^((?![_.]{2,}).)*$'), django.core.validators.RegexValidator(code='invalid', message='Enter a valid username. This value may contain only letters,numbers and separators ( . or _ characters).', regex='^[\\w.]+$'), django.core.validators.RegexValidator(code='invalid', message='Enter a valid username. This value may not start with a separator ( . or _ characters).', regex='^[^._]'), django.core.validators.RegexValidator(code='invalid', message='Enter a valid username. This value may not end with a separator ( . or _ characters).', regex='[^._]$')])),
('locale_language', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='localization.Language')),
],
options={
'abstract': False,
},
managers=[
('objects', translifyapp.models.UserManager()),
],
),
] | [
"kir141996@gmail.com"
] | kir141996@gmail.com |
55c7a91a2d15442966a2c78923fa3cc202a4b7c3 | 2331bca795bf4dff9b58e05b18922847fd401818 | /Search Algorithms/BFS.py | 3fa5036a36439ece39558cf04ad92731c3fe948c | [] | no_license | MrAliTheGreat/AI_Projects | 794fe46754ca16324294dc08d255cda26304f0c0 | 6b25079473dcc4343fd5be6c998621f7f2dbb59d | refs/heads/master | 2023-07-27T21:26:28.124077 | 2021-08-20T08:55:12 | 2021-08-20T08:55:12 | 352,749,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,416 | py | import time
class State():
def __init__(self, curLoc_x , curLoc_y , curCapacity , curNumRemainingBalls , needDelivery , hasPicked ,
parentState=None , depth=0):
self.curLoc_x = curLoc_x
self.curLoc_y = curLoc_y
self.curCapacity = curCapacity
self.curNumRemainingBalls = curNumRemainingBalls
self.needDelivery = needDelivery
self.parentState = parentState
self.hasPicked = hasPicked
if(self.parentState is None):
self.depth = 0
else:
self.depth = self.parentState.depth + 1
# def __eq__(self , other):
# return self.curLoc_x == other.curLoc_x and self.curLoc_y == other.curLoc_y and \
# self.curNumRemainingBalls == other.curNumRemainingBalls
def getHash(self):
strNeedDelivery = "".join([str(t) for t in self.needDelivery])
if(self.parentState is None):
strParent = "-00"
else:
strParent = str(self.parentState.curLoc_x) + str(self.parentState.curLoc_y)
return str(self.curLoc_x) + str(self.curLoc_y) + str(self.curNumRemainingBalls) + strNeedDelivery + strParent
def getHashBFS(self):
strNeedDelivery = "".join([str(t) for t in self.needDelivery])
return str(self.curLoc_x) + str(self.curLoc_y) + str(self.curNumRemainingBalls) + strNeedDelivery
def getNumPicked(self):
return len(self.hasPicked)
def getCurLoc_x(self):
return self.curLoc_x
def getCurLoc_y(self):
return self.curLoc_y
def getCurCapacity(self):
return self.curCapacity
def getCurNumRemainingBalls(self):
return self.curNumRemainingBalls
def getNeedDelivery(self):
return self.needDelivery
def getHasPicked(self):
return self.hasPicked
def getParentState(self):
return self.parentState
def getDepth(self):
return self.depth
def setCurCapacity(self , newValue):
self.curCapacity = newValue
def setCurNumRemainingBalls(self , newValue):
self.curNumRemainingBalls = newValue
def setNeedDelivery(self , newValue):
self.needDelivery = newValue
def setHasPicked(self , newValue):
self.hasPicked = newValue
def getMazeElement(maze , x , y):
    """Return the maze cell at row x, column y.

    Each maze row is a string whose cells sit two characters apart
    (cell, separator), so column y lives at string index y * 2.
    """
    return maze[x][2 * y]
def isValidInMaze(maze , x , y , num_rows , num_columns):
    """Return True when (x, y) lies inside the maze bounds and the cell is
    not a wall ('*')."""
    inside = 0 <= x < num_rows and 0 <= y < num_columns
    # Cells sit two characters apart in each row string, hence y * 2.
    return inside and maze[x][y * 2] != '*'
def updateFrontier(frontier , newState , goal_x , goal_y , explored):
    """Queue newState for expansion and report whether it is a goal state.

    The state is appended to the frontier and its BFS hash recorded in
    explored; returns True only when the state sits on the goal cell with
    no balls left to deliver.
    """
    frontier.append(newState)
    explored.add(newState.getHashBFS())
    at_goal_cell = newState.getCurLoc_x() == goal_x and newState.getCurLoc_y() == goal_y
    return at_goal_cell and newState.getCurNumRemainingBalls() == 0
def placeBall(maze , curState , ballsDestLocs):
    """Drop a carried ball when the state stands on one of its delivery cells.

    Mutates curState in place: the matching destination is removed from the
    pending-delivery list, one unit of carrying capacity is freed, and the
    count of balls still to deliver is decremented.  No-op when nothing is
    pending or the current cell is not a pending destination.
    """
    pending = curState.getNeedDelivery()
    if not pending:
        return
    here = (curState.getCurLoc_x() , curState.getCurLoc_y())
    if here not in pending:
        return
    remaining = list(pending)
    remaining.remove(here)
    curState.setNeedDelivery(remaining)
    curState.setCurCapacity(curState.getCurCapacity() + 1)
    curState.setCurNumRemainingBalls(curState.getCurNumRemainingBalls() - 1)
def grabBall(maze , curState , ballsInitLocs , ballsDestLocs , frontier , explored):
    """Pick up a ball at the current cell, pushing the resulting state.

    A pickup happens only when the robot has spare capacity, stands on a
    ball's start cell, and has not already picked that ball up along this
    state's history.  The successor state (capacity - 1, the ball's
    destination appended to the pending deliveries) is appended to the
    frontier unless an equivalent state was already seen.
    """
    if curState.getCurCapacity() <= 0:
        return
    here = (curState.getCurLoc_x() , curState.getCurLoc_y())
    if here not in ballsInitLocs:
        return
    if here in curState.getHasPicked():
        return
    ballIndex = ballsInitLocs.index(here)
    newNeedDelivery = list(curState.getNeedDelivery())
    newNeedDelivery.append(ballsDestLocs[ballIndex])
    newHasPicked = list(curState.getHasPicked())
    newHasPicked.append(here)
    newState = State(here[0] , here[1] , curState.getCurCapacity() - 1 ,
                     curState.getCurNumRemainingBalls() , newNeedDelivery , newHasPicked , curState)
    if newState.getHashBFS() not in explored:
        frontier.append(newState)
        explored.add(newState.getHashBFS())
def BFS(maze , num_rows , num_columns , start_x , start_y , goal_x , goal_y , capacity , num_balls , ballsInitLocs ,
        ballsDestLocs):
    """Breadth-first search for a shortest plan that delivers every ball and
    ends on the goal cell.

    Returns (goal_state, num_explored, num_unique_explored); the goal
    state's parentState chain encodes the move sequence.  As before, the
    function implicitly returns None when the frontier empties without
    reaching a goal.
    """
    from collections import deque
    # deque gives O(1) pops from the front; the old list.pop(0) was O(n)
    # per expansion.  The helpers only append, so they work unchanged.
    frontier = deque()
    explored = set()
    start_state = State(start_x , start_y , capacity , num_balls , [] , [])
    frontier.append(start_state)
    explored.add(start_state.getHashBFS())
    uniqueExplored = set()
    numExploredStates = 0
    numUniqueExploredStates = 0
    if start_x == goal_x and start_y == goal_y and num_balls == 0:
        return frontier[0] , numExploredStates , numUniqueExploredStates
    # Neighbor offsets in the original expansion order: Left, Down, Up, Right.
    moves = ((0, -1), (1, 0), (-1, 0), (0, 1))
    while frontier:
        numExploredStates += 1
        curState = frontier.popleft()
        if curState.getHashBFS() not in uniqueExplored:
            numUniqueExploredStates += 1
            uniqueExplored.add(curState.getHashBFS())
        grabBall(maze , curState , ballsInitLocs , ballsDestLocs , frontier , explored)
        placeBall(maze , curState , ballsDestLocs)
        # One shared loop replaces the four near-identical copy-pasted
        # direction blocks of the original.
        for dx, dy in moves:
            next_x = curState.getCurLoc_x() + dx
            next_y = curState.getCurLoc_y() + dy
            if not isValidInMaze(maze , next_x , next_y , num_rows , num_columns):
                continue
            newState = State(next_x , next_y , curState.getCurCapacity() ,
                             curState.getCurNumRemainingBalls() , curState.getNeedDelivery() ,
                             curState.getHasPicked() , curState)
            if newState.getHashBFS() not in explored:
                if updateFrontier(frontier , newState , goal_x , goal_y , explored):
                    return newState , numExploredStates , numUniqueExploredStates
def getNumAndPrintActions(firstState , printActions = False):
    """Walk the parent chain back to the root state and return its length.

    When printActions is True, each visited state's location and remaining
    capacity are printed along the way (goal state first; the root itself
    is not printed).
    """
    num_levels = 0
    state = firstState
    while state.getParentState() is not None:
        if printActions:
            print("{0} {1} capacity: {2}".format(state.getCurLoc_x(),
                                                 state.getCurLoc_y(),
                                                 state.getCurCapacity()))
        state = state.getParentState()
        num_levels += 1
    return num_levels
for i in range(1 , 5):
testFile = open("Tests/" + str(i) + ".txt" , "r")
inputs = testFile.read().splitlines()
testFile.close()
num_rows , num_columns = list(map(int , inputs[0].split()))
start_x , start_y = list(map(int , inputs[1].split()))
goal_x , goal_y = list(map(int , inputs[2].split()))
capacity = int(inputs[3])
num_balls = int(inputs[4])
ballsInitLocs = []
ballsDestLocs = []
for i in range(num_balls):
s_x , s_y , d_x , d_y = list(map(int , inputs[5 + i].split()))
ballsInitLocs.append((s_x , s_y)); ballsDestLocs.append((d_x , d_y))
maze = inputs[5 + num_balls :]
tic = time.time()
goalState , numExploredStates , numUniqueExploredStates = BFS(maze , num_rows , num_columns , start_x , start_y ,
goal_x , goal_y , capacity , num_balls , ballsInitLocs ,
ballsDestLocs)
toc = time.time()
num_levels = getNumAndPrintActions(goalState)
print("Num of levels: " + str(num_levels))
print("Number of explored States: " + str(numExploredStates))
print("Number of unique explored States: " + str(numUniqueExploredStates))
print("Time: " + str(toc - tic) + " s")
print("==============") | [
"alibahari007@gmail.com"
] | alibahari007@gmail.com |
a042d6383847e72133f0b16ca96826a94468373b | cb8c9fd1699d26afaa656396c7636f398b77ae0a | /musicdisplay/myapp/views.py | 94d1e8b5d98ff920bb75c07420e0ab39036691f3 | [] | no_license | JannyBiubiu/DAM | 98d305a40b941d2c7b7115f1a00193cd2065afe1 | ffe81d582c001236b631f2e0c25df5d711343cc6 | refs/heads/master | 2020-03-31T11:59:04.332152 | 2018-11-15T16:32:16 | 2018-11-15T16:32:16 | 152,198,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,918 | py | """
Routes and views for the flask application.
"""
#-*- coding:utf-8 -*-
from datetime import datetime
from flask import render_template,request
from myapp import app
import os
import json
with open("./myapp/static/meta.json","r+",encoding="utf-8") as code:
obj=json.load(code)
fields=obj["media_src"]
def getmeta(path):
    """Read a JSON-lines file and return its records as a list of objects.

    path: file containing one JSON document per line.
    """
    records = []
    # Open read-only ("r", not "r+"): the file is never written here, and
    # "r+" would fail on read-only deployments.  Also avoid shadowing the
    # builtin `list`, which the old code did.
    with open(path, "r", encoding="utf-8") as code:
        for line in code:
            records.append(json.loads(line))
    return records
@app.route('/')
@app.route('/home')
def home():
    """Render the landing page listing the media categories."""
    # `fields` is the media_src list loaded from meta.json at import time.
    return render_template(
        'index.html',
        title='主页',
        fields=fields,
        year =datetime.now().year,
    )
@app.route("/about")
def about():
    """Render the static "about me" page."""
    return render_template(
        'about.html',
        title='关于我',
        year=datetime.now().year,
        fields=fields
    )
@app.route("/#/<name>",methods=['post','get'])
def field(name):
    """Render the listing page for one media category.

    name: category directory under ./myapp/static/; its meta.json holds
    one JSON object per line describing each item.
    """
    # NOTE(review): `list` shadows the builtin; renaming it would also
    # require updating the `list=` keyword passed to the template.
    list=getmeta("./myapp/static/"+name+"/meta.json")
    return render_template(
        'field.html',
        title='分区',
        fields=fields,
        name=name,
        year =datetime.now().year,
        list=list
    )
@app.route("/#/<name>/<id>",methods=['post','get'])
def play(name,id):
    """Render the player page for song `id` in category `name`.

    Looks the song up in the category's meta.json and, when it has an
    "lrc" lyrics file, parses the "[mm:ss.xx]lyric" lines into a mapping
    of playback time (seconds) -> lyric, plus a sorted list of those
    times for the template to step through.
    """
    songs = getmeta("./myapp/static/"+name+"/meta.json")
    data = {}
    for song in songs:
        if song["id"] == id:
            data = song
            break
    json_obj = {}
    if 'lrc' in data:
        lrc_path = "./myapp"+data["lrc"][2:]
        # Bug fix: the handle used to be opened with open(..., "r+") and
        # never closed; open read-only and let the context manager close it.
        with open(lrc_path, "r", encoding="utf-8") as f:
            lrc_lines = f.read().split('\n')
        for lrc_line in lrc_lines:
            parts = lrc_line.split(']')
            if len(parts) == 1:
                continue
            if parts[1] == "":
                continue
            value = parts[-1]
            # One lyric line may carry several "[mm:ss.xx]" time tags.
            for j in range(len(parts)-1):
                key_time = parts[j][1:].split(':')
                music_time = float(key_time[0])*60+float(key_time[1])
                json_obj[music_time] = value
    key_list = sorted(json_obj.keys())
    return render_template(
        'play.html',
        fields=fields,
        title='播放',
        id=id,
        name=name,
        data=data,
        lyrics=json_obj,
        lyrics_key=key_list,
        year=datetime.now().year
    )
"3160102525@zju.edu.cn"
] | 3160102525@zju.edu.cn |
fd2c1ef20a11bfa71fa0c756a103e4bc46115f21 | 27ac3a2b2ee81d43d0381f50fbe4c866e90d2c25 | /crm/migrations/0001_initial.py | 02b533913f06aaf65fb971fa6aca167a99ef4097 | [] | no_license | Anzalsk/crm | 3557288517a9980e9ed9f13dea27827aff299940 | 5378b60bd1775ec8f671d72da2b78ff0b528be65 | refs/heads/main | 2023-02-22T03:29:31.931947 | 2021-01-29T13:40:31 | 2021-01-29T13:40:31 | 334,154,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # Generated by Django 3.1.5 on 2021-01-20 09:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the crm app: creates the Customer table."""
    # First migration of the app, applied against an empty database.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=10)),
                ('email', models.CharField(max_length=100)),
                # Stamped once at insert time (auto_now_add).
                ('date_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"ejaz.sk124@gmail.com"
] | ejaz.sk124@gmail.com |
f7d11bab716283917a00ac4cf88bbe0e1409eb96 | 1818aaf61380ed516097cb40ab378d36e3c1a06a | /tests/unit/validate/example_templates/component/unnamed/a.py | 9c803aa2b037621b19d514b789ef78274e3bddc3 | [
"Apache-2.0"
] | permissive | JackBurdick/crummycm | a4504ba3efd42e4d93c62120ad4fece89c6b3ae2 | ffa9dad9b5c4c061d3767d700e90d5e34f92eac8 | refs/heads/main | 2023-01-18T20:07:24.102504 | 2020-11-25T00:56:57 | 2020-11-25T00:56:57 | 304,761,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,462 | py | from crummycm.validation.types.placeholders.placeholder import KeyPlaceholder
from crummycm.validation.types.dicts.foundation.unnamed_dict import UnnamedDict
from crummycm.validation.types.values.element.numeric import Numeric
from crummycm.validation.types.values.element.text import Text
# from crummycm.validation.types.values.base import BaseValue
A_ex = {"val": Numeric(default_value=int(0), required=False, is_type=int)}
A_unnamed_single_num_ex = {
"config": UnnamedDict(
{
KeyPlaceholder("some_key"): Numeric(
default_value=int(0), required=False, is_type=int
)
}
)
}
A_unnamed_single_exact = {
"config": UnnamedDict(
{
KeyPlaceholder("some_key", exact=True): Numeric(
default_value=2, required=True, is_type=int
)
}
)
}
A_unnamed_single_num_multi_ex = {
"config": UnnamedDict(
{
KeyPlaceholder("some_key", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
)
}
)
}
A_unnamed_single_num_startswith_ex = {
"config": UnnamedDict(
{
KeyPlaceholder("my_key", starts_with="val_"): Numeric(
default_value=int(0), required=False, is_type=int
)
}
)
}
A_unnamed_double_dist = {
"config": UnnamedDict(
{
KeyPlaceholder("my_key"): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder("my_other_key", ends_with="_val", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
}
)
}
A_unnamed_triple_dist = {
"config": UnnamedDict(
{
KeyPlaceholder("my_key"): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder("my_other_key", ends_with="_val", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder("YAK", starts_with="val_", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
}
)
}
A_unnamed_quad_dist = {
"config": UnnamedDict(
{
KeyPlaceholder("my_key"): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder("my_other_key", ends_with="_val", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder("YAK", starts_with="val_", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder("YAK_sw", starts_with="x_", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
}
)
}
A_unnamed_quad_inner_quad = {
"config": UnnamedDict(
{
KeyPlaceholder("my_key"): UnnamedDict(
{
KeyPlaceholder("next_my_key"): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder(
"next_my_other_key", ends_with="_val", multi=True
): Numeric(default_value=int(0), required=False, is_type=int),
KeyPlaceholder("next_YAK", starts_with="val_", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder(
"next_YAK_sw", starts_with="x_", multi=True
): Numeric(default_value=int(0), required=False, is_type=int),
}
),
KeyPlaceholder("my_other_key", ends_with="_val", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder("YAK", starts_with="val_", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder("YAK_sw", starts_with="x_", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
}
)
}
A_unnamed_single_num_endswith_ex = {
"config": UnnamedDict(
{
KeyPlaceholder("my_key", ends_with="_val"): Numeric(
default_value=int(0), required=False, is_type=int
)
}
)
}
A_unnamed_single_num_endswith_req = {
"config": UnnamedDict(
{
KeyPlaceholder("my_key", ends_with="_val"): Numeric(
default_value=int(0), required=True, is_type=int
)
}
)
}
A_unnamed_out = UnnamedDict(
{
KeyPlaceholder("my_num"): Numeric(
default_value=int(0), required=False, is_type=int
)
}
)
A_nested_unnamed_num = {
"config": UnnamedDict(
{
KeyPlaceholder("some_dict"): UnnamedDict(
{
KeyPlaceholder("inner_num"): Numeric(
default_value=int(0), required=False, is_type=int
)
}
)
}
)
}
A_quad_nested_unnamed_num = {
"config": UnnamedDict(
{
KeyPlaceholder("some_dict"): UnnamedDict(
{
KeyPlaceholder("another_dict"): UnnamedDict(
{
KeyPlaceholder("yet_another_dict"): UnnamedDict(
{
KeyPlaceholder("some_num"): Numeric(
default_value=int(0),
required=False,
is_type=int,
)
}
)
}
)
}
)
}
)
}
A_unnamed_quad_nested_inner_quad = {
"config": UnnamedDict(
{
KeyPlaceholder("my_key"): UnnamedDict(
{
KeyPlaceholder("next_my_key"): UnnamedDict(
{
KeyPlaceholder("deeper_key"): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder(
"next_my_other_key", ends_with="_val", multi=True
): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder(
"next_YAK", starts_with="val_", multi=True
): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder(
"next_YAK_sw", starts_with="x_", multi=True
): Numeric(
default_value=int(0), required=False, is_type=int
),
}
),
KeyPlaceholder(
"next_my_other_key", ends_with="_val", multi=True
): Numeric(default_value=int(0), required=False, is_type=int),
KeyPlaceholder("next_YAK", starts_with="val_", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder(
"next_YAK_sw", starts_with="x_", multi=True
): Numeric(default_value=int(0), required=False, is_type=int),
}
),
KeyPlaceholder("my_other_key", ends_with="_val", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder("YAK", starts_with="val_", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
KeyPlaceholder("YAK_sw", starts_with="x_", multi=True): Numeric(
default_value=int(0), required=False, is_type=int
),
}
)
}
| [
"jackbburdick@gmail.com"
] | jackbburdick@gmail.com |
7d7faf9f09aff41374a60d36ae6d52a4afb8344f | e5ee1b485c0dc12f8e1121daeff9a4d78a40f9f9 | /travesty/__init__.py | cf9ae069a98ed1267b27bf435630b1995b526cbe | [] | no_license | CodeMan99/travesty | ac2e194f6c7a326891460ffae176ceb5b6f55f72 | 0357d1db9db6ff3b1bf249628c5522f7d6308f4e | refs/heads/master | 2021-01-17T23:04:01.341515 | 2016-09-24T03:19:39 | 2016-09-24T03:39:14 | 34,356,089 | 0 | 0 | null | 2015-04-21T22:47:40 | 2015-04-21T22:47:39 | Python | UTF-8 | Python | false | false | 1,979 | py | from .base import Leaf, unwrap, core_marker, GraphDispatcher, Marker
from .base import Wrapper, Traversable, to_typegraph, make_dispatcher
from .base import graphize, validate, dictify, undictify, associate_typegraph
from .base import clone, mutate, traverse, IGNORE, CHECK, CHECK_ALL
from .datetypes import DateTime, Date, Time, TimeDelta
from .invalid import Invalid, InvalidAggregator
from .list import List
from .mapping import SchemaMapping, StrMapping, UniMapping
from .object_marker import ObjectMarker
from .optional import Optional
from .passthrough import Passthrough
from .polymorph import Polymorph
from .schema_obj import SchemaObj
from .tuple import Tuple, NamedTuple
from .typed_leaf import TypedLeaf, Boolean, String, Bytes, Int, Number, Complex
from .validated import Validated
from .validators import Validator, InRange, OneOf, RegexMatch
from .validators import AsciiString, Email, NonEmptyString, StringOfLength
from .document import Document, DocSet
from . import document
from . import validators
__all__ = [
'AsciiString',
'Boolean',
'Bytes',
'Complex',
'CHECK',
'CHECK_ALL',
'Date',
'DateTime',
'DocSet',
'Document',
'Email',
'GraphDispatcher',
'IGNORE',
'InRange',
'Int',
'Invalid',
'InvalidAggregator',
'Leaf',
'List',
'Marker',
'NamedTuple',
'Number',
'NonEmptyString',
'StringOfLength',
'ObjectMarker',
'OneOf',
'Optional',
'Passthrough',
'Polymorph',
'RegexMatch',
'SchemaMapping',
'SchemaObj',
'String',
'Time',
'TimeDelta',
'Traversable',
'Tuple',
'TypedLeaf',
'StrMapping',
'UniMapping',
'Validated',
'Validator',
'Wrapper',
'associate_typegraph',
'core_marker',
'clone',
'dictify',
'document',
'make_dispatcher',
'mutate',
'to_typegraph',
'traverse',
'graphize',
'undictify',
'unwrap',
'validate',
'validators',
]
| [
"dplepage@gmail.com"
] | dplepage@gmail.com |
4eb772561fe04abfb679dd1063c0227fdab4288c | dffc8bb91840e85a102a3680a409bc03a2c2cda4 | /routers.py | 7be903ab25b6c413e1645d397179076474aa4632 | [] | no_license | quanted/qed_pisces | 0cca18bc6f1c97c4e3aa25cc468a57ec446639cc | 7a3a84a697ac193d440db3fc84d66abfed3e01d7 | refs/heads/master | 2021-05-01T23:52:43.294938 | 2017-10-16T17:04:04 | 2017-10-16T17:04:04 | 78,057,905 | 0 | 1 | null | 2017-10-16T17:04:06 | 2017-01-04T22:24:07 | Python | UTF-8 | Python | false | false | 1,180 | py | #Class for pisces_db routing
class PiscesRouter(object):
"""
A router to control all database operations on models in the
pisces application.
"""
def db_for_read(self, model, **hints):
"""
Attempts to read hem models go to pisces_db.
"""
if model._meta.app_label == 'pisces_app':
return 'pisces_db'
return None
def db_for_write(self, model, **hints):
"""
Attempts to write auth models go to pisces_db.
"""
if model._meta.app_label == 'pisces_app':
return 'pisces_db'
return None
def allow_relation(self, obj1, obj2, **hints):
"""
Allow relations if a model in the pisces app is involved.
"""
if obj1._meta.app_label == 'pisces_app' or \
obj2._meta.app_label == 'pisces_app':
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""
Make sure the hem app only appears in the 'pisces_db'
database.
"""
if app_label == 'pisces_app':
return db == 'pisces_db'
return None
| [
"kurt.wolfe@gmail.com"
] | kurt.wolfe@gmail.com |
98096f66b312e6ddd29fff6907327916490295c7 | e8294bb01d47a59745065bf50ca718d1321c7184 | /accounts/urls.py | e8227847fe408eb693d9683b54ea4ac58eb4699e | [
"MIT"
] | permissive | ko-naka-3010527/accup-python | 59164929ae2eadef5820f2f0b8c2edbd92f78c19 | e10ff96111be889573d830f4b710d9b8b19e1828 | refs/heads/master | 2023-03-22T04:46:52.998623 | 2021-03-18T05:15:16 | 2021-03-18T05:15:16 | 348,617,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | from django.urls import path
from . import views
app_name = 'accounts'
urlpatterns = [
path('', views.index, name='index'),
path('<str:fmt>/', views.index, name='fmt'),
path('html/login/', views.login, name='login'),
path('html/redirect/', views.login_redirect, name='loginredirect'),
path('html/logout/', views.logout, name='logout'),
path('<str:fmt>/update/<str:username>/form/', views.account_update_form, name='update_form'),
path('<str:fmt>/update/<str:username>/', views.account_update, name='update'),
path('<str:fmt>/update/<str:username>/success/', views.account_updatesuccess, name='update_success'),
path('<str:fmt>/create/form/', views.account_create_form, name='create_form'),
path('<str:fmt>/create/', views.account_create, name='create'),
path('<str:fmt>/create/success/', views.account_createsuccess, name='create_success'),
]
| [
"ymbk@k-off.org"
] | ymbk@k-off.org |
71fd4b0019784bfea40cddbd9b8a5bf51b2edb78 | 4dabe29fcc463c334530b5dac48c9009a803cdda | /Levinson-Durbin.py | 0bedd09c4e00cb1ba827a758a7296f9ae912137b | [] | no_license | Abdel-Malik/Projet_ASAR | 7567934426fe7cbcc328774f90f7bec152a00bd2 | 8d9b089c531845f94a293b371329f6a11e5abfa0 | refs/heads/master | 2021-01-18T22:22:31.649306 | 2017-04-24T09:30:02 | 2017-04-24T09:30:02 | 87,048,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,731 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 10 18:00:01 2017
@author: ledentp
"""
# Caclul du vecteur contenant les coefficients d'autocorrelation
# Yn est le vecteur des donnes
# R est un vecteur contenant les coefficitens d'autocorrelation
# Complexite O(n*n)
def coeffsAuto(Yn,p):
# r stocke les coefficients auto regressifs
n=len(Yn)
R=[]
for i in range(p+1):
# Calcul des coefficients d'auto-correlation r(i)
som=0
for j in range(n-i):
som+=Yn[j]*Yn[i+j]
R.append(som)
return R
# Application de l'algorithme de Levinson-Durbin pour le modele AR(p) a partir des coefficients d'autocorrelation
# Entrees : p est l'ordre du modele
# R est le vecteur des coefficients d'autocorellation
# Sortie : A est le vecteur contenant les coefficients du modele AR
# Complexite : O(n^2)
def RecLevinsonDurbin(R,p):
# Variables locales :
# E est un vecteur d'inconnues
# lambd est un nombre dans une combinaison lineaire
# Initialisation
Ak = zeros(p+1)
Ak[0] = 1;
Ek = R[0]
# Recurtion de Levinson-Durbin
for k in range(p):
# calcul de lambda
lambd = 0
for j in range(k+1):
lambd -= Ak[j]*R[k+1-j]
lambd /= Ek
# Mise a jour de Ak
for n in range(1+int((k+1)/2)):
temp = Ak[k+1-n]+lambd*Ak[n]
Ak[n]=Ak[n]+lambd*Ak[k+1-n]
Ak[k+1-n] = temp
# Mise a jour de Ek
Ek *= 1-lambd*lambd
return Ak
# Application de l'algorithme de Levinson-Durbin pour le modele AR(p) a partir des donnes
def LevinsonDurbin(Yn,p):
return RecLevinsonDurbin(coeffsAuto(Yn,p),p);
| [
"noreply@github.com"
] | noreply@github.com |
3f6b280bdd02b4576ac013f02d7caa5265b2339d | fc9bf6483a22dff83c6251e1425fa494f7a41563 | /main.py | b141641a5ef525c00516c617f3d7a0524ee8c5f6 | [] | no_license | nicholasosto/Trembus-Tech | dc390ba07850ef53c58ec6d211635b5adcb0ba67 | a9ab1ee054dacc07549b77ad16c414fe987686af | refs/heads/master | 2016-09-09T17:56:59.063235 | 2014-07-31T22:18:50 | 2014-07-31T22:18:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions = ['jinja2.ext.autoescape'],
autoescape = True)
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.write("Get request!")
template_values = {
'var1': 100,
'var2': 'Nick the Stick'
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
def post(self):
self.response.write("Post request!")
application = webapp2.WSGIApplication([
('/', MainPage),('/pizzle', MainPage)
], debug=True) | [
"nicholasosto@Nicholass-iMac.local"
] | nicholasosto@Nicholass-iMac.local |
acc4182cbee129776c793f9c60c02d618d834485 | 59e53439e205f7252bef8fe72d658211c9f22bf8 | /{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/migrations/__init__.py | 9f19b1712a5651649c6cf8441e26cbc63005d765 | [] | no_license | D-Bits/Django-Cookiecutter | 291a991620777e5569d484ac2bc2d4ba14f31b5f | 7c4a653eb61f386b6379422508cd868c27838a2d | refs/heads/master | 2023-04-29T20:02:53.231144 | 2019-12-05T01:31:56 | 2019-12-05T01:31:56 | 212,965,058 | 0 | 0 | null | 2023-04-21T20:40:40 | 2019-10-05T08:06:04 | Python | UTF-8 | Python | false | false | 24 | py | # Package for migrations | [
"Dana.Lockwood2@seattlecolleges.edu"
] | Dana.Lockwood2@seattlecolleges.edu |
4e36fc5436291385399f6f1b6bcdf595e9102b42 | bbc0c6116117ab835d8fe28d4aa1dedb2a64200e | /SplashScreen.py | 7f24c3bb07298516e86d55b3df98ca2e97bbf149 | [] | no_license | osakhawalkar/tartanhacks18 | 4aa2ae84d29592491e02bd675da8ffe08bebfb3d | cf9384b0e0ad9231dccc75e92a3e26feab2d4cf5 | refs/heads/master | 2021-05-01T19:52:58.597937 | 2018-02-10T20:26:15 | 2018-02-10T20:26:15 | 120,954,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,838 | py | from tkinter import *
def startScreen(canvas,data):
canvas.create_rectangle(0, 0, data.width, data.height,
fill='AntiqueWhite1', width=0)
(helloX , helloY) = data.hello
#canvas.create_rectangle(helloX - 60 , helloY - 20, helloX + 60, helloY + 20, width= 5, fill = "grey" )
canvas.create_text(data.hello, text = "Welcome to Shillow")
(newX, newY) = data.new
canvas.create_rectangle(newX - 80, newY - 20 , newX + 80 , newY + 20, width = 5, fill = "grey")
canvas.create_text(data.new, text = "Net Present Value Calculator")
(returningX, returningY) = data.returning
canvas.create_rectangle(returningX - 50, returningY - 20, returningX + 50, returningY +20, width = 5, fill = "grey")
canvas.create_text(data.returning, text = "Recommender")
def RECsystem(canvas, data):
canvas.create_rectangle(0, 0, data.width, data.height,
fill='AntiqueWhite1', width=0)
(helloX, helloY) = data.hello
# canvas.create_rectangle (helloX - 60 , helloY - 20, helloX + 60, helloY + 20, width= 5, fill = "grey" )
canvas.create_text(data.hello, text="Recommendation Mode", font = "120")
(newX, newY) = data.new
new = (newX - 80, newY)
canvas.create_rectangle(newX - 75, newY - 20, newX + 75, newY + 20, width = 2 , fill="white")
canvas.create_text(newX - 75, newY, text = data.name, anchor = 'w')
canvas.create_text(new, text="Zip Code", anchor = 'e')
(returningX, returningY) = data.returning
returning = (returningX - 80, newY + 40)
canvas.create_rectangle(returningX - 75, newY + 20, returningX + 75, newY + 60 , width= 2 , fill="white")
canvas.create_text(returningX - 75, newY + 40, text = data.password, anchor = 'w')
canvas.create_text(returning, text="Bedrooms", anchor ='e')
returning = (returningX - 80, newY + 80 )
canvas.create_rectangle(returningX - 75, newY + 60, returningX + 75, newY + 100, width= 2 , fill="white")
canvas.create_text(returningX - 75, newY + 80, text = data.assets, anchor = 'w')
canvas.create_text(returning, text="Bathrooms", anchor ='e')
returning = (returningX - 80, newY + 120 )
canvas.create_rectangle(returningX - 75, newY + 100, returningX + 75, newY + 140, width= 2 , fill="white")
canvas.create_text(returningX - 75, newY + 120, text = data.risk, anchor = 'w')
canvas.create_text(returning, text="Square Feet", anchor ='e')
# Label(canvas, text="Username").grid(row=1, column=1, sticky=W) # entry fields
# Label(canvas, text="Password").grid(row=2, column=1, sticky=W)
#
# data.name = Entry(canvas, textvariable=data.name, justify=RIGHT).grid(row=1, column=2) # entry field data with positioning
#
# data.password = Entry(canvas, textvariable=data.password, justify=RIGHT).grid(row=2, column=2)
# btCalculate = Button(window, text="Calculate", command=self.Calculate).grid(row=6, column=2,
# sticky=E) # calculate button
def NPV(canvas,data): #ask for name, assets, risk, frequency, password
canvas.create_rectangle(0, 0, data.width, data.height,
fill='AntiqueWhite1', width=0)
(helloX, helloY) = data.hello
# canvas.create_rectangle(helloX - 60 , helloY - 20, helloX + 60, helloY + 20, width= 5, fill = "grey" )
canvas.create_text(data.width/2,0, text="Net Present Value", font = "120", anchor = "n")
(newX, newY) = data.new
new = (newX - 80, newY)
canvas.create_rectangle(newX - 75, newY - 20, newX + 75, newY + 20, width = 2 , fill="white")
canvas.create_text(newX - 75, newY, text = data.name, anchor = 'w')
canvas.create_text(new, text="Zipcode", anchor = 'e')
(returningX, returningY) = data.returning
returning = (returningX - 80, newY + 80 )
canvas.create_rectangle(returningX - 75, newY + 60, returningX + 75, newY + 100, width= 2 , fill="white")
canvas.create_text(returningX - 75, newY + 80, text = data.assets, anchor = 'w')
canvas.create_text(returning, text="Years", anchor ='e')
returning = (returningX - 80, newY + 120 )
canvas.create_rectangle(returningX - 75, newY + 100, returningX + 75, newY + 140, width= 2 , fill="white")
canvas.create_text(returningX - 75, newY + 120, text = data.risk, anchor = 'w')
canvas.create_text(returning, text="Years In Place", anchor ='e')
returning = (returningX - 80, newY + 160 )
canvas.create_rectangle(returningX - 75, newY + 140, returningX + 75, newY + 180, width= 2 , fill="white")
canvas.create_text(returningX - 75, newY + 160, text = data.frequency, anchor = 'w')
canvas.create_text(returning, text="Yearly Income", anchor ='e')
| [
"noreply@github.com"
] | noreply@github.com |
b626e2a94b9b88f526ed11ae5ece091ffd01df8a | 7e99ded7f8f300651f99e4b8b295d6f4bccb9a85 | /venv/Scripts/django-admin.py | 47f2b0ed24a9536a97ee93d864b73b1fb1bb7597 | [] | no_license | Godwinjthomas/Full-stack- | 075b604c590fab1ca15660e1fc8b30e1308edc6e | eb51942913ae2134584c3dd42e2002c0a9ef5879 | refs/heads/master | 2023-07-02T17:58:23.468193 | 2021-08-04T12:39:25 | 2021-08-04T12:39:25 | 392,565,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | #!c:\users\godwin\pycharmprojects\textoperations\venv\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"87801046+Godwinjthomas@users.noreply.github.com"
] | 87801046+Godwinjthomas@users.noreply.github.com |
720fc250757a422d1392d63e837b0e5d97ab5faa | 114d78320fb2bcd99c9dc802a3ed5bab47c537e6 | /ksp_scraper/tests/user_utilities_test.py | a78de591f5e943dd497f3a80ac7faf51078f5014 | [] | no_license | johnnie172/scraper-web-app | 1d36ffe9e476c0590c023e4907bbf0d44e0e7539 | 2b6c72c4d34a5a30c485c90f879aab30325af25d | refs/heads/master | 2023-03-28T14:47:37.148053 | 2021-02-28T10:17:50 | 2021-02-28T10:17:50 | 336,233,542 | 0 | 0 | null | 2021-02-28T10:17:51 | 2021-02-05T10:07:30 | Python | UTF-8 | Python | false | false | 429 | py | import unittest
from unittest import mock
import UserUtilities
import consts
class TestUserUtilities(unittest.TestCase):
# @mock.patch('builtins.input', side_effect=["walla@walla.com", "123456", "gmail@gmail.com", "123456"])
# def test_user_login(self, side_effect):
# self.assertEqual(users_utilities.user_login()[0], 1)
def test_user_signup(self):
pass
def user_log_out(self):
pass
| [
"jmpstillsart@gmail.com"
] | jmpstillsart@gmail.com |
6cb5a5e75a9c8324286d70e2893b91e427710002 | 60cbdf1f9771159f872e632017fa736800784297 | /Codewars/Find-the-odd-int.py | c1567718b97f70ca035fd5cb5332e8b15ddf1595 | [] | no_license | AG-Systems/programming-problems | 6ea8c109f04c4d22db6e63fe7b665894c786242a | 39b2d3546d62b48388788e36316224e15a52d656 | refs/heads/master | 2023-04-16T16:59:20.595993 | 2023-04-05T01:25:23 | 2023-04-05T01:25:23 | 77,095,208 | 10 | 3 | null | 2019-10-14T16:16:18 | 2016-12-22T00:03:14 | Python | UTF-8 | Python | false | false | 355 | py | def find_it(seq):
hashtable = {}
for x in seq:
if x in hashtable:
hashtable[x] += 1
else:
hashtable[x] = 1
for key,val in hashtable.items():
if val % 2 != 0:
return key
"""
def find_it(seq):
for i in seq:
if seq.count(i)%2!=0:
return i
CLEVER SOLUTION
"""
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.