blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8b2d298b6d6dd1f9af96d7ca7332a979fe1e6fb2 | a6b7dbdf66de11f6e04bac5b9da5cb2d71b8e111 | /summary/main.py | 20667469ee285696734faf0e0f74dc2f014641f8 | [] | no_license | gabrielwong159/tf | 9b9e144682daab3901c5cde703d39d5ee1a68b72 | bd506341034ecb47ea50a0b38040c1765003deb3 | refs/heads/master | 2021-07-03T23:50:22.391246 | 2019-03-15T10:29:25 | 2019-03-15T10:29:25 | 143,256,201 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,647 | py | import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data
from model import MNIST
from os.path import join
from tqdm import trange
summaries_dir = 'summaries'
learning_rate = 1e-4
batch_size = 50
num_iterations = 5_000
mnist = input_data.read_data_sets('data/', one_hot=False, reshape=False)
def variable_summaries(var):
    """Attach scalar summaries (mean/stddev/max/min) and a histogram for `var`.

    All ops are created under a 'summaries' name scope; callers collect them
    later via tf.summary.merge_all().
    """
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        # stddev is computed in its own scope so the sqrt/square ops group
        # together in TensorBoard's graph view.
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
def make_summaries(model):
    """Create weight/bias summaries for each MNIST layer plus loss/accuracy.

    Layer and variable names ('conv1/weights', ...) must match the variable
    names created by the model (looked up via slim.get_variables_by_name).
    Returns the merged summary op to be fed to sess.run().
    """
    for layer in ['conv1', 'conv2', 'fc1', 'fc2']:
        for var_type in ['weights', 'biases']:
            with tf.name_scope(layer), tf.name_scope(var_type):
                var = '/'.join([layer, var_type])
                variable_summaries(slim.get_variables_by_name(var)[0])
    tf.summary.histogram('keep_prob', model.keep_prob)
    tf.summary.histogram('predictions', model.logits)
    tf.summary.scalar('loss', model.loss)
    tf.summary.scalar('accuracy', model.accuracy)
    merged_summaries = tf.summary.merge_all()
    return merged_summaries
def main():
    """Train the MNIST model, logging train/test summaries for TensorBoard.

    Every 10th iteration evaluates summaries on the full test set (keep_prob
    1.0, no train step); all other iterations run one Adam training step on a
    mini-batch with dropout (keep_prob 0.5).
    """
    tf.reset_default_graph()
    model = MNIST()
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_step = optimizer.minimize(model.loss)
    with tf.Session() as sess:
        # Summaries must be built after the model graph so merge_all sees them.
        merged_summaries = make_summaries(model)
        train_writer = tf.summary.FileWriter(join(summaries_dir, 'train'), sess.graph)
        test_writer = tf.summary.FileWriter(join(summaries_dir, 'test'))
        sess.run(tf.global_variables_initializer())
        for i in trange(num_iterations):
            if i % 10 == 0:
                # Evaluation pass: dropout disabled, no optimizer update.
                summary = sess.run(merged_summaries, feed_dict={
                    model.x: mnist.test.images,
                    model.y: mnist.test.labels,
                    model.keep_prob: 1.0,
                })
                test_writer.add_summary(summary, i)
            else:
                x, y = mnist.train.next_batch(batch_size)
                _, summary = sess.run([train_step, merged_summaries], feed_dict={
                    model.x: x,
                    model.y: y,
                    model.keep_prob: 0.5,
                })
                train_writer.add_summary(summary, i)
if __name__ == '__main__':
main()
| [
"gabrielwong159@gmail.com"
] | gabrielwong159@gmail.com |
e0a7b3ae1374ca144770517141e8db9a5a0dd8f4 | 7c990eb27315940b28bd4cb5bca5dcab038987ef | /cmake-build-debug/catkin_generated/generate_cached_setup.py | 6b9c78be84ddf0c8b1f45c400131fbf7d4aa7132 | [] | no_license | zhangtygs/ros-yolov5 | 70e4fb0f8a0066a23ddc6538629a9f801ba9bea5 | a108806682dcefb51cddceb30c49c8e15d04a5c3 | refs/heads/master | 2023-03-28T05:47:25.918738 | 2021-03-27T07:37:38 | 2021-03-27T07:37:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# NOTE: this file is auto-generated by catkin's CMake machinery; the absolute
# paths below are machine-specific and rewritten on every configure step --
# do not hand-edit.
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in '/home/ou/workspace/ros_ws/ironworks_ws/devel;/opt/ros/melodic'.split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script
# Capture the devel-space environment as a list of shell lines.
code = generate_environment_script('/home/ou/workspace/ros_ws/dev_ws/src/ros_yolo/cmake-build-debug/devel/env.sh')
output_filename = '/home/ou/workspace/ros_ws/dev_ws/src/ros_yolo/cmake-build-debug/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
# Mark the cached setup script as user-executable.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"706545330@qq.com"
] | 706545330@qq.com |
fefe183ee81e64abb008a8dfded629c1fe7cc6c7 | e3714fb9ce66e45ab2b3a64bc4918fb44ab9dce5 | /compress_image.py | e1d80a4209fdbaafea936b3d553d6053401923ff | [] | no_license | nitr-himanshu/python-modules | 14b985b50cf6e7e75580615ae8250ee5fd1c7f12 | 24c1e1a576fa7969f999e74ea7955ca3464bd753 | refs/heads/master | 2020-09-19T09:36:15.464758 | 2020-02-12T04:39:18 | 2020-02-12T04:39:18 | 224,218,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from PIL import Image
def compress(img_path, new_height, inplace=True, new_img_path=""):
    """Resize the image at ``img_path`` to ``new_height`` pixels, keeping the
    aspect ratio.

    :param img_path: path of the source image
    :param new_height: target height in pixels
    :param inplace: overwrite the source file when True (default)
    :param new_img_path: destination path; required when ``inplace`` is False
    :return: path of the saved (resized) image file
    :raises ValueError: if ``inplace`` is False and ``new_img_path`` is empty
    """
    if not inplace and not new_img_path:
        raise ValueError("new_img_path is required when inplace=False")
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    # The `or` keeps lazy fallback for very old PIL without touching the
    # missing attribute eagerly.
    resample = getattr(Image, "LANCZOS", None) or Image.ANTIALIAS
    img = Image.open(img_path)
    # Scale width by the same factor as the height change.
    scale = new_height / float(img.size[1])
    new_width = int(float(img.size[0]) * scale)
    img = img.resize((new_width, new_height), resample)
    if inplace:
        new_img_path = img_path
    img.save(new_img_path)
    return new_img_path
| [
"thehimanshukeshri@gmail.com"
] | thehimanshukeshri@gmail.com |
ec316d01099ea1bc5877dcde7de1cce9a20f69a5 | 5cafda777e72d0d637597567c6ea773be071475c | /misc/echo_server.py | 8d1c972c643c0bba71e71fd711a1326645857580 | [] | no_license | skyris/web_tech_stepik | 8a73c6231d689981531cb3c970ae348ac7ceebb2 | 7906fd8891a1b48a0aa716c7e514677f2cac1480 | refs/heads/master | 2020-07-23T10:35:48.793013 | 2016-11-20T23:20:24 | 2016-11-20T23:20:24 | 73,810,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,659 | py | #! /usr/bin/env python
import socket
def receive(sock, msglen):
    """Read exactly ``msglen`` bytes from ``sock``.

    Accumulates recv() chunks until ``msglen`` bytes have arrived. If the
    peer sends a bare "close" command (optionally newline-terminated), the
    socket is closed and b"close" is returned so callers can detect it.

    :raises RuntimeError: if the connection breaks before enough bytes arrive.
    """
    chunks = []
    received = 0
    while received < msglen:
        chunk = sock.recv(msglen - received)
        # An empty result means the peer closed the connection.
        if chunk == b"":
            raise RuntimeError("broken")
        # Bug fix: the original broke out returning a *partial* message on
        # "close", so the caller's `data == "close"` check never matched.
        # Bytes literals keep this working on both Python 2 and 3.
        if chunk in (b"close", b"close\n", b"close\r\n"):
            sock.close()
            return b"close"
        chunks.append(chunk)
        received += len(chunk)
    return b"".join(chunks)
def send(sock, msg):
    """Send all of ``msg`` over ``sock``, retrying partial writes.

    socket.send() may transmit only a prefix; loop until every byte of
    ``msg`` has been handed to the kernel.

    :raises RuntimeError: if the peer closes the connection mid-send.
    """
    total_sent = 0
    while total_sent < len(msg):
        sent = sock.send(msg[total_sent:])
        # send() returning 0 signals a broken connection.
        if sent == 0:
            raise RuntimeError("broken")
        total_sent += sent
def server():
    """Uppercase-echo server on port 2223 using the fixed-length helpers.

    Serves one client at a time, forever (no clean shutdown path).
    NOTE(review): receive() blocks until a full 1024 bytes arrive, so short
    messages stall here -- confirm the intended framing. Under Python 3 the
    str comparison below never matches the bytes recv() produces, and after
    receive() has already closed the socket on "close" this closes it again.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(("0.0.0.0", 2223))
    server_socket.listen(1)
    while True:
        # Blocks until a client connects; serve that client until "close".
        client_socket, remote_address = server_socket.accept()
        while True:
            data = receive(client_socket, 1024)
            if data == "close":
                client_socket.close()
                break
            send(client_socket, data.upper())
def server2():
    """Plain echo server on port 2222 using raw socket send/recv.

    Unlike server(), this echoes whatever single recv() returns (up to
    1024 bytes) without framing. Serves one client at a time, forever.
    NOTE(review): under Python 3 recv() yields bytes, so the str
    comparisons below never match -- this code is written for Python 2.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(("0.0.0.0", 2222))
    server_socket.listen(1)
    while True:
        client_socket, remote_address = server_socket.accept()
        while True:
            data = client_socket.recv(1024)
            if data in ["close", "close\n", "close\r\n"]:
                client_socket.close()
                break
            client_socket.send(data)
server2()
# server()
| [
"4klimov@gmail.com"
] | 4klimov@gmail.com |
751255d530780137e0285d8df5447aef573973a3 | 85469c44c38853752fe4d68abf57f6163dc1bd14 | /application/admin/admin_routes.py | c060569b272856032adf00c00ead1bf88ebe657a | [] | no_license | betfund/betfund-webapp | a1cab76b1ce510ab16722c0a08c017ed551729a9 | 3d4784fb7696867d55383a5ea3ee869fdcff8776 | refs/heads/master | 2021-05-18T05:39:49.530796 | 2020-03-29T21:58:42 | 2020-03-29T21:58:42 | 251,140,323 | 0 | 0 | null | 2020-04-10T06:17:49 | 2020-03-29T21:39:59 | Python | UTF-8 | Python | false | false | 761 | py | from flask import Blueprint, jsonify
from application.models import User
from flask_login import login_required
admin_bp = Blueprint('admin_bp', __name__, template_folder='templates')
@admin_bp.route('/admin', methods=['GET', 'POST'])
@login_required
def admin():
    """
    Admin end point.

    SECURITY: this returns every User row as JSON *including the stored
    password value* to any logged-in user -- a serious information leak if
    it ships as-is.

    TODO :: This DEFINITELY needs to be updated. We'll
    want to build out an actual admin end point with
    `flask_admin`, most likely. For now, this just
    returns all of the user data from the database.
    """
    users = User.query.all()
    # Serialize each user; 'pass' exposes the password column verbatim.
    users_json = [{
        'id': u.id,
        'first': u.first_name,
        'last': u.last_name,
        'email': u.email_address,
        'pass': u.password
    } for u in users]
    return jsonify(users_json)
| [
"mitchbregs@gmail.com"
] | mitchbregs@gmail.com |
1b3509386baedb66e3612538112b0031faddc94e | d0c8ca75d4d87d6698e6f96d8520d8a3104b7d88 | /MT/src/onmt/Models.py | 834decfc92b7b7c8e45ac813b968c975f15811f6 | [] | no_license | pingfansong/Controllable-Invariance | 44d8ad4a7a7aa204157d66387f62107b624e86a2 | 373ac88548f93fe18d0a8f77a4faa444e0b1ba63 | refs/heads/master | 2021-01-25T13:47:36.602040 | 2017-12-04T07:46:39 | 2017-12-04T07:46:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,752 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
import onmt.modules
import torch.nn.utils.rnn as rnn_utils
import math
def check_decreasing(lengths):
    """Sort ``lengths`` into descending order for sequence packing.

    Returns None when ``lengths`` is already non-increasing (no reordering
    needed). Otherwise returns a triple ``(sorted_lens, order, inverse)``
    where ``order`` permutes the batch into descending-length order and
    ``inverse`` undoes that permutation; both index tensors are wrapped in
    autograd Variables, matching the callers' expectations.
    """
    sorted_lens, order = torch.sort(lengths, 0, True)
    already_sorted = torch.ne(sorted_lens, lengths).sum() == 0
    if already_sorted:
        return None
    _, inverse = torch.sort(order)
    return sorted_lens, Variable(order), Variable(inverse)
class Encoder(nn.Module):
    """(Bi)LSTM source encoder with optional attribute ("rb") conditioning.

    Two mutually exclusive conditioning modes:
    * opt.rb_init_token -- the attribute is prepended to the source as an
      extra vocabulary token (embedding table enlarged by num_rb_bin);
    * opt.use_rb_emb / use_src_rb_emb -- a learned attribute embedding is
      concatenated onto every time-step's word embedding.
    """
    def __init__(self, opt, dicts):
        self.layers = opt.layers
        self.num_directions = 2 if opt.brnn else 1
        # A bidirectional RNN splits rnn_size across the two directions.
        assert opt.rnn_size % self.num_directions == 0
        self.hidden_size = opt.rnn_size // self.num_directions
        #self.hidden_size = opt.rnn_size
        inputSize = opt.word_vec_size
        self.opt = opt
        super(Encoder, self).__init__()
        if opt.rb_init_token:
            self.rb_lut = None
            self.word_lut = nn.Embedding(dicts.size() + opt.num_rb_bin,
                                         opt.word_vec_size,
                                         padding_idx=onmt.Constants.PAD)
        else:
            self.word_lut = nn.Embedding(dicts.size(),
                                         opt.word_vec_size,
                                         padding_idx=onmt.Constants.PAD)
            if opt.num_rb_bin > 0 and opt.use_rb_emb and opt.use_src_rb_emb:
                self.rb_lut = nn.Embedding(opt.num_rb_bin, opt.rb_vec_size)
                inputSize += opt.rb_vec_size
            else:
                self.rb_lut = None
        self.rnn = nn.LSTM(inputSize, self.hidden_size,
                           num_layers=opt.layers,
                           dropout=opt.dropout,
                           bidirectional=opt.brnn)
        # self.rnn.bias_ih_l0.data.div_(2)
        # self.rnn.bias_hh_l0.data.copy_(self.rnn.bias_ih_l0.data)
        self.dict_size = dicts.size()
        if opt.pre_word_vecs_enc is not None:
            pretrained = torch.load(opt.pre_word_vecs_enc)
            self.word_lut.weight.copy_(pretrained)
    def forward(self, input, input_rb, hidden=None):
        """Encode a batch.

        input: (batch, sourceL) token ids; input_rb: (batch,) attribute ids.
        Returns (hidden_t, outputs) where outputs is (sourceL, batch, dim).
        Sequences are packed for the LSTM; non-sorted batches are reordered
        via check_decreasing() and restored afterwards.
        """
        batch_size = input.size(0) # [batch x sourceL] batch first for multi-gpu compatibility
        if self.opt.rb_init_token:
            # Prepend the attribute token (offset past the word vocabulary).
            input = torch.cat([input_rb.unsqueeze(1) + self.dict_size, input], 1)
        emb = self.word_lut(input).transpose(0, 1) # [sourceL x batch x emb_size]
        if self.rb_lut is not None:
            rb_emb = self.rb_lut(input_rb) #[batch x emb_size]
            seq_len = emb.size(0)
            # Broadcast the attribute embedding over all time steps.
            emb = torch.cat([emb, rb_emb.unsqueeze(0).expand(seq_len, *rb_emb.size())], 2)
        # if hidden is None:
        #     h_size = (self.layers * self.num_directions, batch_size, self.hidden_size)
        #     h_0 = Variable(emb.data.new(*h_size).zero_(), requires_grad=False)
        #     c_0 = Variable(emb.data.new(*h_size).zero_(), requires_grad=False)
        #     hidden = (h_0, c_0)
        # outputs, hidden_t = self.rnn(emb, hidden)
        lengths = input.data.ne(onmt.Constants.PAD).sum(1).squeeze(1)
        check_res = check_decreasing(lengths)
        if check_res is None:
            # Already sorted by decreasing length: pack directly.
            packed_emb = rnn_utils.pack_padded_sequence(emb, lengths.tolist())
            packed_out, hidden_t = self.rnn(packed_emb)
            outputs, srclens = rnn_utils.pad_packed_sequence(packed_out)
        else:
            # Reorder to decreasing length, run, then undo the permutation.
            lens, order, rev_order = check_res
            packed_emb = rnn_utils.pack_padded_sequence(emb.index_select(1, order), lens.tolist())
            packed_out, hidden_t = self.rnn(packed_emb)
            outputs, srclens = rnn_utils.pad_packed_sequence(packed_out)
            outputs = outputs.index_select(1, rev_order)
            hidden_t = (hidden_t[0].index_select(1, rev_order),
                        hidden_t[1].index_select(1, rev_order))
        return hidden_t, outputs
class Discriminator(nn.Module):
    """Adversarial discriminator predicting the source attribute from encoder states.

    A gradient-reversal hook (adv_wrapper) is registered on a copy of the
    encoder context so that training the discriminator pushes the encoder
    toward attribute-invariant representations. Two heads are supported:
    "DNN" (feed-forward over the masked mean, or self-attended, context) and
    "RNN" (LSTM over the context sequence); "CNN" is declared but
    unimplemented (assert False).
    """
    def __init__(self, opt):
        super(Discriminator, self).__init__()
        self.adv_att = None
        self.opt = opt
        self.disc_type = opt.disc_type
        if opt.no_adv:
            self.discriminator = None
        else:
            if opt.disc_type == "DNN":
                init_in_size = opt.rnn_size
            elif opt.disc_type == "RNN":
                init_in_size = opt.disc_size
                self.num_directions = 2 if opt.disc_bi_dir else 1
                self.rnn = nn.LSTM(opt.rnn_size, opt.disc_size // self.num_directions,
                                   num_layers=1,
                                   dropout=opt.dropout,
                                   bidirectional=opt.disc_bi_dir)
            elif opt.disc_type == "CNN":
                assert False
            else:
                assert False
            if opt.adv_att:
                self.adv_att = onmt.modules.SelfAttention(init_in_size)
            # Classifier head: disc_layer blocks of
            # Linear (+BatchNorm) + nonlinearity + Dropout, then an output
            # layer (sigmoid for label smoothing, else (log-)softmax over bins).
            modules = []
            for i in range(opt.disc_layer):
                if i == 0:
                    in_size = init_in_size
                else:
                    in_size = opt.disc_size
                modules += [nn.Linear(in_size, opt.disc_size)]
                if opt.batch_norm:
                    modules += [nn.BatchNorm1d(opt.disc_size)]
                if opt.non_linear == "tanh":
                    modules += [nn.Tanh()]
                elif opt.non_linear == "relu":
                    modules += [nn.ReLU()]
                else:
                    assert False
                modules += [nn.Dropout(opt.adv_dropout_prob)]
            if opt.label_smooth:
                modules += [nn.Linear(opt.disc_size, 1)]
                modules += [nn.Sigmoid()]
            else:
                modules += [nn.Linear(opt.disc_size, opt.num_rb_bin)]
                if opt.disc_obj_reverse:
                    modules += [nn.Softmax()]
                else:
                    modules += [nn.LogSoftmax()]
            self.dnn = nn.Sequential(*modules)
    def forward(self, input, context, grad_scale):
        """Classify the attribute from `context`.

        Returns (disc_out, adv_norm); adv_norm is a list that the reversal
        hook later fills with the squared norm of the reversed gradient
        (pre-seeded with 0 when no hook is attached).
        """
        adv_norm = []
        if self.opt.no_adv:
            disc_out = None
            adv_norm.append(0)
        else:
            # Identity multiply gives a fresh graph node to hang the
            # gradient-reversal hook on without touching `context` itself.
            adv_context_variable = torch.mul(context, 1)
            if not self.opt.separate_update:
                adv_context_variable.register_hook(adv_wrapper(adv_norm, grad_scale))
            else:
                adv_norm.append(0)
            if self.disc_type == "DNN":
                adv_context_variable = adv_context_variable.t().contiguous()
                padMask = input.eq(onmt.Constants.PAD)
                if self.opt.rb_init_token:
                    # Account for the prepended attribute-token column.
                    rb_token_mask = Variable(torch.zeros(padMask.size(0), 1).byte())
                    if self.opt.cuda:
                        rb_token_mask = rb_token_mask.cuda()
                    padMask = torch.cat([rb_token_mask, padMask], 1)
                if self.adv_att:
                    self.adv_att.applyMask(padMask.data) #let it figure out itself. Backprop may have problem if not
                    averaged_context = self.adv_att(adv_context_variable)
                else:
                    # Masked mean over non-pad positions.
                    padMask = 1. - padMask.float() #batch * sourceL
                    masked_context = adv_context_variable * padMask.unsqueeze(2).expand(padMask.size(0), padMask.size(1), context.size(2))
                    sent_len = torch.sum(padMask, 1).squeeze(1)
                    averaged_context = torch.div(torch.sum(masked_context, 1).squeeze(1), sent_len.unsqueeze(1).expand(sent_len.size(0), context.size(2)))
                disc_out = self.dnn(averaged_context)
            elif self.disc_type == "RNN":
                lengths = input.data.ne(onmt.Constants.PAD).sum(1).squeeze(1)
                check_res = check_decreasing(lengths)
                if check_res is None:
                    packed_emb = rnn_utils.pack_padded_sequence(adv_context_variable, lengths.tolist())
                    packed_out, hidden_t = self.rnn(packed_emb)
                    if self.adv_att:
                        assert False
                        # NOTE(review): unreachable after the assert above.
                        outputs, srclens = rnn_utils.pad_packed_sequence(packed_out)
                    else:
                        # Classify from the final hidden state of the last layer.
                        hidden_t = (_fix_enc_hidden(hidden_t[0], self.num_directions)[-1],
                                    _fix_enc_hidden(hidden_t[1], self.num_directions)[-1]) #The first one is h, the other one is c
                        #print hidden_t[0].size(), hidden_t[1].size()
                        #hidden_t = torch.cat(hidden_t, 1)
                        #print hidden_t.size()
                        disc_out = self.dnn(hidden_t[0])
                else:
                    # NOTE(review): unsorted batches are not supported by the
                    # RNN head -- confirm callers always pre-sort by length.
                    assert False
            else:
                assert False
        return disc_out, adv_norm
class StackedLSTM(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedLSTM, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
for i in range(num_layers):
layer = nn.LSTMCell(input_size, rnn_size)
self.add_module('layer_%d' % i, layer)
input_size = rnn_size
def forward(self, input, hidden):
h_0, c_0 = hidden
h_1, c_1 = [], []
for i in range(self.num_layers):
layer = getattr(self, 'layer_%d' % i)
h_1_i, c_1_i = layer(input, (h_0[i], c_0[i]))
input = h_1_i
if i + 1 != self.num_layers:
input = self.dropout(input)
h_1 += [h_1_i]
c_1 += [c_1_i]
h_1 = torch.stack(h_1)
c_1 = torch.stack(c_1)
return input, (h_1, c_1)
class Decoder(nn.Module):
    """Attentional LSTM decoder with optional input feeding and attribute embeddings.

    With input feeding (opt.input_feed) a StackedLSTM is stepped manually so
    the previous attention output can be concatenated onto each input;
    otherwise a plain nn.LSTM consumes the whole target sequence at once.
    Target-side attribute conditioning mirrors the encoder: either an extra
    initial token (opt.rb_init_tgt) or a per-step embedding (use_tgt_rb_emb).
    """
    def __init__(self, opt, dicts, attn_type='global'):
        self.layers = opt.layers
        self.input_feed = opt.input_feed
        input_size = opt.word_vec_size
        if self.input_feed:
            # Previous attention output is appended to each step's input.
            input_size += opt.rnn_size
        self.opt = opt
        self.dict_size = dicts.size()
        super(Decoder, self).__init__()
        if opt.rb_init_tgt:
            self.word_lut = nn.Embedding(dicts.size() + opt.num_rb_bin,
                                         opt.word_vec_size,
                                         padding_idx=onmt.Constants.PAD)
            self.rb_lut = None
        else:
            self.word_lut = nn.Embedding(dicts.size(),
                                         opt.word_vec_size,
                                         padding_idx=onmt.Constants.PAD)
            if opt.num_rb_bin > 0 and opt.use_rb_emb and opt.use_tgt_rb_emb:
                self.rb_lut = nn.Embedding(opt.num_rb_bin, opt.rb_vec_size)
                input_size += opt.rb_vec_size
            else:
                self.rb_lut = None
        if self.input_feed:
            self.rnn = StackedLSTM(opt.layers, input_size, opt.rnn_size, opt.dropout)
        else:
            self.rnn = nn.LSTM(input_size, opt.rnn_size, num_layers=opt.layers, dropout=opt.dropout)
        if attn_type.lower() == 'global':
            self.attn = onmt.modules.GlobalAttention(opt.rnn_size)
        elif attn_type.lower() == 'cosine':
            self.attn = onmt.modules.CosineAttention(opt.rnn_size)
        elif attn_type.lower() == 'mlp':
            self.attn = onmt.modules.MLPAttention(opt.rnn_size)
        self.dropout = nn.Dropout(opt.dropout)
        self.context_dropout = nn.Dropout(opt.decoder_context_dropout)
        # self.rnn.bias_ih.data.div_(2)
        # self.rnn.bias_hh.data.copy_(self.rnn.bias_ih.data)
        self.hidden_size = opt.rnn_size
        # NOTE(review): the guard checks pre_word_vecs_enc but loads
        # pre_word_vecs_dec -- confirm both options are always set together.
        if opt.pre_word_vecs_enc is not None:
            pretrained = torch.load(opt.pre_word_vecs_dec)
            self.word_lut.weight.copy_(pretrained)
    def forward(self, input, input_rb, hidden, context, init_output):
        """Decode a batch of target sequences.

        input: (batch, targetL) token ids; context: encoder outputs.
        Returns (outputs, final_hidden, attns) with outputs and attns
        transposed to batch-first.
        """
        emb = self.word_lut(input).transpose(0, 1)
        context = self.context_dropout(context)
        if self.rb_lut is not None:
            #print input_rb
            rb_emb = self.rb_lut(input_rb) #[batch x emb_size]
            #print rb_emb
            seq_len = emb.size(0)
            # Broadcast the attribute embedding over all target steps.
            emb = torch.cat([emb, rb_emb.unsqueeze(0).expand(seq_len, *rb_emb.size())], 2)
        batch_size = input.size(0)
        h_size = (batch_size, self.hidden_size)
        output = Variable(emb.data.new(*h_size).zero_(), requires_grad=False)
        # n.b. you can increase performance if you compute W_ih * x for all
        # iterations in parallel, but that's only possible if
        # self.input_feed=False
        outputs = []
        attns = []
        output = init_output
        if self.input_feed:
            # Step the StackedLSTM manually, feeding the previous attention
            # output back in at every time step.
            for i, emb_t in enumerate(emb.chunk(emb.size(0), dim=0)):
                emb_t = emb_t.squeeze(0)
                if self.input_feed:
                    emb_t = torch.cat([emb_t, output], 1)
                output, h = self.rnn(emb_t, hidden)
                output, attn = self.attn(output, context.t())
                output = self.dropout(output)
                outputs += [output]
                attns.append(attn)
                hidden = h
        else:
            # Run the whole sequence through nn.LSTM, then attend per step.
            rnn_out, h = self.rnn(emb, hidden)
            for i, rnn_out_t in enumerate(rnn_out.split(split_size=1, dim=0)):
                output, attn = self.attn(rnn_out_t.squeeze(0), context.t())
                output = self.dropout(output)
                outputs += [output]
                attns.append(attn)
        outputs = torch.stack(outputs)
        attns = torch.stack(attns)
        return outputs.transpose(0, 1), h, attns.transpose(0, 1) #it becomes batch * targetL * embedding
def _fix_enc_hidden(h, num_directions):
# the encoder hidden is (layers*directions) x batch x dim
# we need to convert it to layers x batch x (directions*dim)
if num_directions == 2:
return h.view(h.size(0) // 2, 2, h.size(1), h.size(2)) \
.transpose(1, 2).contiguous() \
.view(h.size(0) // 2, h.size(1), h.size(2) * 2)
else:
return h
class NMTModel(nn.Module):
    """Full seq2seq model: encoder + attentional decoder + adversarial discriminator.

    Two hooks are attached to copies of the encoder context: dec_wrapper
    records the decoder-path gradient norm, and (inside the discriminator)
    adv_wrapper reverses/scales the adversarial gradient.
    """
    def __init__(self, encoder, decoder, generator, discriminator, opt):
        super(NMTModel, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.generator = generator
        # When False, forward() returns pre-softmax decoder states and the
        # caller applies the generator itself (e.g. for memory-efficient loss).
        self.generate = False
        self.discriminator = discriminator
        self.opt = opt
        self.adv_grad_norm = 0
        self.dec_grad_norm = 0
    def get_seq2seq_parameters(self):
        """Yield parameters of the translation components (not the discriminator)."""
        for comp in [self.encoder, self.decoder, self.generator]:
            for p in comp.parameters():
                yield p
    def get_disc_parameters(self):
        """Yield discriminator parameters (nothing when adversary is disabled)."""
        for comp in [self.discriminator]:
            if comp is None:
                continue
            for p in comp.parameters():
                yield p
    def get_encoder_parameters(self):
        """Yield encoder parameters only."""
        for comp in [self.encoder]:
            for p in comp.parameters():
                yield p
    def set_generate(self, enabled):
        # Toggle whether forward() applies the generator projection.
        self.generate = enabled
    def make_init_decoder_output(self, context):
        """Zero tensor used as the initial input-feed vector (batch, dec_hidden)."""
        batch_size = context.size(1)
        h_size = (batch_size, self.decoder.hidden_size)
        return Variable(context.data.new(*h_size).zero_(), requires_grad=False)
    def forward(self, input, return_attn=False, grad_scale=None):
        """Run one training forward pass.

        input is a tuple: (src, tgt, src_attr, tgt_attr). Returns
        (out[, attn], disc_out, dec_norm, adv_norm); the norm lists are
        filled by backward hooks during the subsequent backward() call.
        """
        src = input[0]
        tgt = input[1][:, :-1]  # exclude last target from inputs
        enc_hidden, context = self.encoder(src, input[2])
        init_output = self.make_init_decoder_output(context)
        #how does it works
        # Reshape bidirectional encoder states for the decoder.
        enc_hidden = (_fix_enc_hidden(enc_hidden[0], self.encoder.num_directions),
                      _fix_enc_hidden(enc_hidden[1], self.encoder.num_directions))
        # Fresh node for the decoder-path gradient-norm hook.
        dec_context_variable = torch.mul(context, 1)
        dec_norm = []
        dec_context_variable.register_hook(dec_wrapper(dec_norm))
        if self.opt.no_adv:
            disc_out = None
            adv_norm = [0]
        else:
            disc_out, adv_norm = self.discriminator(input[0], context, grad_scale)
        if self.opt.rb_init_tgt:
            # Replace BOS with the target-attribute token (ids offset past vocab).
            tgt = torch.cat([input[3].unsqueeze(1) + self.decoder.dict_size, tgt[:, 1:]], 1)
        out, dec_hidden, attn = self.decoder(tgt, input[3], enc_hidden, dec_context_variable, init_output)
        if self.generate:
            out = self.generator(out)
        if return_attn:
            return out, attn, disc_out, dec_norm, adv_norm
        else:
            return out, disc_out, dec_norm, adv_norm
def dec_wrapper(norm):
    """Build a backward hook that records the squared L2 norm of the
    decoder-context gradient in the ``norm`` list.

    The gradient itself passes through unchanged (the hook returns None).
    """
    def hook_func(grad):
        # float() handles both 0-dim tensors (torch >= 0.4) and 1-element
        # tensors (older torch); the original `grad.norm().data[0]` raises
        # IndexError on 0-dim results in modern torch.
        norm.append(float(grad.norm()) ** 2)
    return hook_func
def adv_wrapper(norm, grad_scale):
    """Build a gradient-reversal hook: the incoming gradient is negated and
    scaled by ``grad_scale`` (adversarial training), and the squared L2 norm
    of the reversed gradient is appended to the ``norm`` list.
    """
    def hook_func(grad):
        new_grad = -grad * grad_scale
        # float() handles both 0-dim tensors (torch >= 0.4) and 1-element
        # tensors (older torch); the original `.data[0]` indexing raises
        # IndexError on 0-dim results in modern torch.
        norm.append(float(new_grad.norm()) ** 2)
        return new_grad
    return hook_func
torch.backends.cudnn.enabled = False
| [
"cheezer94@gmail.com"
] | cheezer94@gmail.com |
f089183b785e1121300cfb0257d7c0b43a4df73c | 8b3551600c4e12a12d604fd08408814e80b0db9e | /src/accounts/forms.py | 702f9e2a97931f69399b8f142d595dc31973d631 | [] | no_license | GaLaxY1101/CrashCourse | abe73879e4d6623321ce4d800ba452455a718605 | a528fe4ebb3ed5d59602a3d226dd9e04f503dc20 | refs/heads/main | 2023-04-16T22:30:50.380524 | 2021-05-04T10:28:38 | 2021-05-04T10:28:38 | 361,800,054 | 0 | 0 | null | 2021-05-04T10:09:33 | 2021-04-26T15:27:56 | Python | UTF-8 | Python | false | false | 1,047 | py | from django.forms import ModelForm
from .models import Order, Customer
#For user register
from django.contrib.auth.forms import UserCreationForm
from django import forms
#For login
from django.contrib.auth import authenticate
class AccountAuthenticationForm(ModelForm):
    """Login form: validates an email/password pair via authenticate()."""
    password = forms.CharField(label='Password')
    class Meta:
        model = Customer
        fields = ('email','password')
    def clean(self):
        # Only try to authenticate once field-level validation succeeded.
        if self.is_valid(): #self = form
            email = self.cleaned_data['email']
            password = self.cleaned_data['password']
            if not authenticate(email=email, password=password):
                raise forms.ValidationError('Invalid login')
        # Django's form contract: clean() should return the cleaned data
        # (the original omitted this, which breaks on older Django versions).
        return self.cleaned_data
class OrderForm(ModelForm):
    """ModelForm exposing every field of the Order model."""
    class Meta:
        model = Order # the model this form is built for
        fields = '__all__' # or ['field1','field2']; field names must be taken from the model
class CreateUserForm(UserCreationForm):
    """Sign-up form extending Django's UserCreationForm with a required email."""
    email = forms.EmailField(max_length=60)
    class Meta:
        model = Customer
        # Bug fix: 'password1' was listed twice; the confirmation field is
        # named 'password2', so the duplicate silently dropped it.
        fields = ('email', 'username', 'password1', 'password2')
| [
"korniykhik3@gmail.com"
] | korniykhik3@gmail.com |
ca0d1b7730390c96c8aa0842a2430a0e01ad1a18 | 2e80e43fbbaadca6bba401214a2b02f48a06f4a3 | /multiappproject/multiappproject/settings.py | 6ebc21e084302b74cf515862e5e9df03df352a11 | [] | no_license | anilkumarreddyn/DjangoProjects | 751a95718079d42e5a03857f20cd812aadc01ba3 | c10c34308762a2bfd05b56f6e0838055e06a601a | refs/heads/master | 2021-06-24T05:21:10.670792 | 2019-06-25T05:53:19 | 2019-06-25T05:53:19 | 193,641,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | """
Django settings for multiappproject project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qd(ibl2bn#=yjoxniv@fj@5&x-u52#tn8h@z5u+wr5hz8swkga'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'firstApp',
'secondApp',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'multiappproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'multiappproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"anilkumarreddyn@outlook.com"
] | anilkumarreddyn@outlook.com |
73821d45b2e81c6381e427248500318d56b21d72 | 5f4e13201d4c5b7edc8dbbda289380682a187bec | /dltc/coffeehouse_dltc/__init__.py | e05637a846b663221e3778d66f0804fd27c8bfc0 | [] | no_license | intellivoid/CoffeeHousePy | 92f4fb344de757837c3d3da05cb5513e90408039 | 57c453625239f28da88b88ddd0ae5f1ecdd4de3c | refs/heads/master | 2023-02-23T14:32:01.606630 | 2021-01-28T02:57:10 | 2021-01-28T02:57:10 | 324,419,067 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | from . import main
from .main import *
from . import config
from .config import *
from . import utils
from .utils import *
from . import base
from .base import *
from . import chmodel
from .chmodel import *
from . import nn
from .nn import *
__all__ = ['main', 'base', 'chmodel', 'nn', 'DLTC'] | [
"netkas@intellivoid.net"
] | netkas@intellivoid.net |
775271a58abd0433fdefa7dccf9f7d67305d1eac | 5a8222a754ba01ce9a9c317bf98970a4db033d67 | /slackings/wsgi.py | 12b40e31063e835b00ae8118fe1b3e5927020e5d | [] | no_license | dLook/slackings | 35e342be401b91b170adc35594d35aa0a73b902b | 63943c66626e39e40a89d0fb82aeec3239edc7e3 | refs/heads/master | 2020-03-09T03:54:56.727371 | 2018-04-07T23:16:03 | 2018-04-07T23:16:03 | 128,575,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | """
WSGI config for slackings project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "slackings.settings")
application = get_wsgi_application()
| [
"dlook@Dinos-MacBook-Pro.local"
] | dlook@Dinos-MacBook-Pro.local |
84fd947eeb59b2e53824d13d01685f9a5049699f | d9cf44ed3e734ce27d7d6d8ca0d95654a27d76d6 | /src/annotation/GenericReadTest.py | 142cf2f6f3025f5b48116f0c223c5964347254c5 | [] | no_license | skill-lang/pythonTest | 87d273fc018302fc18e207b4744a559d98ace2f0 | 2891d6bee891d9885701c9ce1afbb62767b8b455 | refs/heads/master | 2020-07-02T07:47:47.377793 | 2019-08-09T12:20:55 | 2019-08-09T12:20:55 | 201,461,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,307 | py |
import unittest
from tempfile import TemporaryFile
from python.src.annotation.api import *
from python.src.common.CommonTest import CommonTest
class GenericReadTest(unittest.TestCase, CommonTest):
"""
Tests the file reading capabilities.
"""
    def read(self, s):
        """Open the binary at `s` (path relative to the repo root) in read-only mode."""
        return SkillFile.open("../../../../" + s, Mode.Read, Mode.ReadOnly)
    def test_writeGeneric(self):
        """Smoke test: reflectively populate a fresh SkillFile without errors."""
        path = self.tmpFile("write.generic")
        sf = SkillFile.open(path.name)
        self.reflectiveInit(sf)
def test_writeGenericChecked(self):
path = self.tmpFile("write.generic.checked")
# create a name -> type map
types = dict()
sf = SkillFile.open(path.name)
self.reflectiveInit(sf)
for t in sf.allTypes():
types[t.name()] = t
# read file and check skill IDs
sf2 = SkillFile.open(path.name, Mode.Read)
for t in sf2.allTypes():
os = types.get(t.name()).__iter__()
for o in t:
self.assertTrue("to few instances in read stat", os.hasNext())
self.assertEquals(o.getSkillID(), os.next().getSkillID())
    # --- Generated acceptance cases ------------------------------------
    # Each test below opens one binary from genbinary/.../accept/ in
    # read-only mode via self.read() and only checks that parsing yields a
    # non-None SkillFile (i.e. no exception). Note that emptyFile.sf lives
    # under [[all]] while every other fixture is under [[empty]].
    def test_annotation_read_accept_age_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/age.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_age16_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/age16.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_ageUnrestricted_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/ageUnrestricted.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_aircraft_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/aircraft.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_annotationNull_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/annotationNull.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_annotationString_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/annotationString.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_annotationTest_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/annotationTest.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_coloredNodes_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/coloredNodes.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_container_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/container.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_crossNodes_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/crossNodes.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_date_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/date.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_emptyBlocks_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/emptyBlocks.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_emptyFile_sf(self):
        sf = self.read("src/test/resources/genbinary/[[all]]/accept/emptyFile.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_fourColoredNodes_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/fourColoredNodes.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_localBasePoolOffset_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/localBasePoolOffset.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_noFieldRegressionTest_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/noFieldRegressionTest.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_nodeFirstBlockOnly_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/nodeFirstBlockOnly.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_partial_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/partial.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_restrictionsAll_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/restrictionsAll.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_trivialType_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/trivialType.sf")
        self.assertIsNotNone(sf)
    def test_annotation_read_accept_twoNodeBlocks_sf(self):
        sf = self.read("src/test/resources/genbinary/[[empty]]/accept/twoNodeBlocks.sf")
        self.assertIsNotNone(sf)
def test_annotation_read_accept_twoTypes_sf(self):
sf = self.read("src/test/resources/genbinary/[[empty]]/accept/twoTypes.sf")
self.assertIsNotNone(sf)
def test_annotation_read_accept_unicode_reference_sf(self):
sf = self.read("src/test/resources/genbinary/[[empty]]/accept/unicode-reference.sf")
self.assertIsNotNone(sf)
| [
"feldentm@informatik.uni-stuttgart.de"
] | feldentm@informatik.uni-stuttgart.de |
1bce8567d8f1d14ff4bc68b9b00f0fa42b87eeaa | 8fcc4f687e7e451157d7f54689b0d176a1431e40 | /freightforwarding/python/api_query.py | 7b2abeed12e642b30344d0812ef14cb079c077eb | [] | no_license | shipamax/samples | b650b56f1d5582082260874eee1af69e6a16fa26 | c924503ec3c4dc08f1cec19cea0580c994e21a3c | refs/heads/master | 2022-07-13T05:29:43.563066 | 2022-06-23T08:01:15 | 2022-06-23T08:01:15 | 132,764,935 | 2 | 1 | null | 2022-06-23T08:01:05 | 2018-05-09T13:58:40 | C# | UTF-8 | Python | false | false | 1,252 | py | import requests
import json
import argparse
import uuid
from util import login, logout
DEFAULT_HOST = 'https://developer.shipamax-api.com'
def query(_host, custom_id, _token):
    """Fetch the parsing result for one custom ID and print the raw body.

    Raises Exception when the API answers with a non-200 status code.
    """
    endpoint = '{}{}'.format(_host, '/api/v1/DocumentContainers/query')
    response = requests.get(
        endpoint,
        params={
            'customIds': '["{}"]'.format(custom_id),
            'access_token': _token,
        },
        headers={'Content-Type': 'application/json'},
    )
    if response.status_code != 200:
        raise Exception('Query failed. Code {}'.format(response.status_code))
    print(response.content)
def main():
    """Parse CLI arguments, authenticate, run the query, and log out."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--username', type=str, required=True)
    parser.add_argument('--password', type=str, required=True)
    parser.add_argument('--host', type=str)
    parser.add_argument('--custom_id', type=str, required=True)
    args = parser.parse_args()

    # Fall back to the public endpoint when no host override was given.
    host = args.host if args.host else DEFAULT_HOST

    token = login(host, args.username, args.password)
    query(host, args.custom_id, token)
    logout(host, token)


if __name__ == '__main__':
    main()
| [
"fabianblaicher@gmail.com"
] | fabianblaicher@gmail.com |
adcaed4b3126aaf255a3fd151f53b4cd40aa336d | 8006cd33697ad21689f54891233c111082d5b3df | /components/unusable_model/inference.py | f4357cfbebef3672b4d6fccd4bcf6c9afbdf5e04 | [] | no_license | mytnitskaya/only_unusable_model | b25d41673b6ce52baec2e5c8df53dbfa9f15dbf5 | 656cc3de4b469525bda895dd9f27fb74d66e0480 | refs/heads/master | 2023-05-07T21:16:49.300771 | 2021-05-28T16:43:06 | 2021-05-28T16:43:06 | 371,759,672 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | import tensorflow as tf
def main():
    """CLI entry point: classify one video directory with the saved
    'unusable' model and print the usable-class probability."""
    # Require a GPU and avoid grabbing all of its memory up front.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    assert len(gpus) > 0, "Not enough GPU hardware devices available"
    tf.config.experimental.set_memory_growth(gpus[0], True)

    import os, sys
    import argparse

    # Make the repository root importable so the 'components' package resolves.
    repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
    if repo_root not in sys.path:
        sys.path.append(repo_root)

    from components.common import data_preprocessor_lib
    from components.unusable_model import my_model as Model_class

    arg_parser = argparse.ArgumentParser(description='Path for evaluation data')
    arg_parser.add_argument('directory_path_in')
    arg_parser.add_argument('-p', dest='path_to_model_file',
                            default='save/best_model/unusable_model.hdf5')
    args = arg_parser.parse_args()

    # Load the video frames, restore the model, and run inference.
    preprocessor = data_preprocessor_lib.DataPreprocessor()
    frames = preprocessor.load_video_in_np(args.directory_path_in)
    model = Model_class.MyModel()
    model.load(args.path_to_model_file)
    predictions = model.inference(frames)
    print('Probability of belonging to the class usable: {0:.2f}%'.format(predictions[0]*100))


if __name__ == '__main__':
    main()
"mariya.mytnitskaya@rubius.com"
] | mariya.mytnitskaya@rubius.com |
733336c7a0df9dd8e420d8a5d326083e093bc156 | 4e39dbcd39c746dc661478d601d5e9ae0893b084 | /TensorFlow2/Segmentation/UNet_Medical/utils/cmd_util.py | 6866333185e2c63257c7fcffb178d2aa62a2b951 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | gpauloski/DeepLearningExamples | 2ff368cf0414ad8451a85465f023a94d1a5753f9 | 81178d2aa6e6eaa88c40727276601b52739ba408 | refs/heads/master | 2023-02-03T13:33:41.822429 | 2020-12-14T16:52:31 | 2020-12-14T16:52:31 | 254,721,527 | 2 | 0 | null | 2020-04-10T19:42:36 | 2020-04-10T19:42:35 | null | UTF-8 | Python | false | false | 4,634 | py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line argument parsing"""
import argparse
from munch import Munch
PARSER = argparse.ArgumentParser(description="UNet-medical")
PARSER.add_argument('--exec_mode',
choices=['train', 'train_and_predict', 'predict', 'evaluate', 'train_and_evaluate'],
type=str,
default='train_and_evaluate',
help="""Execution mode of running the model""")
PARSER.add_argument('--model_dir',
type=str,
default='/results',
help="""Output directory for information related to the model""")
PARSER.add_argument('--data_dir',
type=str,
required=True,
help="""Input directory containing the dataset for training the model""")
PARSER.add_argument('--log_dir',
type=str,
default=None,
help="""Output directory for training logs""")
PARSER.add_argument('--batch_size',
type=int,
default=1,
help="""Size of each minibatch per GPU""")
PARSER.add_argument('--learning_rate',
type=float,
default=0.0001,
help="""Learning rate coefficient for AdamOptimizer""")
PARSER.add_argument('--crossvalidation_idx',
type=int,
default=None,
help="""Chosen fold for cross-validation. Use None to disable cross-validation""")
PARSER.add_argument('--max_steps',
type=int,
default=1000,
help="""Maximum number of steps (batches) used for training""")
PARSER.add_argument('--weight_decay',
type=float,
default=0.0005,
help="""Weight decay coefficient""")
PARSER.add_argument('--log_every',
type=int,
default=100,
help="""Log performance every n steps""")
PARSER.add_argument('--warmup_steps',
type=int,
default=200,
help="""Number of warmup steps""")
PARSER.add_argument('--seed',
type=int,
default=0,
help="""Random seed""")
PARSER.add_argument('--augment', dest='augment', action='store_true',
help="""Perform data augmentation during training""")
PARSER.add_argument('--no-augment', dest='augment', action='store_false')
PARSER.set_defaults(augment=False)
PARSER.add_argument('--benchmark', dest='benchmark', action='store_true',
help="""Collect performance metrics during training""")
PARSER.add_argument('--no-benchmark', dest='benchmark', action='store_false')
PARSER.set_defaults(augment=False)
PARSER.add_argument('--use_amp', dest='use_amp', action='store_true',
help="""Train using TF-AMP""")
PARSER.set_defaults(use_amp=False)
PARSER.add_argument('--use_xla', dest='use_xla', action='store_true',
help="""Train using XLA""")
PARSER.set_defaults(use_amp=False)
PARSER.add_argument('--use_trt', dest='use_trt', action='store_true',
help="""Use TF-TRT""")
PARSER.set_defaults(use_trt=False)
def _cmd_params(flags):
return Munch({
'exec_mode': flags.exec_mode,
'model_dir': flags.model_dir,
'data_dir': flags.data_dir,
'log_dir': flags.log_dir,
'batch_size': flags.batch_size,
'learning_rate': flags.learning_rate,
'crossvalidation_idx': flags.crossvalidation_idx,
'max_steps': flags.max_steps,
'weight_decay': flags.weight_decay,
'log_every': flags.log_every,
'warmup_steps': flags.warmup_steps,
'augment': flags.augment,
'benchmark': flags.benchmark,
'seed': flags.seed,
'use_amp': flags.use_amp,
'use_trt': flags.use_trt,
'use_xla': flags.use_xla,
})
| [
"pstrzelczyk@nvidia.com"
] | pstrzelczyk@nvidia.com |
989c322db653f0c43afcb8cb9d00fbe13c961e15 | 65156c80527fb8543d5b17ac6e548f8d0d6d391c | /github_wh/urls.py | b4b2d0aa31da3f40072ceb6a7a112bea3cf29b4f | [] | no_license | ArtemAAA/github-wh | 3abee596f5b932ed39008d19e041b0fe6880b973 | dd2690792384bd44c04b5f9acc9cda09f36c1242 | refs/heads/master | 2022-11-07T04:36:15.803505 | 2019-07-12T11:28:01 | 2019-07-12T11:28:01 | 196,535,953 | 0 | 0 | null | 2022-11-04T19:39:50 | 2019-07-12T08:00:47 | Python | UTF-8 | Python | false | false | 217 | py | from django.contrib import admin
from django.urls import include, path
# Route table: Django admin, the REST API app, and the webhooks app
# (which also serves the site root).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include('apps.api.urls')),
    path('', include('apps.webhooks.urls')),
]
| [
"artemkozlovets@gmail.com"
] | artemkozlovets@gmail.com |
e3132af7574a4af4aa2f40ac4284b1a802d240a9 | f7b7412991b101d0d7ea0d2321047dceee5b8370 | /options/option_data_tool.py | 66024cd86c99fe074ba70337da6f0de71bd995b6 | [] | no_license | spelee/Options | 5d9860b1878d3d1b79daf266c60d1a147926f950 | 3449bc295edb14c1bccae44e7693628eaa7dacbf | refs/heads/master | 2021-04-12T04:30:15.961163 | 2018-05-02T20:57:15 | 2018-05-02T20:57:15 | 125,902,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,968 | py | import blpapi
class BLPRefSession():
    """Reference-data (//blp/refdata) session wrapper for the Bloomberg API.

    XXX need to later decide how to create access to bbrg data
    Do we want single session that is on? Possible multiple session instantiations?
    redo for use as context manager?
    """

    def __init__(self):
        # Fill SessionOptions: connect to a local Bloomberg endpoint on the
        # default API port.
        sessionOptions = blpapi.SessionOptions()
        sessionOptions.setServerHost("localhost")
        sessionOptions.setServerPort(8194)
        # Create a Session (not started yet -- see start()).
        self.session = blpapi.Session(sessionOptions)

    def start(self):
        # Start a Session. On failure we only print and return, leaving
        # self.refDataService unset (get_price would then raise AttributeError).
        if not self.session.start():
            print("Failed to start session.")
            return
        if not self.session.openService("//blp/refdata"):
            print("Failed to open //blp/refdata")
            return
        self.refDataService = self.session.getService("//blp/refdata")

    def get_price(self, ticker):
        """Pass an iterable of bloomberg tickers

        Requests PX_LAST for each ticker; results are printed, not returned
        (the method always returns None).
        """
        request = self.refDataService.createRequest("ReferenceDataRequest")
        # append securities to request
        for t in ticker:
            print("Ticker:", t)
            request.append("securities", t)
        # append fields to request
        request.append("fields", "PX_LAST")
        #request.append("fields", "DS002")
        print("Sending Request:", request)
        self.session.sendRequest(request)
        # Process received events until the final RESPONSE event arrives.
        while(True):
            # We provide timeout to give the chance to Ctrl+C handling:
            ev = self.session.nextEvent(500)
            for msg in ev:
                print("Message...")
                print("--- correlationIds")
                print(msg.correlationIds())
                print("--- asElement")
                print(msg.asElement())
                print("--- element name")
                print(msg.asElement().name())
                print("--- numElements")
                print(msg.numElements())
                print("--- messageType")
                print(msg.messageType())
                print("---")
                print(msg)
            # Response completely received, so we could exit
            if ev.eventType() == blpapi.Event.RESPONSE:
                print("---2 getElement")
                # NOTE(review): 'msg' is the last message from the loop above;
                # this assumes the RESPONSE event carries the securityData
                # element in its final message -- confirm against blpapi docs.
                elist = msg.getElement("securityData")
                for i,e in enumerate(elist.values()):
                    sube = e.getElement("fieldData").getElement("PX_LAST")
                    print("{}-{}".format(i, sube))
                break
        # Stop the session
        #self.session.stop()
def main():
    """Smoke-test driver: start a session and price a few tickers."""
    mysession = BLPRefSession()
    print("here1")
    mysession.start()
    print("here2")
    # NOTE(review): start() is called twice -- looks like leftover debug
    # code; confirm whether the second call is intentional.
    mysession.start()
    print("here3")
    # get_price() has no return statement, so both lines below print "None"
    # after the per-message output.
    print(mysession.get_price(["UUP 05/04/18 C24 Equity"]))
    print(mysession.get_price(["AMZN Equity", "MU Equity"]))


if __name__ == "__main__":
    print("Testing...")
    try:
        main()
    except KeyboardInterrupt:
        print("Ctrl+C pressed. Stopping...")
"spelee@gmail.com"
] | spelee@gmail.com |
d91092559747ba009e3bd6de574cea1337cf31a8 | f3502ff306980f9f9fe3807795881ce740bf98a9 | /venv/bin/easy_install-2.7 | e32914450a347e58ba2c8124daf0a8dc562ce695 | [] | no_license | umroh/FirstWeb | df66207e7e10e3be9f67d294253de6e71d43a8a5 | 087e35c8f1517faf14afeeb1e1228a2bebc05300 | refs/heads/master | 2021-01-10T02:02:29.307254 | 2016-03-18T11:14:25 | 2016-03-18T11:14:25 | 52,944,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | 7 | #!/home/umroh/PycharmProjects/prototypeAsisten/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'distribute==0.6.24','console_scripts','easy_install-2.7'
# Auto-generated setuptools/distribute console-script shim.
# Pin the distribution this entry point was generated against.
__requires__ = 'distribute==0.6.24'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Resolve the 'easy_install-2.7' console_scripts entry point from the
    # pinned distribution, run it, and exit with its return value.
    sys.exit(
        load_entry_point('distribute==0.6.24', 'console_scripts', 'easy_install-2.7')()
    )
| [
"umroh.machfudza@ui.ac.id"
] | umroh.machfudza@ui.ac.id |
52bd5b80c303f7ec03c6a84634f9654784e1fe1c | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/grib2/modelName_def.py | ca5790b97b3bf22a70902abdc87628726645d7a4 | [
"Apache-2.0"
] | permissive | ecmwf/pyeccodes | b1f121dbddf68d176a03805ed5144ba0b37ac211 | dce2c72d3adcc0cb801731366be53327ce13a00b | refs/heads/master | 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,990 | py | import pyeccodes.accessors as _
def load(h):

    def wrapped(h):
        """Map GRIB header values (originating centre, generating process,
        sub-centre) to a COSMO model name, or None when no rule matches."""
        centre = h.get_l('originatingCentre')

        # A few centres identify the model on their own.
        by_centre = {
            242: 'cosmo-romania',
            220: 'cosmo-poland',
            96: 'cosmo-greece',
        }
        if centre in by_centre:
            return by_centre[centre]

        # Otherwise the generating process identifier narrows it down.
        process = h.get_l('generatingProcessIdentifier')
        by_centre_and_process = {
            (76, 235): 'cosmo_ru-eps',
            (76, 135): 'cosmo_ru',
        }
        for pid in (131, 46, 42, 38, 34, 32, 31):
            by_centre_and_process[(200, pid)] = 'cosmo-i7'
        for pid in (148, 144, 139, 36):
            by_centre_and_process[(200, pid)] = 'cosmo-i2'
        match = by_centre_and_process.get((centre, process))
        if match is not None:
            return match

        # Generic COSMO marker: sub-centre 250 or originating centre 250.
        if h.get_l('subCentre') == 250 or centre == 250:
            return 'cosmo'
        return None

    return wrapped
| [
"baudouin.raoult@ecmwf.int"
] | baudouin.raoult@ecmwf.int |
06648aa873d47bf1a3429114bfc2a4d5585aa1c1 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_20914.py | 3c20fbf20829f9b981a226ccaa46c39051a43f32 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | # py.test Tracebacks: Highlight my code, fold frames of framework
--tb=short
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
e9698f717dafb868d4d636d00100df973ead155f | 23c8bbd92b7d95055d86b024ff5b00379a7eacea | /core/__init__.py | deb3a1e972a1be849572c12a2eaa172469d3599c | [
"MIT"
] | permissive | bhok/kundouzhishou | b2f867ed545748ad2ab16f53cfeeb3b3684777b4 | 0c9d41e89e51e2217fdbee30cc3494f4ab643010 | refs/heads/master | 2020-12-07T15:20:40.374020 | 2016-07-30T11:29:42 | 2016-07-30T11:29:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | import conf
import core
from yunbi_wrapper import yunbi_wrapper
from poloniex_wrapper import poloniex_wrapper
from bittrex_wrapper import bittrex_wrapper
from exchange_pair import exchange_pair
from exchange.yunbi import yunbi
from exchange.poloniex import poloniex
from exchange.bittrex import bittrex
| [
"kundouzhishou@gmail.com"
] | kundouzhishou@gmail.com |
11d50c1286a304fed91b138875020fb4bd18920c | c81b633c452616636120daba9ef3fa9a2b2640b3 | /Class10/example_5_tdd.py | 7b14d71ab2eb2ffcea775fc59ec4134f947d258d | [] | no_license | formigaVie/SNWD_Works_201711 | ba3dca2ef4cf74166b8a5c7c804ea01ccc866876 | 16ec6c169a5828cadc515b7612dbfd8638ba7224 | refs/heads/master | 2021-09-07T03:41:36.674833 | 2018-02-16T18:58:47 | 2018-02-16T18:58:47 | 110,582,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Write a function that returns the square of any number
def square_number(x):
    """Return the square of *x*."""
    return x * x
def check_square_number():
    """Sanity-check square_number against known results.

    Raises AssertionError on the first mismatch; returns None on success.
    """
    for value, expected in ((9, 81), (-3, 9)):
        assert square_number(value) == expected
if __name__ == '__main__':
    # Run the self-test; any failure raises AssertionError before the print.
    check_square_number()
    print "Passed test completed"
"manfredg30@gmail.com"
] | manfredg30@gmail.com |
f294ff625480eee76eba21c36fc1128186724cee | 3c5fc55c8a19f13448fe83061131b761e586e650 | /wishImage/wish_image.py | 4daa23a2acd455b282fe045cc62db0bf4b183ddf | [] | no_license | psstores/E-business | bc2c8e0fd97444688532634b5570965a9e7f7937 | c5890c4b10fd19b73abcc1f2a4747b1c594e5b4a | refs/heads/master | 2021-01-10T04:45:56.105340 | 2016-03-05T08:53:45 | 2016-03-05T08:53:45 | 47,673,252 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | #encoding=utf-8
import re
import os
from os import getcwd
import urllib
from bs4 import BeautifulSoup
import time
# url='https://www.wish.com/search/scooter#cid=55ff8599e768aa10f8c45af7'
# Parse a previously saved search-results page from disk (not fetched live).
f=open('temp.html','r')
html=f.read()
soup=BeautifulSoup(html,'html.parser',from_encoding='utf-8')
# Every product thumbnail sits in a div whose class contains "picture-wrapper".
g_data=soup.find_all('div',{'class':re.compile("picture-wrapper")})
# download the images (was pinyin: "tupianxiazai")
f_ex=r'.jpg'
f_init='0.jpg'
i=0
t_path=r'\images'
##for item in g_data:
##    link=item.find('img').get('src')
##    image_url=link.replace('-small.jpg','.jpg').replace('-tiny','.jpg')
##    print image_url
# Create the .\images output directory if needed, then work inside it.
# (The Chinese messages below say "this directory already exists" /
# "creating directory path".)
if os.path.exists(getcwd()+t_path):
    print u'此目录已经存在>>>>>>>>>>>>>>>>.'
    pass
else:
    print u'创建目录路径'
    os.mkdir(getcwd()+t_path)
mypath=getcwd()+t_path
os.chdir(mypath)
# Save each image as 0.jpg, 1.jpg, ... -- the Chinese message prints
# "downloading image number N".
for item in g_data:
    link=item.find('img').get('src')
    # Strip the thumbnail suffixes from the URL.
    # NOTE(review): unlike the commented-out variant above, this removes the
    # ".jpg" extension from the URL entirely -- confirm the server accepts it.
    image_url=link.replace('small.jpg','').replace('tiny','')
    print image_url
    urllib.urlretrieve(image_url,f_init)
    i=i+1
    print u'正在下载第'+str(i)+u'张图片'
##    time.sleep()
    print 'time sleep for 3 sec'
    f_init=str(i)+f_ex
os.chdir(os.path.pardir)
print 'Download finished'
| [
"2669578421@qq.com"
] | 2669578421@qq.com |
e2df814ae7681abe960261c59ca05a1857aa0349 | c98a98486663b2fd4654e9d0b80b57776f6b4db8 | /python/src/main/python/Q069.py | 7976fb172cd24039cfdc926afdd1d9fb92414a00 | [] | no_license | renkeji/leetcode | 6ae31d63cd4819ebd2a69be0665d04907fc8cc3c | 1d40d357ff20a0560656727a07bf3e1c00dc4cb8 | refs/heads/master | 2020-05-22T04:24:48.263249 | 2017-05-21T22:26:05 | 2017-05-21T22:26:05 | 41,816,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | from src.main.python.Solution import Solution
# Implement int sqrt(int x).
#
# Compute and return the square root of x.
class Q069(Solution):
    def mySqrt(self, x):
        """
        Return floor(sqrt(x)) using integer binary search.

        :type x: int
        :rtype: int

        Fix: the previous float bisection with a 1e-6 epsilon could loop
        forever -- for large x no double-precision midpoint squares to
        within 1e-6 of x, so the termination condition was unreachable.
        Integer arithmetic has no such precision limit.
        """
        if x < 2:
            return x
        # For x >= 2, floor(sqrt(x)) is at most x // 2.
        lo, hi = 1, x // 2
        while lo <= hi:
            mid = (lo + hi) // 2
            squared = mid * mid
            if squared == x:
                return mid
            if squared < x:
                lo = mid + 1  # answer is strictly above mid
            else:
                hi = mid - 1  # answer is strictly below mid
        # hi is the largest value whose square does not exceed x.
        return hi
| [
"kren@apple.com"
] | kren@apple.com |
303bdc24b9ea78a05b85cec43ce43fdde458378f | 3eb4d64a8bb0bc240a2ef189724f4d51b5275eac | /heltour/tournament/migrations/0056_auto_20160810_0204.py | e08071bf6059ce3ea2af6bcb2b399bc27522b344 | [
"MIT"
] | permissive | brucemubayiwa/heltour | c01cc88be7f86dce8246f619d7aa2da37e0e0ac2 | fa4e9b06343acaf6a8a99337860e1ad433e68f6b | refs/heads/master | 2021-01-23T19:59:04.099215 | 2017-09-06T03:34:31 | 2017-09-06T03:34:31 | 102,840,526 | 1 | 0 | null | 2017-09-08T08:53:30 | 2017-09-08T08:53:30 | null | UTF-8 | Python | false | false | 850 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-10 02:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds League.competitor_type and
    League.pairing_type as choice-constrained CharField columns."""

    dependencies = [
        ('tournament', '0055_auto_20160809_2228'),
    ]

    operations = [
        migrations.AddField(
            model_name='league',
            name='competitor_type',
            field=models.CharField(choices=[('team', 'Team'), ('individual', 'Individual')], default='team', max_length=32),
            # the default only back-fills existing rows during the migration
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='league',
            name='pairing_type',
            field=models.CharField(choices=[('swiss-dutch', 'Swiss Tournament: Dutch Algorithm')], default='swiss-dutch', max_length=32),
            preserve_default=False,
        ),
    ]
| [
"lakin@structuredabstraction.com"
] | lakin@structuredabstraction.com |
467db476e9ed6f07be6d963d9636a39636293a3c | 172c7d494865f85ff598c0a7cff4628fcb6477e9 | /milk2.py | 2d695b892024e725b68a3f070c200f68ba1f9dee | [
"Apache-2.0"
] | permissive | dazer-chen/usaco | f9ba5265c959a5b6d0a826249fd80fd8a198223d | 82673a2a235e7393b1f8f610925c1063149176dd | refs/heads/master | 2020-08-06T00:23:09.121745 | 2019-10-04T07:21:14 | 2019-10-04T07:21:14 | 212,769,605 | 1 | 0 | Apache-2.0 | 2019-10-04T08:30:26 | 2019-10-04T08:30:24 | null | UTF-8 | Python | false | false | 1,269 | py | """
ID: tony_hu1
PROG: milk2
LANG: PYTHON3
"""
# Read the USACO "milk2" input: the first line holds the number of
# intervals, each following line one "start end" milking interval.
a = []
m=[]
total = []
# NOTE(review): 'filename' is actually the open file object, not a name.
with open('milk2.in') as filename:
    for line in filename:
        a.append(line.rstrip())
num_cows = int(a[0])
# Parse each interval line into a [start, end] pair appended to 'total'.
for i in range(num_cows):
    b = a[i+1].split(' ')
    m.append(int(b[0]))
    m.append(int(b[1]))
    total.append(m)
    m = []
time = []
def is_sorted(record):
    """Return True when every interval ends strictly before the next one
    begins, i.e. the list holds disjoint, non-touching intervals in order."""
    return all(later[0] > earlier[1] for earlier, later in zip(record, record[1:]))
# Merge the milking intervals into a disjoint, sorted set of busy periods,
# then report the longest busy stretch and the longest idle gap.
total.sort()
# Repeatedly sweep the list, merging neighbours that touch or overlap,
# until is_sorted() confirms every interval is disjoint.
while not is_sorted(total):
    time = [[0, 0]]  # sentinel start interval, removed after the sweep
    for i in range(len(total)):
        start = total[i][0]
        cur_start = time[-1][0]
        cur_end = time[-1][1]
        # Does this interval begin inside the interval being built?
        if cur_start <= start <= cur_end:
            time[-1] = [cur_start, max(total[i][1], cur_end)]
        else:
            time.append(total[i])
    if time[0] == [0, 0]:
        del time[0]
    total = time
# Longest gap with no cow being milked (idle time between busy periods).
no_cows = 0
for i in range(len(total) - 1):
    no_cows = max(no_cows, total[i + 1][0] - total[i][1])
# Longest continuous busy period.
cows = 0
for interval in total:
    cows = max(cows, interval[1] - interval[0])
# Write "longest_busy longest_idle". Fix: use 'with' so the output file is
# flushed and closed (the original leaked the handle, risking a lost write).
with open('milk2.out', 'w') as fout:
    fout.write(str(cows) + ' ' + str(no_cows) + '\n')
"tony@tonys-MacBook-Air.local"
] | tony@tonys-MacBook-Air.local |
3bd95a3b443aa1ebb62757343cb66710a7f88821 | fac77900129f3e11b2b72cd65a4f9e088aaa8bbc | /PythonExercicios/ex112/utilidadesCeV/moeda/__init__.py | 49b54b5b8ecd3111fc70113cce1a95716c8da5d3 | [
"MIT"
] | permissive | Lucas-ns/Python-3-Curso-Em-Video | ff01cc35b383b7c548e4aa1e92e6ed60bad25079 | f6d338fffd7a4606d34fab09634eea0fe4b3dfb3 | refs/heads/main | 2023-03-18T09:02:49.685841 | 2021-03-16T20:45:14 | 2021-03-16T20:45:14 | 348,480,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | def aumentar(preço=0, taxa=0, formato=False):
"""
-> Calcula o aumento de um determinado preço,
retornando o resultado com ou sem formatação.
:param preço: o preço que se quer reajustar.
:param taxa: qual é a porcentagem do aumento.
:param formato: quer a saída formatada ou não?
:return: o valor reajustado, com ou sem formato.
"""
res = preço + (preço * (taxa/100))
return res if formato is False else moeda(res)
def diminuir(preço=0, taxa=0, formato=False):
res = preço - (preço * (taxa / 100))
return res if formato is False else moeda(res)
def dobro(preço=0, formato=False):
res = preço * 2
return res if formato is False else moeda(res)
def metade(preço = 0, formato=False):
res = preço / 2
return res if formato is False else moeda(res)
def moeda(preço=0, moeda='R$'):
return f'{moeda}{preço:>.2f}'.replace('.', ',')
def resumo(preço=0, aumento=10, redução=5):
print('-' * 30)
print(f'{"RESUMO DO VALOR":^30}')
print('-' * 30)
print(f'Preço analisado: \t{moeda(preço)}')
print(f'Dobro do preço: \t{dobro(preço, True)}')
print(f'Metade do preço: \t{metade(preço, True)}')
print(f'{aumento}% de aumento: \t{aumentar(preço, aumento, True)}')
print(f'{redução}% de redução: \t{diminuir(preço, redução, True)}')
print('-' * 30)
| [
"nascimentolucas786@gmail.com"
] | nascimentolucas786@gmail.com |
4bdb1d41c5ecb5a22515bf4eef03d89fc2731ce1 | 40f89ee8843b10928d1d2147c7368d498fe931aa | /List Exercise 5.py | d6a9e3b8c5f3e7a120c11e1a693163e28b5ab40b | [] | no_license | the-llama-codes16/the-llama-codes-Python | 56e78b084f7d01f3c60fe94570453ad6d87c0cc6 | a0a48d108adae76fd58509d245214a10973794cf | refs/heads/main | 2023-05-30T08:56:10.807303 | 2021-06-02T14:33:22 | 2021-06-02T14:33:22 | 367,573,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | fname=input('Enter file name:')
fhand=open(fname)
count=0
for line in fhand:
line=line.rstrip()
if line.startswith('From '):
count=count+1
wordlista=line.split()
print(wordlista[1])
else:
continue
print('There were',count,'lines in the file with From as the first word.')
| [
"noreply@github.com"
] | noreply@github.com |
0b708efca3d4ee6d405876434f35114d143c360c | 37b7c4e552e72951d83ab4bce60edd6a12e3039d | /scraper.py | 9900c568f0ced099c6d71206a95d8871d9638bae | [] | no_license | mjrice04/mass_audobon_scraper | 5f8222ef2ed1601cf9f1cb1da4fb3ce2ee2a945b | 3b0daa68cd0c74f068848d26176af46106bb634e | refs/heads/master | 2020-09-16T18:50:50.454309 | 2020-01-06T04:44:46 | 2020-01-06T04:44:46 | 223,857,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,186 | py | """
Requirements Take 1
Takes in list of mass audobon locations
Able to scrape the events coming up in the month
Able to tune parameters of search
Able to email this list to my email
What I want to get from this project
Practice Web Scraping
Learning to set up a tool to send emails to me (could be a good reusable service
Practice working with click module
Setting up Server to run jobs on a schedule
"""
import requests
from lxml import html
from typing import List
import sys
class AudobonScraper:
    """
    Web scraper for the Mass Audubon program-catalog events page.
    """

    def __init__(self, url: str):
        # `url` is the query/path suffix appended to base_url for searches.
        self.base_url = 'https://www.massaudubon.org/program-catalog/results'
        self.url = url

    def clean_raw_xpath(self, result_list: List[str], url=False):
        """
        Normalise whitespace in raw xpath text results.
        :param result_list: list of xpath result strings
        :param url: when True, prefix each cleaned item with base_url
        :return: list of cleaned strings (absolute URLs when url=True)
        """
        # ' '.join(item.split()) collapses runs of whitespace to single
        # spaces and strips both ends (the old ''.join was a no-op on str).
        clean_data = [' '.join(item.split()) for item in result_list]
        if url:
            clean_data = [f"{self.base_url}{u}" for u in clean_data]
        return clean_data

    def parser(self):
        """
        Fetch the results page and extract event fields with xpath.
        Returns six parallel lists: dates, times, groups, names, links,
        locations (links are absolute URLs).
        """
        full_url = f"{self.base_url}{self.url}"
        # Fix: give the request a timeout so a dead server cannot hang the
        # scraper forever.
        page = requests.get(full_url, timeout=30)
        doc = html.fromstring(page.content)
        xpath_event_date = ('//div[@class="short-results-program-listings-divs"]'
                            '/div[@class="next-meeting-date-divs"]/text()')
        xpath_event_time = ('//div[@class="short-results-program-listings-divs"]'
                            '/div[@class="next-meeting-time-divs"]/text()')
        xpath_event_group = ('//div[@class="audience-search-form-divs"]'
                             '/div[@class="audience-indicator-divs"]/text()')
        xpath_event_link = ('//div[@class="short-results-program-listings-divs"]'
                            '/div[@class="attribute-title program-title-and-location-divs"]//a/@href')
        xpath_event_name = ('//div[@class="short-results-program-listings-divs"]'
                            '/div[@class="attribute-title program-title-and-location-divs"]//a/text()')
        xpath_event_location = '//div[@class="location-official-name-divs"]/text()'
        raw_list = [xpath_event_date, xpath_event_time, xpath_event_group, xpath_event_name,
                    xpath_event_link, xpath_event_location]
        clean_events = []
        for item in raw_list:
            raw_xpath = doc.xpath(item)
            # Only the href results get turned into absolute URLs.
            if item == xpath_event_link:
                event = self.clean_raw_xpath(raw_xpath, url=True)
            else:
                event = self.clean_raw_xpath(raw_xpath)
            clean_events.append(event)
        return clean_events

    def data_handler(self, clean_events):
        """
        Transpose the parallel field lists into one row per event.
        :param clean_events: list of equal-length field lists
        :return: list of [date, time, group, name, link, location] rows
        """
        if not clean_events:
            # Fix: no fields at all used to raise IndexError below.
            return []
        lengths = [len(x) for x in clean_events]
        if any(n != lengths[0] for n in lengths):
            # Unequal field lists mean the site layout changed and the
            # xpaths above are stale.
            sys.exit("Scraper failed. Please look into parser script")
        # Fix: zip(*...) replaces six hard-coded index appends -- it pairs
        # the i-th entry of every field list into one event row.
        return [list(row) for row in zip(*clean_events)]

    def run(self):
        """
        Run the parser and return the cleaned event rows.
        """
        clean_list = self.parser()
        return self.data_handler(clean_list)
| [
"xricexx77@gmail.com"
] | xricexx77@gmail.com |
fbfc207ef43a7797ae51a3f77a2080848f479024 | d94be223f733daa58ce03f6f2dd701c55355f044 | /docs/data/new_east_st_louis-3.py | 7270fd573042b368984fc13a16e5220c497a576b | [] | no_license | emirdemirel/JAAH | 7bb4f9c2a434e1df34d99596dd294b7c96836bfe | 8c065c3b043ad7ac95241c242bb468fe4c731ec7 | refs/heads/master | 2023-02-10T14:10:52.755206 | 2021-01-07T23:11:02 | 2021-01-07T23:11:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | import siteUtils
siteUtils.show5HexagramsForFileList(['../../../annotations/new_east_st_louis.json']) | [
"seva@ringrows.ru"
] | seva@ringrows.ru |
01aaab4806daf83624fce5a5d71e77ac84e3cb95 | 714983fc24c6befe80d426dd94134d09ad2cbdfb | /env/lib/python3.6/site-packages/RestAuth/Services/migrations/0004_delete_old_m2m.py | 31494a3ab34e3a19585de405f5ad81cb7bb1f511 | [] | no_license | sachinlokesh05/login-registration-forgotpassword-and-resetpassword-using-django-rest-framework- | 486354ffb3a397c79afc6cbb290ab1cd637f50ac | 60769f6b4965836b2220878cfa2e1bc403d8f8a3 | refs/heads/master | 2023-01-28T22:19:13.483527 | 2020-01-28T14:07:53 | 2020-01-28T14:07:53 | 233,223,694 | 3 | 0 | null | 2023-01-07T22:10:06 | 2020-01-11T11:49:44 | Python | UTF-8 | Python | false | false | 4,682 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field hosts on 'Service'
db.delete_table('Services_service_hosts')
def backwards(self, orm):
# Adding M2M table for field hosts on 'Service'
db.create_table('Services_service_hosts', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('service', models.ForeignKey(orm['Services.service'], null=False)),
('serviceaddress', models.ForeignKey(orm['Services.serviceaddress'], null=False))
))
db.create_unique('Services_service_hosts', ['service_id', 'serviceaddress_id'])
models = {
'Services.service': {
'Meta': {'object_name': 'Service', '_ormbases': ['auth.User']},
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'Services.serviceaddress': {
'Meta': {'object_name': 'ServiceAddress'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '39'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'services': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'hosts'", 'symmetrical': 'False', 'to': "orm['Services.Service']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['Services'] | [
"sachin.beee.15@acharya.ac.in"
] | sachin.beee.15@acharya.ac.in |
4a259a11584bf85810a4da9b13274f6414e5308f | 28483b16e58f04219b9e25640ffbc36360641a0a | /charissa_johnson/belt_reviewer/apps/belt_reviewer/migrations/0002_auto_20170725_1614.py | 0f20ff5c85fe5fbc24b75a96cfb513e70f23956b | [] | no_license | charissayj/python_july_2017 | c69755a4d068440c2799b2b4a37ad15a4fb94a80 | 3939f823646b90b51f5c2d6f64699357728c3ab4 | refs/heads/master | 2020-12-02T06:18:14.106345 | 2017-07-27T20:20:47 | 2017-07-27T20:20:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-25 16:14
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('belt_reviewer', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='book',
old_name='users',
new_name='user',
),
]
| [
"charissa.y.johnson@gmail.com"
] | charissa.y.johnson@gmail.com |
bb5ebaf33900bfcc44fdc19ac42207993daeaa5f | 551d993b15f7e54635cc11d7ed3ee45a2e9aacc6 | /AAE/Tensorflow_implementation/unsupervised/regularized_z/model.py | df4e3fcf6ad90ce669025df91eb33dfbcfbcb10a | [
"MIT"
] | permissive | hendrikTpl/GAN_models | 6185a3c112a8b45205bdd4c556164b6153fbec19 | 8234c7f04be39d20fe09f81511b591deab9152a9 | refs/heads/master | 2021-10-25T16:52:13.239290 | 2019-04-05T15:28:06 | 2019-04-05T15:28:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,844 | py | from component_without_bn import *
class Object:
pass
def build_graph(is_test=False):
# Inputs
images = tf.placeholder(dtype=tf.float32, shape=[None, config.ndim_x])
z_sampler = tf.placeholder(dtype=tf.float32, shape=[None, config.ndim_z])
learning_rate = tf.placeholder(dtype=tf.float32, shape=[])
# Graph
encoder = encoder_x_z
decoder = decoder_z_x
discriminator = discriminator_z
with tf.variable_scope('encoder'):
z_representation = encoder(images)
with tf.variable_scope('decoder'):
reconstruction = decoder(z_representation)
if is_test:
test_handle = Object()
test_handle.x = images
test_handle.z_r = z_representation
test_handle.x_r = reconstruction
return test_handle
probability_fake_sample = discriminator(z_representation)
probability_true_sample = discriminator(z_sampler, reuse=True)
# Loss function
# classification
# 0 -> true sample
# 1 -> generated sample
class_true = tf.ones(shape=(config.batch_size, config.ndim_z / 2), dtype=tf.int32)
class_fake = tf.zeros(shape=(config.batch_size, config.ndim_z / 2), dtype=tf.int32)
loss_discriminator = opt.softmax_cross_entropy(probability_fake_sample, probability_true_sample, class_fake,
class_true)
loss_encoder = opt.softmax_cross_entropy(probability_fake_sample, probability_true_sample,\
class_fake, class_true, for_generator=True)
loss_resconstruction = opt.euclidean_distance(images, reconstruction)
# Variables Collection
variables_encoder = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')
variables_decoder = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='decoder')
variables_discriminator = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
# Optimizer
counter_encoder = tf.Variable(trainable=False, initial_value=0, dtype=tf.float32)
counter_resconstruction = tf.Variable(trainable=False, initial_value=0, dtype=tf.float32)
counter_discriminator = tf.Variable(trainable=False, initial_value=0, dtype=tf.float32)
opt_resconstruction = opt.optimize(loss_resconstruction, variables_decoder + variables_encoder,
optimizer=tf.train.AdamOptimizer if config.optimizer_is_adam is True else tf.train.RMSPropOptimizer,
learning_rate=learning_rate, global_step=counter_resconstruction
)
opt_discriminator = opt.optimize(config.scale_ratio * loss_discriminator, variables_discriminator,
optimizer=tf.train.AdamOptimizer if config.optimizer_is_adam is True else tf.train.RMSPropOptimizer,
learning_rate=learning_rate, global_step=counter_discriminator
)
opt_encoder = opt.optimize(config.scale_ratio * loss_encoder, variables_encoder,
optimizer=tf.train.AdamOptimizer if config.optimizer_is_adam is True else tf.train.RMSPropOptimizer,
learning_rate=learning_rate, global_step=counter_encoder
)
# output what we want
graph_handle = Object()
graph_handle.x = images
graph_handle.z = z_sampler
graph_handle.x_ = reconstruction
graph_handle.z_r = z_representation
graph_handle.opt_r = opt_resconstruction
graph_handle.opt_d = opt_discriminator
graph_handle.opt_e = opt_encoder
graph_handle.loss_d = loss_discriminator
graph_handle.loss_e = loss_encoder
graph_handle.loss_r = loss_resconstruction
graph_handle.lr = learning_rate
return graph_handle
| [
"1019636836@qq.com"
] | 1019636836@qq.com |
4023b90f8758b34748d937ccd2ac854ae94b604a | a83a08f7192f09876b893392faf7f15fb529cd25 | /app/models.py | ba8d9d75e78a663ddae91e2afacf9e1688b2b344 | [] | no_license | jsnyder10/45 | 80d92988f4f6c53b2e2d9ce1cf52223d5d13cf47 | e488ad07c492170311bfac79e740510e17b217ca | refs/heads/master | 2022-09-03T20:02:27.281725 | 2017-07-14T02:56:55 | 2017-07-14T02:56:55 | 96,055,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,173 | py | from hashlib import md5
import re
from app import db
from app import app
from passlib.apps import custom_app_context as pwd_context
import datetime
from dateutil.parser import parse
followers = db.Table(
'followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(32), index=True, unique=True, nullable=False)
manpower_admin = db.Column(db.Boolean, default=False)
mobility_admin = db.Column(db.Boolean, default=False)
password_hash = db.Column(db.String(128))
email = db.Column(db.String(120), index=True, unique=True)
posts = db.relationship('Post', backref='author', lazy='dynamic')
#mobilitys = db.relationship('Mobility', backref='username', lazy='dynamic')
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime)
followed = db.relationship('User',
secondary=followers,
primaryjoin=(followers.c.follower_id == id),
secondaryjoin=(followers.c.followed_id == id),
backref=db.backref('followers', lazy='dynamic'),
lazy='dynamic')
def hash_password(self, password):
self.password_hash=pwd_context.encrypt(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
@staticmethod
def make_valid_username(username):
return re.sub('[^a-zA-Z0-9_\.]', '', username)
@staticmethod
def make_unique_username(username):
if User.query.filter_by(username=username).first() is None:
return username
version = 2
while True:
new_username = username + str(version)
if User.query.filter_by(username=new_username).first() is None:
break
version += 1
return new_username
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return True
@property
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
def avatar(self, size):
return 'http://www.gravatar.com/avatar/%s?d=mm&s=%d' % \
(md5(self.username.encode('utf-8')).hexdigest(), size)
def follow(self, user):
if not self.is_following(user):
self.followed.append(user)
return self
def unfollow(self, user):
if self.is_following(user):
self.followed.remove(user)
return self
def is_following(self, user):
return self.followed.filter(
followers.c.followed_id == user.id).count() > 0
def followed_posts(self):
return Post.query.join(
followers, (followers.c.followed_id == Post.user_id)).filter(
followers.c.follower_id == self.id).order_by(
Post.timestamp.desc())
def __repr__(self): # pragma: no cover
return '<user> %r' % (self.username)
class Mobility(db.Model):
username=db.Column(db.String, primary_key=True)
cc_letter=db.Column(db.String(45))
drug_pref=db.Column(db.String(45))
afsc=db.Column(db.String(5))
qnft=db.Column(db.String(45))
edi= db.Column(db.Integer)
line_badge= db.Column(db.Integer)
dog_tags= db.Column(db.Boolean)
pt_excellence= db.Column(db.Integer)
pt_score= db.Column(db.Numeric)
pt_test= db.Column(db.DateTime)
#Manual DateTimes
cac_expiration= db.Column(db.DateTime)
gtc_expiration= db.Column(db.DateTime)
bus_license= db.Column(db.DateTime)
mri_hri= db.Column(db.DateTime)
vred= db.Column(db.DateTime)
form_2760= db.Column(db.DateTime)
small_arms= db.Column(db.DateTime)
security_clearance= db.Column(db.DateTime)
form_55= db.Column(db.DateTime)
green_dot= db.Column(db.DateTime)
#CBT's autopopulate
sabc_hands_on_cbt= db.Column(db.DateTime)
cbrn_cbt= db.Column(db.DateTime)
sabc_cbt= db.Column(db.DateTime)
dod_iaa_cyber_cbt= db.Column(db.DateTime)
force_protection_cbt= db.Column(db.DateTime)
human_relations_cbt= db.Column(db.DateTime)
protecting_info_cbt= db.Column(db.DateTime)
af_c_ied_video_cbt= db.Column(db.DateTime)
af_c_ied_awrns_cbt= db.Column(db.DateTime)
af2a_culture_cbt= db.Column(db.DateTime)
biometrics_cbt= db.Column(db.DateTime)
collect_and_report_cbt= db.Column(db.DateTime)
east_cbt= db.Column(db.DateTime)
eor_cbt= db.Column(db.DateTime)
free_ex_of_religion_cbt= db.Column(db.DateTime)
loac_cbt= db.Column(db.DateTime)
mental_health_cbt= db.Column(db.DateTime)
tbi_awareness_cbt= db.Column(db.DateTime)
uscentcom_cult_cbt= db.Column(db.DateTime)
unauthorized_disclosure_cbt= db.Column(db.DateTime)
deriv_class_cbt= db.Column(db.DateTime)
marking_class_info_cbt= db.Column(db.DateTime)
sere_100_cst_cbt= db.Column(db.DateTime)
def add_months(self, dt0, months):
for i in range(months):
dt1 = dt0.replace(day=1)
dt2 = dt1 + datetime.timedelta(days=32)
dt0 = dt2.replace(day=1)
return dt0
def is_expired(self, attr_name, date):
a=Rules.query.filter_by(name=attr_name).first()
if str(type(a)) == "<type \'NoneType\'>":
return True
value=getattr(self, attr_name)
if value != None:
#Rule 1 checks 36 months into future
if a.rule==1:
date=parse(date)
value=self.add_months(value, 36)
if date>value:
return True
#Rule 2 checks 12 months into the future
elif a.rule==2:
date=parse(date)
value=self.add_months(value, 12)
if date>value:
return True
#Rule 3 checks 24 months into the future
elif a.rule==3:
date=parse(date)
value=self.add_months(value,24)
if date>value:
return True
#Rule 4 checks 76 months into the future
elif a.rule==4:
date=parse(date)
value=self.add_months(value,76)
if date>value:
return True
#Rule 5 checks current date
elif a.rule==5:
date=parse(date)
if date>value:
return True
#Rule 6 checks 12 months if pt_score>90 else 6 months
elif a.rule==6:
date=parse(date)
if self.pt_score>=90:
value=self.add_months(value,12)
else:
value=self.add_months(value,6)
if date>value:
return True
return False
def __repr__(self): # pragma: no cover
return '<Mobility %r>' % (self.username)
class Rules(db.Model):
name=db.Column(db.String, primary_key=True)
rule=db.Column(db.Integer, default='0', unique=False)
args=db.Column(db.String)
def __repr__(self): # pragma: no cover
return '<Rules %r>' % (self.name)
class History(db.Model):
id=db.Column(db.Integer, primary_key=True)
name=db.Column(db.String)
date=db.Column(db.DateTime)
table=db.Column(db.String)
column=db.Column(db.String)
valueOld=db.Column(db.String)
valueNew=db.Column(db.String)
def __repr__(self): # pragma: no cover
return '<History %r>' % (self.name)
class Post(db.Model):
__searchable__ = ['body']
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
language = db.Column(db.String(5))
def __repr__(self): # pragma: no cover
return '<Post %r>' % (self.body) | [
"jsnyder10@gmail.com"
] | jsnyder10@gmail.com |
8fbeae9a4326bddee26e1a4de2ade8d305654222 | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW06_20210715223105.py | d97dd858d1598f85d9ebd66b49358181614c0345 | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,070 | py | """
Georgia Institute of Technology - CS1301
HW06 - Text Files & CSV
Collaboration Statement:
"""
#########################################
"""
Function Name: findCuisine()
Parameters: filename (str), cuisine (str)
Returns: list of restaurants (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def findCuisine(filename, cuisine):
file = open(filename,'r')
content = file.readlines()
listOfRestaurants = []
for i in range(len(content)):
if content[i].strip() == cuisine:
listOfRestaurants.append(content[i-1].strip()) #add the name of the restaurant, which is the previous line
file.close()
return listOfRestaurants
"""
Function Name: restaurantFilter()
Parameters: filename (str)
Returns: dictionary that maps cuisine type (str)
to a list of restaurants of the same cuisine type (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def restaurantFilter(filename):
dict = {}
file = open(filename,'r')
content = file.readlines()
cuisines = []
for i in range(1,len(content),4):
line = content[i].strip()
if line not in cuisines:
cuisines.append(line)
for i in range(len(cuisines)):
dict[cuisines[i]] = []
for i in range(0,len(content),4):
line = content[i].strip()
lineBelow = content[i+1].strip()
dict[lineBelow].append(line)
return dict
"""
Function Name: createDirectory()
Parameters: filename (str), output filename (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def createDirectory(filename, outputFilename):
readFile = open(filename, 'r')
writeFile = open(outputFilename, 'w')
content = readFile.readlines()
fastfood = []
sitdown = []
fastfoodcounter = 1
sitdowncouter = 1
for i in range(2,len(content), 4):
restaurant = content[i-2].strip()
cuisine = content[i-1].strip()
group = content[i].strip()
if group == 'Fast Food':
fastfood.append(str(fastfoodcounter) + '. ' + restaurant + ' - ' + cuisine + '\n')
fastfoodcounter += 1
else:
sitdown.append(str(sitdowncouter) + '. ' + restaurant + ' - ' + cuisine)
sitdowncouter += 1
writeFile.write('Restaurant Directory' + '\n')
writeFile.write('Fast Food' + '\n')
writeFile.writelines(fastfood)
writeFile.write('Sit-down' + '\n')
for i in range(len(sitdown)):
if i != len(sitdown)-1:
writeFile.write(sitdown[i] + '\n')
else:
writeFile.write(sitdown[i])
"""
Function Name: extraHours()
Parameters: filename (str), hour (int)
Returns: list of (person, extra money) tuples (tuple)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def extraHours(filename, hour):
overtime = []
file = open(filename, 'r')
header = file.readline()
content = file.readlines()
for i in content:
line = i.strip().split(',')
name = line[0]
wage = int(line[2])
hoursWorked = int(line[4])
if hoursWorked > hour:
compensation = (hoursWorked - hour) * wage
overtime.append((name, compensation))
return overtime
"""
Function Name: seniorStaffAverage()
Parameters: filename (str), year (int)
Returns: average age of senior staff members (float)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def seniorStaffAverage(filename, year):
averageAge = 0.0
employeeCount = 0
file = open(filename, 'r')
header = file.readline()
content = file.readlines()
for i in content:
line = i.strip().split(',')
age = int(line[1])
yearHired = int(line[3])
if yearHired < year:
averageAge += age
employeeCount += 1
averageAge /= employeeCount
return round(averageAge,2)
"""
Function Name: ageDict()
Parameters: filename (str), list of age ranges represented by strings (list)
Returns: dictionary (dict) that maps each age range (str) to a list of employees (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def ageDict(filename, ageRangeList):
employeeAgeDictionary = {}
for i in ageRangeList:
employeeAgeDictionary[i] = []
print(employeeAgeDictionary)
# print(findCuisine('restaurants.txt', 'Mexican'))
# print(restaurantFilter('restaurants.txt'))
# print(createDirectory('restaurants.txt','output.txt'))
# print(extraHours('employees.csv', 40))
# print(seniorStaffAverage('employees.csv', 2019))
print(ageDict('employees.csv'))
| [
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
3a08573df3e117f7abeacc2d96fc32821ca167f3 | 8ac92ae9f84ef349ac66c8e83bcd7952c45bfdf2 | /trainsite/wsgi.py | 0a3c8d71299ad4f087ed105f2c349b69b0b4f0f2 | [] | no_license | zzy19960618/trainsite | 6bc78b706119ea78624f4a0d20df484e73681958 | 0dc8b87c801a756b3433ff6a51b4af363fb97977 | refs/heads/master | 2020-05-23T23:19:03.913892 | 2019-05-16T09:08:05 | 2019-05-16T09:08:05 | 186,992,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
WSGI config for trainsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trainsite.settings')
application = get_wsgi_application()
| [
"zouziyi_9618@163.com"
] | zouziyi_9618@163.com |
4a1913a5680d68adee209c5aa25118387d6527a1 | 35a43fd34bccacf87b6b3c1d56cc4573e75ec744 | /notification/message.py | 0e5a93a8072b3a923d450869130dde04c0503bff | [
"Apache-2.0"
] | permissive | silence99/cigarette | 31782b7de18fa7dc9fc038051a60945dd1e21ff4 | 468ada4f40aeaedf0efa5bbed9eb75f23a825737 | refs/heads/master | 2023-03-17T22:05:25.935560 | 2021-03-08T13:47:29 | 2021-03-08T13:47:29 | 345,304,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,102 | py | from .status import Status
CR = '\r'
CL = '\n'
NewLine = CR + CL
MessageFlags = 'CITI'
Version = '1.0'
MessageType = 'CONFIG'
ContentLength = 'content length'
ContentType = 'content type'
DefaultEncoding = 'utf-8'
Encoding = "encoding"
class Message(object):
def __init__(self):
pass
@classmethod
def Generate(cls, content, headers=None, messageType=MessageType, contentType='text', encoding=DefaultEncoding):
'''
headers encoding with ascii, boday encoding with params
'''
head = []
first = "%s %s %s" % (MessageFlags, Version, messageType)
head.append(first)
encodingExp = "%s: %s" % (Encoding, encoding)
head.append(encodingExp)
contentTypeExp = "%s: %s" % (ContentType, contentType)
head.append(contentTypeExp)
body = None if content is None else content.encode(encoding)
length = 0 if body is None else len(body)
contentLengthExp = "%s: %s" % (ContentLength, length)
head.append(contentLengthExp)
headExp = ""
for h in head:
headExp = "%s%s%s" % (headExp, h, NewLine)
headExp += NewLine
msg = headExp.encode('ascii') + body
return msg
class ResponseMessage:
def __init__(self):
super().__init__()
self.status = Status.UNKNOW
self.content = ''
self.version = '1.0'
self.head = None #bytes - split by \r\n\r\n
self.body = None
self.bodyObj = None
self.contentType = 'text'
self.encoding = 'utf-8'
@classmethod
def GetMessage(cls, response):
msg = ResponseMessage()
# only read first line to check status currently
msg.loadFromBuffer(response)
return msg
@classmethod
def _getStatus(cls, st):
for i in list(Status):
if i.name == st:
return i
return Status.UNKNOW
def loadFromBuffer(self, buffer):
self._split(buffer)
self.loadHead()
self.loadBody()
def _split(self, bytesContent):
if bytesContent is not None:
sp = bytesContent.split(("%s%s" % (NewLine, NewLine)).encode('ascii'))
self.head = sp[0]
self.body = sp[1]
def loadHead(self):
if self.head:
sp = self.head.split(NewLine.encode('ascii'))
statusExp = sp[0]
sts = statusExp.split()
if sts[0] == MessageFlags and sts[1] == Version:
msg.version = Version
msg.status = _getStatus(sts[2].upper())
else:
return None
for line in sp[1:]:
kvs = line.split(':')
if len(kvs) == 2:
self.assignHead(kvs[0], kvs[1])
def assignHead(self, key, v):
if key == Encoding:
self.encoding = v
elif key == ContentType:
self.contentType = v
else:
pass
def loadBody(self):
self.bodyObj = self.body.decode(encoding=self.encoding)
| [
"liuff_yantai@126.com"
] | liuff_yantai@126.com |
cf804c286316be21436f2bc8bac54f7846881266 | d8148d71f69190e2fb82aac0b241485751a803d1 | /metadata/find_nest.py | c772447d667cdbf9f03dc2710a0712de0ebe13b0 | [] | no_license | MTG/acousticbrainz-labs | df23a3af030260da9817c15b03e3d75910b509e7 | dc0a48ac855c3fa50c3b1496e2cd14eb27a81eed | refs/heads/master | 2022-09-08T10:08:30.304372 | 2021-09-02T10:37:24 | 2021-09-02T10:37:24 | 26,653,441 | 27 | 8 | null | 2022-08-23T17:32:55 | 2014-11-14T19:14:11 | Jupyter Notebook | UTF-8 | Python | false | false | 240 | py | import threader
class EchoNest(threader.ComputationThread):
"""
This thread tries to get data from the echnonest
"""
def _calculate(self):
self.data = ''
if __name__ == '__main__':
threader.main(EchoNest)
| [
"alastair@porter.net.nz"
] | alastair@porter.net.nz |
28bea0e23fcc6f2869cb586ae611db56fe7a8c7f | 1154de9658be97b0c6625a6ae03b8bf4c1875a35 | /src/MyNetworks/beta/testST.py | 8955222a39d3a2b3eba6a9b027770fcb6a2af376 | [] | no_license | ghostotof/stage_s6 | 720bd5be7f357bdf6e326f76f11908ab695c6a6b | 30f8948ccaafc85795aaadd12f206aed528ddcf2 | refs/heads/master | 2021-01-19T17:56:45.206288 | 2014-12-11T14:55:53 | 2014-12-11T14:55:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,996 | py | #!/usr/bin/python2
# coding:utf-8
from brian import *
from time import time
import pickle
##############
# PARAMETERS #
##############
min_period = 1 * msecond
basePeriod = 125 * min_period
t_pres = basePeriod * 4
# min_weight = -10.0 * volt
# # min_weight = 0 * volt
# max_weight = 1.0 * volt * 10.0
# inc_weight = max_weight * 0.1
# dec_weight = max_weight * 0.05
# init_weight = ( max_weight - min_weight ) / 2.0
# std_init_weight = min ( (max_weight - init_weight) , (init_weight - min_weight) )
inhib_weight = 1.0 * volt * 50.0
nbN_I = 200
nbN_1 = 200
Vt_1 = 15 * volt
Vr_1 = 0.0 * volt
tau_1 = basePeriod * (2.0/3.0)
refractory_1 = 0.5 * basePeriod
inhibit_refractory_1 = 1.05 * basePeriod
neuron_eqs_1 = Equations ("""
dv/dt = ( - v - inh ) / tau_1 : volt
dinh/dt = - inh / inhibit_refractory_1 : volt
""")
nbN_2 = 100
Vt_2 = 15 * volt
Vr_2 = 0.0 * volt
tau_2 = basePeriod * (2.0/3.0)
refractory_2 = 0.5 * basePeriod
inhibit_refractory_2 = 1.05 * basePeriod
neuron_eqs_2 = Equations ("""
dv/dt = ( - v - inh ) / tau_2 : volt
dinh/dt = - inh / inhibit_refractory_2 : volt
""")
nbN_3 = 2
Vt_3 = 15 * volt
Vr_3 = 0.0 * volt
tau_3 = basePeriod * (2.0/3.0)
refractory_3 = 0.5 * basePeriod
inhibit_refractory_3 = 1.05 * basePeriod
neuron_eqs_3 = Equations ("""
dv/dt = ( - v - inh ) / tau_3 : volt
dinh/dt = - inh / inhibit_refractory_3 : volt
""")
###
spikesTimes = []
with open('spikesTimesT.spt','rb') as file:
depick = pickle.Unpickler(file)
spikesTimes = depick.load()
i = 0
for a,b in spikesTimes:
spikesTimes[i] = (a, b * t_pres)
i += 1
nbImagesH = 486
nbImagesN = 482
nbImages = nbImagesH + nbImagesN
#################
# NEURON GROUPS #
#################
input = SpikeGeneratorGroup(nbN_I, spikesTimes)
couche1 = NeuronGroup(N = nbN_1,
model = neuron_eqs_1,
threshold = Vt_1,
reset = Vr_1,
refractory = refractory_1)
couche2 = NeuronGroup(N = nbN_2,
model = neuron_eqs_2,
threshold = Vt_2,
reset = Vr_2,
refractory = refractory_2)
couche3 = NeuronGroup(N = nbN_3,
model = neuron_eqs_3,
threshold = Vt_3,
reset = Vr_3,
refractory = refractory_3)
############
# SYNAPSES #
############
connection = IdentityConnection(input, couche1, 'v', weight = Vt_1 * 1.05)
c1_c2 = Synapses(couche1, couche2, model = 'w:1', pre = 'v+=w')
c1_c2.load_connectivity('./saveConnec_c2')
wn = []
with open('myWeights_c2', 'rb') as fichier:
mon_depick = pickle.Unpickler(fichier)
wn = mon_depick.load()
for i in xrange(0, len(c1_c2)):
c1_c2.w[i] = wn[i]
c2_c3 = Synapses(couche2, couche3, model = 'w:1', pre = 'v+=w')
c2_c3.load_connectivity('./saveConnec_c3')
wn = []
with open('myWeights_c3', 'rb') as fichier:
mon_depick = pickle.Unpickler(fichier)
wn = mon_depick.load()
for i in xrange(0, len(c2_c3)):
c2_c3.w[i] = wn[i]
##############
# INHIBITION #
##############
# inhib_couche1 = Connection(couche1, couche1, state = 'inh', weight = 0 * volt)
# for i in xrange(2, len(couche1) - 2):
# inhib_couche1[i, i+2] = inhib_weight
# inhib_couche1[i, i-2] = inhib_weight
# inhib_couche2 = Connection(couche2, couche2, state = 'inh', weight = 0 * volt)
# for i in xrange(1, len(couche2) - 1):
# inhib_couche2[i, i+1] = inhib_weight
# inhib_couche2[i, i-1] = inhib_weight
# inhib_couche3 = Connection(couche3, couche3, state = 'inh', weight = 0 * volt)
# inhib_couche3[0,1] = inhib_weight
# inhib_couche3[1,0] = inhib_weight
inhib_loop_1 = Connection(couche2, couche1, state = 'inh', weight = inhib_weight)
inhib_loop_2 = Connection(couche3, couche2, state = 'inh', weight = inhib_weight)
############
# MONITORS #
############
mc1 = SpikeCounter(couche1)
mc2 = SpikeCounter(couche2)
mc3 = SpikeCounter(couche3)
# mv1 = StateMonitor(couche1, 'v', record = True)
# mv2 = StateMonitor(couche2, 'v', record = True)
# mv3 = StateMonitor(couche3, 'v', record = True)
##############
# SIMULATION #
##############
run(nbImagesH * t_pres, report = 'text')
print "Couche 1 :"
for i in xrange(0, nbN_1):
print "Neurone (", i, ") : ", mc1[i]
print ""
print "Couche 2 :"
for i in xrange(0, nbN_2):
print "Neurone (", i, ") : ", mc2[i]
print ""
print "Couche 3 :"
for i in xrange(0, nbN_3):
print "Neurone (", i, ") : ", mc3[i]
###
run(nbImagesN * t_pres, report = 'text')
print ""
print "Couche 1 :"
for i in xrange(0, nbN_1):
print "Neurone (", i, ") : ", mc1[i]
print ""
print "Couche 2 :"
for i in xrange(0, nbN_2):
print "Neurone (", i, ") : ", mc2[i]
print ""
print "Couche 3 :"
for i in xrange(0, nbN_3):
print "Neurone (", i, ") : ", mc3[i]
# figure('Potentiel')
# for i in xrange(0, len(couche2)):
# plot(mv2.times, mv2[i])
# for i in xrange(0, len(couche3)):
# plot(mv3.times, mv3[i])
# show()
| [
"christophe.piton22@gmail.com"
] | christophe.piton22@gmail.com |
f46b7a5482737a9f8febe9dbd264cf281c128fac | 3f06f0c74219805beaca5146934c6b079aeba729 | /1-dive-in-python/hw1/root.py | 27d01911c5baff2234c895dcf9021c187fecd550 | [] | no_license | bolshagin/python-spec | 133b90aad48ca09e2256ea02172e932d8dadfc93 | be573b1164ca2222948369c9041c894d1eb080bd | refs/heads/master | 2022-10-20T11:31:09.511367 | 2020-06-15T17:08:52 | 2020-06-15T17:08:52 | 265,213,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import sys
#a, b, c = 13, 236, -396
#a, b, c = 1, -3, -4
a = int(sys.argv[1])
b = int(sys.argv[2])
c = int(sys.argv[3])
x1 = (-b + (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)
x2 = (-b - (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)
print(int(x1), int(x2), sep='\n')
| [
"bolshagin.nikita@yandex.ru"
] | bolshagin.nikita@yandex.ru |
16653c4264c3452d13d64d7f5be4eaac7e1c0f95 | ebda7a1756f8d9ca89ef6aa9e257d747cbb9186d | /pipelines/templates/pickatlas.py | 22e8943f6e50982f16f40aa91041b986181553e8 | [
"MIT"
] | permissive | villeneuvelab/vlpp | d521ed5f991262575ec2c2cfe41c6e896935568c | 88d3cc43742e594237153c9cdd98efc345836287 | refs/heads/master | 2021-05-14T14:48:47.084329 | 2019-06-11T14:00:38 | 2019-06-11T14:00:38 | 82,604,427 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from vlpp.operation import maskFromAtlas
def main():
output = "${participant}_roi-${refName}${suffix.mask}"
maskFromAtlas("${atlas}", ${indices}, output)
if __name__ == '__main__':
main()
| [
"christophe.bedetti@gmail.com"
] | christophe.bedetti@gmail.com |
118bf383996fd25bef8f492def819d9cbe119194 | a5cdeb1246fbc105fb5d5233a8569dd27093870a | /Lab10/EntryApplicationUnitTests.py | ce07df9dae12abe5fa7eb5c8182ccfa56543e9c5 | [] | no_license | gongyiwen/ECE364 | 58bc571cf1711bf0dc2387a81b8cb2611866f3d0 | 24afe1b144ef02e8463dc23fccc5fa63cdb40cbb | refs/heads/master | 2021-05-04T08:38:04.400833 | 2015-11-18T02:27:23 | 2015-11-18T02:27:23 | 45,637,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,914 | py | import os
import unittest
from PySide.QtTest import *
from PySide.QtCore import *
from EntryApplication import *
singletonApplication = None
singletonForm = None
class EntryApplicationTestSuite(unittest.TestCase):
def setUp(self):
"""
Creates the QApplication singleton instance, if not present.
"""
global singletonApplication, singletonForm
if singletonApplication is None:
singletonApplication = QApplication(sys.argv)
singletonForm = EntryApplication()
self.app = singletonApplication
self.form = singletonForm
# Define a list of text widgets to refer to them all when need.
self.textWidgets = [self.form.txtFirstName, self.form.txtLastName, self.form.txtAddress,
self.form.txtCity, self.form.txtState, self.form.txtZip, self.form.txtEmail]
def tearDown(self):
"""
Remove the running application from the self instance.
"""
del self.app
del self.form
# Clean up the file.
if os.path.exists("target.xml"):
os.remove("target.xml")
def test_ClearForm(self):
formCleared = True
# Populate all entries.
QTest.keyClicks(self.form.txtFirstName, "Sherlock")
QTest.keyClicks(self.form.txtLastName, "Holmes")
QTest.keyClicks(self.form.txtAddress, "1223 End St.")
QTest.keyClicks(self.form.txtCity, "West Lafayette")
QTest.keyClicks(self.form.txtState, "IN")
QTest.keyClicks(self.form.txtZip, "47906")
# Click the button.
QTest.mouseClick(self.form.btnClear, Qt.LeftButton)
# Read the form.
for widget in self.textWidgets:
formCleared &= widget.text() == ""
formCleared &= self.form.lblError.text() == ""
formCleared &= self.form.btnLoad.isEnabled()
formCleared &= not self.form.btnSave.isEnabled()
self.assertEqual(formCleared, True)
def test_LoadValidData(self):
dataCorrect = True
# Load the xml file.
self.form.loadFromXmlFile("test_case_1.xml")
# Check values.
dataCorrect &= self.form.txtFirstName.text() == "George"
dataCorrect &= self.form.txtLastName.text() == "Clooney"
dataCorrect &= self.form.txtAddress.text() == "414 Second St."
dataCorrect &= self.form.txtCity.text() == "Some City"
dataCorrect &= self.form.txtState.text() == "CA"
dataCorrect &= self.form.txtZip.text() == "10001"
dataCorrect &= self.form.txtEmail.text() == "clooney@nowhere.com"
# Check the buttons.
dataCorrect &= self.form.lblError.text() == ""
dataCorrect &= self.form.btnSave.isEnabled()
dataCorrect &= not self.form.btnLoad.isEnabled()
self.assertEqual(dataCorrect, True)
def test_SaveValidDataDirect(self):
# Load the xml file.
self.form.loadFromXmlFile("test_case_1.xml")
# Save without modification.
QTest.mouseClick(self.form.btnSave, Qt.LeftButton)
with open("test_case_1.xml", "r") as xml:
source = xml.read()
with open("target.xml", "r") as xml:
target = xml.read()
self.assertEqual(source, target)
def test_SaveValidDataModified(self):
# Load the xml file.
self.form.loadFromXmlFile("test_case_1.xml")
self.form.txtFirstName.clear()
QTest.keyClicks(self.form.txtFirstName, "Amal")
self.form.txtLastName.clear()
QTest.keyClicks(self.form.txtLastName, "Alamuddin")
self.form.txtAddress.clear()
QTest.keyClicks(self.form.txtAddress, "909 Second St.")
self.form.txtCity.clear()
QTest.keyClicks(self.form.txtCity, "Irvine")
self.form.txtState.clear()
QTest.keyClicks(self.form.txtState, "TX")
self.form.txtZip.clear()
QTest.keyClicks(self.form.txtZip, "56489")
self.form.txtEmail.clear()
QTest.keyClicks(self.form.txtEmail, "amal@hereAndThere.com")
QTest.mouseClick(self.form.btnSave, Qt.LeftButton)
with open("test_case_1_Mod.xml", "r") as xml:
source = xml.read()
with open("target.xml", "r") as xml:
target = xml.read()
self.assertEqual(source, target)
def test_SaveWithEmptyEntries(self):
# Load the xml file.
self.form.loadFromXmlFile("test_case_2.xml")
# Save without modification.
QTest.mouseClick(self.form.btnSave, Qt.LeftButton)
errorShown = self.form.lblError.text() != ""
fileSaved = os.path.exists("target.xml")
self.assertEqual(errorShown and not fileSaved, True)
def test_SaveWithEmptyEntriesPartialFixed(self):
# Load the xml file.
self.form.loadFromXmlFile("test_case_2.xml")
QTest.keyClicks(self.form.txtLastName, "Jackson")
# Try to save.
QTest.mouseClick(self.form.btnSave, Qt.LeftButton)
errorShown = self.form.lblError.text() != ""
fileSaved = os.path.exists("target.xml")
self.assertEqual(errorShown and not fileSaved, True)
def test_SaveWithEmptyEntriesFixed(self):
# Load the xml file.
self.form.loadFromXmlFile("test_case_2.xml")
QTest.keyClicks(self.form.txtLastName, "Jackson")
QTest.keyClicks(self.form.txtCity, "Los Angeles")
# Try to save.
QTest.mouseClick(self.form.btnSave, Qt.LeftButton)
errorShown = self.form.lblError.text() == ""
with open("test_case_2_Mod.xml", "r") as xml:
source = xml.read()
with open("target.xml", "r") as xml:
target = xml.read()
self.assertTrue(errorShown)
self.assertEqual(source, target)
def test_SaveWithInvalidEntriesStateFixed(self):
# Load the xml file.
self.form.loadFromXmlFile("test_case_3.xml")
self.form.txtState.clear()
QTest.keyClicks(self.form.txtState, "NY")
# Try to save.
QTest.mouseClick(self.form.btnSave, Qt.LeftButton)
errorShown = self.form.lblError.text() != ""
fileSaved = os.path.exists("target.xml")
self.assertEqual(errorShown and not fileSaved, True)
def test_SaveWithInvalidEntriesStateAndZipFixed(self):
# Load the xml file.
self.form.loadFromXmlFile("test_case_3.xml")
self.form.txtState.clear()
QTest.keyClicks(self.form.txtState, "NY")
self.form.txtZip.clear()
QTest.keyClicks(self.form.txtState, "20201")
# Try to save.
QTest.mouseClick(self.form.btnSave, Qt.LeftButton)
errorShown = self.form.lblError.text() != ""
fileSaved = os.path.exists("target.xml")
self.assertEqual(errorShown and not fileSaved, True)
def test_SaveWithInvalidEntriesAllFixed(self):
# Load the xml file.
self.form.loadFromXmlFile("test_case_3.xml")
self.form.txtState.clear()
QTest.keyClicks(self.form.txtState, "NY")
self.form.txtZip.clear()
QTest.keyClicks(self.form.txtZip, "20201")
self.form.txtEmail.clear()
QTest.keyClicks(self.form.txtEmail, "someone@famous.com")
# Try to save.
QTest.mouseClick(self.form.btnSave, Qt.LeftButton)
errorShown = self.form.lblError.text() == ""
with open("test_case_3_Mod.xml", "r") as xml:
source = xml.read()
with open("target.xml", "r") as xml:
target = xml.read()
self.assertTrue(errorShown)
self.assertEqual(source, target)
if __name__ == '__main__':
unittest.main()
| [
"gong32@purdue.edu"
] | gong32@purdue.edu |
489eb03b22c1ffffde1be70112c97a985e0f64e9 | ca4b4cb4ce7da93c3a35dab3a877e4a6546edcb4 | /encode.py | 4654e878bd8627bdb271cb1e4192adb443a3a1c5 | [] | no_license | arjun-14/hidden-api | ecb68a193a9e1e8dacc8334b4f5fa429db5e8c6e | 80492197f6160028c383d0fbbf7af0d257367999 | refs/heads/main | 2023-07-13T15:21:29.582878 | 2021-08-24T17:47:45 | 2021-08-24T17:47:45 | 399,554,449 | 0 | 0 | null | 2021-08-24T17:41:26 | 2021-08-24T17:41:25 | null | UTF-8 | Python | false | false | 5,115 | py | from PIL import Image
import re
from io import BytesIO
from fastapi import HTTPException
class encode:
def __init__(self, image, message, content_type):
self.image = image
self.message = message
self.content_type = content_type
def initial_validation(self):
if(not(self.content_type == "image/png" or self.content_type == "image/jpeg")):
raise HTTPException(
status_code=400, detail="Unsupported image format. Currently supported formats: image/jpeg, image/png.")
def convert_to_rgb(self):
try:
with Image.open(BytesIO(self.image)) as self.im:
self.im = self.im.convert("RGBA")
except Exception as e:
raise HTTPException(
status_code=400, detail="Unable to convert image mode to RGB. " + str(e))
def convert_message_to_binary(self):
self.encoded_message = ""
utf8_style = ""
try:
for i in self.message:
unicode_number = ord(i)
unicode_number_in_binary = format(ord(i), "b")
utf8_style = ""
if(unicode_number < 128):
utf8_style = unicode_number_in_binary.zfill(8)
elif(unicode_number < 2048):
unicode_number_in_binary = unicode_number_in_binary.zfill(
11)
utf8_style = "110" + \
unicode_number_in_binary[0:5] + \
"10" + unicode_number_in_binary[5:12]
elif(unicode_number < 65536):
unicode_number_in_binary = unicode_number_in_binary.zfill(
16)
utf8_style = "1110" + unicode_number_in_binary[0:4] + "10" + \
unicode_number_in_binary[4:10] + \
"10" + unicode_number_in_binary[10:16]
elif(unicode_number <= 1114111):
unicode_number_in_binary = unicode_number_in_binary.zfill(
21)
utf8_style = "11110" + unicode_number_in_binary[0:3] + "10" + unicode_number_in_binary[3: 9] + \
"10" + unicode_number_in_binary[9:15] + \
"10" + unicode_number_in_binary[15:21]
else:
raise Exception()
self.encoded_message = self.encoded_message + utf8_style
except Exception as e:
raise HTTPException(
status_code=500, detail="Unexpected error while processing the message text. " + str(e))
def put_message_in_image(self):
try:
pixels = self.im.load()
except:
raise HTTPException(
status_code=500, detail="Unknown error while loading image.")
final_encoded_message = self.encoded_message + "10"
msg_length = len(final_encoded_message)
self.max_length = self.im.width * self.im.height * 3 * 2
if(msg_length > self.max_length):
raise HTTPException(
status_code=400, detail="Image with more pixels needed for encoding current message.")
pixel_number = [0, 0]
self.pixel_count = 0
for i in range(0, msg_length, 6):
rm = final_encoded_message[i: i+2]
gm = final_encoded_message[i+2: i+4]
bm = final_encoded_message[i+4: i+6]
# rm will always be full.
r = int(
format(pixels[pixel_number[0], pixel_number[1]][0], '08b')[0:6]+rm, 2)
if (gm != ""):
g = int(
format(pixels[pixel_number[0], pixel_number[1]][1], '08b')[0:6]+gm, 2)
else:
g = pixels[pixel_number[0], pixel_number[1]][1]
if (bm != ""):
b = int(
format(pixels[pixel_number[0], pixel_number[1]][2], '08b')[0:6]+bm, 2)
else:
b = pixels[pixel_number[0], pixel_number[1]][2]
a = pixels[pixel_number[0], pixel_number[1]][3]
self.pixel_count = self.pixel_count + 1
pixels[pixel_number[0], pixel_number[1]] = (r, g, b, a)
if(pixel_number[0] < self.im.width-1):
pixel_number[0] = pixel_number[0] + 1
else:
pixel_number[0] = 0
pixel_number[1] = pixel_number[1] + 1
def convert_to_buffered(self):
buffered = BytesIO()
self.im.save(buffered, format="png")
return buffered.getvalue()
def run(self):
try:
self.initial_validation()
self.convert_to_rgb()
self.convert_message_to_binary()
self.put_message_in_image()
return(
{
"buffered": self.convert_to_buffered(),
"noOfPixelsModified": str(self.pixel_count), "percentOfImageModified": str(
self.pixel_count/(self.im.width*self.im.height)*100
)
}
)
except:
raise
| [
"impmmmm@gmail.com"
] | impmmmm@gmail.com |
99d6070d539a5b867b13698bdd27be4a9352e2b6 | 66e68aae8714d9c74dd99f8a39f9f8c21f3a2041 | /if_while_for.py | f76534ff5b89c23635b6ccec7f3e50a4608befff | [] | no_license | turtlecoder207/Python-Practice | d102d6e0b746824a62d2e51f0f167f5ffa3d205a | 646c20fd9983ebdcc03ce3a9dd1d6a7cb1cc6685 | refs/heads/master | 2021-01-22T03:53:42.094225 | 2017-05-25T14:15:03 | 2017-05-25T14:15:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | #돈이 3000원 이상 있으면 택시를 타고 그렇지 않으면 걸어 가라
"""
money =1
if money >= 3000:
print("택시를 타라")
else:
print("걸어 가라")
#돈이 3000원 이상 있거나 카드가 있다면 택시를 타고 그렇지 않으면 걸어 가라
money=2000
card=1
if money>=3000 or card:
print('택시를 타라')
else:
print('걸어 가라')
#만약 주머니에 돈이 있으면 택시를 타고, 없으면 걸어 가라
pocket = ['paper','cellphone','money']
if 'money' in pocket:
print('택시를 타라')
else:
print('걸어 가라')
#주머니에 돈이 있으면 택시를 타고, 주머니에 돈은 없지만 카드가 있으면 택시를 타고, 돈도 없고 카드로 없으면 걸어 가라
pocket = ['paper','cellphone']
card = 1
if 'money' in pocket:
print("택시를 타라")
else:
if card:
print("택시를 타라")
else:
print("걸어 가라")
#using elif
pocket = ['paper','cellphone']
card = 1
if 'money' in pocket:
print("택시를 타라")
elif card:
print("택시를 타라")
else:
print("걸어 가라")
#while문 기초
treeHit=0
while treeHit <10:
treeHit = treeHit+1
print("나무를 %d번 찍었습니다." %treeHit)
if treeHit == 10:
print("나무 넘어갑니다.")
#continue 문장
a=0
while a<10:
a= a+1
if a%2 ==0: continue
print(a)
#for문 기초
test_list = ['one','two','three']
for i in test_list:
print(i)
#for문 응용: 총 5명의 학생이 시험을 보았는데 시험 점수가 60점이 넘으면 합격이고 그렇지 않으면 불합격이다.
# 함격인지 불합격인지 결과를 보여주시오
marks = [90,25,67,45,80]
for i in marks:
if i > 60:
print("%d점 받은 학생은 합격" %i)
else:
print("%d점 받은 학생은 불합격" %i)
"""
#리스트 안에 for문 포함하기
a = [1,2,3,4]
result = []
for num in a:
result.append(num*3)
print(result)
| [
"chohj377@gmail.com"
] | chohj377@gmail.com |
96764c52211e9e183c77e8463ac38389e6aaee5c | 70ca6ff825eec1cbc818a406a167173d9db9e5a5 | /jdSpider/jd/pipelines.py | c3c132e2a35684a5f95ea651e6df64605288f247 | [] | no_license | guyueyuqi/JDSpider | 2c89e5d4c69e4a427046c330e9994a85ac74616c | 13e933acda61d5519dcb7d4b2de26abb0ef34a74 | refs/heads/master | 2023-06-25T02:45:20.769657 | 2018-03-23T12:05:31 | 2018-03-23T12:05:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,759 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import random
import os
from .items import JdcommentItem
class JdspiderPipeline(object):
def __init__(self):
self.file = open('./京东商品信息.txt','w',encoding='utf-8')
def process_item(self, item, spider):
if spider.name == 'jd':
for name,content in zip(item['name'],item['content']):
self.file.write(name+":"+content+"\n")
self.file.write('购买网址:'+item['url']+'\n\n\n\n')
self.file.flush()
return item
def __del__(self):
self.file.close()
class JdcommentPipeline(object):
# def __init__(self):
# self.file = open(str(random.randint(1,99999))+ '.txt','w',encoding='utf-8')
def process_item(self, item, spider):
if spider.name == 'JDcomment':
filename = item['name'] + '.txt'
filepath = './京东商品评论'
if not os.path.exists(filepath):
os.makedirs(filepath)
filepath = os.path.join(filepath,filename)
if not os.path.exists(filepath):
file = open(filepath,'w',encoding='utf-8')
url = item['url']
file.write(url + '\n\n')
else:
file = open(filepath,'a',encoding='utf-8')
# if isinstance(item, JdcommentItem):
# print("1111111")
file.write("日期:"+item['date']+"\n")
file.write(item['content']+'\n\n-----------------------------\n\n')
# file.flush()
file.close()
return item
| [
"369943318@qq.com"
] | 369943318@qq.com |
343463fe4db8357c595ca5d678bb025251c44331 | 144e2be8fea60da984aee8c75f90af3a67c9d732 | /esempio_controllo_IP/controlla_server.py | df60bd2780b136d355f62d8b0f0f6721d8a246ca | [] | no_license | lukes1582/scripts_python | fff913ac357c67b44e1505a3c05534016cadafa2 | d9dd6a2dae7f9afb6e9d006ac7feb1dd372fd1db | refs/heads/master | 2021-08-22T01:51:36.016718 | 2021-07-14T09:59:10 | 2021-07-14T09:59:10 | 246,614,442 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py | '''
Created on 12/02/2021
@author: lukes158@gmail.com l0m1s
'''
import os
import re
import threading
from datetime import datetime
# dichirazione variabili personalizzabili
# indirizzo email a cui inviare i dati
mail = "lukes1582@gmail.com"
# timer PING in minuti
timePING = 5
# timer per l'invio MAIL in minuti
timeMAIL = 60
# file che contiene gli IP da controllare
wFile = "white_list.txt"
# file che verra' allegato alla mail
bFile = "black_list.txt"
# lista di supporto al programma
IP_address=[]
# metodo per il controllo degli IP
def checkIP(val):
# espressione regolare per gli IPv4
pat = re.compile("^([1][0-9][0-9].|^[2][5][0-5].|^[2][0-4][0-9].|^[1][0-9][0-9].|^[0-9][0-9].|^[0-9].)([1][0-9][0-9].|[2][5][0-5].|[2][0-4][0-9].|[1][0-9][0-9].|[0-9][0-9].|[0-9].)([1][0-9][0-9].|[2][5][0-5].|[2][0-4][0-9].|[1][0-9][0-9].|[0-9][0-9].|[0-9].)([1][0-9][0-9]|[2][5][0-5]|[2][0-4][0-9]|[1][0-9][0-9]|[0-9][0-9]|[0-9])$")
# test di correttezza
test = pat.match(val)
if test:
return val
else:
# se esiste un IP non valido viene scritto nella BLACK LIST
writeBlackList("Errore nell IP "+val)
return None
# metodo per la lettura degli IP
def readWhiteList():
fs = open(wFile,'r')
lines = fs.readlines()
for line in lines:
# inserisce gli IP in una lista di supporto
IP_address.append(checkIP(line))
fs.close()
# metodo per la scrittura del file in allegato
def writeBlackList(val):
ws = open(bFile,'a')
ws.write(val)
#metodo per la
def pingHost(hostname):
date_time = datetime.now()
t1 = date_time.strftime("%d-%b-%Y (%H:%M:%S)")
# Se sei in ambiente "Linux -c"
response = os.system("ping -n 3 " + hostname)
if response == 0:
return str(hostname + " Server on line !\n")
else:
writeBlackList(str("\n" + hostname + " Server off line ! \t "+t1+" \n"))
return str(hostname + " Server off line !")
def callHostPING():
threading.Timer((60.0*timePING), callHostPING).start()
readWhiteList()
for k in IP_address:
print(pingHost(k))
IP_address.clear()
def sendMAIL():
threading.Timer((60.0*timeMAIL), sendMAIL).start()
b = os.path.getsize("black_list.txt")
"""
Viene dato per scontato che DEVE essere installato il programma mail all'interno della macchina in cui gira lo script
"""
if(b > 0):
# crea una mail e allega il file con i server offline
bash_mail = " echo 'Server Offline' | mail -s subject "+mail+" -a black_list.txt"
# spedisce la mail
os.system(bash_mail)
# cancella il file con la lista dei server offline
os.remove(bFile)
if __name__ == '__main__':
callHostPING()
sendMAIL()
| [
"noreply@github.com"
] | noreply@github.com |
701efcd2c2b505fc3ac4e0416b386c15dcc95220 | ee268bca0d0114a543db8f5c979019ea07a8de86 | /djantube/asgi.py | ac2c150f7df8e7f6e3b64e2dba95d285971e2189 | [] | no_license | mehdi-benhariz/DjangTube | 63be28686f3e4330198a0f9158d0d456c0710667 | 646bb32edf2d9da33b71ec78eb3f67e67f22e77f | refs/heads/master | 2022-12-01T11:53:23.419049 | 2020-08-05T10:56:06 | 2020-08-05T10:56:06 | 285,043,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for djantube project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djantube.settings')
application = get_asgi_application()
| [
"benharizmehdi20@gmail.com"
] | benharizmehdi20@gmail.com |
4e84c64706c5b3dcde4f84dc13e6085aa18fa72b | 61296b98e4d481893db4bc51d75652c7109ae626 | /0000_examples/cobotta_g.py | 116d8d398c21d519f84520776dd6e95bfdd43b4d | [
"MIT"
] | permissive | Shogo-Hayakawa/wrs | 23d4560b1062cf103ed32db4b2ef1fc2261dd765 | 405f15be1a3f7740f3eb7d234d96998f6d057a54 | refs/heads/main | 2023-08-19T19:29:15.409949 | 2021-11-02T01:22:29 | 2021-11-02T01:22:29 | 423,663,614 | 0 | 0 | MIT | 2021-11-02T00:59:17 | 2021-11-02T00:59:17 | null | UTF-8 | Python | false | false | 1,418 | py | import visualization.panda.world as wd
import grasping.planning.antipodal as gp
import robot_sim.end_effectors.grippers.cobotta_gripper.cobotta_gripper as cg
import modeling.collision_model as cm
import modeling.geometric_model as gm
import numpy as np
import math
base = wd.World(cam_pos=np.array([.5, .5, .5]), lookat_pos=np.array([0, 0, 0]))
gm.gen_frame().attach_to(base)
objcm = cm.CollisionModel("objects/holder.stl")
objcm.attach_to(base)
# base.run()
hnd_s = cg.CobottaGripper()
# hnd_s.gen_meshmodel().attach_to(base)
# base.run()
grasp_info_list = gp.plan_grasps(hnd_s,
objcm,
angle_between_contact_normals=math.radians(175),
openning_direction='loc_y',
rotation_interval=math.radians(15),
max_samples=20,
min_dist_between_sampled_contact_points=.001,
contact_offset=.001)
gp.write_pickle_file(objcm_name="holder",
grasp_info_list=grasp_info_list,
file_name="cobg_holder_grasps.pickle")
for grasp_info in grasp_info_list:
jaw_width, jaw_center_pos, jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
hnd_s.grip_at_with_jcpose(jaw_center_pos, jaw_center_rotmat, jaw_width)
hnd_s.gen_meshmodel().attach_to(base)
base.run() | [
"wanweiwei07@gmail.com"
] | wanweiwei07@gmail.com |
5bdbf01baa6ade72cd727e466d72bc4ba506dd63 | 18a9b3a0edf6544c70ec729aa18e74b7f7e9befa | /models/backbone.py | bccdb347e2befcc6d0cb0ccc7d0e0b9ee235bb3d | [
"MIT"
] | permissive | AustinTapp/Spine-Transformers | 9bb1796a1dfd28b0b288c0ced399c502cbfe495d | b653d6186ac703edf8a871d46e7ad2fa051a28a4 | refs/heads/main | 2023-06-11T22:19:17.732693 | 2021-07-04T14:26:05 | 2021-07-04T14:26:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | """
Backbone modules.
"""
import torch
import torch.nn.functional as F
from torch import nn
from typing import Dict, List
from models.resnet_no_pool import generate_model
from util.misc import NestedTensor
from .position_encoding import build_position_encoding
class Backbone(nn.Module):
def __init__(self, num_channels: int, model_depth: int):
super().__init__()
self.body = generate_model(model_depth)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
layer1_x, layer2_x, layer3_x, layer4_x = self.body(tensor_list.tensors)
xs = {'0':layer1_x, '1':layer2_x, '2':layer3_x, '3':layer4_x}
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-3:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
backbone = Backbone(num_channels=2048, model_depth=50)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
| [
"noreply@github.com"
] | noreply@github.com |
1a7371535df33ccdd563e4b656e57c1ddf539554 | 3019928adaf37fe4acf4444587ee6fd83e634600 | /src/getTotallyNew.py | 7b9600c5b51f2693732348747687ff95eeadd2f0 | [] | no_license | lucian-whu/Project3 | 013c9aace191fdd4c4cdef731e4889f1d80f3e15 | c5735962c7b022c0ef8f290e8dfc9b2ae5eb0839 | refs/heads/master | 2021-05-15T10:11:47.835001 | 2017-10-25T08:27:33 | 2017-10-25T08:27:33 | 108,241,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # coding=utf-8
import csv
with open('D://python_projects//project3//data//acute_myocardial_infarction_MESH_NEW_2006_2010.csv','r') as f:
lines = f.readlines()
for line in lines:
print(line[0])
# with open('','rb') as f1:
# csvReader1 = csv.reader(f1)
# for csvLine1 in csvReader1:
# if csv
| [
"lucianwhu@163.com"
] | lucianwhu@163.com |
6b668b8f038dc5cb395934f517fcc7da8dada50f | d71ab4eeb925ff1246781ed162de573b46be6ee7 | /Python/lesson_2/exersize_8.py | 8eb36100f7b8e40fb88dc5536980b39ad21d0cc6 | [] | no_license | Tixon74/test | d2cfbd0bd0519dce091bcde368e2f08fb6f5b03c | f85620d0ada0169ee4b67be3bcc8a272f624f13b | refs/heads/master | 2021-04-02T16:52:26.318401 | 2020-04-23T17:58:16 | 2020-04-23T17:58:16 | 248,296,917 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | int_num = int(input())
while int_num > 0:
if int_num % 10 == 5:
print('yes')
break
int_num = int_num // 10
else: print('no') | [
"tixondamage@gmail.com"
] | tixondamage@gmail.com |
b79625e562b14a8677b18af115c52c86d1d2319a | 9ce8cdcc0df179a9b898dac768231ed0ac41249b | /turtle_sort/turtle_sort.py | f844828add18dc00a185f030b59ded9452dcf87b | [] | no_license | maradude/aps-code | 50ce3b7fc3344f049a0497a34bdd0d2e8091669d | afc95535d638967849de48289d064bfd5d0588a9 | refs/heads/master | 2022-10-07T12:35:02.626710 | 2019-03-03T19:52:09 | 2019-03-03T19:52:09 | 171,647,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | def count_turtle_sort_steps(not_sorted, is_sorted, turtle_count):
# find how many turtles don't need to be moved, return the difference
expected_turtle = actual_turtle = turtle_count - 1
need_to_move = 0
while actual_turtle >= 0:
if is_sorted[expected_turtle] == not_sorted[actual_turtle]:
expected_turtle, actual_turtle = expected_turtle-1, actual_turtle-1
else:
actual_turtle = actual_turtle-1
need_to_move += 1
return need_to_move
if __name__ == '__main__':
import sys
arrays = []
try:
for line in sys.stdin:
arrays.append(line.strip())
except TypeError as e:
print(e)
sys.exit()
tests = int(arrays.pop(0))
for _ in range(tests):
unsorted_case = []
sorted_case = []
amount = int(arrays.pop(0))
for __ in range(amount):
unsorted_case.append(arrays.pop(0))
for __ in range(amount):
sorted_case.append(arrays.pop(0))
print(count_turtle_sort_steps(unsorted_case, sorted_case, amount))
| [
"martti@aukia.com"
] | martti@aukia.com |
960fa88e71e4cbe57c6854a3e565830fbc731386 | de9444984bb341ca72337de329f170fe7a1d0e63 | /bin/sqlformat | cb339fe340cf02d691bf9ec3cbbdffa9a569a0dc | [] | no_license | CRcr0/Oauth2Test | bc33fe491a9ee28df275d5e8b18b25f5eafff946 | 6d9116e0dc331a978b6810a55a1a458efcf2331f | refs/heads/main | 2023-09-01T14:43:11.797155 | 2021-10-31T23:16:04 | 2021-10-31T23:16:04 | 423,279,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | #!/Users/xinjianzhanghu/PycharmProjects/TestOauth2/django-vue/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"“1156898468@qq.comssh-keygen -t rsa -C “1156898468@qq.com"
] | “1156898468@qq.comssh-keygen -t rsa -C “1156898468@qq.com | |
6695e43b08ccf097696b09c23a6d6000166142d9 | 214e13b750665cffb2fad32e2020b1e003780655 | /4/4.4/foods.py | 197ee0522f9890e4c53dea6dadc7d6debe53e55c | [
"Apache-2.0"
] | permissive | liqiwa/python_work | ea4a1139b6dced941377d0baf5363a3cf6d683b8 | 3d1198d5616b28a37fee7dfba5bbef0e1d489c2d | refs/heads/master | 2021-07-08T06:28:03.085223 | 2020-07-18T07:57:13 | 2020-07-18T07:57:13 | 138,620,066 | 0 | 0 | Apache-2.0 | 2020-07-18T07:57:37 | 2018-06-25T16:17:02 | Python | UTF-8 | Python | false | false | 298 | py | my_foods = ["pizza","orange","falafel","carrot cake"]
friend_foods = my_foods[:]
print(friend_foods)
my_foods.append("my_foods + 1")
friend_foods.append("friedd_foods + 2")
print(my_foods)
print(friend_foods)
my_foods = friend_foods
my_foods.append('cannoli')
print(my_foods)
print(friend_foods) | [
"menghe163@gmail.com"
] | menghe163@gmail.com |
469eebafcf857f87276c308ad37773ed5d6351dd | 95ec78292e150591dc0587988cf3a4b9b5ad08c2 | /code/srmcollider/Residues.py | 988d50d1542f6b880b4d90b333ad0cef6cd72721 | [] | no_license | hroest/srmcollider | 40b034f4f1713d94a6f36ed78b3ed67857b47eb7 | 67c0a04fb21a4f089e3aab15d5ee8884b389ec44 | refs/heads/master | 2021-01-18T23:10:14.927217 | 2018-01-15T15:04:25 | 2018-01-15T15:04:25 | 10,242,093 | 0 | 2 | null | 2017-12-05T22:02:04 | 2013-05-23T11:18:25 | Python | UTF-8 | Python | false | false | 18,191 | py | """
*
* Program : SRMCollider
* Author : Hannes Roest <roest@imsb.biol.ethz.ch>
* Date : 05.02.2011
*
*
* Copyright (C) 2011 - 2012 Hannes Roest
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
*
"""
import string
# Isotope Modification
# 0 means no modification
# 1 means N15 (heavy nitrogen)
NOISOTOPEMODIFICATION = 0
N15_ISOTOPEMODIFICATION = 1
class Residues:
# http://www.sisweb.com/referenc/source/exactmaa.htm
# http://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl
average_elements = {
'H' : 1.007825 * 99.99/100 + 2.014102 * 0.015/100,
'N' : 14.003074 * 99.63/100 + 15.000109 * 0.37/100,
'O' : 15.994915 * 99.76/100 + 16.999131 * 0.038/100 + 17.999159 * 0.20/100,
'C' : 12.000000 * 98.90/100 + 13.003355 * 1.10,
'P' : 30.973763
}
monoisotopic_elements = {
'H' : 1.007825032,
'H2' : 2.01410178,
'C' : 12.000000,
'C13' : 13.00335484,
'N' : 14.003074005,
'N15' : 15.000108898,
'O' : 15.994914620,
'O17' : 16.999132,
'O18' : 17.999161,
'P' : 30.973762,
'S' : 31.972071
}
aa_codes = {
'A' : 'Ala',
'R' : 'Arg',
'N' : 'Asn',
'D' : 'Asp',
'C' : 'Cys',
'E' : 'Glu',
'Q' : 'Gln',
'G' : 'Gly',
'H' : 'His',
'I' : 'Ile',
'L' : 'Leu',
'K' : 'Lys',
'M' : 'Met',
'F' : 'Phe',
'P' : 'Pro',
'S' : 'Ser',
'T' : 'Thr',
'W' : 'Trp',
'Y' : 'Tyr',
'V' : 'Val',
'C[160]' : 'Cys+CAM',
'M[147]' : 'Met+Ox',
}
aa_codes_rev = dict([(v,k) for k,v in aa_codes.iteritems()])
aa_names = {
'A': 'Alanine',
'B': 'Aspartic Acid or Asparagine',
'C': 'Cysteine',
'c': 'Modified cysteine' ,
'D': 'Aspartate',
'E': 'Glutamate',
'F': 'Phenylalanine',
'G': 'Glycine',
'H': 'Histidine',
'I': 'Isoleucine',
'K': 'Lysine',
'k': 'Lys->Cys substitution and carbamidomethylation (903)',
'L': 'Leucine',
'M': 'Methionine',
'm': 'Modified methionine' ,
'N': 'Asparagine',
'P': 'Proline',
'Q': 'Glutamine',
'R': 'Arginine',
'S': 'Serine',
'T': 'Threonine',
'V': 'Valine',
'W': 'Tryptophan',
'X': 'Leucine/Isoleucine',
'Y': 'Tyrosine',
'Z': 'Glutamic acid'
}
aa_sum_formulas_text = {
'A' : 'C3H5ON',
'R' : 'C6H12ON4',
'N' : 'C4H6O2N2',
'D' : 'C4H5O3N',
'C' : 'C3H5ONS',
'E' : 'C5H7O3N',
'Q' : 'C5H8O2N2',
'G' : 'C2H3ON',
'H' : 'C6H7ON3',
'I' : 'C6H11ON',
'L' : 'C6H11ON',
'K' : 'C6H12ON2',
'M' : 'C5H9ONS',
'F' : 'C9H9ON',
'P' : 'C5H7ON',
'S' : 'C3H5O2N',
'T' : 'C4H7O2N',
'W' : 'C11H10ON2',
'Y' : 'C9H9O2N',
'V' : 'C5H9ON'
}
#from http://education.expasy.org/student_projects/isotopident/htdocs/aa-list.html
aa_sum_formulas = {
'A' : { 'C' : 3, 'H' : 5 , 'O' : 1, 'N' : 1 },
'R' : { 'C' : 6, 'H' : 12 , 'O' : 1, 'N' : 4 },
'N' : { 'C' : 4, 'H' : 6 , 'O' : 2, 'N' : 2 },
'D' : { 'C' : 4, 'H' : 5 , 'O' : 3, 'N' : 1 },
'C' : { 'C' : 3, 'H' : 5 , 'O' : 1, 'N' : 1, 'S' : 1 },
'E' : { 'C' : 5, 'H' : 7 , 'O' : 3, 'N' : 1 },
'Q' : { 'C' : 5, 'H' : 8 , 'O' : 2, 'N' : 2 },
'G' : { 'C' : 2, 'H' : 3 , 'O' : 1, 'N' : 1 },
'H' : { 'C' : 6, 'H' : 7 , 'O' : 1, 'N' : 3 },
'I' : { 'C' : 6, 'H' : 11 , 'O' : 1, 'N' : 1 },
'L' : { 'C' : 6, 'H' : 11 , 'O' : 1, 'N' : 1 },
'K' : { 'C' : 6, 'H' : 12 , 'O' : 1, 'N' : 2 },
'M' : { 'C' : 5, 'H' : 9 , 'O' : 1, 'N' : 1, 'S' : 1 },
'F' : { 'C' : 9, 'H' : 9 , 'O' : 1, 'N' : 1 },
'P' : { 'C' : 5, 'H' : 7 , 'O' : 1, 'N' : 1 },
'S' : { 'C' : 3, 'H' : 5 , 'O' : 2, 'N' : 1 },
'T' : { 'C' : 4, 'H' : 7 , 'O' : 2, 'N' : 1 },
'W' : { 'C' : 11, 'H' : 10 , 'O' : 1, 'N' : 2 },
'Y' : { 'C' : 9, 'H' : 9 , 'O' : 2, 'N' : 1 },
'V' : { 'C' : 5, 'H' : 9 , 'O' : 1, 'N' : 1 },
'C[160]' : { 'C' : 3+2, 'H' : 5+3 , 'O' : 1+1, 'N' : 1+1, 'S' : 1 }, # + CAM = H(3) C(2) N O
'M[147]' : { 'C' : 5, 'H' : 9 , 'O' : 1+1, 'N' : 1, 'S' : 1 },
}
mass_H = monoisotopic_elements['H']
mass_N = monoisotopic_elements['N']
mass_O = monoisotopic_elements['O']
mass_C = monoisotopic_elements['C']
mass_S = monoisotopic_elements['S']
mass_P = monoisotopic_elements['P']
mass_NH2 = mass_N + 2*mass_H
mass_NH3 = mass_N + 3*mass_H
mass_CO = mass_C + mass_O
mass_H2O = mass_O + 2*mass_H
mass_OH = mass_O + mass_H
mass_H3PO4 = mass_P + mass_O * 4 + mass_H * 3
mass_H1PO4 = mass_P + mass_O * 4 + mass_H * 1
mass_H1PO3 = mass_P + mass_O * 3 + mass_H * 1
mass_CAM = 2* mass_C + 4*mass_H + mass_O + mass_N #CH2-CONH2
mass_C13 = monoisotopic_elements['C13']
mass_N15 = monoisotopic_elements['N15']
mass_diffC13 = mass_C13 - mass_C
mass_diffN15 = mass_N15 - mass_N
# Average (chemical) residue masses. Lower-case keys are modified residues
# (e.g. 'c' = carbamidomethyl-Cys, 'm' = oxidized Met).
average_data = {
    # Key on abbreviation, give name, molecular weight (in daltons).
    'A': ('Alanine', 71.0788),
    'B': ('Aspartic Acid or Asparagine', 114.5962),
    'C': ('Cysteine', 103.1448),
    'c': ('Modified cysteine' , 160.1448), # Add 57
    'D': ('Aspartate', 115.0886),
    'E': ('Glutamate', 129.1155),
    'F': ('Phenylalanine', 147.1766),
    'G': ('Glycine', 57.0519),
    'H': ('Histidine', 137.1411),
    'I': ('Isoleucine', 113.1594),
    'K': ('Lysine', 128.1741),
    'k': ('Lys->Cys substitution and carbamidomethylation (903)', 128.09496 + 32.0219),
    'L': ('Leucine', 113.1594),
    'M': ('Methionine', 131.1986),
    'm': ('Modified methionine' , 147.1986), # add 16
    'N': ('Asparagine', 114.1038),
    'P': ('Proline', 97.1167),
    'Q': ('Glutamine', 128.1307),
    'R': ('Arginine', 156.1875),
    'S': ('Serine', 87.0782),
    'T': ('Threonine', 101.1051),
    'V': ('Valine', 99.1326),
    'W': ('Tryptophan', 186.2132),
    'X': ('Leucine/Isoleucine', 113.1594), # Can't distinguish leucine/isoleucine.
    'Y': ('Tyrosine', 163.176),
    'Z': ('Glutamic acid, or glutamine', 128),
}
#e.g. from http://education.expasy.org/student_projects/isotopident/htdocs/aa-list.html
# see also http://www.sbeams.org/svn/sbeams/trunk/sbeams/lib/perl/SBEAMS/Proteomics/AminoAcidModifications.pm
# Monoisotopic residue masses for the unmodified amino acids; modified
# residues are kept separately in monoisotopic_mod and merged in below.
monoisotopic_data = {
    # Key on abbreviation, give name, molecular weight (in daltons).
    'A': ('Alanine', 71.03711),
    'B': ('Aspartic Acid or Asparagine', 114.04293),
    'C': ('Cysteine', 103.00919),
    'D': ('Aspartate', 115.02694),
    'E': ('Glutamate', 129.04259),
    'F': ('Phenylalanine', 147.06841),
    'G': ('Glycine', 57.02146),
    'H': ('Histidine', 137.05891),
    'I': ('Isoleucine', 113.08406),
    'K': ('Lysine', 128.09496),
    'L': ('Leucine', 113.08406),
    'M': ('Methionine', 131.04049),
    'N': ('Asparagine', 114.04293),
    'P': ('Proline', 97.05276),
    'Q': ('Glutamine', 128.05858),
    'R': ('Arginine', 156.10111),
    'S': ('Serine', 87.03203),
    'T': ('Threonine', 101.04768),
    'V': ('Valine', 99.06841),
    'W': ('Tryptophan', 186.07931),
    'X': ('Leucine/Isoleucine', 113.08406), # Can't distinguish leucine/isoleucine
    'Y': ('Tyrosine', 163.06333),
    'Z': ('Glutamic acid, or glutamine', 128.05858),
}
# Modified residues keyed either by the legacy lower-case letter ('c', 'm')
# or by the "X[nominal mass]" tag convention; values are (name, mass) tuples
# like the entries in monoisotopic_data.
monoisotopic_mod = {
    'c': ('Modified cysteine', monoisotopic_data["C"][1] + mass_CAM - mass_H ), # CAM replaces H
    #'c': ('Modified cysteine' , 160.00919), # Add 57
    'C[160]': ('Modified cysteine', monoisotopic_data["C"][1] + mass_CAM - mass_H ), # CAM replaces H
    'k': ('Lys->Cys substitution and carbamidomethylation (903)', 128.09496 + 31.935685),
    # NOTE(review): presumably deamidation (Asn -> Asp, -NH +O) — confirm.
    'N[115]': ('Asparagine', monoisotopic_data["N"][1] - mass_N - mass_H + mass_O),
    #'m': ('Modified methionine', 147.04049), # add 16
    'm': ('Modified methionine', monoisotopic_data["M"][1] + mass_O), # oxygen
    'M[147]': ('Modified methionine', monoisotopic_data["M"][1] + mass_O), # oxygen
    # SILAC labels
    'K[136]' : ('heavy Lysine', monoisotopic_data["K"][1] + 8.014199), #UniMod:259
    'R[166]' : ('heavy Arginine', monoisotopic_data["R"][1] + 10.008269), #UniMod:267
    'R[162]' : ('heavy Arginine', monoisotopic_data["R"][1] + 6*mass_diffC13), #UniMod:188
    'V[104]' : ('heavy Valine', monoisotopic_data["V"][1] + 5*mass_diffC13), # no unimod
    'V[105]' : ('heavy Valine', monoisotopic_data["V"][1] + 5*mass_diffC13 + mass_diffN15), # unimod 268
    # Pyro Unimod 27 and 28
    'E[111]': ('pyro Glutamate', 129.04259 - mass_O - 2*mass_H),
    'Q[111]': ('pyro Glutamine', 128.05858 - mass_O - 2*mass_H),
    # Unimod 385 # Pyro-carbamidomethyl as a delta from Carbamidomethyl-Cys
    'C[143]': ('Pyro-carbamidomethyl cysteine' , monoisotopic_data["C"][1] + mass_CAM - mass_H - 3*mass_H - mass_N),
    # Phospho
    # NOTE(review): 'S[166]' and 'S[167]' share one mass; the S[166] tag looks
    # like an alternate rounding of 87.03203 + HPO3 (~166.998 Da) — confirm.
    'S[166]': ('Phospho Serine', 87.03203 + mass_H1PO3),
    'S[167]': ('Phospho Serine', 87.03203 + mass_H1PO3),
    'T[181]': ('Phospho Threonine', 101.04768 + mass_H1PO3),
    'Y[243]': ('Phospho Tyrosine', 163.06333 + mass_H1PO3),
}
# Map delta-mass style modification tags (e.g. "K[+8]") onto the absolute
# "X[nominal mass]" tags used as keys in monoisotopic_mod above. The residue
# letter is always preserved by the mapping.
mod_mapping = {
    "K[+8]" : "K[136]",
    "R[+10]": "R[166]",
    "M[+16]": "M[147]",
    "N[-1]" : "N[115]",
    "C[+57]": "C[160]",
    "C[+40]": "C[160]",
    "R[+6]" : "R[162]",
    "V[+5]" : "V[104]",
    # Fixed: previously mapped to "R[105]", which is not a defined tag —
    # the 13C5 + 15N heavy-Valine tag is "V[105]" (see monoisotopic_mod).
    "V[+6]" : "V[105]",
    "S[+80]" : "S[167]",
    "T[+80]" : "T[181]",
    "Y[+80]" : "Y[243]",
}
# Fold the modified-residue entries into the main monoisotopic table so that
# lookups only need a single dictionary.
monoisotopic_data.update(monoisotopic_mod)
# Modification tags observed in data but not yet assigned a mass:
#C[169] 58 => ?
#C[152] 2 => ?
#W[202] 23 => Oxidation?
"""
http://web.expasy.org/protscale/pscale/Hphob.Doolittle.html
GRAVY (Grand Average of Hydropathy)
The GRAVY value for a peptide or protein is calculated as the sum of hydropathy values [9] of all the amino acids, divided by the number of residues in the sequence.
Amino acid scale: Hydropathicity.
Author(s): Kyte J., Doolittle R.F.
Reference: J. Mol. Biol. 157:105-132(1982).
Amino acid scale values:
"""
# Kyte-Doolittle hydropathy scale, keyed by three-letter residue code
# (see the reference block above).
Hydropathy = {
    'Ala': 1.800,
    'Arg': -4.500,
    'Asn': -3.500,
    'Asp': -3.500,
    'Cys': 2.500,
    'Gln': -3.500,
    'Glu': -3.500,
    'Gly': -0.400,
    'His': -3.200,
    'Ile': 4.500,
    'Leu': 3.800,
    'Lys': -3.900,
    'Met': 1.900,
    'Phe': 2.800,
    'Pro': -1.600,
    'Ser': -0.800,
    'Thr': -0.700,
    'Trp': -0.900,
    'Tyr': -1.300,
    'Val': 4.200,
}
# Same scale re-keyed by one-letter code via aa_codes_rev.
# NOTE: .iteritems() — this module targets Python 2.
Hydropathy_aa = dict([ (aa_codes_rev[k],v) for k,v in Hydropathy.iteritems()])
# Per-residue physicochemical scales keyed by one-letter code. Ambiguity
# codes are included: 'X' = Leu/Ile, 'B' = Asn/Asp, 'Z' = Gln/Glu.
# Hydrophobicity scale (dimensionless, positive = more hydrophobic).
hydrophobicity = {
    'F': 5.00,
    'W': 4.88,
    'L': 4.76,
    'X': 4.59,
    'I': 4.41,
    'M': 3.23,
    'V': 3.02,
    'C': 2.50,
    'Y': 2.00,
    'A': 0.16,
    'T': -1.08,
    'E': -1.50,
    'Z': -2.13,
    'D': -2.49,
    'Q': -2.76,
    'R': -2.77,
    'S': -2.85,
    'B': -3.14,
    'G': -3.31,
    'N': -3.79,
    'H': -4.63,
    'P': -4.92,
    'K': -5.00
}
# Gas-phase basicity (kcal/mol-style values; higher = more basic).
basicity = {
    'G': 202.7,
    'C': 206.2,
    'A': 206.4,
    'S': 207.6,
    'D': 208.6,
    'V': 208.7,
    'L': 209.6,
    'X': 210.2,
    'B': 210.7,
    'I': 210.8,
    'T': 211.7,
    'F': 212.1,
    'N': 212.8,
    'Y': 213.1,
    'M': 213.3,
    'Q': 214.2,
    'P': 214.4,
    'Z': 214.9,
    'E': 215.6,
    'W': 216.1,
    'K': 221.8,
    'H': 223.7,
    'R': 237.0
}
# Helix propensity (relative; >1 favours alpha-helix formation).
helicity = {
    'F': 1.26,
    'W': 1.07,
    'L': 1.28,
    'X': 1.29, #avg L,I
    'I': 1.29,
    'M': 1.22,
    'V': 1.27,
    'C': 0.79,
    'Y': 1.11,
    'A': 1.24,
    'T': 1.09,
    'E': 0.85,
    'D': 0.89,
    'Z': 0.91, #avg Q,E
    'B': 0.92, #avg N,D
    'Q': 0.96,
    'R': 0.95,
    'S': 1.00,
    'G': 1.15,
    'N': 0.94,
    'H': 0.97,
    'P': 0.57,
    'K': 0.88,
}
# Isoelectric point of the free amino acid.
pI = {
    'G': 6.0,
    'A': 6.0,
    'V': 6.0,
    'L': 6.0,
    'X': 6.0, #L or I
    'I': 6.0,
    'F': 5.5,
    'P': 6.3,
    'S': 5.7,
    'T': 5.6,
    'Y': 5.7,
    'C': 5.0,
    'M': 5.7,
    'N': 5.4,
    'B': 4.1, #avg N and D
    'Q': 5.7,
    'Z': 4.5, #avg Q,E
    'W': 5.9,
    'D': 2.8,
    'E': 3.2,
    'K': 9.7,
    'R': 10.8,
    'H': 7.6
}
def __init__(self, type="mono"):
    """Set up the residue data structure.

    :param type: selects which table self.residues points at —
        a string starting with "mono" selects monoisotopic masses,
        one starting with "av" (or a falsy value) selects average masses.
    :raises ValueError: if type matches neither prefix.
    """
    # Add the phosphorylated residues (lower-case s/t/y) to both tables;
    # phosphorylation adds one HPO3 group to the residue mass.
    self.monoisotopic_data[ 's' ] = ('Phospho-S',
        self.monoisotopic_data[ 'S' ][1] + self.mass_H1PO3)
    self.monoisotopic_data[ 't' ] = ('Phospho-T',
        self.monoisotopic_data[ 'T' ][1] + self.mass_H1PO3)
    self.monoisotopic_data[ 'y' ] = ('Phospho-Y',
        self.monoisotopic_data[ 'Y' ][1] + self.mass_H1PO3)
    self.average_data[ 's' ] = ('Phospho-S',
        self.average_data[ 'S' ][1] + self.mass_H1PO3)
    self.average_data[ 't' ] = ('Phospho-T',
        self.average_data[ 'T' ][1] + self.mass_H1PO3)
    self.average_data[ 'y' ] = ('Phospho-Y',
        self.average_data[ 'Y' ][1] + self.mass_H1PO3)
    if not type:
        self.residues = self.average_data
    elif type.startswith("mono"):
        self.residues = self.monoisotopic_data
    elif type.startswith("av"):
        self.residues = self.average_data
    else:
        raise ValueError("Type of residue must be one of: mono[isotopic], av[erage] (characters within [] are optional.")
    keys = self.residues.keys()
    # All ordered residue pairs ("AA", "AC", ...).
    # NOTE: string.join is Python 2 only — this module targets Python 2.
    self.res_pairs = [ string.join((r, s), '') for r in keys for s in keys ]
def recalculate_monisotopic_data(self):
    """Rebuild self.monoisotopic_data from the per-residue sum formulas.

    Each residue mass is recomputed as the sum of (element count x
    monoisotopic element mass) over its formula, then the standard
    modified residues (CAM-Cys, oxidized Met, phospho-S/T/Y, ...) are
    re-added as (name, mass) tuples. Finally self.residues is repointed
    at the rebuilt table.
    """
    self.monoisotopic_data = {}
    for abbrev, formula in self.aa_sum_formulas.iteritems():
        mysum = 0.0
        for key, value in formula.iteritems():
            mysum += self.monoisotopic_elements[key] * value
        self.monoisotopic_data[abbrev] = (self.aa_codes[abbrev], mysum)
    # Modified residues, stored as (name, mass) tuples like the plain ones.
    # Fixed: a previous line first assigned monoisotopic_data['C'] (a tuple)
    # plus a float, which raises TypeError at runtime; the entry is now
    # built directly as a tuple.
    self.monoisotopic_data['c'] = ('Modified cystein',
        self.monoisotopic_data['C'][1] + self.mass_CAM - self.mass_H)
    self.monoisotopic_data['k'] = ('Lys->Cys substitution and carbamidomethylation (903)',
        self.monoisotopic_data['K'][1] + 31.935685)
    self.monoisotopic_data['m'] = ('Modified methionine',
        self.monoisotopic_data['M'][1] + self.mass_O)
    self.monoisotopic_data['s'] = ('Phospho-S',
        self.monoisotopic_data['S'][1] + self.mass_H1PO3)
    self.monoisotopic_data['t'] = ('Phospho-T',
        self.monoisotopic_data['T'][1] + self.mass_H1PO3)
    self.monoisotopic_data['y'] = ('Phospho-Y',
        self.monoisotopic_data['Y'][1] + self.mass_H1PO3)
    self.residues = self.monoisotopic_data
def recalculate_monisotopic_data_for_N15(self):
    """Rebuild self.monoisotopic_data for fully 15N-labelled residues.

    Same as recalculate_monisotopic_data(), except every nitrogen in a
    residue's sum formula is counted at the N15 mass. Repoints
    self.residues at the rebuilt table.
    """
    self.monoisotopic_data = {}
    for abbrev, formula in self.aa_sum_formulas.iteritems():
        mysum = 0.0
        for key, value in formula.iteritems():
            #replace N with N15
            if key == 'N': key = 'N15'
            mysum += self.monoisotopic_elements[ key ] * value
        self.monoisotopic_data[ abbrev ] = ( self.aa_codes[abbrev] , mysum )
    #IMPORTANT: CAM is added afterwards and is NOT heavy
    #
    self.monoisotopic_data['C[160]'] = ( 'Modified cystein',
        self.monoisotopic_data['C'][1] + self.mass_CAM - self.mass_H)
    # NOTE(review): mass_N15 (not mass_N) is subtracted here — presumably the
    # nitrogen lost by this modification is itself 15N-labelled; confirm.
    self.monoisotopic_data['N[115]'] = ( 'Modified asparagine',
        self.monoisotopic_data['N'][1] - self.mass_N15 - self.mass_H + self.mass_O)
    self.monoisotopic_data['M[147]'] = ( 'Modified methionine',
        self.monoisotopic_data['M'][1] + self.mass_O)
    #
    self.monoisotopic_data['c'] = ( 'Modified cystein',
        self.monoisotopic_data['C'][1] + self.mass_CAM - self.mass_H)
    self.monoisotopic_data['k'] = ( 'Lys->Cys substitution and carbamidomethylation (903)',
        self.monoisotopic_data['K'][1] + 31.935685)
    self.monoisotopic_data['m'] = ( 'Modified methionine',
        self.monoisotopic_data['M'][1] + self.mass_O)
    self.monoisotopic_data[ 's' ] = ('Phospho-S',
        self.monoisotopic_data[ 'S' ][1] + self.mass_H1PO3)
    self.monoisotopic_data[ 't' ] = ('Phospho-T',
        self.monoisotopic_data[ 'T' ][1] + self.mass_H1PO3)
    self.monoisotopic_data[ 'y' ] = ('Phospho-Y',
        self.monoisotopic_data[ 'Y' ][1] + self.mass_H1PO3)
    self.residues = self.monoisotopic_data
| [
"you@example.com"
] | you@example.com |
560bbdf2d856311a383f2556ff042c6b24798d81 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/SAF-ENTERPRISE.py | 25f13b95d7e50531041d277cb4e2ad47bc261ce1 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 2,343 | py | #
# PySNMP MIB module SAF-ENTERPRISE (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SAF-ENTERPRISE
# Produced by pysmi-0.3.4 at Wed May 1 14:59:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Auto-generated by pysmi from the SAF-ENTERPRISE ASN.1 MIB (see header).
# NOTE: mibBuilder is not imported here — this module is executed by
# pysnmp's MIB loader, which supplies mibBuilder in the exec namespace.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Integer32, Counter32, Bits, iso, Gauge32, Unsigned32, IpAddress, MibIdentifier, enterprises, TimeTicks, ModuleIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Integer32", "Counter32", "Bits", "iso", "Gauge32", "Unsigned32", "IpAddress", "MibIdentifier", "enterprises", "TimeTicks", "ModuleIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Enterprise OID 1.3.6.1.4.1.7571 (SAF Tehnika) and its subtree.
saf = ModuleIdentity((1, 3, 6, 1, 4, 1, 7571))
if mibBuilder.loadTexts: saf.setLastUpdated('2007040300Z')
if mibBuilder.loadTexts: saf.setOrganization('SAF Tehnika')
if mibBuilder.loadTexts: saf.setContactInfo('SAF Tehnika technical support <techsupport@saftehnika.com>')
if mibBuilder.loadTexts: saf.setDescription('')
tehnika = ObjectIdentity((1, 3, 6, 1, 4, 1, 7571, 100))
if mibBuilder.loadTexts: tehnika.setStatus('current')
if mibBuilder.loadTexts: tehnika.setDescription('Subtree to register SAF tehnika modules')
microwaveRadio = MibIdentifier((1, 3, 6, 1, 4, 1, 7571, 100, 1))
pointToPoint = MibIdentifier((1, 3, 6, 1, 4, 1, 7571, 100, 1, 1))
# Export the symbols so other MIB modules can import them by name.
mibBuilder.exportSymbols("SAF-ENTERPRISE", tehnika=tehnika, PYSNMP_MODULE_ID=saf, microwaveRadio=microwaveRadio, pointToPoint=pointToPoint, saf=saf)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
b18c5a2b2afb8aa641c036874755e5247c1d83d0 | be78d77bea1a5eea2a7f0d4090e1fc138623b79a | /cybox/test/objects/link_test.py | bac34e34bbbca2617a14995b938c2e2f2505741b | [
"BSD-3-Clause"
] | permissive | CybOXProject/python-cybox | 399f73feb6a54778dca9260b1c0340a3895c6369 | 25e6e8b3a6f429f079d3fbd9ace3db9eb3d5ab71 | refs/heads/master | 2020-05-21T19:05:56.725689 | 2020-05-01T13:33:48 | 2020-05-01T13:33:48 | 7,631,169 | 43 | 31 | BSD-3-Clause | 2020-05-01T12:41:03 | 2013-01-15T19:04:47 | Python | UTF-8 | Python | false | false | 980 | py | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from mixbox.vendor.six import u
from cybox.core import Observables
from cybox.objects.link_object import Link
from cybox.objects.uri_object import URI
from cybox.test.objects import ObjectTestCase
class TestLink(ObjectTestCase, unittest.TestCase):
    """Round-trip and serialization tests for the CybOX Link object."""

    object_type = "LinkObjectType"
    klass = Link

    # Full-fidelity fixture; presumably consumed by ObjectTestCase's generic
    # to/from-dict round-trip machinery — confirm in cybox.test.objects.
    _full_dict = {
        'value': u("http://www.example.com"),
        'type': URI.TYPE_URL,
        'url_label': u("Click Here!"),
        'xsi:type': object_type,
    }

    # https://github.com/CybOXProject/python-cybox/issues/202
    def test_correct_namespace_output(self):
        # A bare Link must serialize its Properties element in the "cybox"
        # namespace, not the object-specific "LinkObj" namespace.
        link = Link()
        link.value = u("https://www.example.com")
        xml = Observables(link).to_xml()
        self.assertTrue(b"cybox:Properties" in xml)
        self.assertTrue(b"LinkObj:Properties" not in xml)

if __name__ == "__main__":
    unittest.main()
| [
"gback@mitre.org"
] | gback@mitre.org |
38bbcf1c7fd0aaa56a14c15e19764477da9b8d5b | 8bf0b8107521b03a1ebd4789f19cbb2f47380a88 | /BSmodel_modified/root_finding_algorithms.py | 537e386e647c91de3f9b2f48c0988aace2bf06ef | [] | no_license | dp540788912/OptionGreeks | d49cc59eec70cfb94918952db869a3db730678a3 | 1d004a6f04ac0b8090d188907f4b4128d273123f | refs/heads/master | 2020-07-28T20:57:05.980493 | 2019-11-29T13:12:10 | 2019-11-29T13:12:10 | 209,535,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,584 | py | import numpy as np
import pandas as pd
from datetime import datetime
from datetime import timedelta
import warnings
def bound_adjustment(target_function, lower_bound, upper_bound):
    """Widen [lower_bound, upper_bound] until the function changes sign.

    The interval grows in steps equal to its initial width: the upper end
    is raised while both endpoint values lie below zero's far side of the
    root, the lower end is dropped in the symmetric case. Returns the
    adjusted (lower, upper) pair, or the sentinel (0, 2) when no sign
    change is found within 100 growth steps.
    """
    step = abs(upper_bound - lower_bound)
    for _ in range(100):
        f_lo = target_function(lower_bound)
        f_hi = target_function(upper_bound)
        if f_lo * f_hi <= 0:
            # The interval now brackets (or exactly touches) a root.
            return lower_bound, upper_bound
        if 0 < f_hi <= f_lo or f_lo <= f_hi < 0:
            upper_bound += step
        elif 0 < f_lo < f_hi or f_hi < f_lo < 0:
            lower_bound -= step
    return 0, 2
# 二分法
def bisection_iteration(target_function, lower_bound, upper_bound, max_iteration=100, tol=1e-7):
    """Find a root of target_function on [lower_bound, upper_bound] by bisection.

    When the endpoints do not bracket a sign change, the interval is first
    widened via bound_adjustment(). Returns (root, status): status 0 means
    |f(root)| dropped below tol, status 1 means the iteration budget ran out.
    """
    # The endpoint values must differ in sign; widen the bracket if not.
    if target_function(lower_bound) * target_function(upper_bound) > 0:
        lower_bound, upper_bound = bound_adjustment(target_function, lower_bound, upper_bound)

    steps = 0
    mid = (upper_bound + lower_bound) / 2
    while abs(target_function(mid)) >= tol and steps <= max_iteration:
        f_mid = target_function(mid)
        if abs(f_mid) <= tol:
            return mid, 0
        if abs(target_function(upper_bound)) <= tol:
            return upper_bound, 0
        if abs(target_function(lower_bound)) <= tol:
            return lower_bound, 0
        # Keep the half-interval on which the sign change survives.
        if f_mid * target_function(upper_bound) < 0:
            lower_bound = mid
        else:
            upper_bound = mid
        mid = (upper_bound + lower_bound) / 2
        steps += 1
    return (mid, 1) if steps > max_iteration else (mid, 0)
def newton_iteration(target_function, derivative_function, initial_value, max_iteration=100, tol=1e-7):
    """Newton-Raphson root search with a bisection fallback for stalls.

    Returns (root, status): status 0 = converged (|f| <= tol), 1 = the
    iteration budget was exhausted, 2 = the derivative collapsed by a
    factor of 100+ between iterates (oscillation) and the answer was
    obtained by bisection over the last Newton step instead.
    """
    guess = initial_value
    if abs(target_function(guess)) <= tol:
        return guess, 0
    # A (near-)zero slope at the start would blow up the first Newton
    # step with a division by ~0, so nudge the starting point.
    if abs(derivative_function(guess)) <= 1e-6:
        guess = guess + 1
    step = 0
    while step <= max_iteration and abs(target_function(guess)) > tol:
        candidate = guess - target_function(guess) / derivative_function(guess)
        # The derivative shrinking 100x between successive iterates
        # signals oscillation/divergence -> hand the bracket to bisection.
        if derivative_function(guess) / derivative_function(candidate) >= 100:
            refined, _ = bisection_iteration(target_function, guess, candidate)
            return refined, 2
        guess = candidate
        step += 1
    return (guess, 1) if step > max_iteration else (guess, 0)
# brent's method https://en.wikipedia.org/wiki/Brent%27s_method#Algorithm
def brent_iteration(target_function, x0, x1, max_iteration=100, tol=1e-7):
    """Brent's method: root search combining inverse quadratic interpolation,
    secant steps and bisection (see the Wikipedia algorithm this follows).

    Returns (root, status): status 0 = converged (|f(root)| <= tol),
    status 1 = iteration budget exhausted.
    """
    # The endpoint values must differ in sign; widen the bracket if not.
    if target_function(x0) * target_function(x1) > 0:
        x0, x1 = bound_adjustment(target_function, x0, x1)
    f_x0 = target_function(x0)
    f_x1 = target_function(x1)
    # Ensure x1 is the better guess, i.e. |f(x1)| <= |f(x0)|.
    if abs(f_x0) < abs(f_x1):
        x0, x1 = x1, x0
        f_x0, f_x1 = f_x1, f_x0
    x2, f_x2 = x0, f_x0
    mflag = True
    iteration = 0
    while iteration < max_iteration and abs(target_function(x1)) > tol:
        f_x0 = target_function(x0)
        f_x1 = target_function(x1)
        f_x2 = target_function(x2)
        if f_x0 != f_x2 and f_x1 != f_x2:
            # inverse quadratic interpolation (all three values distinct)
            part1 = (x0 * f_x1 * f_x2) / ((f_x0 - f_x1) * (f_x0 - f_x2))
            part2 = (x1 * f_x0 * f_x2) / ((f_x1 - f_x0) * (f_x1 - f_x2))
            part3 = (x2 * f_x1 * f_x0) / ((f_x2 - f_x0) * (f_x2 - f_x1))
            next_guess = part1 + part2 + part3
        else:
            # linear interpolation (secant step)
            next_guess = x1 - (f_x1 * (x1 - x0)) / (f_x1 - f_x0)
        # If any of the five conditions below holds, the interpolated step is
        # rejected and a plain bisection step is taken instead.
        # NOTE(review): condition1 assumes x0 < x1 ordering; the textbook form
        # tests membership of [(3*x0+x1)/4, x1] regardless of order — confirm.
        condition1 = next_guess < ((3 * x0 + x1)/4) or next_guess > x1
        condition2 = mflag is True and (abs(next_guess - x1) >= abs(x1 - x2)/2)
        # 'd' (the previous value of x2) is only read when mflag is False,
        # which cannot happen on the first iteration — the short-circuit
        # protects the otherwise-unassigned name. Fragile but safe.
        condition3 = mflag is False and (abs(next_guess-x1) >= abs(x2-d)/2)
        condition4 = mflag is True and abs(x1-x2) < tol
        condition5 = mflag is False and abs(x2-d) < tol
        if condition1 or condition2 or condition3 or condition4 or condition5:
            next_guess = (x1 + x0) / 2
            mflag = True
        else:
            mflag = False
        f_next = target_function(next_guess)
        # Shift the history: d <- x2 <- x1.
        d, x2 = x2, x1
        # Keep the sub-interval on which the sign change survives.
        if f_x0 * f_next < 0:
            x1 = next_guess
        else:
            x0 = next_guess
        # Ensure x1 remains the better guess, i.e. |f(x1)| <= |f(x0)|.
        if abs(target_function(x0)) < abs(target_function(x1)):
            x0, x1 = x1, x0
        iteration += 1
    if iteration >= max_iteration:
        status = 1
        return x1, status
    else:
        status = 0
        return x1, status
| [
"deng.pan@ricequant.com"
] | deng.pan@ricequant.com |
8d91b25ef38c6a82575e0ce7a3d3056269efe663 | 399f602db61ce825299abfa9331b9dca2c23ef87 | /AIProject.py | f584a8f7b464c2bea469ef9865317c586d6fe263 | [] | no_license | FuzzyCoder20/Virtual-Drag-and-Drop- | bff71b9f29808e9be894cf1e7afb82effcc2db65 | 790db30d91a4ce0517fddb25adddc51f08898bcb | refs/heads/main | 2023-08-23T06:23:15.625179 | 2021-10-08T07:59:08 | 2021-10-08T07:59:08 | 414,892,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,353 | py |
#pip install mediapipe==0.8.7
import cv2
from cvzone.HandTrackingModule import HandDetector
import cvzone #Version: 1.4.1
import numpy as np
import time
# Open the default webcam and request a 1280x720 frame size
# (property ids 3/4 are OpenCV's frame width/height).
cap = cv2.VideoCapture(0)
cap.set(3,1280)
cap.set(4,720)
# Hand tracker with 0.8 minimum detection confidence.
detector = HandDetector(detectionCon=0.8)
colorR=(255,0,255)  # rectangle fill colour (BGR purple)
cx, cy, w, h = 100, 100, 200, 200  # default rectangle centre and size
pTime=0  # timestamp of the previous frame, for the FPS counter
class DragRect():
    """A draggable axis-aligned rectangle defined by its centre and size."""

    def __init__(self, posCenter, size=None):
        """posCenter is the [x, y] centre; size defaults to [200, 200].

        Fixed: the old mutable default (size=[200, 200]) was shared by every
        instance, so mutating one rectangle's size changed all of them.
        """
        self.posCenter = posCenter
        self.size = [200, 200] if size is None else size

    def update(self, cursor):
        """Move the rectangle to cursor if cursor lies strictly inside it."""
        cx, cy = self.posCenter
        w, h = self.size
        # Cursor must fall within the rectangle's current bounds.
        if cx - w // 2 < cursor[0] < cx + w // 2 and cy - h // 2 < cursor[1] < cy + h // 2:
            self.posCenter = cursor
# Ten rectangles spaced along the diagonal (most start off-screen at 720p).
rectList =[]
for x in range(10):
    rectList.append(DragRect([x*250+150,x*250+150]))

# Main capture loop: read a frame, detect the hand, and drag any rectangle
# the pinched index/middle fingertips are inside. Press 'q' to quit.
while True:
    success, img = cap.read()
    img = cv2.flip(img, 1)  # mirror so on-screen motion matches hand motion
    img = detector.findHands(img)
    lmList, _ = detector.findPosition(img)

    if lmList:
        l,_,_ = detector.findDistance(8 ,12 ,img,draw=False)# 8 is index and 12 is middle finger
        print(l)
        #(if length between the fingers<30 the block can be moved)
        if l<30:
            #x and y of the tip
            cursor=lmList[8] #8 is index fingertip
            #calling the cursor
            for rect in rectList:
                rect.update(cursor)
        # # when finger goes into that region
        #             #x coord            #y coord
        # if cx-w//2 <cursor[0] < cx+w//2 and cy-h//2 <cursor[1]< cy+h//2:
        #     colorR = 0,255,0 #green
        #     cx,cy=cursor
        # else:
        #     colorR=(255,0,255) #purple

    # draw solid Rectangle
    for rect in rectList:
        cx,cy=rect.posCenter
        w,h = rect.size
        cv2.rectangle(img, (cx-w//2,cy-h//2), (cx+w//2,cy+h//2), colorR, cv2.FILLED)
        cvzone.cornerRect(img, (cx-w//2,cy-h//2,w,h ),20,rt=0)

    # Frame Rate (frames per second between consecutive loop passes)
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3,(255, 0, 0), 3)

    #display
    cv2.imshow("Virtual Drag and Drop",img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

# Release the camera and close the window on exit.
cap.release()
cv2.destroyAllWindows()
"noreply@github.com"
] | noreply@github.com |
f0f0fcb3fdee07b36435350efebe87d77cde8406 | 8a9f1128a3ad23b8f6bfda17335f5b5110dbcc4d | /resources/user.py | c275b4d3a13b2ea32129ad684ca38ddb2cc3c937 | [] | no_license | cspineda/stores-rest-api | 7dcc339d68c44f41c5e7596538f7a34f29eb76fc | aa290f2928a527c45f46cc415a6a429f936cec93 | refs/heads/master | 2023-03-25T03:20:41.697525 | 2021-03-06T11:07:11 | 2021-03-06T11:07:11 | 343,125,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | from flask_restful import Resource, reqparse
from werkzeug.security import safe_str_cmp
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
get_jwt_identity,
jwt_required,
get_raw_jwt,
)
from models.user import UserModel
from blocklist import BLOCKLIST
# Shared request parser: both registration and login expect a body with
# non-empty "username" and "password" string fields.
_user_parser = reqparse.RequestParser()
_user_parser.add_argument(
    'username',
    type=str,
    required=True,
    help="this field cannot be left blank!"
)
_user_parser.add_argument(
    'password',
    type=str,
    required=True,
    help="this field cannot be left blank!"
)
class UserRegister(Resource):
    """Endpoint for creating a new user account."""

    def post(self):
        """Create the user, rejecting duplicate usernames with 400."""
        args = _user_parser.parse_args()
        existing = UserModel.find_by_username(args['username'])
        if existing:
            return {"message": "A user with that username already exists"}, 400
        new_user = UserModel(**args)
        new_user.save_to_db()
        return {"message": "User created successfully."}, 201
class User(Resource):
    """Look up or remove a single user by numeric id."""

    @classmethod
    def get(cls, user_id):
        """Return the user's JSON representation, or 404 if unknown."""
        found = UserModel.find_by_id(user_id)
        if not found:
            return {'message': 'User not found'}, 404
        return found.json()

    @classmethod
    def delete(cls, user_id):
        """Delete the user from the database, or 404 if unknown."""
        found = UserModel.find_by_id(user_id)
        if not found:
            return {'message': 'User not found'}, 404
        found.delete_from_db()
        return {'message': 'User deleted'}, 200
class UserLogin(Resource):
    """Issue a fresh access token plus a refresh token for valid credentials."""

    def post(self):
        credentials = _user_parser.parse_args()
        account = UserModel.find_by_username(credentials['username'])
        # Constant-time comparison of the stored and submitted passwords.
        if not account or not safe_str_cmp(account.password, credentials['password']):
            return {'message': 'Invalid credentials'}, 401
        return {
            'access_token': create_access_token(identity=account.id, fresh=True),
            'refresh_token': create_refresh_token(account.id)
        }, 200
class UserLogout(Resource):
    """Revoke the caller's current access token by blocklisting its jti."""

    @jwt_required()
    def post(self):
        jti = get_raw_jwt()['jti']  # jti is "JWT ID", unique id for a JWT
        BLOCKLIST.add(jti)
        # Fixed typo in the user-facing message (was "Succseffully").
        return {'message': 'Successfully logged out.'}, 200
class TokenRefresh(Resource):
    """Exchange a valid refresh token for a new, non-fresh access token."""

    @jwt_required(refresh=True)
    def post(self):
        identity = get_jwt_identity()
        refreshed = create_access_token(identity=identity, fresh=False)
        return {'access_token': refreshed}, 200
"cspineda559@gmail.com"
] | cspineda559@gmail.com |
ad12009b6062e7d7426eb2e5ae598a4e5cf813ed | bab42fa4c574d47f57a6bad221c285676397ecdc | /Week1/Day2_3_FineTuningStringExtraction.py | 452d04047e0121c6f9cca3654cc85f9145cd7ed4 | [] | no_license | neighborpil/PY_WebCrawlingStudy | 7647f85f4610b98ed838fdff1d08d3983ff9b519 | 146f75e2bdb176c920194fdf9ce88b3e76b1ec4a | refs/heads/master | 2020-04-11T05:48:51.389458 | 2018-12-13T09:39:47 | 2018-12-13T09:39:47 | 157,983,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | """
# Fine-Tuning String Extraction
- You can refine(개선하다) the match for re.findall() and seperately determine which
portion of the match is to be extracted by using parentheses(괄호).
# 정규표현식
- \S : whitespace가 아닌 문자
- \s : whitespace 문자
- () : 괄호를 통하여 조건식에는 포함되지만 뽑아내는 부분에서는 제외 할 수 있다
"""
import re
x = 'From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008'
y = re.findall('\S+@\S+', x) # \S+ : run of non-whitespace, @ : at-sign, \S+ : run of non-whitespace
# The match is greedy, so the whole surrounding "word" is captured.
print(y)
print('-------------------')
# The pattern still requires the line to start with 'From ', but the
# parentheses restrict what is extracted to the address alone.
y = re.findall('^From (\S+@\S+)', x)
print(y)
"feelongpark"
] | feelongpark |
5834e0a57800b02d71d53f916eb638ef10d37250 | 1dbbe2ecdcfb39850be6b561c7e6495e9125d638 | /HW2/grammar.py | 66fff60f8dec6efeb26c234c9fda1b60f748339f | [] | no_license | itsmenick212/NLP | bf5ec860c2eae3c09426021545d2650651f3f88a | 47a6c3de4f8b28ec42c44d9ee2c3e1c5b31d5105 | refs/heads/master | 2020-12-12T15:52:50.680005 | 2020-07-26T02:39:28 | 2020-07-26T02:39:28 | 234,165,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,169 | py | """
COMS W4705 - Natural Language Processing - Summer 19
Homework 2 - Parsing with Context Free Grammars
Daniel Bauer
Student: Nick Gupta, UNI: ng2528
"""
import sys
from collections import defaultdict
from math import fsum
class Pcfg(object):
    """Represent a probabilistic context free grammar (PCFG).

    Rules are indexed both by left-hand side and by right-hand side so the
    grammar can drive a CKY-style parser; verify_grammar() checks that the
    grammar is a valid PCFG in Chomsky Normal Form.
    """

    def __init__(self, grammar_file):
        self.rhs_to_rules = defaultdict(list)
        self.lhs_to_rules = defaultdict(list)
        self.startsymbol = None
        self.read_rules(grammar_file)

    def read_rules(self, grammar_file):
        """Read rule lines of the form "LHS -> RHS ; prob" from an iterable.

        Blank lines and lines starting with '#' are skipped. A non-comment
        line without "->" (e.g. "S ; 1.0") declares the start symbol.
        """
        for line in grammar_file:
            line = line.strip()
            if line and not line.startswith("#"):
                if "->" in line:
                    rule = self.parse_rule(line.strip())
                    lhs, rhs, prob = rule
                    self.rhs_to_rules[rhs].append(rule)
                    self.lhs_to_rules[lhs].append(rule)
                else:
                    startsymbol, prob = line.rsplit(";")
                    self.startsymbol = startsymbol.strip()

    def parse_rule(self, rule_s):
        """Split one rule string into an (lhs, rhs_tuple, probability) triple."""
        lhs, other = rule_s.split("->")
        lhs = lhs.strip()
        rhs_s, prob_s = other.rsplit(";", 1)
        prob = float(prob_s)
        rhs = tuple(rhs_s.strip().split())
        return (lhs, rhs, prob)

    def verify_grammar(self):
        """Return True iff the grammar is a valid PCFG in CNF.

        Every rule must be either A -> B C (two upper-case non-terminals)
        or A -> b (one lower-case terminal), and for each left-hand side
        the rule probabilities must sum to 1 (to rounding tolerance).
        """
        # Fixed: the original iterated self.lhs_to_rules.gram_rules(), a
        # nonexistent dict method, which raised AttributeError.
        for lhs, rules in self.lhs_to_rules.items():
            probabilities = []
            for _, rhs, prob in rules:
                if len(rhs) == 2:
                    # Both symbols of a binary rule must be non-terminals.
                    if not (rhs[0].isupper() and rhs[1].isupper()):
                        return False
                elif len(rhs) == 1:
                    # A unary rule must rewrite to a lower-case terminal.
                    if not rhs[0].islower():
                        return False
                else:
                    # CNF allows only arity 1 or 2 on the right-hand side.
                    return False
                probabilities.append(prob)
            # Fixed: the original computed round(fsum(...), 1) but discarded
            # the result, then compared the raw float sum, so tiny float
            # errors could make a valid grammar fail the check.
            if round(fsum(probabilities), 1) != 1.0:
                return False
        return True
if __name__ == "__main__":
    # Usage: python grammar.py <grammar_file>
    # Prints True/False depending on whether the grammar is a valid PCFG in CNF.
    with open(sys.argv[1],'r') as grammar_file:
        grammar = Pcfg(grammar_file)
        result = grammar.verify_grammar()
        print(result)
| [
"noreply@github.com"
] | noreply@github.com |
d7a7bfbba34482fc68919c726646fe8255199e3e | 2681edbd35d0ced02cbb995292929b3f73c8df66 | /Keys and Locks.py | 99925433b091e73417f6ac4f4ec96153092685e3 | [] | no_license | vit-aborigen/CIO_woplugin | 46a658b93e939e406f88f4d317ef15d804e3115e | f252730fd8e2efa25735c8a90732608f58fa765b | refs/heads/master | 2020-12-30T12:55:27.688801 | 2019-02-17T18:03:10 | 2019-02-17T18:03:10 | 91,370,583 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | def cut(plan):
if '#' not in plan: return 0
top, bottom, left, right = 1000, -1, 1000, -1
for idx, line in enumerate(plan[1:].splitlines()):
if '#' in line:
top = min(top, idx)
bottom = max(bottom, idx)
left = min(left, line.find('#'))
right = max(right, line.rfind('#'))
return [line[left:right + 1] for line in plan.split()[top:bottom + 1]]
def keys_and_locks(lock, some_key):
lock_pattern = cut(lock)
key_pattern = cut(some_key)
degree = 0
while degree != 360:
if key_pattern == lock_pattern:
return True
key_pattern = [''.join(value) for value in zip(*key_pattern[::-1])]
degree += 90
return False
if __name__ == '__main__':
    # Demo: this key matches the lock after a 90-degree rotation, so the
    # call prints True.
    print(keys_and_locks('''
0##0
0##0
00#0
00##
00##''',
'''
00000
000##
#####
##000
00000'''))
#These "asserts" using only for self-checking and not necessary for auto-testing
# assert keys_and_locks('''
# 0##0
# 0##0
# 00#0
# 00##
# 00##''',
# '''
# 00000
# 000##
# #####
# ##000
# 00000''') == True
#
# assert keys_and_locks('''
# ###0
# 00#0''',
# '''
# 00000
# 00000
# #0000
# ###00
# 0#000
# 0#000''') == False
#
# assert keys_and_locks('''
# 0##0
# 0#00
# 0000''',
# '''
# ##000
# #0000
# 00000
# 00000
# 00000''') == True
#
# assert keys_and_locks('''
# ###0
# 0#00
# 0000''',
# '''
# ##00
# ##00''') == False
#
# print("Coding complete? Click 'Check' to earn cool rewards!")
| [
"vit.aborigen@gmail.com"
] | vit.aborigen@gmail.com |
bad288ecbd12c0613a3e83bc87a35fb058b0f264 | 521b19d65cd2a12b522e166ea3fff0d90b1171ec | /Notebooks/LPTHW/ex13_dul.py | 3299138be6e6149993678e975c6d108e95b1bb44 | [] | no_license | sescoto/intro_ds_sat_feb_2018 | e121aae624bccfbc5c17061f52657e0e5d425813 | 32d4e43bf6a653aa3b54c2f32ff4ef589701a1c8 | refs/heads/master | 2021-08-22T03:17:53.389013 | 2020-04-25T06:53:51 | 2020-04-25T06:53:51 | 171,802,043 | 0 | 0 | null | 2019-02-21T04:48:34 | 2019-02-21T04:48:33 | null | UTF-8 | Python | false | false | 228 | py | script, first, second, third = "ex13_dul.py", "hola","bola","super"
print("The script is called:", script)
print("Your first variable is:", first)
print("Your second variable is:", second)
print("Your third variable is:", third) | [
"jpdebotton@gmail.com"
] | jpdebotton@gmail.com |
1e8fed92b77867c5a707bc1e8cdaed3ff6f5566b | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/20ed819acd6f85b1facda3b799d3c24b3ada7ad6-<run>-bug.py | 9d67f4caf81ac18c3daab8feb6cc8736cb5c336a | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | def run(self, terms, variables, **kwargs):
if (not CREDSTASH_INSTALLED):
raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
ret = []
for term in terms:
try:
version = kwargs.pop('version', '')
region = kwargs.pop('region', None)
table = kwargs.pop('table', 'credential-store')
profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
kwargs_pass = {
'profile_name': profile_name,
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'aws_session_token': aws_session_token,
}
val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
except credstash.ItemNotFound:
raise AnsibleError('Key {0} not found'.format(term))
except Exception as e:
raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e.message))
ret.append(val)
return ret | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
a0593e35dc2c449e0f9aa7678f6ceccdf3b5f2db | e8a45018a6a906a88418d7aaf2a28507125c673a | /test_filter.py | c3cb804e80d2e907e6189387cb7ed277c9d87193 | [] | no_license | shtormnick/test_saucedemo | e0ed742f2fe0b596e91384581306e7f2d7600ed8 | 911e3ded07d14b483a5e416e4156d4b6433f4e7c | refs/heads/master | 2023-06-26T20:47:57.614274 | 2021-08-02T10:37:12 | 2021-08-02T10:37:12 | 389,844,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | import pytest
from pages.product_page import ProductPage
@pytest.mark.main_test
class TestLoginFormProductPage():
    """UI tests for the saucedemo inventory page: title, add-to-cart,
    and add-to-cart after price sorting."""

    # Single source of truth for the page under test (same URL the four
    # tests previously repeated inline).
    LINK = "https://www.saucedemo.com/inventory.html"

    @pytest.mark.checker
    def test_guest_can_see_title(self, browser, setup):
        self.page = ProductPage(browser, self.LINK)
        self.page.open()
        self.page.go_to_cart_page()
        self.page.should_be_correct_title()

    def test_guest_can_add_item_to_cart(self, browser, setup):
        self.page = ProductPage(browser, self.LINK)
        self.page.open()
        self.page.add_to_cart_one_item()

    def test_guest_add_to_cart_filtered_by_low_price_items(self, browser, setup):
        self.page = ProductPage(browser, self.LINK)
        self.page.open()
        self.page.filtered_items_by_low_price()

    def test_guest_add_to_cart_filtered_by_high_price_items(self, browser, setup):
        self.page = ProductPage(browser, self.LINK)
        self.page.open()
        self.page.filtered_items_by_high_price()
"foxred324@gmail.com"
] | foxred324@gmail.com |
09d3b6f3dc518b71e5ac7013db8d512620bbe1a1 | 7bb64fb43c503e8f2ecf0f02619b539af3401d39 | /test_files/Zr_Vasprun/test_Vasprun.py | adc1d5cc9df5295240f97a9d3801a1ca67e323a4 | [
"MIT"
] | permissive | montoyjh/pymatgen | 13c3179cd4cf5ff521e8380e480b23d35709c379 | 62ecae1c7382a41861e3a5d9b9c8dd1207472409 | refs/heads/master | 2023-06-09T15:02:15.309354 | 2019-04-03T14:39:33 | 2019-04-03T14:39:33 | 42,539,977 | 2 | 2 | MIT | 2019-06-21T17:15:55 | 2015-09-15T18:56:26 | Propeller Spin | UTF-8 | Python | false | false | 99 | py | #!/usr/bin/env python
# Smoke-test script: parse the VASP run output in the current directory.
from pymatgen.io.vasp.outputs import Vasprun
# Vasprun reads and parses ./vasprun.xml at construction time; a parse
# failure would raise here, which is the whole point of this test file.
run = Vasprun("./vasprun.xml")
| [
"shyuep@gmail.com"
] | shyuep@gmail.com |
f4190d398a0299cf47843f1003efe2f9682f091e | 2464a7d05940a2f39231354397cb06f7b2861e0e | /country_errors.py | 3a09672e9ca5469ff078604e404d5a45dfadf1af | [] | no_license | imbiginjapan/python_crash_course | c1226aeafe296d6b78d111f2f6d934579c4676af | 422d3bb32e47af21c5f69ba517fe1da9be2d063d | refs/heads/master | 2021-05-14T03:09:19.772417 | 2018-01-08T01:28:00 | 2018-01-08T01:28:00 | 116,611,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | import json
import pygal
from pygal.style import LightColorizedStyle, RotateStyle
from country_codes import get_country_code
# load the data into a list.
filename = '/media/jeremy/ExtraDrive1/python_cc/pcc/chapter_16/population_data.json'
with open(filename) as f:
    pop_data = json.load(f)
# Build a dictionary of population Data
# NOTE(review): cc_populations is declared but never populated, and
# `population` is computed but unused -- this script only reports the
# country names that fail the code lookup. Confirm that is intended.
cc_populations = {}
for pop_dict in pop_data:
    # only consider the 2010 records
    if pop_dict['Year'] == '2010':
        country_name = pop_dict['Country Name']
        population = int(float(pop_dict['Value']))
        code = get_country_code(country_name)
        # print every country whose name could not be mapped to a 2-letter code
        if not code:
            print(country_name)
| [
"32070505+imbiginjapan@users.noreply.github.com"
] | 32070505+imbiginjapan@users.noreply.github.com |
ab91db034cc7f44e288520021e2620e5204b634a | 4a180430fc0dc4dd8b5948728bc5b50ac77d1662 | /ghcnFTP.py | 090c0840467038a6730b36248c23156b11cf7176 | [] | no_license | johnmchristensen/NOAA.Python | 56a0faf01d1de1bd759a2d519b808a8f6ee0ba9c | 855f5a339bef57c8b9ada6e00b9ea3e1063ac888 | refs/heads/master | 2023-03-19T02:05:09.939088 | 2021-03-14T22:01:31 | 2021-03-14T22:01:31 | 346,199,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | from ftplib import FTP
from stationInfo import StationInfo
from stationData import Station
from stationData import MonthData
def getAllStationInfo():
    """Download and parse NOAA's GHCN-Daily station inventory.

    Connects anonymously to the NOAA FTP server, streams the fixed-width
    ghcnd-stations.txt file line by line, and converts each line into a
    StationInfo record.

    :return: list of StationInfo objects, one per inventory line.
    """
    # Fixed-width column offsets into one ghcnd-stations.txt line.
    class Indexes:
        LATITUDE_INDEX = 11
        LONGITUDE_INDEX = 21
        ELEVATION_INDEX = 31
        STATE_INDEX = 38
        NAME_INDEX = 41
        GSN_FLAG_INDEX = 72
        HCN_CRN_INDEX = 76
        WMO_INDEX = 80
    # Create a connection to the NOAA site and download the stations file.
    ftp = FTP("ftp.ncdc.noaa.gov")
    ftp.login()
    ftp.cwd("pub/data/ghcn/daily")
    stationInfos = []
    def parseLine(line):
        # Slice the fixed-width fields out of one inventory line.
        # NOTE(review): GHCN station IDs occupy the first 11 columns, but
        # this slice keeps only 10 characters -- confirm against the
        # ghcnd-stations.txt format description in the GHCN-Daily readme.
        id = line[0: Indexes.LATITUDE_INDEX - 1]
        latitude = float(line[Indexes.LATITUDE_INDEX: Indexes.LONGITUDE_INDEX - 1])
        longitude = float(line[Indexes.LONGITUDE_INDEX: Indexes.ELEVATION_INDEX - 1])
        elevation = float(line[Indexes.ELEVATION_INDEX: Indexes.STATE_INDEX - 1])
        state = line[Indexes.STATE_INDEX: Indexes.NAME_INDEX - 1]
        name = line[Indexes.NAME_INDEX: Indexes.GSN_FLAG_INDEX - 1]
        # the raw line is kept on the record for later reference
        return StationInfo(id, latitude, longitude, elevation, state, name, line)
    # retrlines streams the remote file line by line through the callback
    ftp.retrlines("RETR ghcnd-stations.txt", lambda l: stationInfos.append(parseLine(l)))
    return stationInfos
def getStation(stationId):
    """Download and parse the full daily record (.dly file) for one GHCN station.

    Connects anonymously to the NOAA FTP server, streams the station's
    fixed-width .dly file and adds one MonthData entry per (element, month)
    line to a fresh Station object.

    :param stationId: GHCN station identifier (the .dly file basename).
    :return: Station populated with the parsed monthly data.
    """
    # Fixed-width column offsets of the GHCN-Daily .dly record format.
    class Indexes:
        YEAR = 11
        MONTH = 15
        ELEMENT_NAME = 17
        START_DATA = 21
        DATA_SIZE = 8      # per-day field: 5-char value + 3 flag characters
        VALUE_LENGTH = 5
    ftp = FTP("ftp.ncdc.noaa.gov")
    ftp.login()
    ftp.cwd("pub/data/ghcn/daily/all")
    station = Station(stationId)
    def parseData(line):
        year = int(line[Indexes.YEAR: Indexes.MONTH])
        month = int(line[Indexes.MONTH: Indexes.ELEMENT_NAME])
        element = line[Indexes.ELEMENT_NAME: Indexes.START_DATA]
        # Bug fix: DATA_SIZE and VALUE_LENGTH were referenced unqualified,
        # which raised NameError as soon as a line was parsed; they live on
        # the Indexes namespace class.
        data = [int(line[i: i + Indexes.DATA_SIZE][0: Indexes.VALUE_LENGTH])
                for i in range(Indexes.START_DATA, len(line), Indexes.DATA_SIZE)]
        station.addData(element, MonthData(year, month, data, line))
    # the lambda wrapper was redundant; pass the callback directly
    ftp.retrlines(f"RETR {stationId}.dly", parseData)
    return station
"john.m.christensen@icloud.com"
] | john.m.christensen@icloud.com |
a1014d52d825b419662540a87f34f68956584d91 | 3b56942de9b298d048f73a813a54d496811213e9 | /sdk/python/pulumi_kubernetes/admissionregistration/v1/MutatingWebhookConfiguration.py | d1df52d5d2309fba31ef336a575778b53d8c39f3 | [
"Apache-2.0"
] | permissive | timo955/pulumi-kubernetes | 6b90a180b2ea3e1c48ecfe105bdaf79654edc670 | 155b56cd0718c05fe46e50d5fdbcc2f9a27d9f8f | refs/heads/master | 2023-03-25T07:45:12.781830 | 2021-03-17T20:41:06 | 2021-03-17T20:41:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,634 | py | # coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ... import meta as _meta
from ._inputs import *
__all__ = ['MutatingWebhookConfiguration']
class MutatingWebhookConfiguration(pulumi.CustomResource):
    """Pulumi resource wrapping the Kubernetes admissionregistration.k8s.io/v1
    MutatingWebhookConfiguration object.

    NOTE: this file is generated by pulumigen (see the file header); prefer
    regenerating over editing by hand.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 api_version: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
                 webhooks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MutatingWebhookArgs']]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        :param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MutatingWebhookArgs']]]] webhooks: Webhooks is a list of webhooks and the affected resources and operations.
        """
        # Legacy shims: __name__/__opts__ predate resource_name/opts and are
        # kept for backward compatibility with old call sites.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is set when rehydrating an existing resource (see get());
        # only a brand-new resource may carry input properties.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            # apiVersion/kind are fixed by this resource type
            __props__['api_version'] = 'admissionregistration.k8s.io/v1'
            __props__['kind'] = 'MutatingWebhookConfiguration'
            __props__['metadata'] = metadata
            __props__['webhooks'] = webhooks
        # Alias the old v1beta1 type token so existing stacks migrate cleanly.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:admissionregistration.k8s.io/v1beta1:MutatingWebhookConfiguration")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(MutatingWebhookConfiguration, __self__).__init__(
            'kubernetes:admissionregistration.k8s.io/v1:MutatingWebhookConfiguration',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'MutatingWebhookConfiguration':
        """
        Get an existing MutatingWebhookConfiguration resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Outputs are resolved from the provider state, so every prop starts None.
        __props__ = dict()
        __props__["api_version"] = None
        __props__["kind"] = None
        __props__["metadata"] = None
        __props__["webhooks"] = None
        return MutatingWebhookConfiguration(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> pulumi.Output[Optional[str]]:
        """
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return pulumi.get(self, "api_version")
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[Optional[str]]:
        """
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def metadata(self) -> pulumi.Output[Optional['_meta.v1.outputs.ObjectMeta']]:
        """
        Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
        """
        return pulumi.get(self, "metadata")
    @property
    @pulumi.getter
    def webhooks(self) -> pulumi.Output[Optional[Sequence['outputs.MutatingWebhook']]]:
        """
        Webhooks is a list of webhooks and the affected resources and operations.
        """
        return pulumi.get(self, "webhooks")
    # Map provider-side camelCase property names to Python snake_case and back.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"noreply@github.com"
] | noreply@github.com |
c21de1b9fb27178980aa03e7d7e2310507622237 | 9b30a517b5b74b58e5ffd1e63378b79d77fb6e32 | /Python/ER para AFD mínimo/python/state implementation/ER_AFD.py | db3025e77656020b48aef2505fa287004b7e7613 | [] | no_license | Daniel-Aragao/Compiladores | 067d044984473cfdfd84a942b0a8c6fbeaf8d353 | 5720bd0689a6144f81d44fbd2f8f300f70eab039 | refs/heads/master | 2020-03-10T01:41:42.721942 | 2018-04-07T16:57:21 | 2018-04-07T16:57:21 | 129,116,573 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | from Lexical import LexicalAnalyzer as LxA
from ER_AFNE import ERtoAFNE
from AFNE_AFD import AFNEtoAFD
# entry= 'e(e|d)*' falta implementar os ()
# entry = 'e|d'
# entry = 'e*'
entry = '78d2'
tokens = LxA.analyzer(entry)
root = ERtoAFNE().convert(tokens)
matrix = AFNEtoAFD().convert(root)
print(root) | [
"thiago.maia971@gmail.com"
] | thiago.maia971@gmail.com |
52ae3a1a8d1d8f8f7503b9181f015b165f68bf00 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_scrubs.py | 6efbee2e9b847a91a88e3d43d8c1023f95e3fd07 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.nouns._scrub import _SCRUB
# class header
class _SCRUBS(_SCRUB):
    """Wordbase noun entry for "scrubs", the plural form of "scrub"."""

    def __init__(self):
        # Initialise the singular base entry, then set the fields that
        # identify this plural variant.
        _SCRUB.__init__(self)
        self.name = "SCRUBS"
        self.specie = 'nouns'
        self.basic = "scrub"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
07ef9b577bbbf09e05099ffba90fc0e592f3e466 | c0e45bc202a50f4b0dcc645b5f805596d10958b8 | /datasets/kitti_raw_monosf.py | 606ea8a37d8df1728bc577c923ea30545dcbea4d | [
"Apache-2.0"
] | permissive | visinf/self-mono-sf | 532d3a09ebdce9abb4177517ba521f5f2dc66687 | eec356d95038da49e6705194e6dc0780b750f2b0 | refs/heads/master | 2022-05-17T07:47:37.688855 | 2022-04-18T07:45:27 | 2022-04-18T07:45:27 | 246,596,511 | 240 | 55 | Apache-2.0 | 2022-04-18T07:45:28 | 2020-03-11T14:40:46 | Python | UTF-8 | Python | false | false | 9,440 | py | from __future__ import absolute_import, division, print_function
import os.path
import torch
import torch.utils.data as data
import numpy as np
from torchvision import transforms as vision_transforms
from .common import read_image_as_byte, read_calib_into_dict
from .common import kitti_crop_image_list, kitti_adjust_intrinsic
class KITTI_Raw(data.Dataset):
    """Two-frame stereo KITTI raw dataset.

    Loads consecutive stereo pairs (left/right images at t and t+1) listed in
    an index file, together with per-date camera intrinsics, optionally
    applying a random crop and a random horizontal flip (which also swaps the
    left/right views to keep the stereo geometry consistent).
    """
    def __init__(self,
                 args,
                 images_root=None,
                 flip_augmentations=True,
                 preprocessing_crop=True,
                 crop_size=[370, 1224],
                 num_examples=-1,
                 index_file=None):
        """
        :param args: experiment arguments namespace, stored untouched.
        :param images_root: root directory of the extracted KITTI raw data.
        :param flip_augmentations: mirror + left/right swap with prob. 0.5.
        :param preprocessing_crop: take a random crop of ``crop_size`` and
            adjust the intrinsics accordingly.
        :param crop_size: [height, width] of the random crop.
        :param num_examples: keep only the first N examples when > 0.
        :param index_file: text file of "<scene> <frame_idx>" lines, relative
            to this module's directory.
        """
        self._args = args
        self._seq_len = 1
        self._flip_augmentations = flip_augmentations
        self._preprocessing_crop = preprocessing_crop
        self._crop_size = crop_size
        path_dir = os.path.dirname(os.path.realpath(__file__))
        path_index_file = os.path.join(path_dir, index_file)
        if not os.path.exists(path_index_file):
            # Bug fix: the path used to be passed as a second exception
            # argument instead of being %-formatted into the message.
            raise ValueError("Index File '%s' not found!" % path_index_file)
        # Bug fix: the file handle was previously opened and never closed;
        # read the index inside a context manager instead.
        with open(path_index_file, 'r') as index_file_handle:
            filename_list = [line.rstrip().split(' ') for line in index_file_handle.readlines()]
        ## loading image -----------------------------------
        if not os.path.isdir(images_root):
            # Bug fix: the '%s' placeholder was never filled in.
            raise ValueError("Image directory '%s' not found!" % images_root)
        self._image_list = []
        view1 = 'image_02/data'
        view2 = 'image_03/data'
        ext = '.jpg'
        for item in filename_list:
            date = item[0][:10]
            scene = item[0]
            idx_src = item[1]
            # target frame is the next frame of the same sequence
            idx_tgt = '%.10d' % (int(idx_src) + 1)
            name_l1 = os.path.join(images_root, date, scene, view1, idx_src) + ext
            name_l2 = os.path.join(images_root, date, scene, view1, idx_tgt) + ext
            name_r1 = os.path.join(images_root, date, scene, view2, idx_src) + ext
            name_r2 = os.path.join(images_root, date, scene, view2, idx_tgt) + ext
            # keep the sample only if all four images of the 2-frame stereo pair exist
            if os.path.isfile(name_l1) and os.path.isfile(name_l2) and os.path.isfile(name_r1) and os.path.isfile(name_r2):
                self._image_list.append([name_l1, name_l2, name_r1, name_r2])
        if num_examples > 0:
            self._image_list = self._image_list[:num_examples]
        self._size = len(self._image_list)
        ## loading calibration matrix
        self.intrinsic_dict_l = {}
        self.intrinsic_dict_r = {}
        self.intrinsic_dict_l, self.intrinsic_dict_r = read_calib_into_dict(path_dir)
        self._to_tensor = vision_transforms.Compose([
            vision_transforms.ToPILImage(),
            vision_transforms.transforms.ToTensor()
        ])

    def __getitem__(self, index):
        """Return one example dict with the four images, intrinsics and metadata."""
        index = index % self._size
        # read images and flow
        # im_l1, im_l2, im_r1, im_r2
        img_list_np = [read_image_as_byte(img) for img in self._image_list[index]]
        # example filename
        im_l1_filename = self._image_list[index][0]
        basename = os.path.basename(im_l1_filename)[:6]
        dirname = os.path.dirname(im_l1_filename)[-51:]
        datename = dirname[:10]
        # intrinsics are selected per recording date
        k_l1 = torch.from_numpy(self.intrinsic_dict_l[datename]).float()
        k_r1 = torch.from_numpy(self.intrinsic_dict_r[datename]).float()
        # input size
        h_orig, w_orig, _ = img_list_np[0].shape
        input_im_size = torch.from_numpy(np.array([h_orig, w_orig])).float()
        # cropping
        if self._preprocessing_crop:
            # get random starting positions for the crop window
            crop_height = self._crop_size[0]
            crop_width = self._crop_size[1]
            x = np.random.uniform(0, w_orig - crop_width + 1)
            y = np.random.uniform(0, h_orig - crop_height + 1)
            crop_info = [int(x), int(y), int(x + crop_width), int(y + crop_height)]
            # cropping images and adjust intrinsic accordingly
            img_list_np = kitti_crop_image_list(img_list_np, crop_info)
            k_l1, k_r1 = kitti_adjust_intrinsic(k_l1, k_r1, crop_info)
        # to tensors
        img_list_tensor = [self._to_tensor(img) for img in img_list_np]
        im_l1 = img_list_tensor[0]
        im_l2 = img_list_tensor[1]
        im_r1 = img_list_tensor[2]
        im_r2 = img_list_tensor[3]
        common_dict = {
            "index": index,
            "basename": basename,
            "datename": datename,
            "input_size": input_im_size
        }
        # random flip: mirror each image and swap left/right views so the
        # flipped sample is still a geometrically valid stereo pair; the
        # principal point's x coordinate is mirrored as well.
        if self._flip_augmentations is True and torch.rand(1) > 0.5:
            _, _, ww = im_l1.size()
            im_l1_flip = torch.flip(im_l1, dims=[2])
            im_l2_flip = torch.flip(im_l2, dims=[2])
            im_r1_flip = torch.flip(im_r1, dims=[2])
            im_r2_flip = torch.flip(im_r2, dims=[2])
            k_l1[0, 2] = ww - k_l1[0, 2]
            k_r1[0, 2] = ww - k_r1[0, 2]
            example_dict = {
                "input_l1": im_r1_flip,
                "input_r1": im_l1_flip,
                "input_l2": im_r2_flip,
                "input_r2": im_l2_flip,
                "input_k_l1": k_r1,
                "input_k_r1": k_l1,
                "input_k_l2": k_r1,
                "input_k_r2": k_l1,
            }
            example_dict.update(common_dict)
        else:
            example_dict = {
                "input_l1": im_l1,
                "input_r1": im_r1,
                "input_l2": im_l2,
                "input_r2": im_r2,
                "input_k_l1": k_l1,
                "input_k_r1": k_r1,
                "input_k_l2": k_l1,
                "input_k_r2": k_r1,
            }
            example_dict.update(common_dict)
        return example_dict

    def __len__(self):
        """Number of usable examples after filtering and truncation."""
        return self._size
class KITTI_Raw_KittiSplit_Train(KITTI_Raw):
    """KITTI raw data restricted to the KITTI-split training scenes
    (index_txt/kitti_train.txt); augmentation and cropping enabled by default."""
    def __init__(self,
                 args,
                 root,
                 flip_augmentations=True,
                 preprocessing_crop=True,
                 crop_size=[370, 1224],
                 num_examples=-1):
        super(KITTI_Raw_KittiSplit_Train, self).__init__(
            args,
            images_root=root,
            flip_augmentations=flip_augmentations,
            preprocessing_crop=preprocessing_crop,
            crop_size=crop_size,
            num_examples=num_examples,
            index_file="index_txt/kitti_train.txt")
class KITTI_Raw_KittiSplit_Valid(KITTI_Raw):
    """KITTI raw data restricted to the KITTI-split validation scenes
    (index_txt/kitti_valid.txt); augmentation and cropping off by default."""
    def __init__(self,
                 args,
                 root,
                 flip_augmentations=False,
                 preprocessing_crop=False,
                 crop_size=[370, 1224],
                 num_examples=-1):
        super(KITTI_Raw_KittiSplit_Valid, self).__init__(
            args,
            images_root=root,
            flip_augmentations=flip_augmentations,
            preprocessing_crop=preprocessing_crop,
            crop_size=crop_size,
            num_examples=num_examples,
            index_file="index_txt/kitti_valid.txt")
class KITTI_Raw_KittiSplit_Full(KITTI_Raw):
    """KITTI raw data over the full KITTI split (index_txt/kitti_full.txt);
    augmentation and cropping enabled by default."""
    def __init__(self,
                 args,
                 root,
                 flip_augmentations=True,
                 preprocessing_crop=True,
                 crop_size=[370, 1224],
                 num_examples=-1):
        super(KITTI_Raw_KittiSplit_Full, self).__init__(
            args,
            images_root=root,
            flip_augmentations=flip_augmentations,
            preprocessing_crop=preprocessing_crop,
            crop_size=crop_size,
            num_examples=num_examples,
            index_file="index_txt/kitti_full.txt")
class KITTI_Raw_EigenSplit_Train(KITTI_Raw):
    """KITTI raw data restricted to the Eigen-split training scenes
    (index_txt/eigen_train.txt); augmentation and cropping enabled by default."""
    def __init__(self,
                 args,
                 root,
                 flip_augmentations=True,
                 preprocessing_crop=True,
                 crop_size=[370, 1224],
                 num_examples=-1):
        super(KITTI_Raw_EigenSplit_Train, self).__init__(
            args,
            images_root=root,
            flip_augmentations=flip_augmentations,
            preprocessing_crop=preprocessing_crop,
            crop_size=crop_size,
            num_examples=num_examples,
            index_file="index_txt/eigen_train.txt")
class KITTI_Raw_EigenSplit_Valid(KITTI_Raw):
    """KITTI raw data restricted to the Eigen-split validation scenes
    (index_txt/eigen_valid.txt); augmentation and cropping off by default."""
    def __init__(self,
                 args,
                 root,
                 flip_augmentations=False,
                 preprocessing_crop=False,
                 crop_size=[370, 1224],
                 num_examples=-1):
        super(KITTI_Raw_EigenSplit_Valid, self).__init__(
            args,
            images_root=root,
            flip_augmentations=flip_augmentations,
            preprocessing_crop=preprocessing_crop,
            crop_size=crop_size,
            num_examples=num_examples,
            index_file="index_txt/eigen_valid.txt")
class KITTI_Raw_EigenSplit_Full(KITTI_Raw):
    """KITTI raw data over the full Eigen split (index_txt/eigen_full.txt);
    augmentation and cropping enabled by default."""
    def __init__(self,
                 args,
                 root,
                 flip_augmentations=True,
                 preprocessing_crop=True,
                 crop_size=[370, 1224],
                 num_examples=-1):
        super(KITTI_Raw_EigenSplit_Full, self).__init__(
            args,
            images_root=root,
            flip_augmentations=flip_augmentations,
            preprocessing_crop=preprocessing_crop,
            crop_size=crop_size,
            num_examples=num_examples,
            index_file="index_txt/eigen_full.txt")
"hurjunhwa@gmail.com"
] | hurjunhwa@gmail.com |
00884fcc431f3b0fc1c306f662977b63ebc1c16c | 743da4642ac376e5c4e1a3b63c079533a5e56587 | /build/lib.win-amd64-3.6/fairseq/modules/quantization/pq/modules/__init__.py | b6881e26bb167f75f55dacfac72238979dd74f80 | [
"MIT"
] | permissive | tmtmaj/Exploiting-PrLM-for-NLG-tasks | cdae1b6e451b594b11d8ecef3c1cd4e12fe51c9b | e8752593d3ee881cf9c0fb5ed26d26fcb02e6dd5 | refs/heads/main | 2023-06-16T08:26:32.560746 | 2021-07-14T17:50:19 | 2021-07-14T17:50:19 | 371,899,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .qconv import PQConv2d # NOQA
from .qlinear import PQLinear # NOQA
from .qemb import PQEmbedding # NOQA
| [
"qkrwjdgur09@naver.com"
] | qkrwjdgur09@naver.com |
b4936261a2e0ae4ad21557800d4eb153185b267b | 94a35fa71116c33cbad52522415ffa394b2e98fb | /vanda_jobs/scripts/utils/extract.py | 283b04a009d3e35b1c33b425f57a3e0609234176 | [
"MIT"
] | permissive | TheScienceMuseum/heritage-connector | c6caafe1126737c0bd7e98ed1facf562bd69dc27 | 77c994d7ba7253bd81140a2202bf8b03b6082e43 | refs/heads/master | 2023-02-19T08:40:02.619873 | 2022-10-10T13:37:04 | 2022-10-10T13:37:04 | 248,162,073 | 16 | 3 | MIT | 2023-02-14T22:20:49 | 2020-03-18T07:08:40 | Jupyter Notebook | UTF-8 | Python | false | false | 1,304 | py | import bz2
import gzip
import fnmatch
import json
import os
def bz2Reader(folder_path):
    """Walk *folder_path* and lazily yield JSON records from every *.jsonl.bz2 file.

    :param folder_path: directory tree to scan recursively.
    :yield: one decoded JSON object per line of each matching file.
    """
    print(f"checking {folder_path}, {os.path.isdir(folder_path)}")
    for dirpath, _dirnames, files in os.walk(folder_path):
        count = 0
        for f in fnmatch.filter(files, "*.jsonl.bz2"):
            fileName = dirpath + "/" + f
            print(fileName)
            with bz2.open(fileName, "rb") as bz_file:
                # Bug fix: on an empty archive the loop never binds `line`,
                # so the finally-block raised NameError; pre-bind it.
                line = None
                try:
                    for line in bz_file:
                        count += 1
                        yield json.loads(line)
                finally:
                    # diagnostic note; also runs on normal completion
                    print(
                        f'Stopped at {count} iterations and line {line}')
def gzipReader(folder_path):
    """Walk *folder_path* and lazily yield JSON records from every *.jsonl.gz file.

    :param folder_path: directory tree to scan recursively.
    :yield: one decoded JSON object per line of each matching file.
    """
    print(f"checking {folder_path}, {os.path.isdir(folder_path)}")
    for dirpath, _dirnames, files in os.walk(folder_path):
        count = 0
        for f in fnmatch.filter(files, "*.jsonl.gz"):
            fileName = dirpath + "/" + f
            print(fileName)
            with gzip.open(fileName, "rb") as gz_file:
                # Bug fix: on an empty archive the loop never binds `line`,
                # so the finally-block raised NameError; pre-bind it.
                line = None
                try:
                    for line in gz_file:
                        count += 1
                        yield json.loads(line)
                finally:
                    # diagnostic note; also runs on normal completion
                    print(
                        f'Stopped at {count} iterations and line {line}')
| [
"warriorwomenblog@gmail.com"
] | warriorwomenblog@gmail.com |
0b5be72076ca98cc30b6d85fe652a33a31717b3d | cec83c5873317bf8f7e9f963115278c874430cb2 | /c09/enter_text_count_words.py | 66792654428d85e6ce3aa19f5c0da4b930073e67 | [] | no_license | qualityland/Python_for_Everybody | 9ab7bb5094158e9eed63b69cb1bc2bd7f34a603f | 280265ddde40787190a503d880b30dad3ea5d141 | refs/heads/master | 2023-08-19T08:46:37.272285 | 2021-10-14T14:50:30 | 2021-10-14T14:50:30 | 409,518,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | text = input('Enter text: ')
# Split the entered text on whitespace and echo the raw word list.
words = text.split()
print(words)
# Tally how often each word occurs using a plain dict.
d = dict()
for word in words :
    # .get() supplies 0 for a word seen for the first time
    d[word] = d.get(word, 0) + 1
print(d)
| [
"stefan.joachim.schmidt@gmail.com"
] | stefan.joachim.schmidt@gmail.com |
40fcdbd078d6c61bc662a0c83e634ccf29e1c9d0 | 32a921f29eb9e2478e1482ba5df26877b6c004db | /myapp/views.py | b7294f518de61601507d7d94993073ebbacfd0ca | [] | no_license | GowriShanker98/P7 | 429f009c892bb07c5a1bd430691c3cce99198e7e | 8705bfff8a59cb07400e9332083bdf5b0a0444bd | refs/heads/master | 2022-11-22T02:31:48.645501 | 2020-07-27T15:38:15 | 2020-07-27T15:38:15 | 282,940,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | from django.shortcuts import render
from django.http import HttpResponse
from math import factorial
# Create your views here.
def index(request):
    """Return a static HTML greeting for the app root."""
    greeting = "<h1>welcome to views of an app</h1>"
    return HttpResponse(greeting)
def home(request):
    """Render the app home template with a fixed context."""
    context = {'name': "CHITI"}
    return render(request, "myapp/home.html", context)
def fact(request, n):
    """Compute n! and return it wrapped in a small HTML fragment."""
    value = factorial(int(n))
    return HttpResponse("<h4>factorial is {}</h4>".format(value))
def child(request):
    """Render the standalone child template."""
    template_name = "child.html"
    return render(request, template_name)
| [
"gowrishankarkoothappan@gmail.com"
] | gowrishankarkoothappan@gmail.com |
71a4f664ac45fcad7b14d47b66424851e5d160ea | 4a06d9c889b5db2b7f9cbce0c39dedfce27876c4 | /Project3_fast.py | 9f4c92e0e604bb16a02fc9f237ec2d1ca281de91 | [] | no_license | Oleksandr-Olefirenko/AlgorithmicThinking | f75fe8a98877deb4ed724cdb680f893fe5830f4b | 3c5986d6003ee1a00e05e736f81540001480469e | refs/heads/master | 2021-05-29T22:03:01.457838 | 2015-08-04T19:08:25 | 2015-08-04T19:08:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,994 | py | """
Student template code for Project 3
Student will implement five functions:
slow_closest_pair(cluster_list)
fast_closest_pair(cluster_list)
closest_pair_strip(cluster_list, horiz_center, half_width)
hierarchical_clustering(cluster_list, num_clusters)
kmeans_clustering(cluster_list, num_clusters, num_iterations)
where cluster_list is a 2D list of clusters in the plane
"""
import math
import alg_cluster
######################################################
# Code for closest pairs of clusters
def pair_distance(cluster_list, idx1, idx2):
    """
    Helper function that computes Euclidean distance between two clusters in a list
    Input: cluster_list is list of clusters, idx1 and idx2 are integer indices for two clusters
    Output: tuple (dist, idx1, idx2) where dist is distance between
    cluster_list[idx1] and cluster_list[idx2], with the indices in ascending order
    """
    dist = cluster_list[idx1].distance(cluster_list[idx2])
    low_idx = min(idx1, idx2)
    high_idx = max(idx1, idx2)
    return (dist, low_idx, high_idx)
def slow_closest_pair(cluster_list, l_b, r_b):
    """
    Compute the distance between the closest pair of clusters in a list (slow)
    Brute-force O(n^2) scan over every index pair in [l_b, r_b].
    Input: cluster_list is the list of clusters
    Output: tuple of the form (dist, idx1, idx2) where the centers of the clusters
    cluster_list[idx1] and cluster_list[idx2] have minimum distance dist.
    """
    best = (float("inf"), -1, -1)
    if l_b == r_b:
        return best
    for idx_a in xrange(l_b, r_b):
        for idx_b in xrange(idx_a + 1, r_b + 1):
            best = min(best, pair_distance(cluster_list, idx_a, idx_b))
    return best
def fast_closest_pair(cluster_list, l_b, r_b, v_i):
    """
    Compute the distance between the closest pair of clusters in a list (fast)
    Divide-and-conquer over the index range [l_b, r_b]; v_i holds the cluster
    indices ordered by vertical center (consumed by closest_pair_strip).
    Input: cluster_list is list of clusters SORTED such that horizontal positions of their
    centers are in ascending order
    Output: tuple of the form (dist, idx1, idx2) where the centers of the clusters
    cluster_list[idx1] and cluster_list[idx2] have minimum distance dist.
    """
    num = r_b - l_b + 1
    if num <= 3:
        # base case: brute force on at most three clusters
        return slow_closest_pair(cluster_list, l_b, r_b)
    else:
        mid = int(math.floor(0.5 * (r_b + l_b)))
        # split the vertical ordering to match the left/right halves
        v_i_l = [v_i[idx] for idx in xrange(len(v_i))
                 if v_i[idx] < mid]
        v_i_r = [v_i[idx] for idx in xrange(len(v_i))
                 if v_i[idx] >= mid]
        result = fast_closest_pair(cluster_list, l_b, mid - 1, v_i_l)
        new_res = fast_closest_pair(cluster_list, mid, r_b, v_i_r)
        #new_res = (new_res[0], new_res[1] + mid, new_res[2] + mid)
        if new_res < result:
            result = new_res
        # mid becomes the x coordinate of the dividing line between the halves
        mid = 0.5 * (cluster_list[mid - 1].horiz_center()
                     + cluster_list[mid].horiz_center())
        # finally check pairs that straddle the dividing line
        new_res = closest_pair_strip(cluster_list,
                                     mid, result[0], v_i)
        if new_res < result:
            result = new_res
        return result
def closest_pair_strip(cluster_list, horiz_center, half_width, v_i):
    """
    Helper function to compute the closest pair of clusters in a vertical strip
    Input: cluster_list is a list of clusters produced by fast_closest_pair
    horiz_center is the horizontal position of the strip's vertical center line
    half_width is the half the width of the strip (i.e; the maximum horizontal distance
    that a cluster can lie from the center line)
    v_i is the list of cluster indices ordered by vertical center
    Output: tuple of the form (dist, idx1, idx2) where the centers of the clusters
    cluster_list[idx1] and cluster_list[idx2] lie in the strip and have minimum distance dist.
    """
    # indices of clusters inside the strip; already vertically ordered via v_i,
    # so the explicit sort below is no longer needed
    mid = [v_i[idx] for idx in xrange(len(v_i))
           if abs(cluster_list[v_i[idx]].horiz_center()
                  - horiz_center) < half_width]
    #mid.sort(key = lambda idx: cluster_list[idx].vert_center())
    num = len(mid)
    result = (float("inf"), -1, -1)
    for idx1 in xrange(num - 1):
        # classic strip argument: only the next three vertical neighbors
        # can be closer than the current best
        for idx2 in xrange(idx1 + 1, min(idx1 + 4, num)):
            current_d = pair_distance(cluster_list, mid[idx1], mid[idx2])
            if current_d < result:
                result = current_d
    # normalize so the smaller index comes first
    if result[1] > result[2]:
        result = (result[0], result[2], result[1])
    return result
######################################################################
# Code for hierarchical clustering
def hierarchical_clustering(cluster_list, num_clusters):
    """
    Compute a hierarchical clustering of a set of clusters
    Note: the function may mutate cluster_list
    Input: List of clusters, integer number of clusters
    Output: List of clusters whose length is num_clusters
    """
    num = len(cluster_list)
    # keep cluster_list sorted by horizontal center (fast_closest_pair requires it)
    cluster_list.sort(key = lambda clu: clu.horiz_center())
    # v_i: cluster indices ordered by vertical center, maintained incrementally
    # by arrange() instead of being re-sorted every iteration
    v_i = [idx for idx in xrange(num)]
    v_i.sort(key = lambda idx: cluster_list[idx].vert_center())
    while num > num_clusters:
        #print num_clusters, num
        #cluster_list.sort(key = lambda clu: clu.horiz_center())
        # find the closest pair, merge it, then repair both orderings
        idx = fast_closest_pair(cluster_list, 0, num - 1, v_i)
        #cluster_list[idx[1]].merge_clusters(cluster_list[idx[2]])
        #cluster_list.pop(idx[2])
        arrange_h(cluster_list, idx[1], idx[2])
        arrange(v_i, cluster_list, idx[1], idx[2])
        num -= 1
    return cluster_list
def arrange(v_i, cluster_list, idx1, idx2):
    """
    Repair the vertical-order index list v_i after clusters idx1 and idx2
    were merged (by arrange_h). Removes both indices, shifts every index
    above idx2 down by one (that slot was popped from cluster_list), then
    re-inserts idx1 at the position matching the merged cluster's vertical
    center. Mutates v_i in place.
    """
    pos = min(v_i.index(idx1), v_i.index(idx2))
    vert = cluster_list[idx1].vert_center()
    v_i.remove(idx1)
    v_i.remove(idx2)
    # every index above the removed idx2 slot shifts down by one
    for idx in xrange(len(v_i)):
        if v_i[idx] > idx2:
            v_i[idx] -= 1
    # walk forward to the first cluster whose vertical center exceeds vert
    # NOTE(review): the comparison indexes cluster_list[pos], but pos was
    # derived from positions within v_i -- confirm this mixing is intended.
    while pos < len (v_i):
        if vert < cluster_list[pos].vert_center():
            break
        else:
            pos += 1
    v_i.insert(pos, idx1)
def arrange_h(cluster_list, idx1, idx2):
    """
    Merge cluster idx2 into a copy of cluster idx1 and re-insert the merged
    cluster so cluster_list stays sorted by horizontal center.
    Callers pass idx1 < idx2 (pair_distance orders the indices), which the
    pop order below relies on. Mutates cluster_list in place.
    """
    pos = idx1
    # merge into a copy so the list entries being removed are not mutated
    cluster = cluster_list[idx1].copy()
    cluster = cluster.merge_clusters(cluster_list[idx2])
    horiz = cluster_list[idx1].horiz_center()
    # pop the higher index first so idx1 stays valid
    cluster_list.pop(idx2)
    cluster_list.pop(idx1)
    # advance to the first cluster strictly right of the old horizontal center
    while pos < len (cluster_list):
        if horiz < cluster_list[pos].horiz_center():
            break
        else:
            pos += 1
    cluster_list.insert(pos, cluster)
######################################################################
# Code for k-means clustering
def kmeans_clustering(cluster_list, num_clusters, num_iterations):
    """
    Compute the k-means clustering of a set of clusters
    Note: the function may not mutate cluster_list
    Input: List of clusters, integers number of clusters and number of iterations
    Output: List of clusters whose length is num_clusters
    """
    # position initial clusters at the location of clusters with largest populations
    num = len(cluster_list)
    points = [idx for idx in xrange(num)]
    points.sort(reverse = True, key = lambda idx:
                cluster_list[idx].total_population())
    # points: current [x, y] center coordinates of the k cluster centers
    points = [[cluster_list[points[idx]].horiz_center(),
               cluster_list[points[idx]].vert_center()]
              for idx in xrange(num_clusters)]
    # clusters[cidx]: index of the center currently assigned to cluster cidx
    clusters = [-1 for _ in xrange(num)]
    population = [0 for _ in xrange(num_clusters)]
    for _ in xrange(num_iterations):
        # assignment step: attach every cluster to its nearest center
        for cidx in xrange(num):
            mind = (float("inf"), -1, -1)
            for idx in xrange(num_clusters):
                dist = cluster_point_distance(cluster_list,
                                              points,
                                              cidx, idx)
                if mind > dist:
                    mind = dist
            clusters[cidx] = mind[2]
        # update step: recompute each center as the population-weighted mean
        for idx in xrange(num_clusters):
            points[idx][0] = 0.0
            points[idx][1] = 0.0
            population[idx] = 0
        for cidx in xrange(num):
            idx = clusters[cidx]
            cpopul = cluster_list[cidx].total_population()
            population[idx] += cpopul
            points[idx][0] += cluster_list[cidx].horiz_center() * cpopul
            points[idx][1] += cluster_list[cidx].vert_center() * cpopul
        for idx in xrange(num_clusters):
            points[idx][0] /= population[idx]
            points[idx][1] /= population[idx]
    # build the output clusters by merging the members of each final group
    result = [0 for _ in xrange(num_clusters)]
    for cidx in xrange(num):
        idx = clusters[cidx]
        if result[idx] == 0:
            result[idx] = cluster_list[cidx].copy()
        else:
            result[idx].merge_clusters(cluster_list[cidx])
    return result
def cluster_point_distance(cluster_list, points, cidx, idx):
    """Return (dist, cidx, idx) for a cluster/point pair.

    dist is the Euclidean distance between the center of
    cluster_list[cidx] and the point points[idx]; the two indices are
    echoed back so callers can compare the tuples directly.
    """
    point_x, point_y = points[idx][0], points[idx][1]
    delta_x = cluster_list[cidx].horiz_center() - point_x
    delta_y = cluster_list[cidx].vert_center() - point_y
    distance = math.sqrt(delta_x ** 2 + delta_y ** 2)
    return (distance, cidx, idx)
| [
"TogusaRusso@gmail.com"
] | TogusaRusso@gmail.com |
9c765fca0194129caa59e74b70cc204fc59bce14 | cf1e19f7b6354302037bca563b42218df7d79400 | /최단경로/[2307]도로검문.py | 3540a2ab6f4b48e1f02290e4e11b12bf476f0669 | [] | no_license | kim-kiwon/Baekjoon-Algorithm | 680565ddeced2d44506ae6720cf32d8004db42f8 | 4699e6551d3e7451648b9256c54ea4318b71bd4d | refs/heads/master | 2023-04-13T11:10:21.031969 | 2021-04-26T10:50:08 | 2021-04-26T10:50:08 | 325,209,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py | #다익스트라 + 경로추적
import heapq
# Read the node count n and edge count m, then build an undirected weighted
# adjacency list: graph[a] holds (neighbor, weight) tuples.
n, m = map(int, input().split())
INF = int(1e9)
graph = [[] for _ in range(n+1)]
previous = [1] * (n+1)  # previous[v] = predecessor of v on the shortest path found so far
for _ in range(m):
    a, b, dist = map(int, input().split())
    graph[a].append((b, dist))
    graph[b].append((a, dist))
def dijkstra():
    """Return the shortest distance from node 1 to node n.

    Reads the module-level `n`, `graph` (adjacency list of (node, weight)
    tuples) and `INF`; as a side effect, records each node's predecessor on
    its shortest path in the module-level `previous` list.
    """
    distance = [INF] * (n + 1)
    distance[1] = 0
    # Heap entries are (distance, node) so heapq pops the closest unsettled
    # node first. The original pushed (node, distance), which ordered the
    # heap by node index and silently degraded the priority queue.
    q = [(0, 1)]
    while q:
        dist, now = heapq.heappop(q)
        if distance[now] < dist:
            continue  # stale entry: a shorter path to `now` was already found
        for nxt, weight in graph[now]:
            cost = dist + weight
            if cost < distance[nxt]:
                distance[nxt] = cost
                heapq.heappush(q, (cost, nxt))
                previous[nxt] = now
    return distance[n]
init_val = dijkstra()  # run Dijkstra once; remember the baseline shortest distance
temp = []  # edges on the 1 -> n shortest path, as (prev_node, node, weight)
now = n  # walk the predecessor chain backwards from n to 1
while True:
    if now == 1: break  # finished once the walk reaches node 1
    a = previous[now]  # a: previous node on the shortest path
    b = now  # b: current node
    for i in graph[now]:  # find dist = weight of the edge previous-node -> current-node
        if i[0] == previous[now]:
            dist = i[1]
            break
    temp.append((a, b, dist))  # record (previous node, current node, weight)
    now = previous[now]
max_val = -1e9
# Unless we remove an edge used by the shortest path, that path would still
# be taken and the cost would not change — so only those edges are tried.
while True:
    if len(temp) == 0: break
    # Remove one shortest-path edge -> re-measure distance with Dijkstra -> restore it
    a, b, dist = temp.pop()
    graph[a].remove((b, dist))
    graph[b].remove((a, dist))
    max_val = max(max_val, dijkstra())
    graph[a].append((b, dist))
    graph[b].append((a, dist))
if max_val >= 1e9:
    print(-1)  # removing some edge disconnects 1 from n
else:
    print(max_val - init_val)  # largest achievable increase over the baseline
"76721493+kim-kiwon@users.noreply.github.com"
] | 76721493+kim-kiwon@users.noreply.github.com |
ef2c1f1eda59fbff3084875ec80f14ae00e10fa1 | 01773eac9e1ae4b8477a1ed5b7d2c76e31ee44de | /utils/meter/msemeter.py | a696a1193821d27ebaa03acdb0194a8e9a8b958d | [] | no_license | yifanfeng97/coding | a54615025a4dfa0fa51fed313891f58b39ada128 | 9be1f58b67198a930de76b0b44569770ad0d7719 | refs/heads/master | 2021-04-03T05:53:49.809968 | 2018-03-25T13:23:49 | 2018-03-25T13:23:49 | 124,754,833 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | import math
from . import meter
import torch
class MSEMeter(meter.Meter):
def __init__(self, root=False):
super(MSEMeter, self).__init__()
self.reset()
self.root = root
def reset(self):
self.n = 0
self.sesum = 0.0
def add(self, output, target):
if not torch.is_tensor(output) and not torch.is_tensor(target):
output = torch.from_numpy(output)
target = torch.from_numpy(target)
self.n += output.numel()
self.sesum += torch.sum((output - target) ** 2)
def value(self):
mse = self.sesum / max(1, self.n)
return math.sqrt(mse) if self.root else mse
| [
"czqofnju@gmail.com"
] | czqofnju@gmail.com |
4d43820c4aea93ba07694e36222ea28055300864 | d2939926801729eb91c4075bb6f2b443295af18d | /backend/tests/test_token_auth.py | a85085a059b7f12a26bcfadead307418e3d01d5c | [] | no_license | releaseChecker/release_checker | cfb2e7bb4ab45e025ba15dc90378bd85f16a5a62 | 02fbaf2d74c96586f651cf32eed301adc809c4ff | refs/heads/main | 2023-01-14T07:00:23.576722 | 2020-11-26T03:50:59 | 2020-11-26T03:50:59 | 300,520,624 | 0 | 0 | null | 2020-11-26T03:51:01 | 2020-10-02T06:21:50 | Python | UTF-8 | Python | false | false | 599 | py | import pytest
from rest_framework import status
from rest_framework.reverse import reverse
class TestAuth:
@pytest.fixture
def requiring_auth_url(self, live_server):
return live_server.url + reverse("tag-list")
def test_no_auth(self, client, requiring_auth_url):
response = client.get(requiring_auth_url)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_jwt_auth(self, authenticated_client, requiring_auth_url):
response = authenticated_client.get(requiring_auth_url)
assert response.status_code == status.HTTP_200_OK
| [
"roqkfwkehlwk@naver.com"
] | roqkfwkehlwk@naver.com |
23bdac21856e5f762bf57fd760d7ca15c75acd9f | c09f78b725502d7a1aeed9d1b0de3ba47796d211 | /mmd_tools/import_pmx.py | 5a40423541c21fc1b20992f542305892381dac04 | [
"MIT"
] | permissive | usodaraki/blender_mmd_tools | 1d880871c395eb67ad00bf4a6092c30f9004be4c | a12e5b615526248c669170b86e02d0a7b6118ffd | refs/heads/master | 2021-01-17T12:47:34.592505 | 2013-06-29T19:01:23 | 2013-06-29T19:01:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,067 | py | # -*- coding: utf-8 -*-
from . import pmx
from . import utils
from . import bpyutils
import math
import bpy
import os
import mathutils
import collections
import logging
import time
class PMXImporter:
    """Imports a PMX (MikuMikuDance) model into the current Blender scene.

    Builds a root empty, an armature and a mesh object, then populates
    vertices, bones, materials, faces, rigid bodies, joints and vertex
    morphs from the parsed pmx data. Entry point is execute().
    """
    # Converts PMX's coordinate system (Y-up, left-handed) into Blender's
    # by swapping the Y and Z axes.
    TO_BLE_MATRIX = mathutils.Matrix([
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 1.0]])

    def __init__(self):
        # Parsed pmx.Model and import-wide state; populated by execute().
        self.__model = None
        self.__targetScene = bpy.context.scene
        self.__scale = None

        self.__root = None
        self.__armObj = None
        self.__meshObj = None

        self.__vertexTable = None
        self.__vertexGroupTable = None
        self.__textureTable = None

        self.__boneTable = []
        self.__rigidTable = []
        self.__nonCollisionJointTable = None
        self.__jointTable = []

        self.__materialFaceCountTable = None

        self.__nonCollisionConstraints = []

        # object groups
        self.__allObjGroup = None    # a group which contains all objects created for the target model by mmd_tools.
        self.__mainObjGroup = None    # a group which contains armature and mesh objects.
        self.__rigidObjGroup = None  # a group which contains objects of rigid bodies imported from a pmx model.
        self.__jointObjGroup = None  # a group which contains objects of joints imported from a pmx model.
        self.__tempObjGroup = None   # a group which contains temporary objects.

    @staticmethod
    def flipUV_V(uv):
        """Flip the V coordinate: PMX UVs have V growing downward, Blender upward."""
        u, v = uv
        return [u, 1.0-v]

    def __getMaterialIndexFromFaceIndex(self, face_index):
        """Map a global face index to its material index via the per-material face counts."""
        count = 0
        for i, c in enumerate(self.__materialFaceCountTable):
            if face_index < count + c:
                return i
            count += c
        raise Exception('invalid face index.')

    def __createObjects(self):
        """ Create main objects and link them to scene.
        """
        pmxModel = self.__model

        self.__root = bpy.data.objects.new(name=pmxModel.name, object_data=None)
        self.__targetScene.objects.link(self.__root)

        mesh = bpy.data.meshes.new(name=pmxModel.name)
        self.__meshObj = bpy.data.objects.new(name=pmxModel.name+'_mesh', object_data=mesh)

        arm = bpy.data.armatures.new(name=pmxModel.name)
        self.__armObj = bpy.data.objects.new(name=pmxModel.name+'_arm', object_data=arm)
        self.__meshObj.parent = self.__armObj

        self.__targetScene.objects.link(self.__meshObj)
        self.__targetScene.objects.link(self.__armObj)

        self.__armObj.parent = self.__root

        self.__allObjGroup.objects.link(self.__root)
        self.__allObjGroup.objects.link(self.__armObj)
        self.__allObjGroup.objects.link(self.__meshObj)
        self.__mainObjGroup.objects.link(self.__armObj)
        self.__mainObjGroup.objects.link(self.__meshObj)

    def __createGroups(self):
        """Create the object groups used to organize everything this import creates."""
        pmxModel = self.__model
        self.__mainObjGroup = bpy.data.groups.new(name=pmxModel.name)
        logging.debug('Create main group: %s', self.__mainObjGroup.name)
        self.__allObjGroup = bpy.data.groups.new(name=pmxModel.name + '_all')
        logging.debug('Create all group: %s', self.__allObjGroup.name)
        self.__rigidObjGroup = bpy.data.groups.new(name=pmxModel.name + '_rigids')
        logging.debug('Create rigid group: %s', self.__rigidObjGroup.name)
        self.__jointObjGroup = bpy.data.groups.new(name=pmxModel.name + '_joints')
        logging.debug('Create joint group: %s', self.__jointObjGroup.name)
        self.__tempObjGroup = bpy.data.groups.new(name=pmxModel.name + '_temp')
        logging.debug('Create temporary group: %s', self.__tempObjGroup.name)

    def __importVertexGroup(self):
        """Create one mesh vertex group per bone, indexable by pmx bone index."""
        self.__vertexGroupTable = []
        for i in self.__model.bones:
            self.__vertexGroupTable.append(self.__meshObj.vertex_groups.new(name=i.name))

    def __importVertices(self):
        """Copy vertex positions/normals into the mesh and assign bone weights."""
        self.__importVertexGroup()

        pmxModel = self.__model
        mesh = self.__meshObj.data

        mesh.vertices.add(count=len(self.__model.vertices))
        for i, pv in enumerate(pmxModel.vertices):
            bv = mesh.vertices[i]

            bv.co = mathutils.Vector(pv.co) * self.TO_BLE_MATRIX * self.__scale
            bv.normal = pv.normal

            # SDEF weights are treated like BDEF2 (only the blend factor is used).
            if isinstance(pv.weight.weights, pmx.BoneWeightSDEF):
                self.__vertexGroupTable[pv.weight.bones[0]].add(index=[i], weight=pv.weight.weights.weight, type='REPLACE')
                self.__vertexGroupTable[pv.weight.bones[1]].add(index=[i], weight=1.0-pv.weight.weights.weight, type='REPLACE')
            elif len(pv.weight.bones) == 1:
                self.__vertexGroupTable[pv.weight.bones[0]].add(index=[i], weight=1.0, type='REPLACE')
            elif len(pv.weight.bones) == 2:
                self.__vertexGroupTable[pv.weight.bones[0]].add(index=[i], weight=pv.weight.weights[0], type='REPLACE')
                self.__vertexGroupTable[pv.weight.bones[1]].add(index=[i], weight=1.0-pv.weight.weights[0], type='REPLACE')
            elif len(pv.weight.bones) == 4:
                self.__vertexGroupTable[pv.weight.bones[0]].add(index=[i], weight=pv.weight.weights[0], type='REPLACE')
                self.__vertexGroupTable[pv.weight.bones[1]].add(index=[i], weight=pv.weight.weights[1], type='REPLACE')
                self.__vertexGroupTable[pv.weight.bones[2]].add(index=[i], weight=pv.weight.weights[2], type='REPLACE')
                self.__vertexGroupTable[pv.weight.bones[3]].add(index=[i], weight=pv.weight.weights[3], type='REPLACE')
            else:
                raise Exception('unkown bone weight type.')

    def __importTextures(self):
        """Create an image texture per pmx texture path; failures only log a warning."""
        pmxModel = self.__model

        self.__textureTable = []
        for i in pmxModel.textures:
            name = os.path.basename(i.path).split('.')[0]
            tex = bpy.data.textures.new(name=name, type='IMAGE')
            try:
                tex.image = bpy.data.images.load(filepath=i.path)
            except Exception:
                logging.warning('failed to load %s', str(i.path))
            self.__textureTable.append(tex)

    def __createEditBones(self, obj, pmx_bones):
        """ create EditBones from pmx file data.
        @return the list of bone names which can be accessed by the bone index of pmx data.
        """
        editBoneTable = []
        nameTable = []
        # IK bones whose parent equals their target's parent would form a
        # dependency cycle; they are re-parented one level up instead.
        dependency_cycle_ik_bones = []
        for i, p_bone in enumerate(pmx_bones):
            if p_bone.isIK:
                if p_bone.target != -1:
                    t = pmx_bones[p_bone.target]
                    if p_bone.parent == t.parent:
                        dependency_cycle_ik_bones.append(i)

        with bpyutils.edit_object(obj):
            for i in pmx_bones:
                bone = obj.data.edit_bones.new(name=i.name)
                loc = mathutils.Vector(i.location) * self.__scale * self.TO_BLE_MATRIX
                bone.head = loc
                editBoneTable.append(bone)
                nameTable.append(bone.name)

            for i, (b_bone, m_bone) in enumerate(zip(editBoneTable, pmx_bones)):
                if m_bone.parent != -1:
                    if i not in dependency_cycle_ik_bones:
                        b_bone.parent = editBoneTable[m_bone.parent]
                    else:
                        b_bone.parent = editBoneTable[m_bone.parent].parent

            # displayConnection is either a bone index (tail points at that
            # bone's head) or an offset vector relative to the head.
            for b_bone, m_bone in zip(editBoneTable, pmx_bones):
                if isinstance(m_bone.displayConnection, int):
                    if m_bone.displayConnection != -1:
                        b_bone.tail = editBoneTable[m_bone.displayConnection].head
                    else:
                        b_bone.tail = b_bone.head
                else:
                    loc = mathutils.Vector(m_bone.displayConnection) * self.TO_BLE_MATRIX * self.__scale
                    b_bone.tail = b_bone.head + loc

            for b_bone in editBoneTable:
                # Set the length of too short bones to 1 because Blender delete them.
                if b_bone.length < 0.001:
                    loc = mathutils.Vector([0, 0, 1]) * self.__scale
                    b_bone.tail = b_bone.head + loc

            for b_bone, m_bone in zip(editBoneTable, pmx_bones):
                if b_bone.parent is not None and b_bone.parent.tail == b_bone.head:
                    if not m_bone.isMovable:
                        b_bone.use_connect = True

        return nameTable

    def __sortPoseBonesByBoneIndex(self, pose_bones, bone_names):
        """Return pose bones reordered so list position matches the pmx bone index."""
        r = []
        for i in bone_names:
            r.append(pose_bones[i])
        return r

    def __applyIk(self, index, pmx_bone, pose_bones):
        """ create a IK bone constraint
        If the IK bone and the target bone is separated, a dummy IK target bone is created as a child of the IK bone.
        @param index the bone index
        @param pmx_bone pmx.Bone
        @param pose_bones the list of PoseBones sorted by the bone index
        """
        ik_bone = pose_bones[pmx_bone.target].parent
        target_bone = pose_bones[index]

        if (mathutils.Vector(ik_bone.tail) - mathutils.Vector(target_bone.head)).length > 0.001:
            logging.info('Found a seperated IK constraint: IK: %s, Target: %s', ik_bone.name, target_bone.name)
            with bpyutils.edit_object(self.__armObj):
                s_bone = self.__armObj.data.edit_bones.new(name='shadow')
                logging.info('  Create a proxy bone: %s', s_bone.name)
                s_bone.head = ik_bone.tail
                s_bone.tail = s_bone.head + mathutils.Vector([0, 0, 1])
                s_bone.layers = (False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)
                s_bone.parent = self.__armObj.data.edit_bones[target_bone.name]
                logging.info('  Set parent: %s -> %s', target_bone.name, s_bone.name)
                # Must not access to EditBones from outside of the 'with' section.
                s_bone_name = s_bone.name

            logging.info('  Use %s as IK target bone instead of %s', s_bone_name, target_bone.name)
            target_bone = self.__armObj.pose.bones[s_bone_name]
            target_bone.is_mmd_shadow_bone = True

        ikConst = ik_bone.constraints.new('IK')
        ikConst.chain_count = len(pmx_bone.ik_links)
        ikConst.target = self.__armObj
        ikConst.subtarget = target_bone.name
        if pmx_bone.isRotatable and not pmx_bone.isMovable :
            ikConst.use_location = pmx_bone.isMovable
            ikConst.use_rotation = pmx_bone.isRotatable
        for i in pmx_bone.ik_links:
            if i.maximumAngle is not None:
                bone = pose_bones[i.target]
                bone.use_ik_limit_x = True
                bone.use_ik_limit_y = True
                bone.use_ik_limit_z = True
                # X limits are negated/swapped to account for the axis conversion.
                bone.ik_max_x = -i.minimumAngle[0]
                bone.ik_max_y = i.maximumAngle[1]
                bone.ik_max_z = i.maximumAngle[2]
                bone.ik_min_x = -i.maximumAngle[0]
                bone.ik_min_y = i.minimumAngle[1]
                bone.ik_min_z = i.minimumAngle[2]

    @staticmethod
    def __findNoneAdditionalBone(target, pose_bones, visited_bones=None):
        """Follow CHILD_OF proxy constraints back to the first bone that is not
        itself driven by an additional-transform shadow bone."""
        if visited_bones is None:
            visited_bones = []
        if target in visited_bones:
            raise ValueError('Detected cyclic dependency.')
        for i in filter(lambda x: x.type == 'CHILD_OF', target.constraints):
            if i.subtarget != target.parent.name:
                return PMXImporter.__findNoneAdditionalBone(pose_bones[i.subtarget], pose_bones, visited_bones)
        return target

    def __applyAdditionalTransform(self, obj, src, dest, influence, pose_bones, rotation=False, location=False):
        """ apply additional transform to the bone.
        @param obj the object of the target armature
        @param src the PoseBone that apply the transform to another bone.
        @param dest the PoseBone that another bone apply the transform to.
        """
        if not rotation and not location:
            return
        bone_name = None

        # If src has been applied the additional transform by another bone,
        # copy the constraint of it to dest.
        src = self.__findNoneAdditionalBone(src, pose_bones)

        with bpyutils.edit_object(obj):
            src_bone = obj.data.edit_bones[src.name]
            s_bone = obj.data.edit_bones.new(name='shadow')
            s_bone.head = src_bone.head
            s_bone.tail = src_bone.tail
            s_bone.parent = src_bone.parent
            #s_bone.use_connect = src_bone.use_connect
            s_bone.layers = (False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)
            s_bone.use_inherit_rotation = False
            s_bone.use_local_location = True
            s_bone.use_inherit_scale = False
            bone_name = s_bone.name

            dest_bone = obj.data.edit_bones[dest.name]
            dest_bone.use_inherit_rotation = not rotation
            dest_bone.use_local_location = not location

        p_bone = obj.pose.bones[bone_name]
        p_bone.is_mmd_shadow_bone = True

        # Negative influence means the transform is applied inverted.
        if rotation:
            c = p_bone.constraints.new('COPY_ROTATION')
            c.target = obj
            c.subtarget = src.name
            c.target_space = 'LOCAL'
            c.owner_space = 'LOCAL'

            if influence > 0:
                c.influence = influence
            else:
                c.influence = -influence
                c.invert_x = True
                c.invert_y = True
                c.invert_z = True

        if location:
            c = p_bone.constraints.new('COPY_LOCATION')
            c.target = obj
            c.subtarget = src.name
            c.target_space = 'LOCAL'
            c.owner_space = 'LOCAL'

            if influence > 0:
                c.influence = influence
            else:
                c.influence = -influence
                c.invert_x = True
                c.invert_y = True
                c.invert_z = True

        c = dest.constraints.new('CHILD_OF')
        c.target = obj
        c.subtarget = p_bone.name
        c.use_location_x = location
        c.use_location_y = location
        c.use_location_z = location
        c.use_rotation_x = rotation
        c.use_rotation_y = rotation
        c.use_rotation_z = rotation
        c.use_scale_x = False
        c.use_scale_y = False
        c.use_scale_z = False
        c.inverse_matrix = mathutils.Matrix(src.matrix).inverted()

        if dest.parent is not None:
            parent = dest.parent
            c = dest.constraints.new('CHILD_OF')
            c.target = obj
            c.subtarget = parent.name
            c.use_location_x = False
            c.use_location_y = False
            c.use_location_z = False
            c.use_scale_x = False
            c.use_scale_y = False
            c.use_scale_z = False
            c.inverse_matrix = mathutils.Matrix(parent.matrix).inverted()

    def __importBones(self):
        """Create the armature's bones and apply IK, additional transforms,
        local axes and lock flags from the pmx bone data."""
        pmxModel = self.__model

        boneNameTable = self.__createEditBones(self.__armObj, pmxModel.bones)
        pose_bones = self.__sortPoseBonesByBoneIndex(self.__armObj.pose.bones, boneNameTable)
        self.__boneTable = pose_bones
        for i, p_bone in sorted(enumerate(pmxModel.bones), key=lambda x: x[1].transform_order):
            b_bone = pose_bones[i]
            b_bone.mmd_bone_name_e = p_bone.name_e

            if not p_bone.isRotatable:
                b_bone.lock_rotation = [True, True, True]

            if not p_bone.isMovable:
                b_bone.lock_location =[True, True, True]

            if p_bone.isIK:
                if p_bone.target != -1:
                    self.__applyIk(i, p_bone, pose_bones)

            if p_bone.hasAdditionalRotate or p_bone.hasAdditionalLocation:
                bone_index, influ = p_bone.additionalTransform
                src_bone = pmxModel.bones[bone_index]
                self.__applyAdditionalTransform(
                    self.__armObj,
                    pose_bones[bone_index],
                    b_bone,
                    influ,
                    self.__armObj.pose.bones,
                    p_bone.hasAdditionalRotate,
                    p_bone.hasAdditionalLocation
                    )

            if p_bone.localCoordinate is not None:
                b_bone.mmd_enabled_local_axis = True
                b_bone.mmd_local_axis_x = p_bone.localCoordinate.x_axis
                b_bone.mmd_local_axis_z = p_bone.localCoordinate.z_axis

            # Leaf bones are treated as display-only tips: locked and hidden.
            if len(b_bone.children) == 0:
                b_bone.is_mmd_tip_bone = True
                b_bone.lock_rotation = [True, True, True]
                b_bone.lock_location = [True, True, True]
                b_bone.lock_scale = [True, True, True]
                b_bone.bone.hide = True

    def __importRigids(self):
        """Create rigid-body objects for the model, parent them to bones, and
        build non-collision constraints between rigids in excluded groups."""
        self.__rigidTable = []
        self.__nonCollisionJointTable = {}
        start_time = time.time()
        # collisionGroups[g] holds the rigid objects in collision group g (0-15).
        collisionGroups = []
        for i in range(16):
            collisionGroups.append([])
        for rigid in self.__model.rigids:
            if self.__onlyCollisions and rigid.mode != pmx.Rigid.MODE_STATIC:
                continue

            loc = mathutils.Vector(rigid.location) * self.TO_BLE_MATRIX * self.__scale
            rot = mathutils.Vector(rigid.rotation) * self.TO_BLE_MATRIX * -1
            rigid_type = None
            if rigid.type == pmx.Rigid.TYPE_SPHERE:
                bpy.ops.mesh.primitive_uv_sphere_add(
                    segments=16,
                    ring_count=8,
                    size=1,
                    view_align=False,
                    enter_editmode=False
                    )
                size = mathutils.Vector([1,1,1]) * rigid.size[0]
                rigid_type = 'SPHERE'
                bpy.ops.object.shade_smooth()
            elif rigid.type == pmx.Rigid.TYPE_BOX:
                bpy.ops.mesh.primitive_cube_add(
                    view_align=False,
                    enter_editmode=False
                    )
                size = mathutils.Vector(rigid.size) * self.TO_BLE_MATRIX
                rigid_type = 'BOX'
            elif rigid.type == pmx.Rigid.TYPE_CAPSULE:
                obj = utils.makeCapsule(radius=rigid.size[0], height=rigid.size[1])
                size = mathutils.Vector([1,1,1])
                rigid_type = 'CAPSULE'
                bpy.ops.object.shade_smooth()
            else:
                raise Exception('Invalid rigid type')

            if rigid.type != pmx.Rigid.TYPE_CAPSULE:
                obj = bpy.context.selected_objects[0]
            obj.name = rigid.name
            obj.scale = size * self.__scale
            obj.hide_render = True
            obj.draw_type = 'WIRE'
            obj.is_mmd_rigid = True
            self.__rigidObjGroup.objects.link(obj)
            utils.selectAObject(obj)
            bpy.ops.object.transform_apply(location=False, rotation=True, scale=True)
            obj.location = loc
            obj.rotation_euler = rot

            bpy.ops.rigidbody.object_add(type='ACTIVE')
            if rigid.mode == pmx.Rigid.MODE_STATIC and rigid.bone is not None:
                bpy.ops.object.modifier_add(type='COLLISION')
                utils.setParentToBone(obj, self.__armObj, self.__boneTable[rigid.bone].name)
            elif rigid.bone is not None:
                bpy.ops.object.select_all(action='DESELECT')
                obj.select = True
                bpy.context.scene.objects.active = self.__root
                bpy.ops.object.parent_set(type='OBJECT', xmirror=False, keep_transform=True)

                # A dynamic rigid drags its bone along via an empty at the bone
                # tail tracked with a DAMPED_TRACK constraint.
                target_bone = self.__boneTable[rigid.bone]
                empty = bpy.data.objects.new(
                    'mmd_bonetrack',
                    None)
                bpy.context.scene.objects.link(empty)
                empty.location = target_bone.tail
                empty.empty_draw_size = 0.5 * self.__scale
                empty.empty_draw_type = 'ARROWS'
                empty.is_mmd_rigid_track_target = True
                self.__tempObjGroup.objects.link(empty)

                utils.selectAObject(empty)
                bpy.context.scene.objects.active = obj
                bpy.ops.object.parent_set(type='OBJECT', xmirror=False, keep_transform=False)

                empty.hide = True

                for i in target_bone.constraints:
                    if i.type == 'IK':
                        i.influence = 0
                const = target_bone.constraints.new('DAMPED_TRACK')
                const.target = empty
            else:
                obj.parent = self.__armObj

            bpy.ops.object.select_all(action='DESELECT')
            obj.select = True
            obj.rigid_body.collision_shape = rigid_type
            group_flags = []
            rb = obj.rigid_body
            rb.friction = rigid.friction
            rb.mass = rigid.mass
            rb.angular_damping = rigid.rotation_attenuation
            rb.linear_damping = rigid.velocity_attenuation
            rb.restitution = rigid.bounce
            if rigid.mode == pmx.Rigid.MODE_STATIC:
                rb.kinematic = True

            # Pair this rigid with every rigid in groups its mask excludes.
            # NOTE(review): `s = time.time()` below is assigned but never used.
            for i in range(16):
                if rigid.collision_group_mask & (1<<i) == 0:
                    for j in collisionGroups[i]:
                        s = time.time()
                        self.__makeNonCollisionConstraint(obj, j)

            collisionGroups[rigid.collision_group_number].append(obj)
            self.__rigidTable.append(obj)
        logging.debug('Finished importing rigid bodies in %f seconds.', time.time() - start_time)

    def __makeNonCollisionConstraint(self, obj_a, obj_b):
        """Create an empty with a GENERIC rigid-body constraint that disables
        collisions between obj_a and obj_b, if they are close enough to matter."""
        if (mathutils.Vector(obj_a.location) - mathutils.Vector(obj_b.location)).length > self.__distance_of_ignore_collisions:
            return
        t = bpy.data.objects.new(
            'ncc.%d'%len(self.__nonCollisionConstraints),
            None)
        bpy.context.scene.objects.link(t)
        t.location = [0, 0, 0]
        t.empty_draw_size = 0.5 * self.__scale
        t.empty_draw_type = 'ARROWS'
        t.is_mmd_non_collision_joint = True
        t.hide_render = True
        t.parent = self.__root
        utils.selectAObject(t)
        bpy.ops.rigidbody.constraint_add(type='GENERIC')
        rb = t.rigid_body_constraint
        rb.disable_collisions = True
        rb.object1 = obj_a
        rb.object2 = obj_b
        self.__nonCollisionConstraints.append(t)
        self.__nonCollisionJointTable[frozenset((obj_a, obj_b))] = t
        self.__tempObjGroup.objects.link(t)

    def __makeSpring(self, target, base_obj, spring_stiffness):
        """Duplicate `target` as a kinematic goal parented to base_obj and
        connect the two with a GENERIC_SPRING constraint."""
        utils.selectAObject(target)
        bpy.ops.object.duplicate()
        spring_target = bpy.context.scene.objects.active
        spring_target.is_mmd_spring_goal = True
        spring_target.rigid_body.kinematic = True
        spring_target.rigid_body.collision_groups = (False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True)
        bpy.context.scene.objects.active = base_obj
        bpy.ops.object.parent_set(type='OBJECT', xmirror=False, keep_transform=True)
        self.__rigidObjGroup.objects.unlink(spring_target)
        self.__tempObjGroup.objects.link(spring_target)

        obj = bpy.data.objects.new(
            'S.'+target.name,
            None)
        bpy.context.scene.objects.link(obj)
        obj.location = target.location
        obj.empty_draw_size = 0.5 * self.__scale
        obj.empty_draw_type = 'ARROWS'
        obj.hide_render = True
        obj.is_mmd_spring_joint = True
        obj.parent = self.__root
        self.__tempObjGroup.objects.link(obj)

        utils.selectAObject(obj)
        bpy.ops.rigidbody.constraint_add(type='GENERIC_SPRING')
        rbc = obj.rigid_body_constraint
        rbc.object1 = target
        rbc.object2 = spring_target

        rbc.use_spring_x = True
        rbc.use_spring_y = True
        rbc.use_spring_z = True

        rbc.spring_stiffness_x = spring_stiffness[0]
        rbc.spring_stiffness_y = spring_stiffness[1]
        rbc.spring_stiffness_z = spring_stiffness[2]

    def __importJoints(self):
        """Create GENERIC_SPRING constraints for the model's joints, with
        linear/angular limits converted into Blender's coordinate system."""
        if self.__onlyCollisions:
            return
        self.__jointTable = []
        for joint in self.__model.joints:
            loc = mathutils.Vector(joint.location) * self.TO_BLE_MATRIX * self.__scale
            rot = mathutils.Vector(joint.rotation) * self.TO_BLE_MATRIX * -1

            obj = bpy.data.objects.new(
                'J.'+joint.name,
                None)
            bpy.context.scene.objects.link(obj)
            obj.location = loc
            obj.rotation_euler = rot
            obj.empty_draw_size = 0.5 * self.__scale
            obj.empty_draw_type = 'ARROWS'
            obj.hide_render = True
            obj.is_mmd_joint = True
            obj.parent = self.__root
            self.__jointObjGroup.objects.link(obj)

            utils.selectAObject(obj)
            bpy.ops.rigidbody.constraint_add(type='GENERIC_SPRING')
            rbc = obj.rigid_body_constraint

            rigid1 = self.__rigidTable[joint.src_rigid]
            rigid2 = self.__rigidTable[joint.dest_rigid]
            rbc.object1 = rigid1
            rbc.object2 = rigid2

            # Joined rigids normally keep colliding unless a non-collision
            # constraint already exists between them; in that case the
            # dedicated constraint object is replaced by this joint's flag.
            if not self.__ignoreNonCollisionGroups:
                non_collision_joint = self.__nonCollisionJointTable.get(frozenset((rigid1, rigid2)), None)
                if non_collision_joint is None:
                    rbc.disable_collisions = False
                else:
                    utils.selectAObject(non_collision_joint)
                    bpy.ops.object.delete(use_global=False)
                    rbc.disable_collisions = True
            elif rigid1.rigid_body.kinematic and not rigid2.rigid_body.kinematic or not rigid1.rigid_body.kinematic and rigid2.rigid_body.kinematic:
                rbc.disable_collisions = False

            rbc.use_limit_ang_x = True
            rbc.use_limit_ang_y = True
            rbc.use_limit_ang_z = True
            rbc.use_limit_lin_x = True
            rbc.use_limit_lin_y = True
            rbc.use_limit_lin_z = True
            rbc.use_spring_x = True
            rbc.use_spring_y = True
            rbc.use_spring_z = True

            max_loc = mathutils.Vector(joint.maximum_location) * self.TO_BLE_MATRIX * self.__scale
            min_loc = mathutils.Vector(joint.minimum_location) * self.TO_BLE_MATRIX * self.__scale
            rbc.limit_lin_x_upper = max_loc[0]
            rbc.limit_lin_y_upper = max_loc[1]
            rbc.limit_lin_z_upper = max_loc[2]

            rbc.limit_lin_x_lower = min_loc[0]
            rbc.limit_lin_y_lower = min_loc[1]
            rbc.limit_lin_z_lower = min_loc[2]

            # Angular limits are negated and swapped by the axis conversion.
            max_rot = mathutils.Vector(joint.maximum_rotation) * self.TO_BLE_MATRIX
            min_rot = mathutils.Vector(joint.minimum_rotation) * self.TO_BLE_MATRIX
            rbc.limit_ang_x_upper = -min_rot[0]
            rbc.limit_ang_y_upper = -min_rot[1]
            rbc.limit_ang_z_upper = -min_rot[2]

            rbc.limit_ang_x_lower = -max_rot[0]
            rbc.limit_ang_y_lower = -max_rot[1]
            rbc.limit_ang_z_lower = -max_rot[2]

            # spring_damp = mathutils.Vector(joint.spring_constant) * self.TO_BLE_MATRIX
            # rbc.spring_damping_x = spring_damp[0]
            # rbc.spring_damping_y = spring_damp[1]
            # rbc.spring_damping_z = spring_damp[2]

            self.__jointTable.append(obj)

            bpy.ops.object.select_all(action='DESELECT')
            obj.select = True
            bpy.context.scene.objects.active = self.__armObj
            bpy.ops.object.parent_set(type='OBJECT', xmirror=False, keep_transform=True)

            # spring_stiff = mathutils.Vector()
            # rbc.spring_stiffness_x = spring_stiff[0]
            # rbc.spring_stiffness_y = spring_stiff[1]
            # rbc.spring_stiffness_z = spring_stiff[2]

            # When one side is kinematic, anchor the dynamic side with a spring.
            if rigid1.rigid_body.kinematic:
                self.__makeSpring(rigid2, rigid1, mathutils.Vector(joint.spring_rotation_constant) * self.TO_BLE_MATRIX)
            if rigid2.rigid_body.kinematic:
                self.__makeSpring(rigid1, rigid2, mathutils.Vector(joint.spring_rotation_constant) * self.TO_BLE_MATRIX)

    def __importMaterials(self):
        """Create one Blender material per pmx material (diffuse/ambient/
        specular/texture) and record the face count for index mapping."""
        self.__importTextures()

        bpy.types.Material.ambient_color = bpy.props.FloatVectorProperty(name='ambient color')

        pmxModel = self.__model

        self.__materialTable = []
        self.__materialFaceCountTable = []
        for i in pmxModel.materials:
            mat = bpy.data.materials.new(name=i.name)
            mat.diffuse_color = i.diffuse[0:3]
            mat.alpha = i.diffuse[3]
            mat.ambient_color = i.ambient
            mat.specular_color = i.specular[0:3]
            mat.specular_alpha = i.specular[3]
            self.__materialFaceCountTable.append(int(i.vertex_count/3))
            self.__meshObj.data.materials.append(mat)
            if i.texture != -1:
                texture_slot = mat.texture_slots.add()
                texture_slot.use_map_alpha = True
                texture_slot.texture = self.__textureTable[i.texture]
                texture_slot.texture_coords = 'UV'
                mat.use_transparency = True
                mat.transparency_method = 'Z_TRANSPARENCY'
                mat.alpha = 0

    def __importFaces(self):
        """Create the mesh's tessfaces, UVs and per-face material indices."""
        pmxModel = self.__model
        mesh = self.__meshObj.data

        mesh.tessfaces.add(len(pmxModel.faces))
        uvLayer = mesh.tessface_uv_textures.new()
        # NOTE(review): face_count below is assigned but never used.
        for i, f in enumerate(pmxModel.faces):
            bf = mesh.tessfaces[i]
            bf.vertices_raw = list(f) + [0]
            bf.use_smooth = True
            face_count = 0
            uv = uvLayer.data[i]
            uv.uv1 = self.flipUV_V(pmxModel.vertices[f[0]].uv)
            uv.uv2 = self.flipUV_V(pmxModel.vertices[f[1]].uv)
            uv.uv3 = self.flipUV_V(pmxModel.vertices[f[2]].uv)

            bf.material_index = self.__getMaterialIndexFromFaceIndex(i)

    def __importVertexMorphs(self):
        """Create a shape key per pmx vertex morph (plus a basis key)."""
        pmxModel = self.__model

        utils.selectAObject(self.__meshObj)
        bpy.ops.object.shape_key_add()

        for morph in filter(lambda x: isinstance(x, pmx.VertexMorph), pmxModel.morphs):
            shapeKey = self.__meshObj.shape_key_add(morph.name)
            for md in morph.offsets:
                shapeKeyPoint = shapeKey.data[md.index]
                offset = mathutils.Vector(md.offset) * self.TO_BLE_MATRIX
                shapeKeyPoint.co = shapeKeyPoint.co + offset * self.__scale

    def __hideRigidsAndJoints(self, obj):
        """Recursively hide rigid-body, joint and spring helper objects under obj."""
        if obj.is_mmd_rigid or obj.is_mmd_joint or obj.is_mmd_non_collision_joint or obj.is_mmd_spring_joint or obj.is_mmd_spring_goal:
            obj.hide = True

        for i in obj.children:
            self.__hideRigidsAndJoints(i)

    def __addArmatureModifier(self, meshObj, armObj):
        """Attach an Armature modifier so vertex groups deform the mesh."""
        armModifier = meshObj.modifiers.new(name='Armature', type='ARMATURE')
        armModifier.object = armObj
        armModifier.use_vertex_groups = True

    def __renameLRBones(self):
        """Rename bones to Blender's .L/.R convention, keeping the original
        Japanese name in mmd_bone_name_j and renaming vertex groups to match."""
        pose_bones = self.__armObj.pose.bones
        for i in pose_bones:
            if i.is_mmd_shadow_bone:
                continue
            i.mmd_bone_name_j = i.name
            i.name = utils.convertNameToLR(i.name)
            self.__meshObj.vertex_groups[i.mmd_bone_name_j].name = i.name

    def execute(self, **args):
        """Run the full import.

        Keyword args: either `pmx` (a parsed pmx.Model) or `filepath` (a .pmx
        file to load); plus `scale`, `rename_LR_bones`, `only_collisions`,
        `ignore_non_collision_groups`, `distance_of_ignore_collisions` and
        `hide_rigids`.
        """
        if 'pmx' in args:
            self.__model = args['pmx']
        else:
            self.__model = pmx.load(args['filepath'])

        self.__scale = args.get('scale', 1.0)
        renameLRBones = args.get('rename_LR_bones', False)
        self.__onlyCollisions = args.get('only_collisions', False)
        self.__ignoreNonCollisionGroups = args.get('ignore_non_collision_groups', True)
        self.__distance_of_ignore_collisions = args.get('distance_of_ignore_collisions', 1) # distance at which collisions are not considered (i.e. distance within which non-collision group settings are honored)
        self.__distance_of_ignore_collisions *= self.__scale

        logging.info('****************************************')
        logging.info(' mmd_tools.import_pmx module')
        logging.info('----------------------------------------')
        logging.info(' Start to load model data form a pmx file')
        logging.info('            by the mmd_tools.pmx modlue.')
        logging.info('')

        start_time = time.time()

        self.__createGroups()
        self.__createObjects()

        self.__importVertices()
        self.__importBones()
        self.__importMaterials()
        self.__importFaces()
        self.__importRigids()
        self.__importJoints()

        self.__importVertexMorphs()

        if renameLRBones:
            self.__renameLRBones()

        self.__addArmatureModifier(self.__meshObj, self.__armObj)
        self.__meshObj.data.update()

        bpy.types.Object.pmx_import_scale = bpy.props.FloatProperty(name='pmx_import_scale')
        if args.get('hide_rigids', False):
            self.__hideRigidsAndJoints(self.__root)
        self.__armObj.pmx_import_scale = self.__scale

        for i in [self.__rigidObjGroup.objects, self.__jointObjGroup.objects, self.__tempObjGroup.objects]:
            for j in i:
                self.__allObjGroup.objects.link(j)

        # Scale gravity with the import scale so physics behaves consistently.
        bpy.context.scene.gravity[2] = -9.81 * 10 * self.__scale

        logging.info(' Finished importing the model in %f seconds.', time.time() - start_time)
        logging.info('----------------------------------------')
        logging.info(' mmd_tools.import_pmx module')
        logging.info('****************************************')
| [
"melanitta_nigra@yahoo.co.jp"
] | melanitta_nigra@yahoo.co.jp |
e4ba71bf1ba724c0db53d8730a07c16ea26d3366 | ef8f2a5dee38b6355ffa9c23dedde2fc112298ff | /examples/simpyx/simpyx1.py | 6b5aff7839e7082eecd241e3f04c0f362d28fe3e | [] | no_license | ambrosiano/python-x | 3c873f27f17c8bcc9f3dfd40ac9a10372055373c | 09d032e7824472a58d9ee7f9908aeae43eb550f9 | refs/heads/master | 2021-01-16T23:05:52.179326 | 2019-09-19T04:00:59 | 2019-09-19T04:00:59 | 70,008,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | from simpy import *
def car(env):
    """SimPy process: a car that endlessly alternates parking and driving.

    Yields timeout events to the simulation environment: 5 time units
    parked, then 2 time units on the road, repeating forever.
    """
    PARK_TIME, DRIVE_TIME = 5, 2
    while True:
        print('Start parking at %d' % env.now)
        yield env.timeout(PARK_TIME)
        print('Start driving at %d' % env.now)
        yield env.timeout(DRIVE_TIME)
if __name__ == '__main__':
    # Demo driver: build a simulation environment, register the car
    # process, and advance the simulation clock to t=15.
    print('simpy test ')
    simulation = Environment()
    simulation.process(car(simulation))
    simulation.run(until=15)
| [
"ambro@lanl.gov"
] | ambro@lanl.gov |
089468f8e1f36838097225d9dd164abf436d4917 | 25d6f09c8157dfc70becd19aa43361eb7b52de1b | /tests.py | 2413f1ebdf7d52f4f8c6c594ba207be724d5ad1e | [] | no_license | eguzmanf/s17c126-microblog-git | b78d8f9c5a1964e934a4d2ac40765c91a47555da | 420a6873f88c06bff6db636705c7558fb5a0430b | refs/heads/master | 2022-12-28T04:59:41.477266 | 2018-08-08T21:12:29 | 2018-08-08T21:12:29 | 144,066,834 | 0 | 0 | null | 2022-12-08T02:21:19 | 2018-08-08T20:57:29 | Python | UTF-8 | Python | false | false | 3,616 | py | #!/usr/bin/env python
from datetime import datetime, timedelta
import unittest
from app import create_app, db
from app.models import User, Post
from config import Config
class TestConfig(Config):
    """App configuration used only by the test suite.

    Enables Flask testing mode and points SQLAlchemy at an in-memory
    SQLite database so the tests never touch a real database.
    """
    TESTING = True
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
class UserModelCase(unittest.TestCase):
    """Unit tests for the User model: passwords, avatars, and followers."""

    def setUp(self):
        """Create a fresh app bound to an in-memory DB before every test."""
        self.app = create_app(TestConfig)
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()

    def tearDown(self):
        """Drop all tables and pop the app context after every test."""
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_password_hashing(self):
        """A password verifies only against its own hash."""
        user = User(username='susan')
        user.set_password('cat')
        self.assertFalse(user.check_password('dog'))
        self.assertTrue(user.check_password('cat'))

    def test_avatar(self):
        """Avatar URLs are Gravatar identicons keyed on the email's MD5."""
        user = User(username='john', email='john@example.com')
        expected = ('https://www.gravatar.com/avatar/'
                    'd4c74594d841139328695756648b6bd6'
                    '?d=identicon&s=128')
        self.assertEqual(user.avatar(128), expected)

    def test_follow(self):
        """follow()/unfollow() keep both sides of the relationship in sync."""
        john = User(username='john', email='john@example.com')
        susan = User(username='susan', email='susan@example.com')
        db.session.add_all([john, susan])
        db.session.commit()
        # No relationships yet.
        self.assertEqual(john.followed.all(), [])
        self.assertEqual(john.followers.all(), [])

        john.follow(susan)
        db.session.commit()
        self.assertTrue(john.is_following(susan))
        self.assertEqual(john.followed.count(), 1)
        self.assertEqual(john.followed.first().username, 'susan')
        self.assertEqual(susan.followers.count(), 1)
        self.assertEqual(susan.followers.first().username, 'john')

        john.unfollow(susan)
        db.session.commit()
        self.assertFalse(john.is_following(susan))
        self.assertEqual(john.followed.count(), 0)
        self.assertEqual(susan.followers.count(), 0)

    def test_follow_posts(self):
        """followed_posts() returns own + followed users' posts, newest first."""
        # Four users, one post each, with staggered timestamps so the
        # expected ordering is deterministic.
        john = User(username='john', email='john@example.com')
        susan = User(username='susan', email='susan@example.com')
        mary = User(username='mary', email='mary@example.com')
        david = User(username='david', email='david@example.com')
        db.session.add_all([john, susan, mary, david])

        now = datetime.utcnow()
        post_john = Post(body="post from john", author=john,
                         timestamp=now + timedelta(seconds=1))
        post_susan = Post(body="post from susan", author=susan,
                          timestamp=now + timedelta(seconds=4))
        post_mary = Post(body="post from mary", author=mary,
                         timestamp=now + timedelta(seconds=3))
        post_david = Post(body="post from david", author=david,
                          timestamp=now + timedelta(seconds=2))
        db.session.add_all([post_john, post_susan, post_mary, post_david])
        db.session.commit()

        # Follower graph: john -> {susan, david}, susan -> mary, mary -> david.
        john.follow(susan)
        john.follow(david)
        susan.follow(mary)
        mary.follow(david)
        db.session.commit()

        # Each feed contains the user's own post plus followed users' posts,
        # ordered by timestamp descending.
        self.assertEqual(john.followed_posts().all(),
                         [post_susan, post_david, post_john])
        self.assertEqual(susan.followed_posts().all(), [post_susan, post_mary])
        self.assertEqual(mary.followed_posts().all(), [post_mary, post_david])
        self.assertEqual(david.followed_posts().all(), [post_david])
# Allow running the suite directly (python tests.py); verbosity=2 prints
# one result line per test method.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| [
"guzman.exe@gmail.com"
] | guzman.exe@gmail.com |
51d7428ae284b0c6df2a73dad8baa582447f9273 | 05276898508e103401ec4dba3b214e192ee38c2f | /pj_tools/test.py | 41fa5513ca3bf8653ba6ed5171f77e0ac64dcee0 | [] | no_license | yiyilinghun/pj | fe7b7ee021d59342ab33456e55fd606f0e6d692e | 70c820c9c101871b9f6b6da58d77b6a8beaaa3d3 | refs/heads/master | 2019-08-31T03:12:44.142984 | 2018-01-05T05:06:14 | 2018-01-05T05:06:14 | 94,193,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,612 | py | ###!/usr/bin/env python
###--coding:utf-8--
### import ptvsd
##import threading
##from multiprocessing.dummy import Pool as ThreadPool
##from http.server import BaseHTTPRequestHandler, HTTPServer
##from os import path
##from urllib.parse import urlparse
##import MySQLdb
##import time
##import os
##import sys
##if len(sys.argv) != 3:
## print('参数个数不符')
## quit()
##if sys.argv[1] == 'out':
## use_outside_addr = True
##elif sys.argv[1] == 'in':
## use_outside_addr = False
##else:
## quit()
##try:
## int(sys.argv[2])
##except :
## quit()
##g_server_outside_addr_list = {
## '****混服':{
## '****-混服1服':'111.231.86.238',
## '****-混服2服':'111.231.54.113',
## '****-混服3服':'111.231.62.240',
## '****-混服4服':'122.152.204.111',
## '****-混服5服':'111.231.107.13',
## '****-混服6服':'111.231.87.237',
## '****-混服7服':'111.231.104.28',
## '****-混服8服':'111.231.85.61',
## '****-混服9服':'122.152.192.228',
## '****-混服10服':'111.231.135.225',
## '****-混服11服':'111.231.54.33',
## '****-混服12服':'111.231.99.28',
## '****-混服13服':'122.152.209.167',
## '****-混服14服':'111.231.93.135',
## '****-混服15服':'111.231.90.141',
## '****-混服16服':'111.231.66.25',
## '****-混服17服':'111.231.76.75',
## '****-混服18服':'111.231.75.181',
## '****-混服19服':'111.231.94.188',
## '****-混服20服':'111.231.94.223',
## '****-混服21服':'111.231.91.40',
## },
## '****工会':{
## '****-工会1服':'111.231.91.142',
## '****-工会2服':'122.152.217.185',
## '****-工会3服':'111.231.89.88',
## '****-工会4服':'111.231.88.75',
## '****-工会5服':'111.231.112.13',
## '****-工会6服':'111.231.86.159',
## '****-工会7服':'111.231.105.206',
## '****-工会8服':'122.152.212.43',
## '****-工会9服':'211.159.216.48',
## '****-工会10服':'111.231.63.144',
## },
##}
##g_server_inside_addr_list = {
## '****混服':{
## '****-混服1服':'10.154.151.226',
## '****-混服2服':'10.105.85.208',
## '****-混服3服':'10.105.224.166',
## '****-混服4服':'10.105.0.212',
## '****-混服5服':'10.105.122.219',
## '****-混服6服':'10.105.115.188',
## '****-混服7服':'10.105.2.198',
## '****-混服8服':'10.105.13.185',
## '****-混服9服':'10.105.120.23',
## '****-混服10服':'10.105.11.213',
## '****-混服11服':'10.105.23.174',
## '****-混服12服':'10.105.41.250',
## '****-混服13服':'10.105.102.231',
## '****-混服14服':'10.105.64.184',
## '****-混服15服':'10.105.251.226',
## '****-混服16服':'10.105.56.210',
## '****-混服17服':'10.105.99.252',
## '****-混服18服':'10.105.111.34',
## '****-混服19服':'10.105.203.73',
## '****-混服20服':'10.105.104.92',
## '****-混服21服':'10.105.16.161',
## },
## '****工会':{
## '****-工会1服':'10.154.10.131',
## '****-工会2服':'10.133.192.157',
## '****-工会3服':'10.105.213.47',
## '****-工会4服':'10.105.193.118',
## '****-工会5服':'10.105.221.111',
## '****-工会6服':'10.105.18.48',
## '****-工会7服':'10.105.206.235',
## '****-工会8服':'10.105.31.160',
## '****-工会9服':'10.105.29.210',
## '****-工会10服':'10.105.193.178',
## },
##}
##const_html_js_reload_str = '''<script language="JavaScript">
##function myrefresh(){window.location.reload();}
##setTimeout('myrefresh()',5000);</script>
##<style>
##td {
##white-space: nowrap;
##font-size :1.8rem;
##}
##th{
## font-size :1.8rem;
##}
##</style>'''
### MIME-TYPE
##mimedic = [('.py', 'zip/py'),
## ('.html', 'text/html'),
## ('.htm', 'text/html'),
## ('.js', 'application/javascript'),
## ('.css', 'text/css'),
## ('.json', 'application/json'),
## ('.png', 'image/png'),
## ('.jpg', 'image/jpeg'),
## ('.gif', 'image/gif'),
## ('.txt', 'text/plain'),
## ('.avi', 'video/x-msvideo'),]
### 统计信息
##def db_get_num_info(name, dbaddr):
## try:
## conn = MySQLdb.connect(host=dbaddr,port=3306,user='read',passwd='Haymaker@88',db='projectdl',charset='utf8')
## cursor = conn.cursor()
## #cursor.execute("select count(*) from player;")
## cursor.execute("""SELECT COUNT(DISTINCT DeviceID)
##FROM projectdl.loginoutlog
##WHERE ThirdChannel LIKE '%android_quick%' """)
## rs_device_sum = cursor.fetchone()
## rs_device_sum = (rs_device_sum[0] is not None and int(rs_device_sum[0])) or 0
## cursor.execute("""SELECT COUNT(DISTINCT RoleID)
##FROM projectdl.loginoutlog
##WHERE ThirdChannel LIKE '%android_quick%' """)
## rs_role_sum = cursor.fetchone()
## rs_role_sum = (rs_role_sum[0] is not None and int(rs_role_sum[0])) or 0
## cursor.close()
## conn.close()
## plyaer_sum = db_get_online_info(dbaddr)
## except :
## rs_device_sum = 0
## rs_role_sum = 0
## name = name
## finally:
## return rs_device_sum, rs_role_sum, name
####################################################################
##def db_get_online_info(dbaddr):
## conn = MySQLdb.connect(host=dbaddr,port=3306,user='read',passwd='Haymaker@88',db='count',charset='utf8')
## cursor = conn.cursor()
## #cursor.execute("select count(*) from player;")
## cursor.execute("select number from online where port = 52113 ;")
## rs_online_in_cell_sum = cursor.fetchone()
## rs_online_in_cell_sum = (rs_online_in_cell_sum[0] is not None and int(rs_online_in_cell_sum[0])) or 0
## cursor.close()
## conn.close()
## return rs_online_in_cell_sum
####################################################################
##def _parallel_get_channel_info(*servers):
## subtotal_rs_device_sum = 0
## subtotal_rs_role_sum = 0
## result = []
## tPool = ThreadPool(100)
## for name,addr in servers[1].items():
## result.append(tPool.apply_async(db_get_num_info, [name, addr]))
## tPool.close()
## tPool.join()
## for res in result:
## rs_device_sum, rs_role_sum, name = res.get()
## subtotal_rs_device_sum += rs_device_sum
## subtotal_rs_role_sum += rs_role_sum
## return subtotal_rs_device_sum, subtotal_rs_role_sum
####################################################################
##def get_info():
## total_rs_device_sum = 0
## total_rs_role_sum = 0
## server_list = None
## if use_outside_addr:
## server_list = g_server_outside_addr_list
## else:
## server_list = g_server_inside_addr_list
## strlist = []
## result = []
## tPool = ThreadPool(10)
## for servers in server_list.items():
## result.append(tPool.apply_async(_parallel_get_channel_info, servers))
## tPool.close()
## tPool.join()
## for res in result:
## subtotal_rs_device_sum, subtotal_rs_role_sum = res.get()
## total_rs_device_sum += subtotal_rs_device_sum
## total_rs_role_sum += subtotal_rs_role_sum
## return total_rs_device_sum, total_rs_role_sum
##g_bytes_hunfu_info = None
##g_bytes_gonghui_info = None
##g_bytes_yyb_info = None
##g_bytesinfo = None
##def thread_tar():
## #for x in range(2):
## while True:
## try:
## print('开始拉取')
## global g_bytesinfo
## total_rs_device_sum, total_rs_role_sum = get_info()
## print('拉取完毕账号%d,设备%d' %(total_rs_role_sum, total_rs_device_sum))
## finally:
## print('拉取结束')
## time.sleep(5)
### main
##t = threading.Thread(target=thread_tar)
##t.start()
##curdir = path.dirname(path.realpath(__file__))
##sep = '/'
##class testHTTPServer_RequestHandler(BaseHTTPRequestHandler):
## # GET
## def do_GET(self):
## sendReply = False
## querypath = urlparse(self.path)
## filepath, query = querypath.path, querypath.query
## if filepath.endswith('/') or filepath.endswith('/show'):
## try:
## global g_bytesinfo
## if g_bytesinfo is None:
## content = bytes('''<table border="1" align="center" border="8" width="1000">
## <tr><th>新服开启,页面初始化中...</th></tr><br/>''' + const_html_js_reload_str, encoding = "gbk")
## else:
## content = g_bytesinfo
## self.send_response(200)
## self.send_header('Content-type','text/html')
## self.end_headers()
## self.wfile.write(content)
## except IOError:
## self.send_error(404,'File Not Found: %s' % self.path)
## filename, fileext = path.splitext(filepath)
## for e in mimedic:
## if e[0] == fileext:
## mimetype = e[1]
## sendReply = True
## if sendReply == True:
## try:
## with open(path.realpath(curdir + sep + filepath),'rb') as f:
## content = f.read()
## self.send_response(200)
## self.send_header('Content-type',mimetype)
## self.end_headers()
## self.wfile.write(content)
## except IOError:
## self.send_error(404,'File Not Found: %s' % self.path)
##def run():
## port = int(sys.argv[2])
## print('starting server, port', port)
## # Server settings
## server_address = ('0.0.0.0', port)
## httpd = HTTPServer(server_address, testHTTPServer_RequestHandler)
## print('running server...')
## httpd.serve_forever()
##if __name__ == '__main__':
## run()
#####import httplib2
#####while True:
##### try:
##### h = httplib2.Http()
##### resp, content = h.request("http://192.168.4.227:8000/", "GET")
##### #resp, content = h.request("http://118.89.157.62:8000/", "GET")
##### #print('ok')
##### del(h)
##### except Exception as e:
##### print(e)
##### fail+=1
##### print('error')
#####!/usr/bin/env python
#####--coding:utf-8--
##### import ptvsd
####import threading
####from multiprocessing.dummy import Pool as ThreadPool
####from http.server import BaseHTTPRequestHandler, HTTPServer
####from os import path
####from urllib.parse import urlparse
####import MySQLdb
####import time
####import sys
####def loop():
#### while True:
#### pass
####tPool = ThreadPool(10)
####for x in range(5000):
#### tPool.apply_async(loop, ())
####tPool.close()
####tPool.join()
#####if len(sys.argv) != 2:
##### print('参数个数不符')
##### quit()
#####if sys.argv[1] == 'out':
####use_outside_addr = True
#####elif sys.argv[1] == 'in':
##### use_outside_addr = False
#####else:
##### quit()
####lock = threading.Lock()
#####use_outside_addr = True
#####use_outside_addr = False
####g_server_outside_addr_list = {
#### '超燃混服':{
#### '超燃-混服1服':'111.231.86.238',
#### '超燃-混服2服':'111.231.54.113',
#### '超燃-混服3服':'111.231.62.240',
#### '超燃-混服4服':'122.152.204.111',
#### '超燃-混服5服':'111.231.107.13',
#### '超燃-混服6服':'111.231.87.237',
#### '超燃-混服7服':'111.231.104.28',
#### '超燃-混服8服':'111.231.85.61',
#### '超燃-混服9服':'122.152.192.228',
#### '超燃-混服10服':'111.231.135.225',
#### '超燃-混服11服':'111.231.54.33',
#### '超燃-混服12服':'111.231.99.28',
#### '超燃-混服13服':'122.152.209.167',
#### '超燃-混服14服':'111.231.93.135',
#### '超燃-混服15服':'111.231.90.141',
#### '超燃-混服16服':'111.231.66.25',
#### '超燃-混服17服':'111.231.76.75',
#### '超燃-混服18服':'111.231.75.181',
#### '超燃-混服19服':'111.231.94.188',
#### '超燃-混服20服':'111.231.94.223',
#### '超燃-混服21服':'111.231.91.40',
#### },
#### '超燃工会':{
#### '超燃-工会1服':'111.231.91.142',
#### '超燃-工会2服':'122.152.217.185',
#### '超燃-工会3服':'111.231.89.88',
#### '超燃-工会4服':'111.231.88.75',
#### '超燃-工会5服':'111.231.112.13',
#### '超燃-工会6服':'111.231.86.159',
#### '超燃-工会7服':'111.231.105.206',
#### '超燃-工会8服':'122.152.212.43',
#### '超燃-工会9服':'211.159.216.48',
#### '超燃-工会10服':'111.231.63.144',
#### },
#### '超燃应用宝':{
#### '超燃-应用宝1服':'111.231.91.194',
#### '超燃-应用宝2服':'111.231.89.41',
#### '超燃-应用宝3服':'111.231.88.82',
#### '超燃-应用宝4服':'111.231.89.118',
#### '超燃-应用宝5服':'111.231.89.45',
#### '超燃-应用宝6服':'111.231.90.171',
#### '超燃-应用宝7服':'111.231.50.85',
#### '超燃-应用宝8服':'111.231.87.23',
#### '超燃-应用宝9服':'111.231.142.248',
#### '超燃-应用宝10服':'122.152.210.139',
#### },
####}
####g_server_inside_addr_list = {
#### '超燃混服':{
#### '超燃-混服1服':'10.154.151.226',
#### '超燃-混服2服':'10.105.85.208',
#### '超燃-混服3服':'10.105.224.166',
#### '超燃-混服4服':'10.105.0.212',
#### '超燃-混服5服':'10.105.122.219',
#### '超燃-混服6服':'10.105.115.188',
#### '超燃-混服7服':'10.105.2.198',
#### '超燃-混服8服':'10.105.13.185',
#### '超燃-混服9服':'10.105.120.23',
#### '超燃-混服10服':'10.105.11.213',
#### '超燃-混服11服':'10.105.23.174',
#### '超燃-混服12服':'10.105.41.250',
#### '超燃-混服13服':'10.105.102.231',
#### '超燃-混服14服':'10.105.64.184',
#### '超燃-混服15服':'10.105.251.226',
#### '超燃-混服16服':'10.105.56.210',
#### '超燃-混服17服':'10.105.99.252',
#### '超燃-混服18服':'10.105.111.34',
#### '超燃-混服19服':'10.105.203.73',
#### '超燃-混服20服':'10.105.104.92',
#### '超燃-混服21服':'10.105.16.161',
#### },
#### '超燃工会':{
#### '超燃-工会1服':'10.154.10.131',
#### '超燃-工会2服':'10.133.192.157',
#### '超燃-工会3服':'10.105.213.47',
#### '超燃-工会4服':'10.105.193.118',
#### '超燃-工会5服':'10.105.221.111',
#### '超燃-工会6服':'10.105.18.48',
#### '超燃-工会7服':'10.105.206.235',
#### '超燃-工会8服':'10.105.31.160',
#### '超燃-工会9服':'10.105.29.210',
#### '超燃-工会10服':'10.105.193.178',
#### },
#### '超燃应用宝':{
#### '超燃-应用宝1服':'10.154.134.161',
#### '超燃-应用宝2服':'10.105.116.149',
#### '超燃-应用宝3服':'10.105.73.43',
#### '超燃-应用宝4服':'10.105.122.78',
#### '超燃-应用宝5服':'10.105.16.108',
#### '超燃-应用宝6服':'10.105.89.74',
#### '超燃-应用宝7服':'10.105.243.29',
#### '超燃-应用宝8服':'10.105.127.179',
#### '超燃-应用宝9服':'10.105.210.66',
#### '超燃-应用宝10服':'10.105.223.248',
#### },}
####def test_db_get_num_info():
#### name = '超燃-混服1服'
#### dbaddr = '111.231.86.238'
#### conn =
#### MySQLdb.connect(host=dbaddr,port=3306,user='read',passwd='Haymaker@88',db='projectdl',charset='utf8')
#### cursor = conn.cursor()
#### cursor.execute("select DeviceID from loginoutlog where Type = 3 and Time <
#### 1513872000;")
#### rs = cursor.fetchmany(cursor.rowcount)
#### DeviceID1 = {}
#### for x in rs:
#### DeviceID1.update({x[0]:x[0]})
#### cursor.execute("select DeviceID from account where((CreateTime/100)-28800)
#### < 1513872000;")
#### rs = cursor.fetchmany(cursor.rowcount)
#### DeviceID2 = {}
#### for x in rs:
#### DeviceID2.update({x[0]:x[0]})
#### DeviceID3 = {}
#### for x in DeviceID1:
#### if DeviceID2.get(x) is None:
#### DeviceID3.update({x : x})
#### print(x)
#### DeviceID4 = {}
#### for x in DeviceID2:
#### if DeviceID1.get(x) is None:
#### DeviceID4.update({x : x})
#### print(x)
#### os.system('pause')
####test_db_get_num_info()
##### 统计信息
####def db_get_num_info(name, dbaddr):
#### conn =
#### MySQLdb.connect(host=dbaddr,port=3306,user='read',passwd='Haymaker@88',db='projectdl',charset='utf8')
#### cursor = conn.cursor()
#### xsum = 0
#### test = {}
#### #cursor.execute("select count(*) from player;")
#### #cursor.execute("select DeviceID from account
#### where((CreateTime/100)-28800)
#### #< 1513872000;")
#### cursor.execute("select count(distinct DeviceID) from loginoutlog where
#### Type = 3 and Time < 1513872000;")
#### rs = cursor.fetchmany(cursor.rowcount)
#### for x in rs:
#### if test.get(x[0]) is not None:
#### xsum += 1
#### test.update({name : x[0]})
#### cursor.close()
#### conn.close()
#### if xsum > 0:
#### lock.acquire()
#### print(xsum)
#### lock.release()
#### return test
######################################################################
####def db_get_online_info(dbaddr):
#### conn =
#### MySQLdb.connect(host=dbaddr,port=3306,user='read',passwd='Haymaker@88',db='count',charset='utf8')
#### cursor = conn.cursor()
#### #cursor.execute("select count(*) from player;")
#### cursor.execute("select number from online where port = 52113 ;")
#### rs_online_in_cell_sum = cursor.fetchone()
#### rs_online_in_cell_sum = (rs_online_in_cell_sum[0] is not None and
#### int(rs_online_in_cell_sum[0])) or 0
#### cursor.close()
#### conn.close()
#### return rs_online_in_cell_sum
######################################################################
####def _parallel_get_channel_info(*servers):
#### subtotal_strdict = ''
#### strdict = '''<table border="1" align="center" border="8" width="1000">
#### <tr>
#### <th>%s</th>
#### <th>设备数量</th>
#### <th>在线数量</th>
#### <th>充值金额</th>
#### <th>客服充值</th>
#### <th>手机绑定</th>
#### </tr><br/>''' % (servers[0])
#### subtotal_devicesum = 0
#### subtotal_online_cell_sum = 0
#### subtotal_moneysum = 0
#### subtotal_csmoneysum = 0
#### subtotal_phone_sum = 0
#### result = []
#### tPool = ThreadPool(100)
#### for name,addr in servers[1].items():
#### result.append(tPool.apply_async(db_get_num_info, [name, addr]))
#### tPool.close()
#### tPool.join()
#### xsum = 0
#### test = {}
#### for res in result:
#### temp = res.get()
#### test.update(temp)
#### if xsum > 0:
#### lock.acquire()
#### print(xsum)
#### lock.release()
#### return test
######################################################################
####def get_info():
#### total_devicesum = 0
#### total_online_cell_sum = 0
#### total_moneysum = 0
#### total_csmoneysum = 0
#### total_phone_sum = 0
#### subtotal_strdict = '''<table border="1" align="center" border="8"
#### width="1000">
#### <tr>
#### <th>渠道小计</th>
#### <th>设备数量</th>
#### <th>在线数量</th>
#### <th>充值金额</th>
#### <th>客服充值</th>
#### <th>手机绑定</th>
#### </tr><br/>
#### '''
#### server_list = None
#### if use_outside_addr:
#### server_list = g_server_outside_addr_list
#### else:
#### server_list = g_server_inside_addr_list
#### strlist = []
#### result = []
#### tPool = ThreadPool(10)
#### for servers in server_list.items():
#### result.append(tPool.apply_async(_parallel_get_channel_info, servers))
#### tPool.close()
#### tPool.join()
#### test = {}
#### for res in result:
#### temp = res.get()
#### for x in temp.items():
#### test.update({x[0]:x[1]})
#### temp_sum = 0
#### for x in test.values():
#### temp_sum+=x
#### print(temp_sum)
#### return test
####g_bytes_hunfu_info = None
####g_bytes_gonghui_info = None
####g_bytes_yyb_info = None
####g_bytesinfo = None
####def thread_tar():
#### while True:
#### try:
#### print('开始拉取')
#### #lock.acquire()
#### global g_bytesinfo
#### get_info()
#### return
#### temp = ''
#### for x in server_info:
#### temp += x
#### g_bytesinfo = bytes(total_info + temp + '''<script
#### language="JavaScript">
####function myrefresh(){window.location.reload();}
####setTimeout('myrefresh()',5000);</script>''' + '''
####<style>
####td {
####white-space: nowrap;
####font-size :2.0rem;
####}
####th{
#### font-size :2.0rem;
####}
####</style>
#### ''', encoding = "gbk")
#### finally:
#### print('拉取完毕')
#### #lock.release()
#### time.sleep(5)
##### main
####t = threading.Thread(target=thread_tar)
####t.start()
####curdir = path.dirname(path.realpath(__file__))
####sep = '/'
##### MIME-TYPE
####mimedic = [('.py', 'zip/py'),
#### ('.html', 'text/html'),
#### ('.htm', 'text/html'),
#### ('.js', 'application/javascript'),
#### ('.css', 'text/css'),
#### ('.json', 'application/json'),
#### ('.png', 'image/png'),
#### ('.jpg', 'image/jpeg'),
#### ('.gif', 'image/gif'),
#### ('.txt', 'text/plain'),
#### ('.avi', 'video/x-msvideo'),]
####class testHTTPServer_RequestHandler(BaseHTTPRequestHandler):
#### # GET
#### def do_GET(self):
#### sendReply = False
#### querypath = urlparse(self.path)
#### filepath, query = querypath.path, querypath.query
#### if filepath.endswith('/'):
#### filepath += 'index.html'
#### if filepath.endswith('/show'):
#### lock.acquire()
#### try:
#### global g_bytesinfo
#### content = g_bytesinfo
#### self.send_response(200)
#### self.send_header('Content-type','text/html')
#### self.end_headers()
#### self.wfile.write(content)
#### except IOError:
#### self.send_error(404,'File Not Found: %s' % self.path)
#### finally:
#### lock.release()
#### return
#### filename, fileext = path.splitext(filepath)
#### for e in mimedic:
#### if e[0] == fileext:
#### mimetype = e[1]
#### sendReply = True
#### if sendReply == True:
#### try:
#### with open(path.realpath(curdir + sep + filepath),'rb') as f:
#### content = f.read()
#### self.send_response(200)
#### self.send_header('Content-type',mimetype)
#### self.end_headers()
#### self.wfile.write(content)
#### except IOError:
#### self.send_error(404,'File Not Found: %s' % self.path)
####def run():
#### port = 8000
#### print('starting server, port', port)
#### # Server settings
#### server_address = ('0.0.0.0', port)
#### httpd = HTTPServer(server_address, testHTTPServer_RequestHandler)
#### print('running server...')
#### httpd.serve_forever()
###if __name__ == '__main__':
### try:
### a = 100
### b = 0
### c = a / b
### #except :
### # pass
### finally:
### pass
### #file = open(r'C:\Users\ms\Desktop\sum.log')
### #sum = 0
### #for line in file:
### # #print(line)
### # line = line[line.find(r'(int)') + 5:-1]
### # #print(line)
### # line = line[line.find(r'(int)') + 5:-1]
### # #print(line)
### # line = line[line.find(r'(int)') + 5:-1]
### # #print(line)
### # line = line[:line.find(r',')]
### # sum += int(line)
### #print(sum)
| [
"200866850@qq.com"
] | 200866850@qq.com |
1c9dd7e26081592920c899a26d9fb7f590119f91 | 867f6ad1c978af2e410742220720bb8d689a01ac | /adminapp/migrations/0006_alter_products_price.py | 0a4ec7df19e0c24370db88f9f9343153571bac89 | [] | no_license | sumisalam/Django_Ecommerce_shop | b12f0c16638de02913baf0280ef74eef604593b2 | 25a1c31aa15c8f9ef3382fd1d36575c05eed8b23 | refs/heads/master | 2023-07-18T14:43:14.338467 | 2021-09-06T13:14:32 | 2021-09-06T13:14:32 | 403,626,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 3.2.3 on 2021-06-21 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adminapp', '0005_orders'),
]
operations = [
migrations.AlterField(
model_name='products',
name='price',
field=models.IntegerField(),
),
]
| [
"sumeenasalam746@gmail.com"
] | sumeenasalam746@gmail.com |
f89d77ec7050a0b3fad826498e49acf3dae1ad69 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4060/157004060.py | 58e52da8c9b818275d320822a6d5a4d065d5c91c | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,347 | py | from bots.botsconfig import *
from records004060 import recorddefs
# X12 envelope settings for this grammar (transaction set 157, release 004060),
# consumed by the bots EDI translation engine.
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'NP', #functional group ID used in the GS segment
    }
# Segment tree for the X12 157 transaction set: each entry names a segment
# (ID), its minimum/maximum occurrence counts (MIN/MAX), and any nested
# child segments (LEVEL). The bots engine walks this tree to parse/build
# messages between the ST header and SE trailer.
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'BGN', MIN: 1, MAX: 1},
    {ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
        {ID: 'N2', MIN: 0, MAX: 2},
        {ID: 'N3', MIN: 0, MAX: 2},
        {ID: 'N4', MIN: 0, MAX: 1},
        {ID: 'PER', MIN: 0, MAX: 99999},
        {ID: 'REF', MIN: 0, MAX: 99999},
    ]},
    {ID: 'HL', MIN: 1, MAX: 99999, LEVEL: [
        {ID: 'NM1', MIN: 1, MAX: 99999, LEVEL: [
            {ID: 'N2', MIN: 0, MAX: 2},
            {ID: 'IN2', MIN: 0, MAX: 99999},
            {ID: 'N3', MIN: 0, MAX: 2},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'PER', MIN: 0, MAX: 99999},
            {ID: 'REF', MIN: 0, MAX: 99999},
            {ID: 'DTM', MIN: 0, MAX: 99999},
            {ID: 'SPY', MIN: 1, MAX: 99999, LEVEL: [
                {ID: 'N1', MIN: 0, MAX: 1},
                {ID: 'N2', MIN: 0, MAX: 2},
                {ID: 'N3', MIN: 0, MAX: 2},
                {ID: 'N4', MIN: 0, MAX: 1},
                {ID: 'PER', MIN: 0, MAX: 99999},
                {ID: 'DTM', MIN: 0, MAX: 99999},
            ]},
        ]},
    ]},
    {ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
1b77f9b58df747702b7f5db704f9356a3d158fde | 1fb512a12fab72900a708bc30efa545118f7d4a4 | /freezeatoms.py | 0ce8d5cd27d287ce10ae766e9b61bc6666958dd8 | [] | no_license | mrnechay/mikescripts | e0cf5c3f5ba85e47e25c1cc5a46fdf026a1a55a1 | ea2552e2a6364234d478bc68dabb01de04d47764 | refs/heads/master | 2020-05-25T11:04:16.366633 | 2015-05-07T21:16:14 | 2015-05-07T21:16:14 | 24,625,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | #!/usr/bin/python
# usage:
# freezeatoms.py coord 1,2,5,10,20
# will freeze atoms 1, 2, 5, 10, and 20 in 'coord' file
import sys, os
coord = sys.argv[1]
frozenList = [int(x) for x in sys.argv[2].split(',')]
cartesianSection = False
atomIndex = 1
newCoord = open("__coord",'w')
with open(coord) as coordFile:
for line in coordFile:
if line == '$coord\n':
cartesianSection = True
newCoord.write(line)
elif line[0] == '$':
cartesianSection = False
newCoord.write(line)
elif cartesianSection == True:
ls = filter(None, [x.strip().split(' ') for x in line.split('\n') if x.strip()][0])
if atomIndex in frozenList:
newCoord.write("%20.14f %20.14f %20.14f %5s f\n" % (float(ls[0]), float(ls[1]), float(ls[2]), ls[3]))
else:
newCoord.write("%20.14f %20.14f %20.14f %5s\n" % (float(ls[0]), float(ls[1]), float(ls[2]), ls[3]))
atomIndex += 1
elif cartesianSection == False:
newCoord.write(line)
newCoord.close()
os.rename("__coord", "coord")
| [
"michaelnechay@gmail.com"
] | michaelnechay@gmail.com |
b6ae88cb05a5a7feabddf34c2073a2f2ab4db368 | 489f363c571ee3121922feebc8bf8e92e2179f9d | /wagers/migrations/0001_initial.py | b34af4c88efdc5bbcce1b0376668eb593ea51f69 | [] | no_license | ryanchoe1205/wagering | 4485c6fca5c7050139781193ec90c93b0094ae3c | 6372d5c7ba79b6f6b2aa60a864955f56863ad86d | refs/heads/master | 2021-01-16T18:12:14.342153 | 2013-08-28T13:19:48 | 2013-08-28T13:19:48 | 12,314,621 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,457 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the initial schema: create all tables for the wagers app.

        Creates WagerSettingSingleton, EditableHTML, Wager, Bet (plus a
        unique (user, on_prop) constraint) and UserProfile, emitting the
        South post-create signal for each model.
        """
        # Adding model 'WagerSettingSingleton'
        db.create_table(u'wagers_wagersettingsingleton', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('default_credits', self.gf('django.db.models.fields.DecimalField')(default=10, max_digits=100, decimal_places=10)),
        ))
        db.send_create_signal(u'wagers', ['WagerSettingSingleton'])

        # Adding model 'EditableHTML'
        db.create_table(u'wagers_editablehtml', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('html', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'wagers', ['EditableHTML'])

        # Adding model 'Wager'
        db.create_table(u'wagers_wager', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('proposition', self.gf('django.db.models.fields.TextField')()),
            ('is_open', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('winning_position', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal(u'wagers', ['Wager'])

        # Adding model 'Bet'
        db.create_table(u'wagers_bet', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('amount_bet', self.gf('django.db.models.fields.DecimalField')(max_digits=100, decimal_places=10)),
            ('on_prop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wagers.Wager'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('position', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'wagers', ['Bet'])

        # Adding unique constraint on 'Bet', fields ['user', 'on_prop']
        # (a user may hold at most one bet per wager)
        db.create_unique(u'wagers_bet', ['user_id', 'on_prop_id'])

        # Adding model 'UserProfile'
        db.create_table(u'wagers_userprofile', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
            ('credits', self.gf('django.db.models.fields.DecimalField')(max_digits=100, decimal_places=10)),
        ))
        db.send_create_signal(u'wagers', ['UserProfile'])
def backwards(self, orm):
    """Reverse this migration: drop the unique constraint first (it must
    go before its table), then delete every table created in forwards()."""
    # Removing unique constraint on 'Bet', fields ['user', 'on_prop']
    db.delete_unique(u'wagers_bet', ['user_id', 'on_prop_id'])
    # Deleting model 'WagerSettingSingleton'
    db.delete_table(u'wagers_wagersettingsingleton')
    # Deleting model 'EditableHTML'
    db.delete_table(u'wagers_editablehtml')
    # Deleting model 'Wager'
    db.delete_table(u'wagers_wager')
    # Deleting model 'Bet'
    db.delete_table(u'wagers_bet')
    # Deleting model 'UserProfile'
    db.delete_table(u'wagers_userprofile')
# Frozen ORM state auto-generated by South: a snapshot of every model's
# field definitions at the time this migration was written. South uses it
# to build a fake ORM for this migration step. Do not edit by hand.
models = {
    u'auth.group': {
        'Meta': {'object_name': 'Group'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    u'auth.permission': {
        'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
        'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    u'auth.user': {
        'Meta': {'object_name': 'User'},
        'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
        'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
    },
    u'contenttypes.contenttype': {
        'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'wagers.bet': {
        'Meta': {'unique_together': "[('user', 'on_prop')]", 'object_name': 'Bet'},
        'amount_bet': ('django.db.models.fields.DecimalField', [], {'max_digits': '100', 'decimal_places': '10'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'on_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['wagers.Wager']"}),
        'position': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
    },
    u'wagers.editablehtml': {
        'Meta': {'object_name': 'EditableHTML'},
        'html': ('django.db.models.fields.TextField', [], {}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
    },
    u'wagers.userprofile': {
        'Meta': {'object_name': 'UserProfile'},
        'credits': ('django.db.models.fields.DecimalField', [], {'max_digits': '100', 'decimal_places': '10'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
    },
    u'wagers.wager': {
        'Meta': {'object_name': 'Wager'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'proposition': ('django.db.models.fields.TextField', [], {}),
        'winning_position': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
    },
    u'wagers.wagersettingsingleton': {
        'Meta': {'object_name': 'WagerSettingSingleton'},
        'default_credits': ('django.db.models.fields.DecimalField', [], {'default': '10', 'max_digits': '100', 'decimal_places': '10'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
    }
}

# Apps whose frozen models South should expose to this migration's ORM.
complete_apps = ['wagers']
"jColeChanged@gmail.com"
] | jColeChanged@gmail.com |
e4ed98e057375a8da643e377f7a420d297c0a54c | bfba1e0065ef482971941d3b1aa7eeb7bfdae523 | /visualization/new_get_model_data.py | 820dd76ab62465d9a25c77a61fb290d3b99cb7cd | [] | no_license | mcoppolino/are.na-analysis | d440c87d6320225d8892beab13342b6eb7033b9d | d8cb8ccde5a1573333fd0b02b49d66f09646cd2a | refs/heads/master | 2022-06-25T22:30:44.287120 | 2020-05-08T02:18:12 | 2020-05-08T02:18:12 | 242,257,816 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | import numpy as np
def get_model_data(path):
    """Load all SVD factor matrices from ``<path>/svd.npz``.

    Prints each matrix as it is loaded (debugging aid) and returns a dict
    mapping matrix name -> numpy array.
    """
    matrix_names = (
        'M', 'T',
        'M_U', 'M_D', 'M_V', 'M_U_trunc', 'M_D_trunc', 'M_V_trunc',
        'T_U', 'T_D', 'T_V', 'T_U_trunc', 'T_D_trunc', 'T_V_trunc',
        'M_hat', 'T_hat',
    )
    with np.load(path + "/svd.npz") as archive:
        loaded = {}
        for name in matrix_names:
            loaded[name] = archive[name]
            print(name + ":")
            print(archive[name])
        return loaded
"x.e.loinaz@gmail.com"
] | x.e.loinaz@gmail.com |
9473a6fe8f89b5541d5abb47dac6dc45376dbe01 | bd8fd0c735daeb93ae10dbdd58a224204790e05d | /sale_order_line_margins/models/sale.py | 8a00bcf4c3661b2a7dd392799f9b0411d8c4f153 | [] | no_license | Liyben/vertical-instaladores | 87f3906240d2802c90b24e4402d48f33f468311b | 623a4ee3745c84cff383fa52f65edf7e8806435e | refs/heads/master | 2023-08-30T14:55:39.681612 | 2021-05-28T18:39:43 | 2021-05-28T18:39:43 | 235,099,352 | 0 | 0 | null | 2021-05-28T18:39:45 | 2020-01-20T12:44:53 | Python | UTF-8 | Python | false | false | 895 | py | # © 2020 Liyben
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models, _
from odoo.addons import decimal_precision as dp
class SaleOrderLine(models.Model):
    # Extends sale order lines with a margin percentage ("Margen") that,
    # when set, derives the unit sale price from the purchase (cost) price.
    _inherit='sale.order.line'

    # Desired profit margin as a percentage of the *sale* price.
    margin_benefit = fields.Float (string='Margen', digits=dp.get_precision('Product Price'))

    @api.onchange('product_id')
    def _onchange_product_id_change_margin_benefit(self):
        # Changing the product invalidates any previously entered margin.
        for line in self:
            line.margin_benefit = 0.0

    @api.onchange('margin_benefit','purchase_price')
    def _onchange_margin_benefit(self):
        for line in self:
            if line.margin_benefit != 0.0:
                # price = cost / (1 - margin/100): the margin is taken on the
                # sale price, not as a markup on cost.
                # NOTE(review): margin_benefit == 100 would divide by zero.
                currency = line.order_id.pricelist_id.currency_id
                line.price_unit = currency.round(line.purchase_price / (1-(line.margin_benefit / 100)))
            else:
                # No margin: fall back to the standard price recomputation.
                # NOTE(review): auto_create_task and
                # _onchange_task_materials_works_workforce are not defined in
                # this file -- presumably from a sibling module; confirm.
                if (line.auto_create_task):
                    line._onchange_task_materials_works_workforce()
                else:
                    line.product_uom_change()
"soporte@liyben.com"
] | soporte@liyben.com |
70fc859e6f0c1a2b989734ed88fb10fcc3455899 | d9936f30ec5010587fad0ac707207bb344fcac5b | /Team 2/slave.py | b8a1f504c247ee0ecf8c9100c6956e79efb6cc01 | [] | no_license | ncss/projects-2018-8 | 4aff4acdbf4f0eab9a3f2469ce084d101628cc7e | abc61f3c5d9d06546847e5e98818d076fc740340 | refs/heads/master | 2021-09-04T01:04:49.759738 | 2018-01-13T19:26:15 | 2018-01-13T19:26:15 | 114,090,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | from microbit import *
import radio

radio.on()
# All boards in the game communicate on radio channel 43.
radio.config(channel=43)

# Accelerometer z-axis thresholds used to classify motion.
# NOTE(review): z > -200 is treated as "up", z < -1600 as "down" --
# confirm these against the board's mounting orientation.
upper_th = -200
lower_th = -1600
motion_state = "not_moving"
round_start = False

while True:
    message = radio.receive()
    # "1" from the master board starts a round (start transmitting time)
    if message == "1":
        round_start = True
    # "0" stops the round (stop transmitting time)
    if message == "0":
        round_start = False
    # detecting jump while the round is active
    if round_start == True:
        display.clear()
        # NOTE(review): this first get_z() call discards its result --
        # the comparisons below re-read the sensor; looks like a leftover.
        accelerometer.get_z()
        if upper_th < accelerometer.get_z(): # moving up
            motion_state = "move_up"
        if lower_th > accelerometer.get_z(): # moving down
            motion_state = "move_down"
        if motion_state == "move_up" or motion_state == "move_down":
            # Report the jump to the master and give local visual feedback.
            radio.send("x")
            display.show(Image.HAPPY)
            print("JUMPED")
            motion_state = "not_moving"
| [
"noreply@github.com"
] | noreply@github.com |
e0a92a7830861071ff26621538d111156e36a394 | b6524221787c0bcd1a57484ce562c6bdf2b6c001 | /logger.py | 7a4fc8613dcf460a93b2f46dde1ac7678c774e29 | [] | no_license | kobimic/boiler | 9a489e8c03f37a3fecb85601bae3eac0d405c13b | 21e70158476b7baaf418b9d7da8e1f04307d8f83 | refs/heads/main | 2023-05-02T07:34:11.163150 | 2021-05-15T18:07:47 | 2021-05-15T18:07:47 | 366,294,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | import logging
import os

# Logger name comes from the APP_NAME environment variable so the same
# module can serve several deployments; defaults to "Boiler".
app_name = os.getenv('APP_NAME', 'Boiler')

logger = logging.getLogger(app_name)
console_handler = logging.StreamHandler()
# DEBUG and above are emitted; the handler applies no extra level filter.
logger.setLevel(logging.DEBUG)
console_format = logging.Formatter("%(name)s | %(levelname)s | %(module)s | %(message)s")
console_handler.setFormatter(console_format)
logger.addHandler(console_handler)
logger.info("Logger init done")
| [
"kobi.m@claroty.com"
] | kobi.m@claroty.com |
7da78a1be6b833505b7f856351bba9a44121d0c4 | b1c5dd3763542558e22f7e81b0cfca94b99a5da5 | /geo.py | f6ae775f7680297f3a45d2bdeffa177f862ce859 | [] | no_license | jsmcallister98/GeodataVisualization | 6d89fc9044af313396603aaa2e79c7b5eeced679 | ac6255481ffbc9b9ac7b0f5b00f40e57b239032b | refs/heads/main | 2023-05-14T12:19:19.358241 | 2021-05-13T09:21:44 | 2021-05-13T09:21:44 | 366,982,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,243 | py | import numpy as np
import pandas as pd
import shapefile as shp
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime, timedelta
import geopandas as gpd

# set the filepath of the district boundary shapefile
fp = "India_Districts.shp"
# read the file stored in variable fp
map_df = gpd.read_file(fp)
# check data type so we can see that this is a GEOdataframe
map_df.head()
# Isolate the Uttar Pradesh districts
map_df_up = map_df[map_df['stname'] == 'UTTAR PRADESH']
# Check the resulting UP plot
map_df_up.plot()
# Get the data CSV file
df = pd.read_csv('UP_dummy_data.csv')
df.head()
# Get district-wise installation count
df_district = df['installation_district'].value_counts().to_frame()
df_district.reset_index(inplace=True)
df_district.columns = ['district','count']
df_district.head()
# Merge the district counts with the geopandas df (left join on name)
merged = map_df_up.set_index('dtname').join(df_district.set_index('district'))
merged.head()
# Districts without installations get count 0 instead of NaN
merged['count'].fillna(0,inplace=True)
# Max count fixes the colorbar scale
max_installs = merged['count'].max()
# Generate the overall choropleth map
fig, ax = plt.subplots(1, figsize=(20, 12))
merged.plot(column='count', cmap='Blues', linewidth=0.8, ax=ax, edgecolor='0.8')
# remove the axis
ax.axis('off')
# add a title
ax.set_title('District-wise Dummy Data', fontdict={'fontsize': '25', 'fontweight' : '3'})
# Create colorbar as a legend
sm = plt.cm.ScalarMappable(cmap='Blues', norm=plt.Normalize(vmin=0, vmax=max_installs))
# add the colorbar to the figure
cbar = fig.colorbar(sm)
# Create one frame per day showing cumulative installs (for an animation).
# Timestamps arrive as ISO strings; keep just the date part.
df['Installed On'] = df['Installed On'].apply(lambda x: x.split('T')[0])
df['Installed On'] = pd.to_datetime(df['Installed On'],format="%Y-%m-%d")
date_min = df['Installed On'].min()
n_days = df['Installed On'].nunique()
# NOTE(review): this figure is created but never drawn on -- the loop
# below makes its own figure each iteration; looks like a leftover.
fig, ax = plt.subplots(1, figsize=(20, 12))
for i in range(0, n_days):
    date = date_min + timedelta(days=i)
    # Get cumulative df up to and including that date
    df_c = df[df['Installed On'] <= date]
    # Per-district counts for this frame
    df_t = df_c['installation_district'].value_counts().to_frame()
    df_t.reset_index(inplace=True)
    df_t.columns = ['dist', 'count']
    # Join counts onto the UP geometry
    df_m = map_df_up.set_index('dtname').join(df_t.set_index('dist'))
    df_m['count'].fillna(0, inplace=True)
    fig, ax = plt.subplots(1, figsize=(20, 12))
    df_m.plot(column='count',
              cmap='Blues', linewidth=0.8, ax=ax, edgecolor='0.8')
    # remove the axis
    ax.axis('off')
    # add a title
    ax.set_title('District-wise Dummy Data',
                 fontdict={'fontsize': '25', 'fontweight': '3'})
    # Colorbar legend; value_counts() sorts descending, so iloc[0] is
    # the current maximum count.
    sm = plt.cm.ScalarMappable(cmap='Blues',
                               norm=plt.Normalize(vmin=0, vmax=df_t['count'].iloc[0]))
    # add the colorbar to the figure
    cbar = fig.colorbar(sm)
    fontsize = 36
    # Positions for the date label (map/data coordinates)
    date_x = 82
    date_y = 29
    ax.text(date_x, date_y,
            f"{date.strftime('%b %d, %Y')}",
            color='black',
            fontsize=fontsize)
    # frames_gpd/ must exist; frames are numbered for ffmpeg-style stitching
    fig.savefig(f"frames_gpd/frame_{i:03d}.png",
                dpi=100, bbox_inches='tight')
    plt.close()
plt.show()
"noreply@github.com"
] | noreply@github.com |
280f96bebe498d018a6a7490f7d48898c669dd97 | edc0ce97f4b2a33c738a93d1b4421dab8680bd87 | /other_querying_strategies/s2_querying.py | 99226b25f7f4153c74c7dbec32b8f25701aab957 | [] | no_license | maksim96/active_graph_halfspaces | b76f5a9857c60ec3bacefab68bb8702893c20bf6 | aa6d38b56250d034ed8584d6ab73140981de5629 | refs/heads/main | 2023-06-17T16:16:57.245651 | 2021-07-10T18:26:55 | 2021-07-10T18:26:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,340 | py | import itertools
import graph_tool
import graph_tool.topology
import numpy as np
from labelled_graph_primitives.cuts import get_cut_vertices
from prediction_strategies.labelpropgation import label_propagation
'''
this is a naive implementation of Dasarathy et al.'s S^2 '15
'''
def local_global_strategy(Y, W, alpha=0.5, iterations=200, eps=0.000001):
    """Label spreading via the local-and-global-consistency iteration.

    Repeats F <- alpha * S @ F + (1 - alpha) * Y until F changes by at most
    ``eps`` element-wise or ``iterations`` rounds have run, where
    S = D^{-1/2} W D^{-1/2} is the symmetrically normalised similarity.

    Parameters
    ----------
    Y : (n, c) array -- initial one-hot label matrix (all-zero rows are
        unlabelled vertices).
    W : (n, n) array -- symmetric similarity matrix. NOTE: mutated in place
        (diagonal zeroed), matching the original behaviour.
    alpha : float -- propagation vs. clamping trade-off.
    iterations : int -- maximum number of fixed-point iterations.
    eps : float -- element-wise convergence tolerance.

    Returns
    -------
    (n,) float array of predicted class indices; ties within a row of F
    are broken uniformly at random.
    """
    np.fill_diagonal(W, 0)
    D = np.sum(W, axis=0)
    # Guard against isolated vertices (zero degree) before 1/sqrt(D).
    if np.any(D == 0):
        D += D[D > 0].min() / 2
    Dhalfinverse = np.diag(1 / np.sqrt(D))
    S = np.dot(np.dot(Dhalfinverse, W), Dhalfinverse)
    F = np.zeros((Y.shape[0], Y.shape[1]))
    # Seed oldF so the first convergence check always fails -> loop runs.
    oldF = np.ones((Y.shape[0], Y.shape[1]))
    oldF[:Y.shape[1], :Y.shape[1]] = np.eye(Y.shape[1])
    # Bug fix: the original loop never incremented i and tested
    # "... or i >= iterations", so a non-converging iteration spun forever.
    # Stop after `iterations` rounds OR once converged, whichever is first.
    i = 0
    while i < iterations and (np.abs(oldF - F) > eps).any():
        oldF = F
        F = np.dot(alpha * S, F) + (1 - alpha) * Y
        i += 1
    result = np.zeros(Y.shape[0])
    # Uniform-random argmax per row so ties do not bias toward low indices.
    for row in range(Y.shape[0]):
        result[row] = np.random.choice(np.flatnonzero(F[row] == F[row].max()))
    return result
def label_propagation2(W, known_labels, labels):
    """Propagate labels over a graph given a pairwise distance matrix.

    Distances in ``W`` are converted to similarities with a Gaussian
    kernel, a one-hot seed matrix is built from ``known_labels``, and the
    result is handed to ``local_global_strategy``.
    """
    # Gaussian kernel: small distance -> similarity near 1.
    similarity = np.exp(-W * W / 2)
    Y = np.zeros((similarity.shape[0], labels.size))
    for col, label in enumerate(labels):
        Y[known_labels == label, col] = 1
    return local_global_strategy(Y, similarity)
def mssp(g: graph_tool.Graph, weight_prop: graph_tool.EdgePropertyMap, L, known_labels):
    """Mid-point of the Shortest Shortest Path (the S^2 bisection step).

    Among all pairs of queried vertices in ``L`` with differing known
    labels, finds the pair with the shortest connecting path and returns
    that path's middle vertex, or None when no such pair is connected.
    """
    n = g.num_vertices()
    # Pairwise distances between oppositely labelled queried vertices;
    # every other entry stays +inf.
    dist_map = np.ones((n, n)) * np.inf
    for i, j in itertools.combinations(L, 2):
        if known_labels[i] != known_labels[j]:
            dist_map[i, j] = graph_tool.topology.shortest_distance(g, i, j, weight_prop)
    i, j = np.unravel_index(dist_map.argmin(), dist_map.shape)
    # Any real path weighs at most the total edge weight, so total + 1
    # acts as a "disconnected" sentinel for the comparison below.
    if weight_prop is None:
        total_weight = g.num_edges() + 1
    else:
        total_weight = np.sum(weight_prop.a) + 1
    if dist_map[i, j] < total_weight:
        path, _ = graph_tool.topology.shortest_path(g, i, j, weight_prop)
        mid_point = path[len(path) // 2]
        return mid_point
    else:
        return None
def s2(g: graph_tool.Graph, weight_prop: graph_tool.EdgePropertyMap, labels, budget=20, use_adjacency=False, starting_vertex = None):
    """Naive implementation of the S^2 active-learning strategy
    (Dasarathy et al., 2015).

    Repeatedly queries a vertex label, removes edges between oppositely
    labelled endpoints, and moves to the mid-point of the shortest path
    between oppositely labelled queried vertices (random vertex if none).
    After every query a label-propagation prediction is scored.

    Returns (queries, accs): the queried-vertex sets after each step and
    the corresponding prediction accuracies. ``g`` is restored (removed
    edges re-added) before returning.
    """
    L = set()  # queried (labelled) vertices
    n = g.num_vertices()
    # -inf marks "label unknown".
    known_labels = -np.ones(n) * np.inf
    W = graph_tool.topology.shortest_distance(g, weights=weight_prop).get_2d_array(range(n))  # original distance map
    if starting_vertex is None:
        x = np.random.choice(list(set(range(n)).difference(L)))
    else:
        x = starting_vertex
    true_cut = get_cut_vertices(g, labels)
    cut_vertices = set()
    total_budget = budget
    queries = []
    removed_edges = []
    accs = []
    while budget > 0:
        known_labels[x] = labels[x]
        L.add(x)
        if len(L) == n:
            break
        budget -= 1
        # Cut discovered: drop every edge from x to an oppositely
        # labelled known vertex and record both endpoints as cut vertices.
        to_remove = []
        for e in g.get_out_edges(x):
            if known_labels[e[1]] > -np.inf and known_labels[e[1]] != known_labels[x]:
                to_remove.append(e)
                cut_vertices.add(e[0])
                cut_vertices.add(e[1])
        for e in to_remove:
            g.remove_edge(g.edge(e[0], e[1]))
            removed_edges.append(e)
        # Next query: bisect the shortest oppositely-labelled path, else random.
        mid_point = mssp(g, weight_prop, L, known_labels)
        if mid_point is not None:
            x = int(mid_point)
        else:
            x = np.random.choice(list(set(range(n)).difference(L)))
        queries.append(list(L))
        prediction = label_propagation(W, known_labels, labels, use_adjacency=use_adjacency)
        # NOTE(review): this lambda's parameter shadows the query vertex x
        # (harmless here, but easy to misread).
        np.set_printoptions(formatter={'float': lambda x: "{0:0.2f}".format(x)})
        # Fraction of the larger class = accuracy of a constant predictor.
        larger_class = max(np.where(labels == labels[0])[0].size,
                           labels.size - np.where(labels == labels[0])[0].size) / labels.size
        acc = np.sum(prediction == labels) / labels.size
        accs.append(acc)
        print("labels: %2d/%2d (%0.2f), cut_vertices: %2d/%2d (%0.2f), accuracy: %0.2f, larger_class: %0.2f" % (
            total_budget - budget, total_budget, (total_budget - budget) / total_budget, len(cut_vertices),
            len(true_cut),
            len(cut_vertices) / len(true_cut), acc, larger_class))
        # print("accuracy", np.sum(prediction == labels) / labels.size)
        # Stop early once the whole cut has been discovered.
        if len(cut_vertices) == len(true_cut):
            break
    g.add_edge_list(removed_edges)
    return queries, accs
def random_not_s2(g: graph_tool.Graph, weight_prop: graph_tool.EdgePropertyMap, labels, budget=20, use_adjacency=False, starting_vertex=None):
    """Random-querying baseline with the same bookkeeping/printing as s2.

    The mssp bisection call is commented out, so every query after the
    first is drawn uniformly at random from the unlabelled vertices; no
    edges are removed. Returns (queries, accs) like ``s2``.
    """
    L = set()  # queried (labelled) vertices
    n = g.num_vertices()
    # -inf marks "label unknown".
    known_labels = -np.ones(n) * np.inf
    W = graph_tool.topology.shortest_distance(g, weights=weight_prop).get_2d_array(range(n))  # original distance map
    if starting_vertex is None:
        x = np.random.choice(list(set(range(n)).difference(L)))
    else:
        x = starting_vertex
    true_cut = get_cut_vertices(g, labels)
    # NOTE(review): cut_vertices is never filled here, so the early-exit
    # comparison below only fires when the true cut is empty.
    cut_vertices = set()
    total_budget = budget
    queries = []
    removed_edges = []
    accs = []
    while budget > 0:
        known_labels[x] = labels[x]
        L.add(x)
        if len(L) == n:
            break
        budget -= 1
        mid_point = None#mssp(g, weight_prop, L, known_labels)
        if mid_point is not None:
            x = int(mid_point)
        else:
            # Random baseline: pick any still-unlabelled vertex.
            x = np.random.choice(list(set(range(n)).difference(L)))
        queries.append(list(L))
        prediction = label_propagation(W, known_labels, labels, use_adjacency=use_adjacency)
        np.set_printoptions(formatter={'float': lambda x: "{0:0.2f}".format(x)})
        # Fraction of the larger class = accuracy of a constant predictor.
        larger_class = max(np.where(labels == labels[0])[0].size,
                           labels.size - np.where(labels == labels[0])[0].size) / labels.size
        acc = np.sum(prediction == labels) / labels.size
        accs.append(acc)
        print("labels: %2d/%2d (%0.2f), cut_vertices: %2d/%2d (%0.2f), accuracy: %0.2f, larger_class: %0.2f" % (
            total_budget - budget, total_budget, (total_budget - budget) / total_budget, len(cut_vertices),
            len(true_cut),
            len(cut_vertices) / len(true_cut), acc, larger_class))
        # print("accuracy", np.sum(prediction == labels) / labels.size)
        if len(cut_vertices) == len(true_cut):
            break
    g.add_edge_list(removed_edges)
    return queries, accs
| [
"maximilian.thiessen@tuwien.ac.at"
] | maximilian.thiessen@tuwien.ac.at |
3d771670cf4e1f444d8547474dd5ddcfff23f50a | e6af27b11dc53f61f04ce0fa4761298c840b91b1 | /demo.py | 3ee73aeff055e47b0f1834bea8af54cbb0535e93 | [] | no_license | rcsevinc/hw1 | a07822a6a0af5bb7fac6d7521455b2cedaec2e9d | 7124182dfa72daffda92ef4a804dd7ddc8b3af2f | refs/heads/master | 2020-02-26T15:49:55.488678 | 2016-10-13T17:13:36 | 2016-10-13T17:13:36 | 70,799,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | from flask import Flask
from flask import render_template
from flask import request
from algorithm import *
import yaml
app = Flask(__name__)
import logging
logging.basicConfig(filename='example.log',level=logging.DEBUG)
@app.route('/')
def hello_world():
return 'Hello, World!'
@app.route('/compute', methods=['GET', 'POST'])
def compute():
if request.method == 'GET':
return render_template('compute.html')
else:
input1 = request.form['input1']
app.logger.debug(input1)
print 'input1: ' + input1
input2 = request.form['input2']
app.logger.debug(input2)
print 'input2: ' + input2
input3 = request.form['input3']
app.logger.debug(input3)
print 'input3: ' + input3
yamlInput1 = yaml.safe_load(input1)
app.logger.debug(yamlInput1)
print 'yamlInput1: ' + str(yamlInput1)
print yamlInput1
result = search(yamlInput1, input2, input3)
print result
return render_template('compute.html', result=result)
| [
"rcsevinc@gmail.com"
] | rcsevinc@gmail.com |
3101dbfa94b6660b8fac02963f5b7480e805d946 | 5a612067f77ae86eea36142460e9cfae5d9e64ee | /.zsh/gitstatus.py | 964fc650f737b12292b546f5ca616bae09924f0c | [
"MIT"
] | permissive | snehesht/dotfiles | d14d9d3622506c003002445b963087413e1ae591 | aa0181ac35517c8217facd4e97ad66610a334a6d | refs/heads/master | 2021-09-28T05:59:50.195167 | 2021-09-17T23:23:49 | 2021-09-17T23:23:49 | 72,886,356 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,803 | py | #!/usr/bin/env python
from __future__ import print_function
import sys
import re
import shlex
from subprocess import Popen, PIPE, check_output
def get_hash():
    """Return the abbreviated (%h) hash of the current HEAD commit."""
    raw = check_output(shlex.split('git log -1 --format="%h"'))
    return raw.decode('utf-8').strip()
def get_tagname_or_hash():
    """Return 'tags/<name>' if a tag points at HEAD, else the short hash.

    Parses `git log -1 --format="%h%d"`, whose output looks like
    `abc1234 (HEAD -> main, tag: v1.0)`. Returns None if neither part
    could be extracted.
    """
    cmd = 'git log -1 --format="%h%d"'
    output = check_output(shlex.split(cmd)).decode('utf-8').strip()
    hash_, tagname = None, None
    # get hash: everything before the trailing "(...)" ref list
    # (the -1 also strips the separating space before the paren).
    m = re.search('\(.*\)$', output)
    if m:
        hash_ = output[:m.start()-1]
    # get tagname: the text between "tag: " and the next ',' or ')'
    m = re.search('tag: .*[,\)]', output)
    if m:
        tagname = 'tags/' + output[m.start()+len('tag: '): m.end()-1]
    if tagname:
        return tagname
    elif hash_:
        return hash_
    return None
# `git status --porcelain --branch` can collect all information:
# branch, remote_branch, untracked, staged, changed, conflicts, ahead, behind
po = Popen(['git', 'status', '--porcelain', '--branch'], stdout=PIPE, stderr=PIPE)
stdout, sterr = po.communicate()
if po.returncode != 0:
    sys.exit(0)  # Not a git repository

# collect git status information
untracked, staged, changed, conflicts = [], [], [], []
ahead, behind = 0, 0
# Porcelain lines: two status chars (index/worktree) then the payload.
status = [(line[0], line[1], line[2:]) for line in stdout.decode('utf-8').splitlines()]
for st in status:
    if st[0] == '#' and st[1] == '#':
        # The "## branch...remote [ahead N, behind M]" header line.
        if re.search('Initial commit on', st[2]):
            branch = st[2].split(' ')[-1]
        elif re.search('no branch', st[2]):  # detached HEAD status
            branch = get_tagname_or_hash()
        elif len(st[2].strip().split('...')) == 1:
            # Local branch with no upstream configured.
            branch = st[2].strip()
        else:
            # current and remote branch info
            branch, rest = st[2].strip().split('...')
            if len(rest.split(' ')) == 1:
                # Upstream exists but no divergence to report.
                # remote_branch = rest.split(' ')[0]
                pass
            else:
                # ahead or behind, formatted as "[ahead N, behind M]"
                divergence = ' '.join(rest.split(' ')[1:])
                divergence = divergence.lstrip('[').rstrip(']')
                for div in divergence.split(', '):
                    if 'ahead' in div:
                        ahead = int(div[len('ahead '):].strip())
                    elif 'behind' in div:
                        behind = int(div[len('behind '):].strip())
    elif st[0] == '?' and st[1] == '?':
        untracked.append(st)
    else:
        if st[1] == 'M':
            changed.append(st)
        if st[0] == 'U':
            conflicts.append(st)
        elif st[0] != ' ':
            staged.append(st)

# Space-separated summary consumed by the shell prompt:
# branch hash ahead behind staged conflicts changed untracked
out = ' '.join([
    branch,
    get_hash(),
    str(ahead),
    str(behind),
    str(len(staged)),
    str(len(conflicts)),
    str(len(changed)),
    str(len(untracked)),
])
print(out, end='')
| [
"mail@snehesh.me"
] | mail@snehesh.me |
95357fa5d7cf18a24347b9bda601d56234546e34 | 69b72b3c9c01db229733f9a217ed0a109e916108 | /ex03-algorithm-selection/tests/test_hybrid_models.py | 687d59bf06e3b4c61d707155f43877633e19e380 | [
"MIT"
] | permissive | shushu-qin/automl-hw | 113bf698ff886374877269582b72a2ff6119f372 | 6a810a641c297ecd1d222f912274ff2654a42e99 | refs/heads/main | 2023-08-17T18:29:24.498582 | 2021-10-21T12:46:53 | 2021-10-21T12:46:53 | 409,740,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | import unittest
import logging
from src.aslib import select, get_stats
class TestSelectionHybridModels(unittest.TestCase):
    # Toy-data test for the non-individual (hybrid) algorithm-selection model.

    def setUp(self):  # This Method is executed once before each test
        logging.basicConfig(level=logging.DEBUG)
        # Rows: [instance, repetition, algorithm, runtime, status].
        # NOTE(review): column meanings inferred from ASlib conventions --
        # confirm against src.aslib.select's expected schema.
        data = [['a', 1., 'A', 1., 'ok'],
                ['a', 1., 'B', 3., 'ok'],
                ['a', 1., 'C', 4., 'timeout'],
                ['b', 1., 'A', 2., 'ok'],
                ['b', 1., 'B', 1., 'ok'],
                ['b', 1., 'C', 4., 'timeout'],
                ['c', 1., 'A', 3., 'ok'],
                ['c', 1., 'B', 1., 'ok'],
                ['c', 1., 'C', 4., 'timeout'],
                ['d', 1., 'A', 1., 'ok'],
                ['d', 1., 'B', 3., 'ok'],
                ['d', 1., 'C', 4., 'timeout'],
                ['e', 1., 'A', 1., 'ok'],
                ['e', 1., 'B', 4., 'timeout'],
                ['e', 1., 'C', 0., 'ok'],
                ['f', 1., 'A', 5., 'timeout'],
                ['f', 1., 'B', 3., 'ok'],
                ['f', 1., 'C', 0., 'ok']]
        # One feature per instance; chosen so it perfectly predicts the
        # best algorithm (A for 0, B for 1, C for 2).
        features = [['a', 0],
                    ['b', 1],
                    ['c', 1],
                    ['d', 0],
                    ['e', 2],
                    ['f', 2]]
        # Cross-validation assignment: [instance, repetition, fold].
        cv = [['a', 1, 1],
              ['b', 1, 1],
              ['c', 1, 2],
              ['d', 1, 2],
              ['e', 1, 3],
              ['f', 1, 3]]
        self.data = data
        self.features = features
        self.cv = cv

    def test_toy_data_simple(self):
        """
        With this simple toy data it should be easy to overfit such that we get oracle performance
        :return:
        """
        m, selection = select(self.data, self.features, self.cv, 4, 2, None, None, individual=False)
        o, s = get_stats(self.data, 4, 2)
        print(o, m, s)
        # Model score must lie between oracle (o) and single-best (s).
        self.assertTrue(o <= m <= s)
        for feature, sel in zip(self.features, selection):  # selection should be perfectly matched to feature
            self.assertEqual(feature[1], sel)
# Feel free to add more tests
| [
"s.qin-1@tudelft.com"
] | s.qin-1@tudelft.com |
ece41af7422e8232631b6226812697070bf3291c | ab3aa69dcaee04ad1ee3f0c4b5086c02f759fa4c | /setup.py | dbb17a87378e6c2de407b53d3b7e6fd0f4f7c75c | [] | no_license | akshaygh0sh/ChessAI | 0030a94e6516c4b92f960f90badc1aebcc61ff2e | 7674ff5c3e73c8d4d847faa9162c0ca3a3f955d6 | refs/heads/main | 2023-06-24T01:09:58.543150 | 2021-07-27T19:37:52 | 2021-07-27T19:37:52 | 339,857,370 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from setuptools import setup
from Cython.Build import cythonize
import numpy as np
setup(
ext_modules = cythonize("ChessGame.pyx", language_level = "3", annotate = True),
include_dirs = [np.get_include()],
) | [
"akshayghosh@ucsb.edu"
] | akshayghosh@ucsb.edu |
afc625c1e0d85c204130fec0ec4348513bb6c180 | db8692575378366957b246a5b5938f67de936386 | /2021/feb/5feb/chef_meetings_codechef.py | 360f3315f7013ff3c4aabecde5f7d98ceb10e7f3 | [] | no_license | adarhp0/coding | 24f474f2a3ef4ef578e33509dc57514563929956 | 63db62efdcdeaae934066c337437afcf52a6e28c | refs/heads/master | 2021-07-10T20:04:54.375292 | 2021-04-03T15:11:06 | 2021-04-03T15:11:06 | 242,801,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | def time_score(ctime):
chef_am_pm = 0
if ctime[6] == "A":
chef_am_pm = 0
else:
chef_am_pm = 12*60
chef_hh = int(ctime[:2]) % 12
chef_mm = int(ctime[3:5])
chef_score = chef_am_pm+(chef_hh*60)+chef_mm
return chef_score
# For each test case: read the chef's free time, then for each friend an
# availability interval "HH:MM XM-HH:MM XM"; print 1 if the chef's time
# falls inside it, else 0 (all answers for a test case on one line).
tes = int(input())
for t in range(tes):
    chef_time = input()
    fn = int(input())
    chef_score = time_score(chef_time)
    for f in range(fn):
        fr_time = input()
        # Fixed layout: chars 0-6 are the lower bound, 9 onward the upper.
        fr_time_lower = fr_time[:7]
        fr_time_upper = fr_time[9:]
        fr_l_score = time_score(fr_time_lower)
        fr_u_score = time_score(fr_time_upper)
        if fr_l_score <= chef_score <= fr_u_score:
            print("1", end="")
        else:
            print("0", end="")
    print()
| [
"adarshahp0@gmail.com"
] | adarshahp0@gmail.com |
ca5fb4440d58f32f5c49783d4b7a5aa445777168 | 430b6cc9acf78e68af427dc48822117e16012484 | /P9.py | 1598c37a5790c323d055b83fe2c37bb4b703aaf0 | [] | no_license | ManishBhat/Project-Euler-solutions-in-Python | 86da08e31b3c09b39eb5a6fededf0ef5229b8239 | ae982f4bd72adf6aafb9881c4b63f3244575287e | refs/heads/master | 2023-01-14T02:07:12.745176 | 2020-11-02T05:11:04 | 2020-11-02T05:11:04 | 233,341,400 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 25 02:03:02 2019
@author: manis
"""
n=1000
for i in range(1,n):
for j in range(i,n-i):
k=n-i-j
if k*k==i*i+j*j:
print(i,j,k)
print(i*j*k) | [
"manish.bhat@gmail.com"
] | manish.bhat@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.